file_name
stringlengths
3
137
prefix
stringlengths
0
918k
suffix
stringlengths
0
962k
middle
stringlengths
0
812k
upsta3.rs
#[doc = "Register `UPSTA3` reader"] pub struct R(crate::R<UPSTA3_SPEC>); impl core::ops::Deref for R { type Target = crate::R<UPSTA3_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<UPSTA3_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<UPSTA3_SPEC>) -> Self { R(reader) } } #[doc = "Field `RXINI` reader - Received IN Data Interrupt"] pub struct RXINI_R(crate::FieldReader<bool, bool>); impl RXINI_R { pub(crate) fn new(bits: bool) -> Self { RXINI_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for RXINI_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TXOUTI` reader - Transmitted OUT Data Interrupt"] pub struct TXOUTI_R(crate::FieldReader<bool, bool>); impl TXOUTI_R { pub(crate) fn new(bits: bool) -> Self { TXOUTI_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for TXOUTI_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TXSTPI` reader - Transmitted SETUP Interrupt"] pub struct TXSTPI_R(crate::FieldReader<bool, bool>); impl TXSTPI_R { pub(crate) fn new(bits: bool) -> Self { TXSTPI_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for TXSTPI_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PERRI` reader - Pipe Error Interrupt"] pub struct PERRI_R(crate::FieldReader<bool, bool>); impl PERRI_R { pub(crate) fn new(bits: bool) -> Self { PERRI_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PERRI_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `NAKEDI` reader - NAKed Interrupt"] pub struct NAKEDI_R(crate::FieldReader<bool, bool>); impl NAKEDI_R { pub(crate) fn new(bits: bool) -> Self { NAKEDI_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for 
NAKEDI_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `ERRORFI` reader - Errorflow Interrupt"] pub struct ERRORFI_R(crate::FieldReader<bool, bool>); impl ERRORFI_R { pub(crate) fn new(bits: bool) -> Self { ERRORFI_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for ERRORFI_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RXSTALLDI` reader - Received STALLed Interrupt"] pub struct RXSTALLDI_R(crate::FieldReader<bool, bool>); impl RXSTALLDI_R { pub(crate) fn new(bits: bool) -> Self { RXSTALLDI_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for RXSTALLDI_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DTSEQ` reader - Data Toggle Sequence"] pub struct DTSEQ_R(crate::FieldReader<u8, u8>); impl DTSEQ_R { pub(crate) fn new(bits: u8) -> Self { DTSEQ_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for DTSEQ_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RAMACERI` reader - Ram Access Error Interrupt"] pub struct RAMACERI_R(crate::FieldReader<bool, bool>); impl RAMACERI_R { pub(crate) fn new(bits: bool) -> Self { RAMACERI_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for RAMACERI_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `NBUSYBK` reader - Number of Busy Bank"] pub struct NBUSYBK_R(crate::FieldReader<u8, u8>); impl NBUSYBK_R { pub(crate) fn new(bits: u8) -> Self { NBUSYBK_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for NBUSYBK_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `CURRBK` reader - Current Bank"] pub struct 
CURRBK_R(crate::FieldReader<u8, u8>); impl CURRBK_R { pub(crate) fn new(bits: u8) -> Self { CURRBK_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for CURRBK_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl R { #[doc = "Bit 0 - Received IN Data Interrupt"] #[inline(always)] pub fn rxini(&self) -> RXINI_R { RXINI_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Transmitted OUT Data Interrupt"] #[inline(always)] pub fn txouti(&self) -> TXOUTI_R { TXOUTI_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Transmitted SETUP Interrupt"] #[inline(always)] pub fn txstpi(&self) -> TXSTPI_R { TXSTPI_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Pipe Error Interrupt"] #[inline(always)] pub fn perri(&self) -> PERRI_R { PERRI_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - NAKed Interrupt"] #[inline(always)] pub fn nakedi(&self) -> NAKEDI_R { NAKEDI_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Errorflow Interrupt"] #[inline(always)] pub fn errorfi(&self) -> ERRORFI_R { ERRORFI_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Received STALLed Interrupt"] #[inline(always)] pub fn rxstalldi(&self) -> RXSTALLDI_R { RXSTALLDI_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bits 8:9 - Data Toggle Sequence"] #[inline(always)] pub fn dtseq(&self) -> DTSEQ_R { DTSEQ_R::new(((self.bits >> 8) & 0x03) as u8) } #[doc = "Bit 10 - Ram Access Error Interrupt"] #[inline(always)] pub fn ramaceri(&self) -> RAMACERI_R { RAMACERI_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bits 12:13 - Number of Busy Bank"] #[inline(always)] pub fn nbusybk(&self) -> NBUSYBK_R { NBUSYBK_R::new(((self.bits >> 12) & 0x03) as u8) } #[doc = "Bits 14:15 - Current Bank"] #[inline(always)] pub fn currbk(&self) -> CURRBK_R { CURRBK_R::new(((self.bits >> 14) & 0x03) as u8) } } #[doc = "Pipe Status Register\n\nThis register you can [`read`](crate::generic::Reg::read). 
See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [upsta3](index.html) module"] pub struct
; impl crate::RegisterSpec for UPSTA3_SPEC { type Ux = u32; } #[doc = "`read()` method returns [upsta3::R](R) reader structure"] impl crate::Readable for UPSTA3_SPEC { type Reader = R; } #[doc = "`reset()` method sets UPSTA3 to value 0"] impl crate::Resettable for UPSTA3_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
UPSTA3_SPEC
Account.js
/* Copyright 2019-present OmiseGO Pte Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ import React, { useMemo, useCallback } from 'react'; import { useDispatch, useSelector } from 'react-redux'; import { isEqual } from 'lodash'; import truncate from 'truncate-middle'; import { Send, MergeType } from '@material-ui/icons'; import { selectLoading } from 'selectors/loadingSelector'; import { selectIsSynced } from 'selectors/statusSelector'; import { selectChildchainBalance, selectRootchainBalance } from 'selectors/balanceSelector'; import { selectPendingExits } from 'selectors/exitSelector'; import { selectChildchainTransactions } from 'selectors/transactionSelector'; import { openModal } from 'actions/uiAction'; import Copy from 'components/copy/Copy'; import Button from 'components/button/Button'; import { logAmount } from 'util/amountConvert'; import networkService from 'services/networkService'; import * as styles from './Account.module.scss'; function
() { const dispatch = useDispatch(); const isSynced = useSelector(selectIsSynced); const childBalance = useSelector(selectChildchainBalance, isEqual); const rootBalance = useSelector(selectRootchainBalance, isEqual); const pendingExits = useSelector(selectPendingExits, isEqual); const transactions = useSelector(selectChildchainTransactions, isEqual); const criticalTransactionLoading = useSelector(selectLoading([ 'EXIT/CREATE' ])); const exitPending = useMemo(() => pendingExits.some(i => i.status === 'Pending'), [ pendingExits ]); const transferPending = useMemo(() => transactions.some(i => i.status === 'Pending'), [ transactions ]); const disabled = !childBalance.length || !isSynced || exitPending || transferPending; const handleModalClick = useCallback( (name) => dispatch(openModal(name)), [ dispatch ] ); return ( <div className={styles.Account}> <h2>Account</h2> <div className={styles.wallet}> <span className={styles.address}>{`Wallet Address : ${networkService.account ? truncate(networkService.account, 10, 4, '...') : ''}`}</span> <Copy value={networkService.account} /> </div> <div className={styles.balances}> <div className={styles.box}> <div className={styles.header}> <div className={styles.title}> <span>Balance on Childchain</span> <span>OMG Network</span> </div> <div className={styles.actions}> <div onClick={() => handleModalClick('mergeModal')} className={[ styles.transfer, (disabled || criticalTransactionLoading) ? styles.disabled : '' ].join(' ')} > <MergeType /> <span>Merge</span> </div> <div onClick={() => handleModalClick('transferModal')} className={[ styles.transfer, (disabled || criticalTransactionLoading) ? 
styles.disabled : '' ].join(' ')} > <Send /> <span>Transfer</span> </div> </div> </div> {childBalance.map((i, index) => { return ( <div key={index} className={styles.row}> <div className={styles.token}> <span className={styles.symbol}>{i.name}</span> </div> <span>{logAmount(i.amount, i.decimals)}</span> </div> ); })} <div className={styles.buttons}> <Button onClick={() => handleModalClick('depositModal')} type='primary' disabled={!isSynced} > DEPOSIT </Button> <Button onClick={() => handleModalClick('exitModal')} type='secondary' disabled={disabled} > EXIT </Button> </div> </div> <div className={styles.box}> <div className={styles.header}> <div className={styles.title}> <span>Balance on Rootchain</span> <span>Ethereum Network</span> </div> </div> {rootBalance.map((i, index) => { return ( <div key={index} className={styles.row}> <div className={styles.token}> <span className={styles.symbol}>{i.name}</span> </div> <span>{logAmount(i.amount, i.decimals)}</span> </div> ); })} </div> </div> </div> ); } export default React.memo(Account);
Account
day_15.py
from __future__ import annotations import itertools from dataclasses import dataclass from typing import List, Optional import heapq import time import cProfile @dataclass class Edge: """Not to be used directly - does not validate nodes build inverse relationships. Use the node methods.""" source: Node target: Node cost: int def __init__(self, source: Node, target: Node, cost: int): self.source = source self.target = target self.cost = cost @dataclass class Node: name: str x: int y: int incoming_cost: int edges: Optional[List[Edge]] visited: bool tentative_distance: int previous: Optional[Node] def __init__(self, name: str, incoming_cost: int, x: int, y: int):
def add_edge(self, edge: Edge): self.edges.append(edge) def add_neighbor(self, target: Node): self.edges.append(Edge(self, target, self.incoming_cost)) def adjacent_nodes(self): return [n.target for n in self.edges] def set_previous(self, node: Node): self.previous = node def heuristic(a: Node, b: Node) -> int: return abs(a.x - b.x) + abs(a.y - b.y) def spf(start_node: Node, target_node: Node, all_nodes: List[Node]): start_node.tentative_distance = 0 _tiebreaker = itertools.count() unvisited_queue = [(start_node.tentative_distance, next(_tiebreaker), start_node)] heapq.heapify(unvisited_queue) while len(unvisited_queue): # Pops a node with the smallest distance current_node = heapq.heappop(unvisited_queue)[2] current_node.visited = True for next_node in current_node.adjacent_nodes(): if next_node.visited: continue new_distance = ( current_node.tentative_distance + next_node.incoming_cost ) # + heuristic(next_node, target_node) if new_distance < next_node.tentative_distance: next_node.tentative_distance = new_distance next_node.set_previous(current_node) heapq.heappush(unvisited_queue, (next_node.tentative_distance, next(_tiebreaker), next_node)) if next_node.name == target_node.name: return def spf_backtrace(node: Node, path: List) -> None: current_node = node while True: if current_node.previous: path.append(current_node) current_node = current_node.previous else: return def build_node_relationships(grid: List[List[Node]]) -> None: # Build Neighbor relationships - All rows are of equal length height = len(grid) - 1 width = len(grid[0]) - 1 print(f"Grid is {width + 1} x {height + 1}") for y in range(0, len(grid)): for x in range(0, len(grid[0])): # Left Top Corner node = grid[y][x] if x == 0 and y == 0: node.add_neighbor(grid[y][x + 1]) # > node.add_neighbor(grid[y + 1][x]) # V # Left Side elif x == 0 and y != 0 and y < height: node.add_neighbor(grid[y][x + 1]) # > node.add_neighbor(grid[y + 1][x]) # V node.add_neighbor(grid[y - 1][x]) # ^ # Left Bottom Corner elif 
x == 0 and y == height: node.add_neighbor(grid[y][x + 1]) # > node.add_neighbor(grid[y - 1][x]) # ^ # Bottom Side elif x != 0 and x < width and y == height: node.add_neighbor(grid[y][x + 1]) # > node.add_neighbor(grid[y - 1][x]) # ^ node.add_neighbor(grid[y][x - 1]) # < # Right Bottom Corner elif x == width and y == height: node.add_neighbor(grid[y][x - 1]) # < node.add_neighbor(grid[y - 1][x]) # ^ # Right Side elif x == width and y != 0 and y < height: node.add_neighbor(grid[y][x - 1]) # < node.add_neighbor(grid[y + 1][x]) # V node.add_neighbor(grid[y - 1][x]) # ^ # Top Side elif x != 0 and x < width and y == 0: node.add_neighbor(grid[y][x + 1]) # > node.add_neighbor(grid[y][x - 1]) # < node.add_neighbor(grid[y + 1][x]) # V # Right Top Corner elif x == width and y == 0: node.add_neighbor(grid[y][x - 1]) # < node.add_neighbor(grid[y + 1][x]) # V # Sweet Gooey Center else: node.add_neighbor(grid[y][x - 1]) # < node.add_neighbor(grid[y + 1][x]) # V node.add_neighbor(grid[y - 1][x]) # ^ node.add_neighbor(grid[y][x + 1]) # > def part_1(): with open("inputs/day_15.txt") as f: cave = f.read() lines = cave.replace("\r", "").split("\n") # Build node objects in a 2d grid - This will not be used by the transversal algo, it's a temporary thing grid = [] y = 0 for line in lines: x = 0 row = [] for cell in list(line): row.append(Node(name=f"{x}, {y}", incoming_cost=int(cell), x=x, y=y)) x += 1 grid.append(row) y += 1 build_node_relationships(grid) all_nodes = [] for row in grid: for cell in row: all_nodes.append(cell) height = len(grid) - 1 width = len(grid[0]) - 1 # Do the stuff. 
spf(start_node=grid[0][0], target_node=grid[height][width], all_nodes=all_nodes) path = [] spf_backtrace(grid[height][width], path) sum = 0 for node in path: sum += node.incoming_cost print(f"Part 1: {sum}") def add_wrap(a: int, b: int) -> int: if a + b > 9: return a + b - 9 else: return a + b def replicate_right(grid: List[List[int]], replicate_count: int) -> List[List[int]]: """Replicates the grid to the right while increasing cells by 1 with wrapping""" new_grid = [] for row in grid: new_row = [] for x in range(0, replicate_count): for cell in row: new_row.append(add_wrap(cell, x)) new_grid.append(new_row) return new_grid def replicate_down(grid: List[List[int]], replicate_count: int) -> List[List[int]]: """Replicates the grid downwards while increasing cells by 1 with wrapping""" new_grid = [] for x in range(0, replicate_count): for row in grid: new_row = [] for cell in row: new_row.append(add_wrap(cell, x)) new_grid.append(new_row) return new_grid def part_2(): with open("inputs/day_15.txt") as f: cave = f.read() lines = cave.replace("\r", "").split("\n") # Build node objects in a 2d grid - This will not be used by the transversal algo, it's a temporary thing grid = [] for line in lines: row = [] for cell in list(line): row.append(int(cell)) grid.append(row) grid = replicate_right(grid, 5) grid = replicate_down(grid, 5) node_grid = [] y = 0 for row in grid: x = 0 new_row = [] for cell in row: new_row.append(Node(name=f"{x},{y}", incoming_cost=cell, x=x, y=y)) x += 1 y += 1 node_grid.append(new_row) build_node_relationships(node_grid) all_nodes = [] for row in node_grid: for cell in row: all_nodes.append(cell) height = len(node_grid) - 1 width = len(node_grid[0]) - 1 # Do the stuff. spf(start_node=node_grid[0][0], target_node=node_grid[height][width], all_nodes=all_nodes) path = [] spf_backtrace(node_grid[width][height], path) sum = 0 for node in path: sum += node.incoming_cost print(f"Part 2: {sum}") part_1() part_2()
self.name = name self.x = x self.y = y self.incoming_cost = incoming_cost self.edges = [] self.visited = False self.tentative_distance = 9999 self.previous = None
node_helper.js
const mqtt = require("mqtt"); const NodeHelper = require("node_helper"); var servers = []; module.exports = NodeHelper.create({ log: function (...args) { if (this.config.logging) { console.log(args); } }, start: function () { console.log(this.name + ": Starting node helper"); this.loaded = false; }, makeServerKey: function (server) { return "" + server.address + ":" + (server.port | ("1883" + server.user)); }, addServer: function (server) { console.log(this.name + ": Adding server: ", server); var serverKey = this.makeServerKey(server); var mqttServer = {}; var foundServer = false; for (i = 0; i < servers.length; i++) { if (servers[i].serverKey === serverKey) { mqttServer = servers[i]; foundServer = true; } } if (!foundServer) { mqttServer.serverKey = serverKey; mqttServer.address = server.address; mqttServer.port = server.port; mqttServer.options = {}; mqttServer.topics = []; mqttServer.payloads = []; if (server.user) mqttServer.options.username = server.user; if (server.password) mqttServer.options.password = server.password; } for (i = 0; i < server.subscriptions.length; i++) { mqttServer.topics.push(server.subscriptions[i].topic); mqttServer.payloads.push(server.subscriptions[i].payload); } servers.push(mqttServer); this.startClient(mqttServer); }, addConfig: function (config) { for (i = 0; i < config.mqttServers.length; i++) { this.addServer(config.mqttServers[i]); } }, startClient: function (server) { console.log(this.name + ": Starting client for: ", server); var self = this; var mqttServer = (server.address.match(/^mqtts?:\/\//) ? "" : "mqtt://") + server.address; if (server.port) { mqttServer = mqttServer + ":" + server.port; } console.log(self.name + ": Connecting to " + mqttServer); server.client = mqtt.connect(mqttServer, server.options); server.client.on("error", function (err) { console.log(self.name + " " + server.serverKey + ": Error: " + err); }); server.client.on("reconnect", function (err) { server.value = "reconnecting"; // Hmmm... 
console.log(self.name + ": " + server.serverKey + " reconnecting"); }); server.client.on("connect", function (connack) { console.log(self.name + " connected to " + mqttServer); console.log(self.name + ": subscribing to " + server.topics); server.client.subscribe(server.topics); }); server.client.on("message", (topic, payload) => { // just to make sure... (but should also work only with subscribe selection) if (server.topics.includes(topic) && server.payloads.includes(payload.toString())) { // add some debug information console.log(self.name + ": MQTT EXIT message detected"); console.log(self.name + ": intentional stopping server"); // kill server process.exit(1); } this.sendSocketNotification("MQTT_PAYLOAD", { serverKey: server.serverKey, topic: topic, value: payload.toString(), time: Date.now(), }); }); }, socketNotificationReceived: function (notification, payload) { var self = this;
self.addConfig(config); self.loaded = true; } }, });
if (notification === "MQTT_CONFIG") { var config = payload;
tcd14_nbytes_mlno.rs
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::TCD14_NBYTES_MLNO { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get() } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct NBYTESR { bits: u32, } impl NBYTESR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } } #[doc = r" Proxy"] pub struct _NBYTESW<'a> { w: &'a mut W, } impl<'a> _NBYTESW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u32) -> &'a mut W { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:31 - Minor Byte Transfer Count"] #[inline] pub fn nbytes(&self) -> NBYTESR { let bits = { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u32 }; NBYTESR { bits } } } impl W { #[doc = r" Reset value of the register"]
#[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:31 - Minor Byte Transfer Count"] #[inline] pub fn nbytes(&mut self) -> _NBYTESW { _NBYTESW { w: self } } }
#[inline] pub fn reset_value() -> W { W { bits: 0 } }
system_test.go
package integrationtests import ( "context" "encoding/json" "fmt" "os/exec" "strings" "testing" "github.com/kubernetes-csi/csi-proxy/client/api/system/v1alpha1" v1alpha1client "github.com/kubernetes-csi/csi-proxy/client/groups/system/v1alpha1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestGetBIOSSerialNumber(t *testing.T) { t.Run("GetBIOSSerialNumber", func(t *testing.T) { client, err := v1alpha1client.NewClient() require.Nil(t, err) defer client.Close() request := &v1alpha1.GetBIOSSerialNumberRequest{} response, err := client.GetBIOSSerialNumber(context.TODO(), request) require.Nil(t, err) require.NotNil(t, response) result, err := exec.Command("wmic", "bios", "get", "serialnumber").Output() require.Nil(t, err) t.Logf("The serial number is %s", response.SerialNumber) resultString := string(result) require.True(t, strings.Contains(resultString, response.SerialNumber)) }) } func TestServiceCommands(t *testing.T)
func assertServiceStarted(t *testing.T, serviceName string) { assertServiceStatus(t, serviceName, "Running") } func assertServiceStopped(t *testing.T, serviceName string) { assertServiceStatus(t, serviceName, "Stopped") } func assertServiceStatus(t *testing.T, serviceName string, status string) { out, err := runPowershellCmd(t, fmt.Sprintf(`Get-Service -Name "%s" | `+ `Select-Object -ExpandProperty Status`, serviceName)) if !assert.NoError(t, err, "Failed getting service out=%s", out) { return } assert.Equal(t, strings.TrimSpace(out), status) }
{ t.Run("GetService", func(t *testing.T) { const ServiceName = "MSiSCSI" client, err := v1alpha1client.NewClient() require.Nil(t, err) defer client.Close() // Make sure service is stopped _, err = runPowershellCmd(t, fmt.Sprintf(`Stop-Service -Name "%s"`, ServiceName)) require.NoError(t, err) assertServiceStopped(t, ServiceName) request := &v1alpha1.GetServiceRequest{Name: ServiceName} response, err := client.GetService(context.TODO(), request) require.NoError(t, err) require.NotNil(t, response) out, err := runPowershellCmd(t, fmt.Sprintf(`Get-Service -Name "%s" `+ `| Select-Object DisplayName, Status, StartType | ConvertTo-Json`, ServiceName)) require.NoError(t, err) var serviceInfo = struct { DisplayName string `json:"DisplayName"` Status uint32 `json:"Status"` StartType uint32 `json:"StartType"` }{} err = json.Unmarshal([]byte(out), &serviceInfo) require.NoError(t, err, "failed unmarshalling json out=%v", out) assert.Equal(t, serviceInfo.Status, uint32(response.Status)) assert.Equal(t, v1alpha1.ServiceStatus_STOPPED, response.Status) assert.Equal(t, serviceInfo.StartType, uint32(response.StartType)) assert.Equal(t, serviceInfo.DisplayName, response.DisplayName) }) t.Run("Stop/Start Service", func(t *testing.T) { const ServiceName = "MSiSCSI" client, err := v1alpha1client.NewClient() require.Nil(t, err) defer client.Close() _, err = runPowershellCmd(t, fmt.Sprintf(`Stop-Service -Name "%s"`, ServiceName)) require.NoError(t, err) assertServiceStopped(t, ServiceName) startReq := &v1alpha1.StartServiceRequest{Name: ServiceName} startResp, err := client.StartService(context.TODO(), startReq) assert.NoError(t, err) assert.NotNil(t, startResp) assertServiceStarted(t, ServiceName) stopReq := &v1alpha1.StopServiceRequest{Name: ServiceName} stopResp, err := client.StopService(context.TODO(), stopReq) assert.NoError(t, err) assert.NotNil(t, stopResp) assertServiceStopped(t, ServiceName) }) }
network_suite_test.go
package network_test import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "testing" ) func TestNetwork(t *testing.T)
{ RegisterFailHandler(Fail) RunSpecs(t, "PCF Dev Network Suite") }
test.py
print('a b'.split('.s'))
t140_test_at_root_in_unknown_directive.rs
//! Tests auto-converted from "sass-spec/spec/libsass/at-root/140_test_at_root_in_unknown_directive.hrx" #[test] fn test()
{ assert_eq!( crate::rsass( "@fblthp {\ \n .foo {\ \n @at-root .bar {a: b}\ \n }\ \n}\ \n" ) .unwrap(), "@fblthp {\ \n .bar {\ \n a: b;\ \n }\ \n}\ \n" ); }
pods.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package pod import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" clientset "k8s.io/client-go/kubernetes" api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/kubelet/types" ) // checkLatencySensitiveResourcesForAContainer checks if there are any latency sensitive resources like GPUs. func checkLatencySensitiveResourcesForAContainer(rl v1.ResourceList) bool { if rl == nil { return false } for rName := range rl { if rName == v1.ResourceNvidiaGPU { return true } // TODO: Add support for other high value resources like hugepages etc. once kube is rebased to 1.8. } return false } // IsLatencySensitivePod checks if a pod consumes high value devices like GPUs, hugepages or when cpu pinning enabled. func IsLatencySensitivePod(pod *v1.Pod) bool { for _, container := range pod.Spec.Containers { resourceList := container.Resources.Requests if checkLatencySensitiveResourcesForAContainer(resourceList) { return true } } return false } // IsEvictable checks if a pod is evictable or not. 
func IsEvictable(pod *v1.Pod, evictLocalStoragePods bool) bool { ownerRefList := OwnerRef(pod) //MirrorPod(Damonset在apiserver中的镜像,不能删除), if IsMirrorPod(pod) || (!evictLocalStoragePods && IsPodWithLocalStorage(pod)) || len(ownerRefList) == 0 || IsDaemonsetPod(ownerRefList) || IsCriticalPod(pod) { return false } return true } // ListEvictablePodsOnNode returns the list of evictable pods on node. func ListEvictablePodsOnNode(client clientset.Interface, node *v1.Node, evictLocalStoragePods bool) ([]*v1.Pod, error) { pods, err := ListPodsOnANode(client, node) if err != nil { return []*v1.Pod{}, err } evictablePods := make([]*v1.Pod, 0) for _, pod := range pods { if !IsEvictable(pod, evictLocalStoragePods) { continue } else { evictablePods = append(evictablePods, pod) } } return evictablePods, nil } func ListPodsOnANode(client clientset.Interface, node *v1.Node) ([]*v1.Pod, error) { fieldSelector, err := fields.ParseSelector("spec.nodeName=" + node.Name + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed)) if err != nil { return []*v1.Pod{}, err } podList, err := client.CoreV1().Pods(v1.NamespaceAll).List( metav1.ListOptions{FieldSelector: fieldSelector.String()}) if err != nil { return []*v1.Pod{}, err } pods := make([]*v1.Pod, 0) for i := range podList.Items { pods = append(pods, &podList.Items[i]) } return pods, nil } func IsCriticalPod(pod *v1.Pod) bool { return types.IsCriticalPod(pod) } func IsBestEffortPod(pod *v1.Pod) bool { return qos.GetPodQOS(pod) == v1.PodQOSBestEffort } func IsBurstablePod(pod *v1.Pod) bool { return qos.GetPodQ
od(pod *v1.Pod) bool { return qos.GetPodQOS(pod) == v1.PodQOSGuaranteed } func IsDaemonsetPod(ownerRefList []metav1.OwnerReference) bool { for _, ownerRef := range ownerRefList { if ownerRef.Kind == "DaemonSet" { return true } } return false } // IsMirrorPod checks whether the pod is a mirror pod. func IsMirrorPod(pod *v1.Pod) bool { _, found := pod.ObjectMeta.Annotations[types.ConfigMirrorAnnotationKey] return found } func IsPodWithLocalStorage(pod *v1.Pod) bool { for _, volume := range pod.Spec.Volumes { if volume.HostPath != nil || volume.EmptyDir != nil { return true } } return false } // OwnerRef returns the ownerRefList for the pod. func OwnerRef(pod *v1.Pod) []metav1.OwnerReference { return pod.ObjectMeta.GetOwnerReferences() }
OS(pod) == v1.PodQOSBurstable } func IsGuaranteedP
build.rs
use twine::build_translations; fn
() { println!("cargo:rerun-if-changed=build.rs"); build_translations(&["./src/i18n/localization.ini"], "i18n.rs").unwrap(); }
main
event.py
# # Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_service import service from aodh.evaluator import event from aodh import messaging from aodh import storage LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('event_alarm_topic', default='alarm.all', deprecated_group='DEFAULT', help='The topic that aodh uses for event alarm evaluation.'), cfg.IntOpt('batch_size', default=1, help='Number of notification messages to wait before ' 'dispatching them.'), cfg.IntOpt('batch_timeout', default=None, help='Number of seconds to wait before dispatching samples ' 'when batch_size is not reached (None means indefinitely).'), ] class EventAlarmEndpoint(object): def __init__(self, evaluator): self.evaluator = evaluator def sample(self, notifications):
class EventAlarmEvaluationService(service.Service):
    """oslo.service Service that evaluates event-driven alarms.

    On start it opens a storage connection, builds an event-alarm
    evaluator, and subscribes a batch notification listener to the
    configured event-alarm topic.
    """

    def __init__(self, conf):
        super(EventAlarmEvaluationService, self).__init__()
        self.conf = conf

    def start(self):
        """Open storage, create the evaluator and start listening."""
        super(EventAlarmEvaluationService, self).start()
        self.storage_conn = storage.get_connection_from_config(self.conf)
        self.evaluator = event.EventAlarmEvaluator(self.conf)

        # Assemble the batch notification listener from its parts.
        transport = messaging.get_transport(self.conf)
        target = oslo_messaging.Target(
            topic=self.conf.listener.event_alarm_topic)
        endpoint = EventAlarmEndpoint(self.evaluator)
        self.listener = messaging.get_batch_notification_listener(
            transport,
            [target],
            [endpoint],
            False,
            self.conf.listener.batch_size,
            self.conf.listener.batch_timeout)
        self.listener.start()

        # Add a dummy thread to have wait() working
        self.tg.add_timer(604800, lambda: None)

    def stop(self):
        """Stop and drain the listener (if it was started), then the service."""
        listener = getattr(self, 'listener', None)
        if listener:
            listener.stop()
            listener.wait()
        super(EventAlarmEvaluationService, self).stop()
LOG.debug('Received %s messages in batch.', len(notifications)) for notification in notifications: self.evaluator.evaluate_events(notification['payload'])
lib.rs
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { anyhow::{format_err, Error}, fidl_fidl_examples_routing_echo::{self as fecho, EchoMarker as EchoClientStatsMarker}, fidl_fuchsia_data as fdata, fuchsia_async as fasync, fuchsia_component::server as fserver, fuchsia_component_test::{builder::*, mock, Moniker}, futures::{channel::oneshot, lock::Mutex, StreamExt, TryStreamExt}, std::sync::Arc, }; const V1_ECHO_CLIENT_URL: &'static str = "fuchsia-pkg://fuchsia.com/fuchsia-component-test-tests#meta/echo_client.cmx"; const V2_ECHO_CLIENT_ABSOLUTE_URL: &'static str = "fuchsia-pkg://fuchsia.com/fuchsia-component-test-tests#meta/echo_client.cm"; const V2_ECHO_CLIENT_RELATIVE_URL: &'static str = "#meta/echo_client.cm"; const V1_ECHO_SERVER_URL: &'static str = "fuchsia-pkg://fuchsia.com/fuchsia-component-test-tests#meta/echo_server.cmx"; const V2_ECHO_SERVER_ABSOLUTE_URL: &'static str = "fuchsia-pkg://fuchsia.com/fuchsia-component-test-tests#meta/echo_server.cm"; const V2_ECHO_SERVER_RELATIVE_URL: &'static str = "#meta/echo_server.cm"; const DEFAULT_ECHO_STR: &'static str = "Hippos rule!"; #[fasync::run_singlethreaded(test)] async fn protocol_with_uncle_test() -> Result<(), Error> { let (send_echo_server_called, receive_echo_server_called) = oneshot::channel(); let sender = Arc::new(Mutex::new(Some(send_echo_server_called))); let mut builder = RealmBuilder::new().await?; builder .add_component( "echo-server", ComponentSource::mock(move |mock_handles: mock::MockHandles| { Box::pin(echo_server_mock(DEFAULT_ECHO_STR, sender.clone(), mock_handles)) }), ) .await? .add_eager_component( "parent/echo-client", ComponentSource::url(V2_ECHO_CLIENT_ABSOLUTE_URL), ) .await? 
.add_route(CapabilityRoute { capability: Capability::protocol("fidl.examples.routing.echo.Echo"), source: RouteEndpoint::component("echo-server"), targets: vec![RouteEndpoint::component("parent/echo-client")], })? .add_route(CapabilityRoute { capability: Capability::protocol("fuchsia.logger.LogSink"), source: RouteEndpoint::above_root(), targets: vec![ RouteEndpoint::component("echo-server"), RouteEndpoint::component("parent/echo-client"), ], })?; let _child_instance = builder.build().create().await?; receive_echo_server_called.await?; Ok(()) } #[fasync::run_singlethreaded(test)] async fn protocol_with_siblings_test() -> Result<(), Error>
#[fasync::run_singlethreaded(test)] async fn examples() -> Result<(), Error> { // This test exists purely to provide us with live snippets for the realm builder // documentation { // [START add_a_and_b_example] // Create a new RealmBuilder instance, which we will use to define a new realm let mut builder = RealmBuilder::new().await?; builder // Add component `a` to the realm, which will be fetched with a URL .add_component("a", ComponentSource::url("fuchsia-pkg://fuchsia.com/foo#meta/foo.cm")) .await? // Add component `b` to the realm, which will be fetched with a URL .add_component("b", ComponentSource::url("fuchsia-pkg://fuchsia.com/bar#meta/bar.cm")) .await?; // [END add_a_and_b_example] // [START route_from_a_to_b_example] // Add a new route for the protocol capability `fidl.examples.routing.echo.Echo` // from `a` to `b` builder.add_route(CapabilityRoute { capability: Capability::protocol("fidl.examples.routing.echo.Echo"), source: RouteEndpoint::component("a"), targets: vec![RouteEndpoint::component("b")], })?; // [END route_from_a_to_b_example] } { let mut builder = RealmBuilder::new().await?; builder .add_component("a", ComponentSource::url(V2_ECHO_CLIENT_ABSOLUTE_URL)) .await? 
.add_component("b", ComponentSource::url(V2_ECHO_CLIENT_ABSOLUTE_URL)) .await?; // [START route_logsink_example] // Routes `fuchsia.logger.LogSink` from above root to `a` and `b` builder.add_route(CapabilityRoute { capability: Capability::protocol("fuchsia.logger.LogSink"), source: RouteEndpoint::above_root(), targets: vec![RouteEndpoint::component("a"), RouteEndpoint::component("b")], })?; // [END route_logsink_example] } { let mut builder = RealmBuilder::new().await?; builder.add_component("b", ComponentSource::url(V2_ECHO_CLIENT_ABSOLUTE_URL)).await?; // [START route_to_above_root_example] // Adds a route for the protocol capability // `fidl.examples.routing.echo.EchoClientStats` from `b` to the realm's parent builder.add_route(CapabilityRoute { capability: Capability::protocol("fidl.examples.routing.echo.EchoClientStats"), source: RouteEndpoint::component("b"), targets: vec![RouteEndpoint::above_root()], })?; let realm = builder.build(); // [START create_realm] // Creates the realm, and add it to the collection to start its execution let realm_instance = realm.create().await?; // [END create_realm] // [START connect_to_protocol] // Connects to `fidl.examples.routing.echo.EchoClientStats`, which is provided // by `b` in the created realm let echo_client_stats_proxy = realm_instance.root.connect_to_protocol_at_exposed_dir::<EchoClientStatsMarker>()?; // [END connect_to_protocol] // [END route_to_above_root_example] drop(echo_client_stats_proxy); } #[allow(unused_mut)] { let mut builder = RealmBuilder::new().await?; builder.add_component("a/b", ComponentSource::url(V2_ECHO_CLIENT_ABSOLUTE_URL)).await?; // [START mutate_generated_manifest_example] let mut realm = builder.build(); let mut root_manifest = realm.get_decl(&Moniker::root()).await?; // root_manifest is mutated in whatever way is needed realm.set_component(&Moniker::root(), root_manifest).await?; let mut a_manifest = realm.get_decl(&"a".into()).await?; // a_manifest is mutated in whatever way is needed 
realm.set_component(&"a".into(), a_manifest).await?; // [END mutate_generated_manifest_example] } Ok(()) } #[fasync::run_singlethreaded(test)] async fn protocol_with_cousins_test() -> Result<(), Error> { let (send_echo_server_called, receive_echo_server_called) = oneshot::channel(); let sender = Arc::new(Mutex::new(Some(send_echo_server_called))); let mut builder = RealmBuilder::new().await?; builder .add_eager_component( "parent-1/echo-client", ComponentSource::url(V2_ECHO_CLIENT_ABSOLUTE_URL), ) .await? .add_component( "parent-2/echo-server", ComponentSource::mock(move |mock_handles: mock::MockHandles| { Box::pin(echo_server_mock(DEFAULT_ECHO_STR, sender.clone(), mock_handles)) }), ) .await? .add_route(CapabilityRoute { capability: Capability::protocol("fidl.examples.routing.echo.Echo"), source: RouteEndpoint::component("parent-2/echo-server"), targets: vec![RouteEndpoint::component("parent-1/echo-client")], })? .add_route(CapabilityRoute { capability: Capability::protocol("fuchsia.logger.LogSink"), source: RouteEndpoint::above_root(), targets: vec![ RouteEndpoint::component("parent-1/echo-client"), RouteEndpoint::component("parent-2/echo-server"), ], })?; let _child_instance = builder.build().create().await?; receive_echo_server_called.await?; Ok(()) } #[fasync::run_singlethreaded(test)] async fn mock_component_with_a_child() -> Result<(), Error> { let (send_echo_server_called, receive_echo_server_called) = oneshot::channel(); let sender = Arc::new(Mutex::new(Some(send_echo_server_called))); let mut builder = RealmBuilder::new().await?; builder .add_component( "echo-server", ComponentSource::mock(move |mock_handles: mock::MockHandles| { Box::pin(echo_server_mock(DEFAULT_ECHO_STR, sender.clone(), mock_handles)) }), ) .await? .add_eager_component( "echo-server/echo-client", ComponentSource::url(V2_ECHO_CLIENT_ABSOLUTE_URL), ) .await? 
.add_route(CapabilityRoute { capability: Capability::protocol("fidl.examples.routing.echo.Echo"), source: RouteEndpoint::component("echo-server"), targets: vec![RouteEndpoint::component("echo-server/echo-client")], })? .add_route(CapabilityRoute { capability: Capability::protocol("fuchsia.logger.LogSink"), source: RouteEndpoint::above_root(), targets: vec![ RouteEndpoint::component("echo-server"), RouteEndpoint::component("echo-server/echo-client"), ], })?; let _child_instance = builder.build().create().await?; receive_echo_server_called.await?; Ok(()) } #[fasync::run_singlethreaded(test)] async fn relative_echo_realm() -> Result<(), Error> { let mut builder = RealmBuilder::new().await?; builder .add_component(Moniker::root(), ComponentSource::url("#meta/echo_realm.cm")) .await? // This route will result in the imported echo_realm exposing this protocol, whereas before // it only offered it to echo_client .add_route(CapabilityRoute { capability: Capability::protocol("fidl.examples.routing.echo.Echo"), source: RouteEndpoint::component("echo_server"), targets: vec![RouteEndpoint::above_root()], })?; let realm_instance = builder.build().create().await?; let echo_proxy = realm_instance.root.connect_to_protocol_at_exposed_dir::<fecho::EchoMarker>()?; assert_eq!(Some("hello".to_string()), echo_proxy.echo_string(Some("hello")).await?); Ok(()) } #[fasync::run_singlethreaded(test)] async fn altered_echo_client_args() -> Result<(), Error> { let (send_echo_server_called, receive_echo_server_called) = oneshot::channel(); let sender = Arc::new(Mutex::new(Some(send_echo_server_called))); let mut builder = RealmBuilder::new().await?; builder .add_component(Moniker::root(), ComponentSource::url("#meta/echo_realm.cm")) .await? .override_component( "echo_server", ComponentSource::mock(move |mock_handles: mock::MockHandles| { Box::pin(echo_server_mock("Whales rule!", sender.clone(), mock_handles)) }), ) .await? 
// echo_realm already has the offer we need, but we still need to add this route so that // the proper exposes are added to our mock component .add_route(CapabilityRoute { capability: Capability::protocol("fidl.examples.routing.echo.Echo"), source: RouteEndpoint::component("echo_server"), targets: vec![RouteEndpoint::component("echo_client")], })?; // Change the program.args section of the manifest, to alter the string it will try to echo let mut realm = builder.build(); let mut echo_client_decl = realm.get_decl(&"echo_client".into()).await?; for entry in echo_client_decl.program.as_mut().unwrap().info.entries.as_mut().unwrap() { if entry.key.as_str() == "args" { entry.value = Some(Box::new(fdata::DictionaryValue::StrVec(vec![ "Whales".to_string(), "rule!".to_string(), ]))); } } realm.set_component(&"echo_client".into(), echo_client_decl).await?; let _realm_instance = realm.create().await?; receive_echo_server_called.await?; Ok(()) } #[fasync::run_singlethreaded(test)] async fn echo_clients() -> Result<(), Error> { // This test runs a series of echo clients from different sources against a mock echo server, // confirming that each client successfully connects to the server. let (send_echo_client_results, receive_echo_client_results) = oneshot::channel(); let sender = Arc::new(Mutex::new(Some(send_echo_client_results))); let client_sources = vec![ ComponentSource::legacy_url(V1_ECHO_CLIENT_URL), ComponentSource::url(V2_ECHO_CLIENT_ABSOLUTE_URL), ComponentSource::url(V2_ECHO_CLIENT_RELATIVE_URL), ComponentSource::mock(move |h| Box::pin(echo_client_mock(sender.clone(), h))), ]; for client_source in client_sources { let (send_echo_server_called, receive_echo_server_called) = oneshot::channel(); let sender = Arc::new(Mutex::new(Some(send_echo_server_called))); let mut builder = RealmBuilder::new().await?; builder .add_component( "echo-server", ComponentSource::mock(move |h| { Box::pin(echo_server_mock(DEFAULT_ECHO_STR, sender.clone(), h)) }), ) .await? 
.add_eager_component("echo-client", client_source) .await? .add_route(CapabilityRoute { capability: Capability::protocol("fidl.examples.routing.echo.Echo"), source: RouteEndpoint::component("echo-server"), targets: vec![RouteEndpoint::component("echo-client")], })? .add_route(CapabilityRoute { capability: Capability::protocol("fuchsia.logger.LogSink"), source: RouteEndpoint::above_root(), targets: vec![ RouteEndpoint::component("echo-server"), RouteEndpoint::component("echo-client"), ], })?; let _child_instance = builder.build().create().await?; receive_echo_server_called.await?; } receive_echo_client_results.await?; Ok(()) } #[fasync::run_singlethreaded(test)] async fn echo_servers() -> Result<(), Error> { // This test runs a series of echo servers from different sources against a mock echo client, // confirming that the client can successfully connect to and use each server. let (send_echo_server_called, receive_echo_server_called) = oneshot::channel(); let sender = Arc::new(Mutex::new(Some(send_echo_server_called))); let server_sources = vec![ ComponentSource::legacy_url(V1_ECHO_SERVER_URL), ComponentSource::url(V2_ECHO_SERVER_ABSOLUTE_URL), ComponentSource::url(V2_ECHO_SERVER_RELATIVE_URL), ComponentSource::mock(move |h| { Box::pin(echo_server_mock(DEFAULT_ECHO_STR, sender.clone(), h)) }), ]; for server_source in server_sources { let (send_echo_client_results, receive_echo_client_results) = oneshot::channel(); let sender = Arc::new(Mutex::new(Some(send_echo_client_results))); let mut builder = RealmBuilder::new().await?; builder .add_component("echo-server", server_source) .await? .add_eager_component( "echo-client", ComponentSource::mock(move |h| Box::pin(echo_client_mock(sender.clone(), h))), ) .await? .add_route(CapabilityRoute { capability: Capability::protocol("fidl.examples.routing.echo.Echo"), source: RouteEndpoint::component("echo-server"), targets: vec![RouteEndpoint::component("echo-client")], })? 
.add_route(CapabilityRoute { capability: Capability::protocol("fuchsia.logger.LogSink"), source: RouteEndpoint::above_root(), targets: vec![ RouteEndpoint::component("echo-server"), RouteEndpoint::component("echo-client"), ], })?; let _child_instance = builder.build().create().await?; receive_echo_client_results.await?; } receive_echo_server_called.await?; Ok(()) } #[fasync::run_singlethreaded(test)] async fn protocol_with_use_from_url_child_test() -> Result<(), Error> { let (send_echo_server_called, receive_echo_server_called) = oneshot::channel(); let sender = Arc::new(Mutex::new(Some(send_echo_server_called))); let mut builder = RealmBuilder::new().await?; builder .add_eager_component( "echo-client", ComponentSource::mock(move |mock_handles: mock::MockHandles| { let sender = sender.clone(); Box::pin(async move { let echo_proxy = mock_handles.connect_to_service::<fecho::EchoMarker>()?; assert_eq!( Some("hello".to_string()), echo_proxy.echo_string(Some("hello")).await? ); sender.lock().await.take().unwrap().send(()).expect("failed to send results"); Ok(()) }) }), ) .await? .add_component("echo-client/echo-server", ComponentSource::url(V2_ECHO_SERVER_ABSOLUTE_URL)) .await? .add_route(CapabilityRoute { capability: Capability::protocol("fidl.examples.routing.echo.Echo"), source: RouteEndpoint::component("echo-client/echo-server"), targets: vec![RouteEndpoint::component("echo-client")], })? 
.add_route(CapabilityRoute { capability: Capability::protocol("fuchsia.logger.LogSink"), source: RouteEndpoint::AboveRoot, targets: vec![ RouteEndpoint::component("echo-client"), RouteEndpoint::component("echo-client/echo-server"), ], })?; let _child_instance = builder.build().create().await?; receive_echo_server_called.await?; Ok(()) } #[fasync::run_singlethreaded(test)] async fn protocol_with_use_from_mock_child_test() -> Result<(), Error> { let (send_echo_server_called, receive_echo_server_called) = oneshot::channel(); let sender = Arc::new(Mutex::new(Some(send_echo_server_called))); let mut builder = RealmBuilder::new().await?; builder .add_eager_component( "echo-client", ComponentSource::mock(move |mock_handles: mock::MockHandles| { Box::pin(async move { let echo_proxy = mock_handles.connect_to_service::<fecho::EchoMarker>()?; let _ = echo_proxy.echo_string(Some(DEFAULT_ECHO_STR)).await?; Ok(()) }) }), ) .await? .add_component( "echo-client/echo-server", ComponentSource::mock(move |mock_handles: mock::MockHandles| { Box::pin(echo_server_mock(DEFAULT_ECHO_STR, sender.clone(), mock_handles)) }), ) .await? .add_route(CapabilityRoute { capability: Capability::protocol("fidl.examples.routing.echo.Echo"), source: RouteEndpoint::component("echo-client/echo-server"), targets: vec![RouteEndpoint::component("echo-client")], })? .add_route(CapabilityRoute { capability: Capability::protocol("fuchsia.logger.LogSink"), source: RouteEndpoint::AboveRoot, targets: vec![ RouteEndpoint::component("echo-client"), RouteEndpoint::component("echo-client/echo-server"), ], })?; let _child_instance = builder.build().create().await?; receive_echo_server_called.await?; Ok(()) } // [START echo_server_mock] // A mock echo server implementation, that will crash if it doesn't receive anything other than the // contents of `expected_echo_str`. It takes and sends a message over `send_echo_server_called` // once it receives one echo request. 
async fn echo_server_mock(
    expected_echo_string: &'static str,
    send_echo_server_called: Arc<Mutex<Option<oneshot::Sender<()>>>>,
    mock_handles: mock::MockHandles,
) -> Result<(), Error> {
    // Create a new ServiceFs to host FIDL protocols from
    let mut fs = fserver::ServiceFs::new();
    let mut tasks = vec![];

    // Add the echo protocol to the ServiceFs
    fs.dir("svc").add_fidl_service(move |mut stream: fecho::EchoRequestStream| {
        let send_echo_server_called = send_echo_server_called.clone();
        tasks.push(fasync::Task::local(async move {
            while let Some(fecho::EchoRequest::EchoString { value, responder }) =
                stream.try_next().await.expect("failed to serve echo service")
            {
                // Panics (via assert) if the client sends anything other than
                // `expected_echo_string` — intentional for this test mock.
                assert_eq!(Some(expected_echo_string.to_string()), value);
                // Send the received string back to the client
                responder.send(value.as_ref().map(|s| &**s)).expect("failed to send echo response");
                // Take the sender from send_echo_server_called and pass a
                // message through it
                // NOTE(review): `.take().unwrap()` panics on a second echo
                // request — the oneshot is single-use; confirm callers only
                // expect one request per mock instance.
                send_echo_server_called
                    .lock()
                    .await
                    .take()
                    .unwrap()
                    .send(())
                    .expect("failed to send results");
            }
        }));
    });

    // Run the ServiceFs on the outgoing directory handle from the mock handles
    fs.serve_connection(mock_handles.outgoing_dir.into_channel())?;
    fs.collect::<()>().await;
    Ok(())
}
// [END echo_server_mock]

// A mock echo client: connects to the Echo protocol, sends DEFAULT_ECHO_STR
// once, signals completion through `send_echo_client_results`, and errors if
// the reply does not match what was sent.
async fn echo_client_mock(
    send_echo_client_results: Arc<Mutex<Option<oneshot::Sender<()>>>>,
    mock_handles: mock::MockHandles,
) -> Result<(), Error> {
    let echo = mock_handles.connect_to_service::<fecho::EchoMarker>()?;
    let out = echo.echo_string(Some(DEFAULT_ECHO_STR)).await?;
    send_echo_client_results.lock().await.take().unwrap().send(()).expect("failed to send results");
    if Some(DEFAULT_ECHO_STR.to_string()) != out {
        return Err(format_err!("unexpected echo result: {:?}", out));
    }
    Ok(())
}
{ // [START mock_component_example] // Create a new oneshot for passing a message from the echo server function let (send_echo_server_called, receive_echo_server_called) = oneshot::channel(); // Wrap the sender in an Arc, Mutex, and Option so that it can safely be sent // across threads, only interacted with by one thread at a time, and removed // from the mutex to be consumed. let send_echo_server_called = Arc::new(Mutex::new(Some(send_echo_server_called))); // Build a new realm let mut builder = RealmBuilder::new().await?; builder // Add the echo server, which is implemented by the echo_server_mock // function (defined below). Give this function access to the oneshot // created above, along with the mock component's handles .add_component( "a", ComponentSource::mock(move |mock_handles: mock::MockHandles| { Box::pin(echo_server_mock( DEFAULT_ECHO_STR, send_echo_server_called.clone(), mock_handles, )) }), ) .await? // Add the echo client with a URL source .add_eager_component( "b", ComponentSource::url( "fuchsia-pkg://fuchsia.com/fuchsia-component-test-tests#meta/echo_client.cm", ), ) .await? // Route the fidl.examples.routing.echo.Echo protocol from a to b .add_route(CapabilityRoute { capability: Capability::protocol("fidl.examples.routing.echo.Echo"), source: RouteEndpoint::component("a"), targets: vec![RouteEndpoint::component("b")], })? // Route the logsink to `b`, so it can inform us of any issues .add_route(CapabilityRoute { capability: Capability::protocol("fuchsia.logger.LogSink"), source: RouteEndpoint::above_root(), targets: vec![RouteEndpoint::component("b")], })?; // Create the realm let _child_instance = builder.build().create().await?; // Wait for the oneshot we created above to receive a message receive_echo_server_called.await?; // [END mock_component_example] Ok(()) }
skylark-benchpress.js
/** * skylark-benchpress - A version of benchpress.js that ported to running on skylarkjs * @author Hudaokeji, Inc. * @version v0.9.0 * @link https://github.com/skylark-integration/skylark-benchpress/ * @license MIT */ (function(factory,globals) { var define = globals.define, require = globals.require, isAmd = (typeof define === 'function' && define.amd), isCmd = (!isAmd && typeof exports !== 'undefined'); if (!isAmd && !define) { var map = {}; function absolute(relative, base) { if (relative[0]!==".") { return relative; } var stack = base.split("/"), parts = relative.split("/"); stack.pop(); for (var i=0; i<parts.length; i++) { if (parts[i] == ".") continue; if (parts[i] == "..") stack.pop(); else stack.push(parts[i]); } return stack.join("/"); } define = globals.define = function(id, deps, factory) { if (typeof factory == 'function') { map[id] = { factory: factory, deps: deps.map(function(dep){ return absolute(dep,id); }), resolved: false, exports: null }; require(id); } else { map[id] = { factory : null, resolved : true, exports : factory }; } }; require = globals.require = function(id) { if (!map.hasOwnProperty(id)) { throw new Error('Module ' + id + ' has not been defined'); } var module = map[id]; if (!module.resolved) { var args = []; module.deps.forEach(function(dep){ args.push(require(dep)); }) module.exports = module.factory.apply(globals, args) || null; module.resolved = true; } return module.exports; }; } if (!define) { throw new Error("The module utility (ex: requirejs or skylark-utils) is not loaded!"); } factory(define,require); if (!isAmd) { var skylarkjs = require("skylark-langx-ns"); if (isCmd) { module.exports = skylarkjs; } else { globals.skylarkjs = skylarkjs; } } })(function(define,require) { define('skylark-benchpress/benchpress',[ "skylark-langx-ns" ],function (skylark) { var runtime = function () {
/** * Convert null and undefined values to empty strings * @param {any} value * @returns {string} */ function guard(value) { return value == null || Array.isArray(value) && value.length === 0 ? '' : value; } /** * Iterate over an object or array * @param {string[]} obj - Iteratee object / array * @param {function} each - Callback to execute on each item * @return {string} */ function iter(obj, each) { if (!obj || typeof obj !== 'object') { return ''; } var output = ''; var keys = Object.keys(obj); var length = keys.length; for (var i = 0; i < length; i += 1) { var key = keys[i]; output += each(key, i, length, obj[key]); } return output; } /** * Execute a helper * @param {object} context - Base data object * @param {object} helpers - Map of helper functions * @param {string} helperName - Name of helper to execute * @param {any[]} args - Array of arguments * @returns {string} */ function helper(context, helpers, helperName, args) { if (typeof helpers[helperName] !== 'function') { return ''; } try { var out = helpers[helperName].apply(context, args); return out || ''; } catch (e) { return ''; } } /** * Run a compiled template function * @param {object} helpers - Map of helper functions * @param {object} context - Base data object * @param {function} templateFunction - Compiled template function * @returns {string} */ function runtime(helpers, context, templateFunction) { return guard(templateFunction(helpers, context, guard, iter, helper)).toString(); } // polyfill for Promise.try if (typeof Promise.try !== 'function') { Promise.try = { try: function _try(fn) { return new Promise(function (resolve) { return resolve(fn()); }); } }.try; } return runtime; }(); 'use strict'; /** @exports Benchpress */ var Benchpress = {}; Benchpress.runtime = runtime; Benchpress.helpers = {}; /** * Register a helper function * @param {string} name - Helper name * @param {function} fn - Helper function */ Benchpress.registerHelper = function registerHelper(name, fn) { 
Benchpress.helpers[name] = fn; }; // add default escape function for escaping HTML entities var escapeCharMap = { '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#x27;', '`': '&#x60;', '=': '&#x3D;' }; var replaceChar = function replaceChar(c) { return escapeCharMap[c]; }; var escapeChars = /[&<>"'`=]/g; Benchpress.registerHelper('__escape', function (str) { if (str == null) { return ''; } if (!str) { return String(str); } return str.toString().replace(escapeChars, replaceChar); }); Benchpress.cache = {}; Benchpress.globals = {}; /** * Set a global data value * @param {string} key - Property key * @param {Object} value - Property value */ Benchpress.setGlobal = function setGlobal(key, value) { Benchpress.globals[key] = value; }; var assign = Object.assign || jQuery.extend; // eslint-disable-line /** * @private */ Benchpress.addGlobals = function addGlobals(data) { return assign({}, Benchpress.globals, data); }; /** * Clear the template cache */ Benchpress.flush = function flush() { Benchpress.cache = {}; }; // necessary to support both promises and callbacks // can remove when `parse` methods are removed function load(template) { return new Promise(function (resolve, reject) { var promise = Benchpress.loader(template, function (templateFunction) { resolve(templateFunction); }); if (promise && promise.then) { promise.then(resolve, reject); } }); } /** * Fetch and run the given template * @param {string} template - Name of template to fetch * @param {Object} data - Data with which to run the template * @param {string} [block] - Parse only this block in the template * @returns {Promise<string>} - Rendered output */ function render(template, data, block) { data = Benchpress.addGlobals(data || {}); return Promise.try(function () { Benchpress.cache[template] = Benchpress.cache[template] || load(template); return Benchpress.cache[template]; }).then(function (templateFunction) { if (block) { templateFunction = templateFunction.blocks && 
templateFunction.blocks[block]; } if (!templateFunction) { return ''; } return runtime(Benchpress.helpers, data, templateFunction); }); } /** * Alias for {@link render}, but uses a callback * @param {string} template - Name of template to fetch * @param {string} [block] - Render only this block in the template * @param {Object} data - Data with which to run the template * @param {function} callback - callback(output) * * @deprecated - Use {@link render} instead */ function parse(template, block, data, callback) { if (!callback && typeof block === 'object' && typeof data === 'function') { callback = data; data = block; block = null; } if (typeof callback !== 'function') { // Calling parse synchronously with no callback is discontinued throw TypeError('Invalid Arguments: callback must be a function'); } if (!template) { callback(''); return; } render(template, data, block).then(function (output) { return setTimeout(callback, 0, output); }, function (err) { return console.error(err); } // eslint-disable-line no-console ); } Benchpress.render = render; Benchpress.parse = parse; /** * Register a loader function to fetch templates * - `loader(name, callback) => callback(templateFunction)` * - `loader(name) => Promise<templateFunction>` * @param {function} loader */ Benchpress.registerLoader = function registerLoader(loader) { Benchpress.loader = loader; }; return skylark.attach("itg.benchpress",Benchpress); }); define('skylark-benchpress/main',[ "./benchpress" ],function(benchpress) { return benchpress; }); define('skylark-benchpress', ['skylark-benchpress/main'], function (main) { return main; }); },this); //# sourceMappingURL=sourcemaps/skylark-benchpress.js.map
'use strict';
fusion_detecting.py
import cv2 import sys import os import numpy as np import time # Initialize the parameters confThreshold = 0.5 # Confidence threshold nmsThreshold = 0.4 # Non-maximum suppression threshold inpWidth = 416 # Width of network's input image inpHeight = 416 # Height of network's input image starting_time = 0 frame_id = 0 font = cv2.FONT_HERSHEY_PLAIN # Load names of classes classesFile = "coco.names" classes = None with open(classesFile, 'rt') as f: classes = f.read().rstrip('\n').split('\n') # Give the configuration and weight files for the model and load the network using them. modelConfiguration = "yolov3.cfg" modelWeights = "yolov3.weights" net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights) net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV) net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU) inputFile = "presen_T.mp4" inputFile2 = "presen_R.mp4" outputFile = "yolo_out_py.avi" # Open the video file if not os.path.isfile(inputFile): print("Input video file ", inputFile, " doesn't exist") sys.exit(1) cap = cv2.VideoCapture(inputFile) cap2 = cv2.VideoCapture(inputFile2) outputFile = inputFile[:-4] + "_yolo_out_py.avi" # Get the video writer initialized to save the output video vid_writer = cv2.VideoWriter(outputFile, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30, (round(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), round(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))) # Get the names of the output layers def getOutputsNames(net): # Get the names of all the layers in the network layersNames = net.getLayerNames() # Get the names of the output layers, i.e. the layers with unconnected outputs return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()] # Draw the predicted bounding box def drawPred(classId, conf, left, top, right, bottom): # Draw a bounding box.
# Remove the bounding boxes with low confidence using non-maxima suppression def postprocess(frame, outs): frameHeight = frame.shape[0] frameWidth = frame.shape[1] # Scan through all the bounding boxes output from the network and keep only the # ones with high confidence scores. Assign the box's class label as the class with the highest score. classIds = [] confidences = [] boxes = [] for out in outs: for detection in out: scores = detection[5:] classId = np.argmax(scores) confidence = scores[classId] if confidence > confThreshold: center_x = int(detection[0] * frameWidth) center_y = int(detection[1] * frameHeight) width = int(detection[2] * frameWidth) height = int(detection[3] * frameHeight) left = int(center_x - width / 2) top = int(center_y - height / 2) classIds.append(classId) confidences.append(float(confidence)) boxes.append([left, top, width, height]) # Perform non maximum suppression to eliminate redundant overlapping boxes with # lower confidences. indices = cv2.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold) for i in indices: i = i[0] box = boxes[i] left = box[0] top = box[1] width = box[2] height = box[3] drawPred(classIds[i], confidences[i], left, top, left + width, top + height) # Main while True: # get frame from the video hasFrame, frame = cap.read() hasFrame2, frame2 = cap2.read() frame = cv2.resize(frame, dsize=(600, 402)) frame2 = cv2.resize(frame2, dsize=(600, 402)) cv2.imshow("Camera", frame) cv2.imshow("Thermal_Camera", frame2) # Stop the program if reached end of video if not hasFrame: print("Done processing !!!") cv2.waitKey(3000) break # Create a 4D blob from a frame. 
blob = cv2.dnn.blobFromImage(frame, 1 / 255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False) # Sets the input to the network net.setInput(blob) # Runs the forward pass to get output of the output layers outs = net.forward(getOutputsNames(net)) # Remove the bounding boxes with low confidence postprocess(frame, outs) # Print the FPS current_time = time.time() sec = current_time - starting_time starting_time = current_time fps = 1 / (sec) str2 = "FPS : %0.1f" % fps # cv2.putText(frame, str2, (10, 50), font, 2, (0, 255, 0), 2) # Write the frame with the detection boxes vid_writer.write(frame.astype(np.uint8)) # CAMERA RESULT cv2.imshow("CAMERA_Detection", frame) img2 = None fast = cv2.FastFeatureDetector_create(30) fast.setNonmaxSuppression(0) kp = fast.detect(frame2, None) img2 = cv2.drawKeypoints(frame2, kp, img2, (0, 255, 255)) # cv2.imshow("THERMAL", img2) hsv = cv2.cvtColor(frame2, cv2.COLOR_BGR2HSV) car_prediction = 30 lower_white = np.array([0, 0, 255 - car_prediction], dtype=np.uint8) upper_white = np.array([255, car_prediction, 255], dtype=np.uint8) mask_white = cv2.inRange(hsv, lower_white, upper_white) res = cv2.bitwise_and(frame2, frame2, mask=mask_white) # cv2.imshow("THERMAL_CAR", res) res2 = None res2 = res igray = cv2.cvtColor(res2, cv2.COLOR_BGR2GRAY) iret, ibinary = cv2.threshold(igray, 127, 255, cv2.THRESH_BINARY) contours, hierachy = cv2.findContours(ibinary, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) for i in range(len(contours)): cv2.drawContours(res2, [contours[i]], 0, (255, 255, 255), 2) cv2.putText(res2, "car", tuple(contours[i][0][0]), font, 1, (0, 255, 0), 1) # cv2.imshow("THERMAL_CONTOUR", res2) # THERMAL PROCESSING RESULT dst = cv2.addWeighted(res2, 1, frame2, 1, 0) #cv2.imshow('THERMAL_RES',dst) #cv2.imshow("THERMAL",frame2) # FINAL RESULT dst2 = cv2.addWeighted(res2, 1, frame, 1, 0) cv2.imshow("RESULT",dst2) # End the video with "Esc" key = cv2.waitKey(1) if key == 27: break cap.release() cv2.destroyAllWindows()
cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0)) label = '%.2f' % conf # Get the label for the class name and its confidence if classes: assert (classId < len(classes)) label = '%s:%s' % (classes[classId], label) # Display the label at the top of the bounding box labelSize, baseLine = cv2.getTextSize(label, font, 0.5, 1) top = max(top, labelSize[1]) cv2.putText(frame, label, (left, top), font, 1, (0, 255, 0), 2)
feed_store.go
package feedstore import ( "context" "fmt" "net/http" "github.com/ProgrammingLab/prolab-accounts/infra/store" "github.com/mmcdole/gofeed" "github.com/pkg/errors" ) type feedStoreImpl struct { ctx context.Context } // NewFeedStore returns new feed store func NewFeedStore(ctx context.Context) store.FeedStore { return &feedStoreImpl{ ctx: ctx, } } type feedURLGetter func(blogURL string, cli *http.Client) (feed string, err error) var ( // ErrFeedURLNotFound will be returned when feed url not found ErrFeedURLNotFound = fmt.Errorf("feed url not found") feedURLGetters = []feedURLGetter{ getMediumFeed, getFeedURLWithSuffixes, } ) func (s *feedStoreImpl) GetFeedURL(url string) (string, error) { for _, g := range feedURLGetters { u, err := g(url, &http.Client{}) if err == nil { return u, nil } } return "", ErrFeedURLNotFound } func (s *feedStoreImpl) IsValidFeedURL(feedURL string) error { _, err := s.GetFeed(feedURL) return err } func (s *feedStoreImpl) GetFeed(feedURL string) (*gofeed.Feed, error) { p := gofeed.NewParser()
f, err := p.ParseURL(feedURL) if err != nil { return nil, errors.WithStack(err) } return f, nil }
main.py
''' Created on Jul 17, 2013 @author: Yubin Bai ''' import time from multiprocessing.pool import Pool parallelSolve = False INF = 1 << 31 def solve(par): M, pairs = par pairs.sort() pairs1 = [] for p in pairs: if p[0] >= M or p[1] <= 0: continue pairs1.append(tuple(p)) if not pairs1: return 0 pairs = [pairs1[0]] left, right = pairs1[0] for p in pairs1: p1 = pairs[-1] if p[0] == p1[0] and p[1] > p[0]: pairs.pop() pairs.append(p) if p[1] > right: pairs.append(p) right = p[1] if right < M: return 0 return '\n'.join('%d %d' % (e[0], e[1]) for e in pairs) class Solver: def getInput(self): self.numOfTests = int(self.fIn.readline()) self.input = [] for itertest in range(self.numOfTests): line = self.fIn.readline().strip() M = int(self.fIn.readline()) pairs = [] while True: pair = map(int, self.fIn.readline().split()) if pair[0] == 0 and pair[1] == 0: break pairs.append(pair) self.input.append((M, pairs)) def __init__(self): self.fIn = open('input.txt') self.fOut = open('output.txt', 'w') self.results = [] def parallel(self): self.getInput() p = Pool(4) millis1 = int(round(time.time() * 1000)) self.results = p.map(solve, self.input) millis2 = int(round(time.time() * 1000)) print("Time in milliseconds: %d " % (millis2 - millis1)) self.makeOutput() def sequential(self):
def makeOutput(self): for test in range(self.numOfTests): self.fOut.write("%s\n\n" % self.results[test]) self.fIn.close() self.fOut.close() if __name__ == '__main__': solver = Solver() if parallelSolve: solver.parallel() else: solver.sequential()
self.getInput() millis1 = int(round(time.time() * 1000)) for i in self.input: self.results.append(solve(i)) millis2 = int(round(time.time() * 1000)) print("Time in milliseconds: %d " % (millis2 - millis1)) self.makeOutput()
clean.py
from django.core.management.base import BaseCommand from ksiazkaadresowa.models import Person class Command(BaseCommand): help = 'Moj tekst pomocy' def add_arguments(self, parser):
def handle(self, *args, **options): filename = options['file'] format = options['format'] content = [] with open(filename) as file: for line in file: line = self.parse_line(line) content.append(line) print('\n'.join(content)) return for p in Person.objects.all(): p.first_name = p.first_name.title() p.last_name = p.last_name.title() p.save() def parse_line(self, line): return line.upper()
parser.add_argument( '--file', dest='file', nargs='?', help='Log File', ) parser.add_argument( '--format', nargs='?', dest='format', help='Log File Format', )
sql.py
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys import types import itertools import warnings import decimal import datetime import keyword import warnings from array import array from operator import itemgetter from pyspark.rdd import RDD from pyspark.serializers import BatchedSerializer, PickleSerializer, CloudPickleSerializer from pyspark.storagelevel import StorageLevel from pyspark.traceback_utils import SCCallSiteSync from itertools import chain, ifilter, imap from py4j.protocol import Py4JError from py4j.java_collections import ListConverter, MapConverter __all__ = [ "StringType", "BinaryType", "BooleanType", "TimestampType", "DecimalType", "DoubleType", "FloatType", "ByteType", "IntegerType", "LongType", "ShortType", "ArrayType", "MapType", "StructField", "StructType", "SQLContext", "HiveContext", "SchemaRDD", "Row"] class DataType(object): """Spark SQL DataType""" def __repr__(self): return self.__class__.__name__ def __hash__(self): return hash(str(self)) def __eq__(self, other): return (isinstance(other, self.__class__) and self.__dict__ == other.__dict__) def __ne__(self, other): return not self.__eq__(other) class PrimitiveTypeSingleton(type): """Metaclass for PrimitiveType""" _instances = {} def __call__(cls): if cls not in 
cls._instances: cls._instances[cls] = super(PrimitiveTypeSingleton, cls).__call__() return cls._instances[cls] class PrimitiveType(DataType): """Spark SQL PrimitiveType""" __metaclass__ = PrimitiveTypeSingleton def __eq__(self, other): # because they should be the same object return self is other class StringType(PrimitiveType): """Spark SQL StringType The data type representing string values. """ class BinaryType(PrimitiveType): """Spark SQL BinaryType The data type representing bytearray values. """ class BooleanType(PrimitiveType): """Spark SQL BooleanType The data type representing bool values. """ class TimestampType(PrimitiveType): """Spark SQL TimestampType The data type representing datetime.datetime values. """ class DecimalType(PrimitiveType): """Spark SQL DecimalType The data type representing decimal.Decimal values. """ class DoubleType(PrimitiveType): """Spark SQL DoubleType The data type representing float values. """ class FloatType(PrimitiveType): """Spark SQL FloatType The data type representing single precision floating-point values. """ class ByteType(PrimitiveType): """Spark SQL ByteType The data type representing int values with 1 singed byte. """ class IntegerType(PrimitiveType): """Spark SQL IntegerType The data type representing int values. """ class LongType(PrimitiveType): """Spark SQL LongType The data type representing long values. If the any value is beyond the range of [-9223372036854775808, 9223372036854775807], please use DecimalType. """ class ShortType(PrimitiveType): """Spark SQL ShortType The data type representing int values with 2 signed bytes. """ class ArrayType(DataType): """Spark SQL ArrayType The data type representing list values. An ArrayType object comprises two fields, elementType (a DataType) and containsNull (a bool). The field of elementType is used to specify the type of array elements. The field of containsNull is used to specify if the array has None values. 
""" def __init__(self, elementType, containsNull=True): """Creates an ArrayType :param elementType: the data type of elements. :param containsNull: indicates whether the list contains None values. >>> ArrayType(StringType) == ArrayType(StringType, True) True >>> ArrayType(StringType, False) == ArrayType(StringType) False """ self.elementType = elementType self.containsNull = containsNull def __str__(self): return "ArrayType(%s,%s)" % (self.elementType, str(self.containsNull).lower()) class MapType(DataType): """Spark SQL MapType The data type representing dict values. A MapType object comprises three fields, keyType (a DataType), valueType (a DataType) and valueContainsNull (a bool). The field of keyType is used to specify the type of keys in the map. The field of valueType is used to specify the type of values in the map. The field of valueContainsNull is used to specify if values of this map has None values. For values of a MapType column, keys are not allowed to have None values. """ def __init__(self, keyType, valueType, valueContainsNull=True): """Creates a MapType :param keyType: the data type of keys. :param valueType: the data type of values. :param valueContainsNull: indicates whether values contains null values. >>> (MapType(StringType, IntegerType) ... == MapType(StringType, IntegerType, True)) True >>> (MapType(StringType, IntegerType, False) ... == MapType(StringType, FloatType)) False """ self.keyType = keyType self.valueType = valueType self.valueContainsNull = valueContainsNull def __repr__(self): return "MapType(%s,%s,%s)" % (self.keyType, self.valueType, str(self.valueContainsNull).lower()) class StructField(DataType): """Spark SQL StructField Represents a field in a StructType. A StructField object comprises three fields, name (a string), dataType (a DataType) and nullable (a bool). The field of name is the name of a StructField. The field of dataType specifies the data type of a StructField. 
The field of nullable specifies if values of a StructField can contain None values. """ def __init__(self, name, dataType, nullable): """Creates a StructField :param name: the name of this field. :param dataType: the data type of this field. :param nullable: indicates whether values of this field can be null. >>> (StructField("f1", StringType, True) ... == StructField("f1", StringType, True)) True >>> (StructField("f1", StringType, True) ... == StructField("f2", StringType, True)) False """ self.name = name self.dataType = dataType self.nullable = nullable def __repr__(self): return "StructField(%s,%s,%s)" % (self.name, self.dataType, str(self.nullable).lower()) class StructType(DataType): """Spark SQL StructType The data type representing rows. A StructType object comprises a list of L{StructField}. """ def __init__(self, fields): """Creates a StructType >>> struct1 = StructType([StructField("f1", StringType, True)]) >>> struct2 = StructType([StructField("f1", StringType, True)]) >>> struct1 == struct2 True >>> struct1 = StructType([StructField("f1", StringType, True)]) >>> struct2 = StructType([StructField("f1", StringType, True), ... 
[StructField("f2", IntegerType, False)]]) >>> struct1 == struct2 False """ self.fields = fields def __repr__(self): return ("StructType(List(%s))" % ",".join(str(field) for field in self.fields)) def _parse_datatype_list(datatype_list_string): """Parses a list of comma separated data types.""" index = 0 datatype_list = [] start = 0 depth = 0 while index < len(datatype_list_string): if depth == 0 and datatype_list_string[index] == ",": datatype_string = datatype_list_string[start:index].strip() datatype_list.append(_parse_datatype_string(datatype_string)) start = index + 1 elif datatype_list_string[index] == "(": depth += 1 elif datatype_list_string[index] == ")": depth -= 1 index += 1 # Handle the last data type datatype_string = datatype_list_string[start:index].strip() datatype_list.append(_parse_datatype_string(datatype_string)) return datatype_list _all_primitive_types = dict((k, v) for k, v in globals().iteritems() if type(v) is PrimitiveTypeSingleton and v.__base__ == PrimitiveType) def _parse_datatype_string(datatype_string): """Parses the given data type string. >>> def check_datatype(datatype): ... scala_datatype = sqlCtx._ssql_ctx.parseDataType(str(datatype)) ... python_datatype = _parse_datatype_string( ... scala_datatype.toString()) ... return datatype == python_datatype >>> all(check_datatype(cls()) for cls in _all_primitive_types.values()) True >>> # Simple ArrayType. >>> simple_arraytype = ArrayType(StringType(), True) >>> check_datatype(simple_arraytype) True >>> # Simple MapType. >>> simple_maptype = MapType(StringType(), LongType()) >>> check_datatype(simple_maptype) True >>> # Simple StructType. >>> simple_structtype = StructType([ ... StructField("a", DecimalType(), False), ... StructField("b", BooleanType(), True), ... StructField("c", LongType(), True), ... StructField("d", BinaryType(), False)]) >>> check_datatype(simple_structtype) True >>> # Complex StructType. >>> complex_structtype = StructType([ ... 
StructField("simpleArray", simple_arraytype, True), ... StructField("simpleMap", simple_maptype, True), ... StructField("simpleStruct", simple_structtype, True), ... StructField("boolean", BooleanType(), False)]) >>> check_datatype(complex_structtype) True >>> # Complex ArrayType. >>> complex_arraytype = ArrayType(complex_structtype, True) >>> check_datatype(complex_arraytype) True >>> # Complex MapType. >>> complex_maptype = MapType(complex_structtype, ... complex_arraytype, False) >>> check_datatype(complex_maptype) True """ index = datatype_string.find("(") if index == -1: # It is a primitive type. index = len(datatype_string) type_or_field = datatype_string[:index] rest_part = datatype_string[index + 1:len(datatype_string) - 1].strip() if type_or_field in _all_primitive_types: return _all_primitive_types[type_or_field]() elif type_or_field == "ArrayType": last_comma_index = rest_part.rfind(",") containsNull = True if rest_part[last_comma_index + 1:].strip().lower() == "false": containsNull = False elementType = _parse_datatype_string( rest_part[:last_comma_index].strip()) return ArrayType(elementType, containsNull) elif type_or_field == "MapType": last_comma_index = rest_part.rfind(",") valueContainsNull = True if rest_part[last_comma_index + 1:].strip().lower() == "false": valueContainsNull = False keyType, valueType = _parse_datatype_list( rest_part[:last_comma_index].strip()) return MapType(keyType, valueType, valueContainsNull) elif type_or_field == "StructField": first_comma_index = rest_part.find(",") name = rest_part[:first_comma_index].strip() last_comma_index = rest_part.rfind(",") nullable = True if rest_part[last_comma_index + 1:].strip().lower() == "false": nullable = False dataType = _parse_datatype_string( rest_part[first_comma_index + 1:last_comma_index].strip()) return StructField(name, dataType, nullable) elif type_or_field == "StructType": # rest_part should be in the format like # List(StructField(field1,IntegerType,false)). 
field_list_string = rest_part[rest_part.find("(") + 1:-1] fields = _parse_datatype_list(field_list_string) return StructType(fields) # Mapping Python types to Spark SQL DateType _type_mappings = { bool: BooleanType, int: IntegerType, long: LongType, float: DoubleType, str: StringType, unicode: StringType, bytearray: BinaryType, decimal.Decimal: DecimalType, datetime.datetime: TimestampType, datetime.date: TimestampType, datetime.time: TimestampType, } def _infer_type(obj): """Infer the DataType from obj""" if obj is None: raise ValueError("Can not infer type for None") dataType = _type_mappings.get(type(obj)) if dataType is not None: return dataType() if isinstance(obj, dict): if not obj: raise ValueError("Can not infer type for empty dict") key, value = obj.iteritems().next() return MapType(_infer_type(key), _infer_type(value), True) elif isinstance(obj, (list, array)): if not obj: raise ValueError("Can not infer type for empty list/array") return ArrayType(_infer_type(obj[0]), True) else: try: return _infer_schema(obj) except ValueError: raise ValueError("not supported type: %s" % type(obj)) def _infer_schema(row): """Infer the schema from dict/namedtuple/object""" if isinstance(row, dict): items = sorted(row.items()) elif isinstance(row, tuple): if hasattr(row, "_fields"): # namedtuple items = zip(row._fields, tuple(row)) elif hasattr(row, "__FIELDS__"): # Row items = zip(row.__FIELDS__, tuple(row)) elif all(isinstance(x, tuple) and len(x) == 2 for x in row): items = row else: raise ValueError("Can't infer schema from tuple") elif hasattr(row, "__dict__"): # object items = sorted(row.__dict__.items()) else: raise ValueError("Can not infer schema for type: %s" % type(row)) fields = [StructField(k, _infer_type(v), True) for k, v in items] return StructType(fields) def _create_converter(obj, dataType): """Create an converter to drop the names of fields in obj """ if isinstance(dataType, ArrayType): conv = _create_converter(obj[0], dataType.elementType) return 
lambda row: map(conv, row) elif isinstance(dataType, MapType): value = obj.values()[0] conv = _create_converter(value, dataType.valueType) return lambda row: dict((k, conv(v)) for k, v in row.iteritems()) elif not isinstance(dataType, StructType): return lambda x: x # dataType must be StructType names = [f.name for f in dataType.fields] if isinstance(obj, dict): conv = lambda o: tuple(o.get(n) for n in names) elif isinstance(obj, tuple): if hasattr(obj, "_fields"): # namedtuple conv = tuple elif hasattr(obj, "__FIELDS__"): conv = tuple elif all(isinstance(x, tuple) and len(x) == 2 for x in obj): conv = lambda o: tuple(v for k, v in o) else: raise ValueError("unexpected tuple") elif hasattr(obj, "__dict__"): # object conv = lambda o: [o.__dict__.get(n, None) for n in names] if all(isinstance(f.dataType, PrimitiveType) for f in dataType.fields): return conv row = conv(obj) convs = [_create_converter(v, f.dataType) for v, f in zip(row, dataType.fields)] def nested_conv(row): return tuple(f(v) for f, v in zip(convs, conv(row))) return nested_conv def _drop_schema(rows, schema): """ all the names of fields, becoming tuples""" iterator = iter(rows) row = iterator.next() converter = _create_converter(row, schema) yield converter(row) for i in iterator: yield converter(i) _BRACKETS = {'(': ')', '[': ']', '{': '}'} def _split_schema_abstract(s): """ split the schema abstract into fields >>> _split_schema_abstract("a b c") ['a', 'b', 'c'] >>> _split_schema_abstract("a(a b)") ['a(a b)'] >>> _split_schema_abstract("a b[] c{a b}") ['a', 'b[]', 'c{a b}'] >>> _split_schema_abstract(" ") [] """ r = [] w = '' brackets = [] for c in s: if c == ' ' and not brackets: if w: r.append(w) w = '' else: w += c if c in _BRACKETS: brackets.append(c) elif c in _BRACKETS.values(): if not brackets or c != _BRACKETS[brackets.pop()]: raise ValueError("unexpected " + c) if brackets: raise ValueError("brackets not closed: %s" % brackets) if w: r.append(w) return r def _parse_field_abstract(s): """ 
Parse a field in schema abstract >>> _parse_field_abstract("a") StructField(a,None,true) >>> _parse_field_abstract("b(c d)") StructField(b,StructType(...c,None,true),StructField(d... >>> _parse_field_abstract("a[]") StructField(a,ArrayType(None,true),true) >>> _parse_field_abstract("a{[]}") StructField(a,MapType(None,ArrayType(None,true),true),true) """ if set(_BRACKETS.keys()) & set(s): idx = min((s.index(c) for c in _BRACKETS if c in s)) name = s[:idx] return StructField(name, _parse_schema_abstract(s[idx:]), True) else: return StructField(s, None, True) def _parse_schema_abstract(s): """ parse abstract into schema >>> _parse_schema_abstract("a b c") StructType...a...b...c... >>> _parse_schema_abstract("a[b c] b{}") StructType...a,ArrayType...b...c...b,MapType... >>> _parse_schema_abstract("c{} d{a b}") StructType...c,MapType...d,MapType...a...b... >>> _parse_schema_abstract("a b(t)").fields[1] StructField(b,StructType(List(StructField(t,None,true))),true) """ s = s.strip() if not s: return elif s.startswith('('): return _parse_schema_abstract(s[1:-1]) elif s.startswith('['): return ArrayType(_parse_schema_abstract(s[1:-1]), True) elif s.startswith('{'): return MapType(None, _parse_schema_abstract(s[1:-1])) parts = _split_schema_abstract(s) fields = [_parse_field_abstract(p) for p in parts] return StructType(fields) def _infer_schema_type(obj, dataType): """ Fill the dataType with types infered from obj >>> schema = _parse_schema_abstract("a b c") >>> row = (1, 1.0, "str") >>> _infer_schema_type(row, schema) StructType...IntegerType...DoubleType...StringType... >>> row = [[1], {"key": (1, 2.0)}] >>> schema = _parse_schema_abstract("a[] b{c d}") >>> _infer_schema_type(row, schema) StructType...a,ArrayType...b,MapType(StringType,...c,IntegerType... 
""" if dataType is None: return _infer_type(obj) if not obj: raise ValueError("Can not infer type from empty value") if isinstance(dataType, ArrayType): eType = _infer_schema_type(obj[0], dataType.elementType) return ArrayType(eType, True) elif isinstance(dataType, MapType): k, v = obj.iteritems().next() return MapType(_infer_type(k), _infer_schema_type(v, dataType.valueType)) elif isinstance(dataType, StructType): fs = dataType.fields assert len(fs) == len(obj), \ "Obj(%s) have different length with fields(%s)" % (obj, fs) fields = [StructField(f.name, _infer_schema_type(o, f.dataType), True) for o, f in zip(obj, fs)] return StructType(fields) else: raise ValueError("Unexpected dataType: %s" % dataType) _acceptable_types = { BooleanType: (bool,), ByteType: (int, long), ShortType: (int, long), IntegerType: (int, long), LongType: (int, long), FloatType: (float,), DoubleType: (float,), DecimalType: (decimal.Decimal,), StringType: (str, unicode), BinaryType: (bytearray,), TimestampType: (datetime.datetime,), ArrayType: (list, tuple, array), MapType: (dict,), StructType: (tuple, list), } def _verify_type(obj, dataType): """ Verify the type of obj against dataType, raise an exception if they do not match. >>> _verify_type(None, StructType([])) >>> _verify_type("", StringType()) >>> _verify_type(0, IntegerType()) >>> _verify_type(range(3), ArrayType(ShortType())) >>> _verify_type(set(), ArrayType(StringType())) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError:... >>> _verify_type({}, MapType(StringType(), IntegerType())) >>> _verify_type((), StructType([])) >>> _verify_type([], StructType([])) >>> _verify_type([1], StructType([])) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... 
""" # all objects are nullable if obj is None: return _type = type(dataType) assert _type in _acceptable_types, "unkown datatype: %s" % dataType # subclass of them can not be deserialized in JVM if type(obj) not in _acceptable_types[_type]: raise TypeError("%s can not accept abject in type %s" % (dataType, type(obj))) if isinstance(dataType, ArrayType): for i in obj: _verify_type(i, dataType.elementType) elif isinstance(dataType, MapType): for k, v in obj.iteritems(): _verify_type(k, dataType.keyType) _verify_type(v, dataType.valueType) elif isinstance(dataType, StructType): if len(obj) != len(dataType.fields): raise ValueError("Length of object (%d) does not match with" "length of fields (%d)" % (len(obj), len(dataType.fields))) for v, f in zip(obj, dataType.fields): _verify_type(v, f.dataType) _cached_cls = {} def _restore_object(dataType, obj): """ Restore object during unpickling. """ # use id(dataType) as key to speed up lookup in dict # Because of batched pickling, dataType will be the # same object in mose cases. k = id(dataType) cls = _cached_cls.get(k) if cls is None: # use dataType as key to avoid create multiple class cls = _cached_cls.get(dataType) if cls is None: cls = _create_cls(dataType) _cached_cls[dataType] = cls _cached_cls[k] = cls return cls(obj) def _create_object(cls, v): """ Create an customized object with class `cls`. """ return cls(v) if v is not None else v def _create_getter(dt, i): """ Create a getter for item `i` with schema """ cls = _create_cls(dt) def
(self): return _create_object(cls, self[i]) return getter def _has_struct(dt): """Return whether `dt` is or has StructType in it""" if isinstance(dt, StructType): return True elif isinstance(dt, ArrayType): return _has_struct(dt.elementType) elif isinstance(dt, MapType): return _has_struct(dt.valueType) return False def _create_properties(fields): """Create properties according to fields""" ps = {} for i, f in enumerate(fields): name = f.name if (name.startswith("__") and name.endswith("__") or keyword.iskeyword(name)): warnings.warn("field name %s can not be accessed in Python," "use position to access it instead" % name) if _has_struct(f.dataType): # delay creating object until accessing it getter = _create_getter(f.dataType, i) else: getter = itemgetter(i) ps[name] = property(getter) return ps def _create_cls(dataType): """ Create an class by dataType The created class is similar to namedtuple, but can have nested schema. >>> schema = _parse_schema_abstract("a b c") >>> row = (1, 1.0, "str") >>> schema = _infer_schema_type(row, schema) >>> obj = _create_cls(schema)(row) >>> import pickle >>> pickle.loads(pickle.dumps(obj)) Row(a=1, b=1.0, c='str') >>> row = [[1], {"key": (1, 2.0)}] >>> schema = _parse_schema_abstract("a[] b{c d}") >>> schema = _infer_schema_type(row, schema) >>> obj = _create_cls(schema)(row) >>> pickle.loads(pickle.dumps(obj)) Row(a=[1], b={'key': Row(c=1, d=2.0)}) >>> pickle.loads(pickle.dumps(obj.a)) [1] >>> pickle.loads(pickle.dumps(obj.b)) {'key': Row(c=1, d=2.0)} """ if isinstance(dataType, ArrayType): cls = _create_cls(dataType.elementType) def List(l): if l is None: return return [_create_object(cls, v) for v in l] return List elif isinstance(dataType, MapType): cls = _create_cls(dataType.valueType) def Dict(d): if d is None: return return dict((k, _create_object(cls, v)) for k, v in d.items()) return Dict elif not isinstance(dataType, StructType): raise Exception("unexpected data type: %s" % dataType) class Row(tuple): """ Row in 
SchemaRDD """ __DATATYPE__ = dataType __FIELDS__ = tuple(f.name for f in dataType.fields) __slots__ = () # create property for fast access locals().update(_create_properties(dataType.fields)) def __repr__(self): # call collect __repr__ for nested objects return ("Row(%s)" % ", ".join("%s=%r" % (n, getattr(self, n)) for n in self.__FIELDS__)) def __reduce__(self): return (_restore_object, (self.__DATATYPE__, tuple(self))) return Row class SQLContext(object): """Main entry point for Spark SQL functionality. A SQLContext can be used create L{SchemaRDD}, register L{SchemaRDD} as tables, execute SQL over tables, cache tables, and read parquet files. """ def __init__(self, sparkContext, sqlContext=None): """Create a new SQLContext. @param sparkContext: The SparkContext to wrap. @param sqlContext: An optional JVM Scala SQLContext. If set, we do not instatiate a new SQLContext in the JVM, instead we make all calls to this object. >>> srdd = sqlCtx.inferSchema(rdd) >>> sqlCtx.inferSchema(srdd) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... TypeError:... >>> bad_rdd = sc.parallelize([1,2,3]) >>> sqlCtx.inferSchema(bad_rdd) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... >>> from datetime import datetime >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1L, ... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1), ... time=datetime(2014, 8, 1, 14, 1, 5))]) >>> srdd = sqlCtx.inferSchema(allTypes) >>> srdd.registerTempTable("allTypes") >>> sqlCtx.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a ' ... 'from allTypes where b and i > 0').collect() [Row(c0=2, c1=2.0, c2=False, c3=2, c4=0...8, 1, 14, 1, 5), a=1)] >>> srdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, ... 
x.row.a, x.list)).collect() [(1, u'string', 1.0, 1, True, ...(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])] """ self._sc = sparkContext self._jsc = self._sc._jsc self._jvm = self._sc._jvm self._pythonToJava = self._jvm.PythonRDD.pythonToJavaArray self._scala_SQLContext = sqlContext @property def _ssql_ctx(self): """Accessor for the JVM Spark SQL context. Subclasses can override this property to provide their own JVM Contexts. """ if self._scala_SQLContext is None: self._scala_SQLContext = self._jvm.SQLContext(self._jsc.sc()) return self._scala_SQLContext def registerFunction(self, name, f, returnType=StringType()): """Registers a lambda function as a UDF so it can be used in SQL statements. In addition to a name and the function itself, the return type can be optionally specified. When the return type is not given it default to a string and conversion will automatically be done. For any other return type, the produced object must match the specified type. >>> sqlCtx.registerFunction("stringLengthString", lambda x: len(x)) >>> sqlCtx.sql("SELECT stringLengthString('test')").collect() [Row(c0=u'4')] >>> sqlCtx.registerFunction("stringLengthInt", lambda x: len(x), IntegerType()) >>> sqlCtx.sql("SELECT stringLengthInt('test')").collect() [Row(c0=4)] """ func = lambda _, it: imap(lambda x: f(*x), it) command = (func, BatchedSerializer(PickleSerializer(), 1024), BatchedSerializer(PickleSerializer(), 1024)) ser = CloudPickleSerializer() pickled_command = ser.dumps(command) if pickled_command > (1 << 20): # 1M broadcast = self._sc.broadcast(pickled_command) pickled_command = ser.dumps(broadcast) broadcast_vars = ListConverter().convert( [x._jbroadcast for x in self._sc._pickled_broadcast_vars], self._sc._gateway._gateway_client) self._sc._pickled_broadcast_vars.clear() env = MapConverter().convert(self._sc.environment, self._sc._gateway._gateway_client) includes = ListConverter().convert(self._sc._python_includes, self._sc._gateway._gateway_client) 
self._ssql_ctx.registerPython(name, bytearray(pickled_command), env, includes, self._sc.pythonExec, broadcast_vars, self._sc._javaAccumulator, str(returnType)) def inferSchema(self, rdd): """Infer and apply a schema to an RDD of L{Row}. We peek at the first row of the RDD to determine the fields' names and types. Nested collections are supported, which include array, dict, list, Row, tuple, namedtuple, or object. All the rows in `rdd` should have the same type with the first one, or it will cause runtime exceptions. Each row could be L{pyspark.sql.Row} object or namedtuple or objects, using dict is deprecated. >>> rdd = sc.parallelize( ... [Row(field1=1, field2="row1"), ... Row(field1=2, field2="row2"), ... Row(field1=3, field2="row3")]) >>> srdd = sqlCtx.inferSchema(rdd) >>> srdd.collect()[0] Row(field1=1, field2=u'row1') >>> NestedRow = Row("f1", "f2") >>> nestedRdd1 = sc.parallelize([ ... NestedRow(array('i', [1, 2]), {"row1": 1.0}), ... NestedRow(array('i', [2, 3]), {"row2": 2.0})]) >>> srdd = sqlCtx.inferSchema(nestedRdd1) >>> srdd.collect() [Row(f1=[1, 2], f2={u'row1': 1.0}), ..., f2={u'row2': 2.0})] >>> nestedRdd2 = sc.parallelize([ ... NestedRow([[1, 2], [2, 3]], [1, 2]), ... NestedRow([[2, 3], [3, 4]], [2, 3])]) >>> srdd = sqlCtx.inferSchema(nestedRdd2) >>> srdd.collect() [Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), ..., f2=[2, 3])] """ if isinstance(rdd, SchemaRDD): raise TypeError("Cannot apply schema to SchemaRDD") first = rdd.first() if not first: raise ValueError("The first row in RDD is empty, " "can not infer schema") if type(first) is dict: warnings.warn("Using RDD of dict to inferSchema is deprecated," "please use pyspark.sql.Row instead") schema = _infer_schema(first) rdd = rdd.mapPartitions(lambda rows: _drop_schema(rows, schema)) return self.applySchema(rdd, schema) def applySchema(self, rdd, schema): """ Applies the given schema to the given RDD of L{tuple} or L{list}. 
These tuples or lists can contain complex nested structures like lists, maps or nested rows. The schema should be a StructType. It is important that the schema matches the types of the objects in each row or exceptions could be thrown at runtime. >>> rdd2 = sc.parallelize([(1, "row1"), (2, "row2"), (3, "row3")]) >>> schema = StructType([StructField("field1", IntegerType(), False), ... StructField("field2", StringType(), False)]) >>> srdd = sqlCtx.applySchema(rdd2, schema) >>> sqlCtx.registerRDDAsTable(srdd, "table1") >>> srdd2 = sqlCtx.sql("SELECT * from table1") >>> srdd2.collect() [Row(field1=1, field2=u'row1'),..., Row(field1=3, field2=u'row3')] >>> from datetime import datetime >>> rdd = sc.parallelize([(127, -128L, -32768, 32767, 2147483647L, 1.0, ... datetime(2010, 1, 1, 1, 1, 1), ... {"a": 1}, (2,), [1, 2, 3], None)]) >>> schema = StructType([ ... StructField("byte1", ByteType(), False), ... StructField("byte2", ByteType(), False), ... StructField("short1", ShortType(), False), ... StructField("short2", ShortType(), False), ... StructField("int", IntegerType(), False), ... StructField("float", FloatType(), False), ... StructField("time", TimestampType(), False), ... StructField("map", ... MapType(StringType(), IntegerType(), False), False), ... StructField("struct", ... StructType([StructField("b", ShortType(), False)]), False), ... StructField("list", ArrayType(ByteType(), False), False), ... StructField("null", DoubleType(), True)]) >>> srdd = sqlCtx.applySchema(rdd, schema) >>> results = srdd.map( ... lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int, x.float, x.time, ... x.map["a"], x.struct.b, x.list, x.null)) >>> results.collect()[0] (127, -128, -32768, 32767, 2147483647, 1.0, ...(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None) >>> srdd.registerTempTable("table2") >>> sqlCtx.sql( ... "SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " + ... "short1 + 1 AS short1, short2 - 1 AS short2, int - 1 AS int, " + ... 
"float + 1.5 as float FROM table2").collect() [Row(byte1=126, byte2=-127, short1=-32767, short2=32766, int=2147483646, float=2.5)] >>> rdd = sc.parallelize([(127, -32768, 1.0, ... datetime(2010, 1, 1, 1, 1, 1), ... {"a": 1}, (2,), [1, 2, 3])]) >>> abstract = "byte short float time map{} struct(b) list[]" >>> schema = _parse_schema_abstract(abstract) >>> typedSchema = _infer_schema_type(rdd.first(), schema) >>> srdd = sqlCtx.applySchema(rdd, typedSchema) >>> srdd.collect() [Row(byte=127, short=-32768, float=1.0, time=..., list=[1, 2, 3])] """ if isinstance(rdd, SchemaRDD): raise TypeError("Cannot apply schema to SchemaRDD") if not isinstance(schema, StructType): raise TypeError("schema should be StructType") # take the first few rows to verify schema rows = rdd.take(10) # Row() cannot been deserialized by Pyrolite if rows and isinstance(rows[0], tuple) and rows[0].__class__.__name__ == 'Row': rdd = rdd.map(tuple) rows = rdd.take(10) for row in rows: _verify_type(row, schema) batched = isinstance(rdd._jrdd_deserializer, BatchedSerializer) jrdd = self._pythonToJava(rdd._jrdd, batched) srdd = self._ssql_ctx.applySchemaToPythonRDD(jrdd.rdd(), str(schema)) return SchemaRDD(srdd.toJavaSchemaRDD(), self) def registerRDDAsTable(self, rdd, tableName): """Registers the given RDD as a temporary table in the catalog. Temporary tables exist only during the lifetime of this instance of SQLContext. >>> srdd = sqlCtx.inferSchema(rdd) >>> sqlCtx.registerRDDAsTable(srdd, "table1") """ if (rdd.__class__ is SchemaRDD): srdd = rdd._jschema_rdd.baseSchemaRDD() self._ssql_ctx.registerRDDAsTable(srdd, tableName) else: raise ValueError("Can only register SchemaRDD as table") def parquetFile(self, path): """Loads a Parquet file, returning the result as a L{SchemaRDD}. 
>>> import tempfile, shutil >>> parquetFile = tempfile.mkdtemp() >>> shutil.rmtree(parquetFile) >>> srdd = sqlCtx.inferSchema(rdd) >>> srdd.saveAsParquetFile(parquetFile) >>> srdd2 = sqlCtx.parquetFile(parquetFile) >>> sorted(srdd.collect()) == sorted(srdd2.collect()) True """ jschema_rdd = self._ssql_ctx.parquetFile(path).toJavaSchemaRDD() return SchemaRDD(jschema_rdd, self) def jsonFile(self, path, schema=None): """ Loads a text file storing one JSON object per line as a L{SchemaRDD}. If the schema is provided, applies the given schema to this JSON dataset. Otherwise, it goes through the entire dataset once to determine the schema. >>> import tempfile, shutil >>> jsonFile = tempfile.mkdtemp() >>> shutil.rmtree(jsonFile) >>> ofn = open(jsonFile, 'w') >>> for json in jsonStrings: ... print>>ofn, json >>> ofn.close() >>> srdd1 = sqlCtx.jsonFile(jsonFile) >>> sqlCtx.registerRDDAsTable(srdd1, "table1") >>> srdd2 = sqlCtx.sql( ... "SELECT field1 AS f1, field2 as f2, field3 as f3, " ... "field6 as f4 from table1") >>> for r in srdd2.collect(): ... print r Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None) Row(f1=2, f2=None, f3=Row(field4=22,..., f4=[Row(field7=u'row2')]) Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None) >>> srdd3 = sqlCtx.jsonFile(jsonFile, srdd1.schema()) >>> sqlCtx.registerRDDAsTable(srdd3, "table2") >>> srdd4 = sqlCtx.sql( ... "SELECT field1 AS f1, field2 as f2, field3 as f3, " ... "field6 as f4 from table2") >>> for r in srdd4.collect(): ... print r Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None) Row(f1=2, f2=None, f3=Row(field4=22,..., f4=[Row(field7=u'row2')]) Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None) >>> schema = StructType([ ... StructField("field2", StringType(), True), ... StructField("field3", ... StructType([ ... StructField("field5", ... 
ArrayType(IntegerType(), False), True)]), False)]) >>> srdd5 = sqlCtx.jsonFile(jsonFile, schema) >>> sqlCtx.registerRDDAsTable(srdd5, "table3") >>> srdd6 = sqlCtx.sql( ... "SELECT field2 AS f1, field3.field5 as f2, " ... "field3.field5[0] as f3 from table3") >>> srdd6.collect() [Row(f1=u'row1', f2=None, f3=None)...Row(f1=u'row3', f2=[], f3=None)] """ if schema is None: srdd = self._ssql_ctx.jsonFile(path) else: scala_datatype = self._ssql_ctx.parseDataType(str(schema)) srdd = self._ssql_ctx.jsonFile(path, scala_datatype) return SchemaRDD(srdd.toJavaSchemaRDD(), self) def jsonRDD(self, rdd, schema=None): """Loads an RDD storing one JSON object per string as a L{SchemaRDD}. If the schema is provided, applies the given schema to this JSON dataset. Otherwise, it goes through the entire dataset once to determine the schema. >>> srdd1 = sqlCtx.jsonRDD(json) >>> sqlCtx.registerRDDAsTable(srdd1, "table1") >>> srdd2 = sqlCtx.sql( ... "SELECT field1 AS f1, field2 as f2, field3 as f3, " ... "field6 as f4 from table1") >>> for r in srdd2.collect(): ... print r Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None) Row(f1=2, f2=None, f3=Row(field4=22..., f4=[Row(field7=u'row2')]) Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None) >>> srdd3 = sqlCtx.jsonRDD(json, srdd1.schema()) >>> sqlCtx.registerRDDAsTable(srdd3, "table2") >>> srdd4 = sqlCtx.sql( ... "SELECT field1 AS f1, field2 as f2, field3 as f3, " ... "field6 as f4 from table2") >>> for r in srdd4.collect(): ... print r Row(f1=1, f2=u'row1', f3=Row(field4=11, field5=None), f4=None) Row(f1=2, f2=None, f3=Row(field4=22..., f4=[Row(field7=u'row2')]) Row(f1=None, f2=u'row3', f3=Row(field4=33, field5=[]), f4=None) >>> schema = StructType([ ... StructField("field2", StringType(), True), ... StructField("field3", ... StructType([ ... StructField("field5", ... 
ArrayType(IntegerType(), False), True)]), False)]) >>> srdd5 = sqlCtx.jsonRDD(json, schema) >>> sqlCtx.registerRDDAsTable(srdd5, "table3") >>> srdd6 = sqlCtx.sql( ... "SELECT field2 AS f1, field3.field5 as f2, " ... "field3.field5[0] as f3 from table3") >>> srdd6.collect() [Row(f1=u'row1', f2=None,...Row(f1=u'row3', f2=[], f3=None)] >>> sqlCtx.jsonRDD(sc.parallelize(['{}', ... '{"key0": {"key1": "value1"}}'])).collect() [Row(key0=None), Row(key0=Row(key1=u'value1'))] >>> sqlCtx.jsonRDD(sc.parallelize(['{"key0": null}', ... '{"key0": {"key1": "value1"}}'])).collect() [Row(key0=None), Row(key0=Row(key1=u'value1'))] """ def func(iterator): for x in iterator: if not isinstance(x, basestring): x = unicode(x) if isinstance(x, unicode): x = x.encode("utf-8") yield x keyed = rdd.mapPartitions(func) keyed._bypass_serializer = True jrdd = keyed._jrdd.map(self._jvm.BytesToString()) if schema is None: srdd = self._ssql_ctx.jsonRDD(jrdd.rdd()) else: scala_datatype = self._ssql_ctx.parseDataType(str(schema)) srdd = self._ssql_ctx.jsonRDD(jrdd.rdd(), scala_datatype) return SchemaRDD(srdd.toJavaSchemaRDD(), self) def sql(self, sqlQuery): """Return a L{SchemaRDD} representing the result of the given query. >>> srdd = sqlCtx.inferSchema(rdd) >>> sqlCtx.registerRDDAsTable(srdd, "table1") >>> srdd2 = sqlCtx.sql("SELECT field1 AS f1, field2 as f2 from table1") >>> srdd2.collect() [Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')] """ return SchemaRDD(self._ssql_ctx.sql(sqlQuery).toJavaSchemaRDD(), self) def table(self, tableName): """Returns the specified table as a L{SchemaRDD}. 
>>> srdd = sqlCtx.inferSchema(rdd) >>> sqlCtx.registerRDDAsTable(srdd, "table1") >>> srdd2 = sqlCtx.table("table1") >>> sorted(srdd.collect()) == sorted(srdd2.collect()) True """ return SchemaRDD(self._ssql_ctx.table(tableName).toJavaSchemaRDD(), self) def cacheTable(self, tableName): """Caches the specified table in-memory.""" self._ssql_ctx.cacheTable(tableName) def uncacheTable(self, tableName): """Removes the specified table from the in-memory cache.""" self._ssql_ctx.uncacheTable(tableName) class HiveContext(SQLContext): """A variant of Spark SQL that integrates with data stored in Hive. Configuration for Hive is read from hive-site.xml on the classpath. It supports running both SQL and HiveQL commands. """ def __init__(self, sparkContext, hiveContext=None): """Create a new HiveContext. @param sparkContext: The SparkContext to wrap. @param hiveContext: An optional JVM Scala HiveContext. If set, we do not instatiate a new HiveContext in the JVM, instead we make all calls to this object. """ SQLContext.__init__(self, sparkContext) if hiveContext: self._scala_HiveContext = hiveContext @property def _ssql_ctx(self): try: if not hasattr(self, '_scala_HiveContext'): self._scala_HiveContext = self._get_hive_ctx() return self._scala_HiveContext except Py4JError as e: raise Exception("You must build Spark with Hive. " "Export 'SPARK_HIVE=true' and run " "sbt/sbt assembly", e) def _get_hive_ctx(self): return self._jvm.HiveContext(self._jsc.sc()) def hiveql(self, hqlQuery): """ DEPRECATED: Use sql() """ warnings.warn("hiveql() is deprecated as the sql function now parses using HiveQL by" + "default. The SQL dialect for parsing can be set using 'spark.sql.dialect'", DeprecationWarning) return SchemaRDD(self._ssql_ctx.hiveql(hqlQuery).toJavaSchemaRDD(), self) def hql(self, hqlQuery): """ DEPRECATED: Use sql() """ warnings.warn("hql() is deprecated as the sql function now parses using HiveQL by" + "default. 
The SQL dialect for parsing can be set using 'spark.sql.dialect'", DeprecationWarning) return self.hiveql(hqlQuery) class LocalHiveContext(HiveContext): """Starts up an instance of hive where metadata is stored locally. An in-process metadata data is created with data stored in ./metadata. Warehouse data is stored in in ./warehouse. >>> import os >>> hiveCtx = LocalHiveContext(sc) >>> try: ... supress = hiveCtx.sql("DROP TABLE src") ... except Exception: ... pass >>> kv1 = os.path.join(os.environ["SPARK_HOME"], ... 'examples/src/main/resources/kv1.txt') >>> supress = hiveCtx.sql( ... "CREATE TABLE IF NOT EXISTS src (key INT, value STRING)") >>> supress = hiveCtx.sql("LOAD DATA LOCAL INPATH '%s' INTO TABLE src" ... % kv1) >>> results = hiveCtx.sql("FROM src SELECT value" ... ).map(lambda r: int(r.value.split('_')[1])) >>> num = results.count() >>> reduce_sum = results.reduce(lambda x, y: x + y) >>> num 500 >>> reduce_sum 130091 """ def __init__(self, sparkContext, sqlContext=None): HiveContext.__init__(self, sparkContext, sqlContext) warnings.warn("LocalHiveContext is deprecated. " "Use HiveContext instead.", DeprecationWarning) def _get_hive_ctx(self): return self._jvm.LocalHiveContext(self._jsc.sc()) class TestHiveContext(HiveContext): def _get_hive_ctx(self): return self._jvm.TestHiveContext(self._jsc.sc()) def _create_row(fields, values): row = Row(*values) row.__FIELDS__ = fields return row class Row(tuple): """ A row in L{SchemaRDD}. The fields in it can be accessed like attributes. Row can be used to create a row object by using named arguments, the fields will be sorted by names. 
>>> row = Row(name="Alice", age=11) >>> row Row(age=11, name='Alice') >>> row.name, row.age ('Alice', 11) Row also can be used to create another Row like class, then it could be used to create Row objects, such as >>> Person = Row("name", "age") >>> Person <Row(name, age)> >>> Person("Alice", 11) Row(name='Alice', age=11) """ def __new__(self, *args, **kwargs): if args and kwargs: raise ValueError("Can not use both args " "and kwargs to create Row") if args: # create row class or objects return tuple.__new__(self, args) elif kwargs: # create row objects names = sorted(kwargs.keys()) values = tuple(kwargs[n] for n in names) row = tuple.__new__(self, values) row.__FIELDS__ = names return row else: raise ValueError("No args or kwargs") # let obect acs like class def __call__(self, *args): """create new Row object""" return _create_row(self, args) def __getattr__(self, item): if item.startswith("__"): raise AttributeError(item) try: # it will be slow when it has many fields, # but this will not be used in normal cases idx = self.__FIELDS__.index(item) return self[idx] except IndexError: raise AttributeError(item) def __reduce__(self): if hasattr(self, "__FIELDS__"): return (_create_row, (self.__FIELDS__, tuple(self))) else: return tuple.__reduce__(self) def __repr__(self): if hasattr(self, "__FIELDS__"): return "Row(%s)" % ", ".join("%s=%r" % (k, v) for k, v in zip(self.__FIELDS__, self)) else: return "<Row(%s)>" % ", ".join(self) def inherit_doc(cls): for name, func in vars(cls).items(): # only inherit docstring for public functions if name.startswith("_"): continue if not func.__doc__: for parent in cls.__bases__: parent_func = getattr(parent, name, None) if parent_func and getattr(parent_func, "__doc__", None): func.__doc__ = parent_func.__doc__ break return cls @inherit_doc class SchemaRDD(RDD): """An RDD of L{Row} objects that has an associated schema. 
The underlying JVM object is a SchemaRDD, not a PythonRDD, so we can utilize the relational query api exposed by Spark SQL. For normal L{pyspark.rdd.RDD} operations (map, count, etc.) the L{SchemaRDD} is not operated on directly, as it's underlying implementation is an RDD composed of Java objects. Instead it is converted to a PythonRDD in the JVM, on which Python operations can be done. This class receives raw tuples from Java but assigns a class to it in all its data-collection methods (mapPartitionsWithIndex, collect, take, etc) so that PySpark sees them as Row objects with named fields. """ def __init__(self, jschema_rdd, sql_ctx): self.sql_ctx = sql_ctx self._sc = sql_ctx._sc clsName = jschema_rdd.getClass().getName() assert clsName.endswith("JavaSchemaRDD"), "jschema_rdd must be JavaSchemaRDD" self._jschema_rdd = jschema_rdd self._id = None self.is_cached = False self.is_checkpointed = False self.ctx = self.sql_ctx._sc # the _jrdd is created by javaToPython(), serialized by pickle self._jrdd_deserializer = BatchedSerializer(PickleSerializer()) @property def _jrdd(self): """Lazy evaluation of PythonRDD object. Only done when a user calls methods defined by the L{pyspark.rdd.RDD} super class (map, filter, etc.). """ if not hasattr(self, '_lazy_jrdd'): self._lazy_jrdd = self._jschema_rdd.baseSchemaRDD().javaToPython() return self._lazy_jrdd def id(self): if self._id is None: self._id = self._jrdd.id() return self._id def limit(self, num): """Limit the result count to the number specified. >>> srdd = sqlCtx.inferSchema(rdd) >>> srdd.limit(2).collect() [Row(field1=1, field2=u'row1'), Row(field1=2, field2=u'row2')] >>> srdd.limit(0).collect() [] """ rdd = self._jschema_rdd.baseSchemaRDD().limit(num).toJavaSchemaRDD() return SchemaRDD(rdd, self.sql_ctx) def saveAsParquetFile(self, path): """Save the contents as a Parquet file, preserving the schema. 
Files that are written out using this method can be read back in as a SchemaRDD using the L{SQLContext.parquetFile} method. >>> import tempfile, shutil >>> parquetFile = tempfile.mkdtemp() >>> shutil.rmtree(parquetFile) >>> srdd = sqlCtx.inferSchema(rdd) >>> srdd.saveAsParquetFile(parquetFile) >>> srdd2 = sqlCtx.parquetFile(parquetFile) >>> sorted(srdd2.collect()) == sorted(srdd.collect()) True """ self._jschema_rdd.saveAsParquetFile(path) def registerTempTable(self, name): """Registers this RDD as a temporary table using the given name. The lifetime of this temporary table is tied to the L{SQLContext} that was used to create this SchemaRDD. >>> srdd = sqlCtx.inferSchema(rdd) >>> srdd.registerTempTable("test") >>> srdd2 = sqlCtx.sql("select * from test") >>> sorted(srdd.collect()) == sorted(srdd2.collect()) True """ self._jschema_rdd.registerTempTable(name) def registerAsTable(self, name): """DEPRECATED: use registerTempTable() instead""" warnings.warn("Use registerTempTable instead of registerAsTable.", DeprecationWarning) self.registerTempTable(name) def insertInto(self, tableName, overwrite=False): """Inserts the contents of this SchemaRDD into the specified table. Optionally overwriting any existing data. """ self._jschema_rdd.insertInto(tableName, overwrite) def saveAsTable(self, tableName): """Creates a new table with the contents of this SchemaRDD.""" self._jschema_rdd.saveAsTable(tableName) def schema(self): """Returns the schema of this SchemaRDD (represented by a L{StructType}).""" return _parse_datatype_string(self._jschema_rdd.baseSchemaRDD().schema().toString()) def schemaString(self): """Returns the output schema in the tree format.""" return self._jschema_rdd.schemaString() def printSchema(self): """Prints out the schema in the tree format.""" print self.schemaString() def count(self): """Return the number of elements in this RDD. 
Unlike the base RDD implementation of count, this implementation leverages the query optimizer to compute the count on the SchemaRDD, which supports features such as filter pushdown. >>> srdd = sqlCtx.inferSchema(rdd) >>> srdd.count() 3L >>> srdd.count() == srdd.map(lambda x: x).count() True """ return self._jschema_rdd.count() def collect(self): """Return a list that contains all of the rows in this RDD. Each object in the list is a Row, the fields can be accessed as attributes. Unlike the base RDD implementation of collect, this implementation leverages the query optimizer to perform a collect on the SchemaRDD, which supports features such as filter pushdown. >>> srdd = sqlCtx.inferSchema(rdd) >>> srdd.collect() [Row(field1=1, field2=u'row1'), ..., Row(field1=3, field2=u'row3')] """ with SCCallSiteSync(self.context) as css: bytesInJava = self._jschema_rdd.baseSchemaRDD().collectToPython().iterator() cls = _create_cls(self.schema()) return map(cls, self._collect_iterator_through_file(bytesInJava)) def take(self, num): """Take the first num rows of the RDD. Each object in the list is a Row, the fields can be accessed as attributes. Unlike the base RDD implementation of take, this implementation leverages the query optimizer to perform a collect on a SchemaRDD, which supports features such as filter pushdown. >>> srdd = sqlCtx.inferSchema(rdd) >>> srdd.take(2) [Row(field1=1, field2=u'row1'), Row(field1=2, field2=u'row2')] """ return self.limit(num).collect() # Convert each object in the RDD to a Row with the right class # for this SchemaRDD, so that fields can be accessed as attributes. def mapPartitionsWithIndex(self, f, preservesPartitioning=False): """ Return a new RDD by applying a function to each partition of this RDD, while tracking the index of the original partition. 
>>> rdd = sc.parallelize([1, 2, 3, 4], 4) >>> def f(splitIndex, iterator): yield splitIndex >>> rdd.mapPartitionsWithIndex(f).sum() 6 """ rdd = RDD(self._jrdd, self._sc, self._jrdd_deserializer) schema = self.schema() def applySchema(_, it): cls = _create_cls(schema) return itertools.imap(cls, it) objrdd = rdd.mapPartitionsWithIndex(applySchema, preservesPartitioning) return objrdd.mapPartitionsWithIndex(f, preservesPartitioning) # We override the default cache/persist/checkpoint behavior # as we want to cache the underlying SchemaRDD object in the JVM, # not the PythonRDD checkpointed by the super class def cache(self): self.is_cached = True self._jschema_rdd.cache() return self def persist(self, storageLevel=StorageLevel.MEMORY_ONLY_SER): self.is_cached = True javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel) self._jschema_rdd.persist(javaStorageLevel) return self def unpersist(self, blocking=True): self.is_cached = False self._jschema_rdd.unpersist(blocking) return self def checkpoint(self): self.is_checkpointed = True self._jschema_rdd.checkpoint() def isCheckpointed(self): return self._jschema_rdd.isCheckpointed() def getCheckpointFile(self): checkpointFile = self._jschema_rdd.getCheckpointFile() if checkpointFile.isPresent(): return checkpointFile.get() def coalesce(self, numPartitions, shuffle=False): rdd = self._jschema_rdd.coalesce(numPartitions, shuffle) return SchemaRDD(rdd, self.sql_ctx) def distinct(self, numPartitions=None): if numPartitions is None: rdd = self._jschema_rdd.distinct() else: rdd = self._jschema_rdd.distinct(numPartitions) return SchemaRDD(rdd, self.sql_ctx) def intersection(self, other): if (other.__class__ is SchemaRDD): rdd = self._jschema_rdd.intersection(other._jschema_rdd) return SchemaRDD(rdd, self.sql_ctx) else: raise ValueError("Can only intersect with another SchemaRDD") def repartition(self, numPartitions): rdd = self._jschema_rdd.repartition(numPartitions) return SchemaRDD(rdd, self.sql_ctx) def subtract(self, 
other, numPartitions=None): if (other.__class__ is SchemaRDD): if numPartitions is None: rdd = self._jschema_rdd.subtract(other._jschema_rdd) else: rdd = self._jschema_rdd.subtract(other._jschema_rdd, numPartitions) return SchemaRDD(rdd, self.sql_ctx) else: raise ValueError("Can only subtract another SchemaRDD") def _test(): import doctest from array import array from pyspark.context import SparkContext # let doctest run in pyspark.sql, so DataTypes can be picklable import pyspark.sql from pyspark.sql import Row, SQLContext globs = pyspark.sql.__dict__.copy() # The small batch size here ensures that we see multiple batches, # even in these small test examples: sc = SparkContext('local[4]', 'PythonTest', batchSize=2) globs['sc'] = sc globs['sqlCtx'] = SQLContext(sc) globs['rdd'] = sc.parallelize( [Row(field1=1, field2="row1"), Row(field1=2, field2="row2"), Row(field1=3, field2="row3")] ) jsonStrings = [ '{"field1": 1, "field2": "row1", "field3":{"field4":11}}', '{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},' '"field6":[{"field7": "row2"}]}', '{"field1" : null, "field2": "row3", ' '"field3":{"field4":33, "field5": []}}' ] globs['jsonStrings'] = jsonStrings globs['json'] = sc.parallelize(jsonStrings) (failure_count, test_count) = doctest.testmod( pyspark.sql, globs=globs, optionflags=doctest.ELLIPSIS) globs['sc'].stop() if failure_count: exit(-1) if __name__ == "__main__": _test()
getter
main.rs
mod game; use crate::game::Game; fn main()
{ morristown::print_intro("FLIPFLOP"); println!("THE OBJECT OF THIS PUZZLE IS TO CHANGE THIS:\n"); println!("X X X X X X X X X X\n"); println!("TO THIS:\n"); println!("O O O O O O O O O O\n"); println!("BY TYPING THE NUMBER CORRESPONDING TO THE POSITION OF THE"); println!("LETTER ON SOME NUMBERS, ONE POSITION WILL CHANGE, ON"); println!("OTHERS, TWO WILL CHANGE. TO RESET LINE TO ALL X'S, TYPE 0"); println!("(ZERO) AND TO START OVER IN THE MIDDLE OF A GAME, TYPE "); println!("11 (ELEVEN)."); let mut game = Game::new(); loop { if !game.play() { break; } } }
index.js
"use strict"; var __importStar = (this && this.__importStar) || function (mod) { if (mod && mod.__esModule) return mod; var result = {}; if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k]; result["default"] = mod; return result; }; Object.defineProperty(exports, "__esModule", { value: true }); const repl = __importStar(require("repl")); const building_1 = require("./building"); let building = new building_1.Building('Luna\'s Tower', [-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 1); exports.panel = repl.start({ prompt: ':-) ' }); exports.panelC = exports.panel.context; exports.panelC.buildings = [building]; exports.panelC.currentLocation = exports.panelC.buildings[0]; exports.panelC.currentCar = exports.panelC.currentLocation.elevatorBay[0]; exports.panel.defineCommand('whereami', { help: 'Tells you where you are', action() { console.log(exports.panelC.currentLocation.name); exports.panel.displayPrompt(); } }); exports.panel.defineCommand('whichfloor', { help: 'Tells you which floor you\'re on', action() { console.log('You\'re on floor ' + exports.panelC.currentCar.whichFloor() + '.'); exports.panel.displayPrompt(); } }); exports.panel.defineCommand('gotofloor', { help: 'Takes you to another floor in a building', action(floor) { let floorI = parseInt(floor, 10); if (exports.panelC.currentLocation.hasFloor(floorI)) { pressButton(floorI); } else { console.log('Sorry, we don\'t have that floor.'); exports.panel.displayPrompt(); } } }); function keepGoing(stats) { setTimeout(() => { exports.panelC.currentCar.arriveAtTarget(); console.log(stats.message); exports.panel.displayPrompt(); }, stats.travelTime); } function
(floor) { let carTalk; let difference; difference = floor - exports.panelC.currentCar.currentFloor; if (difference === 0) { carTalk = { message: 'You\'re already there. ;-)', travelTime: 0 }; } else { carTalk = exports.panelC.currentCar.goToFloor(floor); } keepGoing(carTalk); }
pressButton
message.rs
// Copyright 2019, The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use std::convert::{TryFrom, TryInto}; use chrono::{DateTime, Utc}; use prost::Message;
envelope::datetime_to_timestamp, proto::{ envelope::DhtHeader, store_forward::{StoredMessage, StoredMessagesRequest, StoredMessagesResponse}, }, store_forward::{database, StoreAndForwardError}, }; impl StoredMessagesRequest { pub fn new() -> Self { Self { since: None, request_id: OsRng.next_u32(), } } #[allow(unused)] pub fn since(since: DateTime<Utc>) -> Self { Self { since: Some(datetime_to_timestamp(since)), request_id: OsRng.next_u32(), } } } #[cfg(test)] impl StoredMessage { pub fn new( version: u32, dht_header: crate::envelope::DhtMessageHeader, body: Vec<u8>, stored_at: DateTime<Utc>, ) -> Self { Self { version, dht_header: Some(dht_header.into()), body, stored_at: Some(datetime_to_timestamp(stored_at)), } } } impl TryFrom<database::StoredMessage> for StoredMessage { type Error = StoreAndForwardError; fn try_from(message: database::StoredMessage) -> Result<Self, Self::Error> { let dht_header = DhtHeader::decode(message.header.as_slice())?; Ok(Self { stored_at: Some(datetime_to_timestamp(DateTime::from_utc(message.stored_at, Utc))), version: message .version .try_into() .map_err(|_| StoreAndForwardError::InvalidEnvelopeVersion)?, body: message.body, dht_header: Some(dht_header), }) } } impl StoredMessagesResponse { pub fn messages(&self) -> &Vec<StoredMessage> { &self.messages } } #[derive(Debug, Copy, Clone)] pub enum StoredMessagePriority { Low = 1, High = 10, }
use rand::{rngs::OsRng, RngCore}; use crate::{
modify_push_all_task.go
package sas //Licensed under the Apache License, Version 2.0 (the "License"); //you may not use this file except in compliance with the License. //You may obtain a copy of the License at // //http://www.apache.org/licenses/LICENSE-2.0 // //Unless required by applicable law or agreed to in writing, software //distributed under the License is distributed on an "AS IS" BASIS, //WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //See the License for the specific language governing permissions and //limitations under the License. // // Code generated by Alibaba Cloud SDK Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "github.com/CRORCR/alibaba-cloud-sdk-go/sdk/requests" "github.com/CRORCR/alibaba-cloud-sdk-go/sdk/responses" ) // ModifyPushAllTask invokes the sas.ModifyPushAllTask API synchronously // api document: https://help.aliyun.com/api/sas/modifypushalltask.html func (client *Client) ModifyPushAllTask(request *ModifyPushAllTaskRequest) (response *ModifyPushAllTaskResponse, err error) { response = CreateModifyPushAllTaskResponse() err = client.DoAction(request, response) return } // ModifyPushAllTaskWithChan invokes the sas.ModifyPushAllTask API asynchronously // api document: https://help.aliyun.com/api/sas/modifypushalltask.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) ModifyPushAllTaskWithChan(request *ModifyPushAllTaskRequest) (<-chan *ModifyPushAllTaskResponse, <-chan error) { responseChan := make(chan *ModifyPushAllTaskResponse, 1) errChan := make(chan error, 1) err := client.AddAsyncTask(func() { defer close(responseChan) defer close(errChan) response, err := client.ModifyPushAllTask(request) if err != nil { errChan <- err } else { responseChan <- response } }) if err != nil { errChan <- err close(responseChan) close(errChan) } return responseChan, errChan } // ModifyPushAllTaskWithCallback invokes the 
sas.ModifyPushAllTask API asynchronously // api document: https://help.aliyun.com/api/sas/modifypushalltask.html // asynchronous document: https://help.aliyun.com/document_detail/66220.html func (client *Client) ModifyPushAllTaskWithCallback(request *ModifyPushAllTaskRequest, callback func(response *ModifyPushAllTaskResponse, err error)) <-chan int { result := make(chan int, 1) err := client.AddAsyncTask(func() { var response *ModifyPushAllTaskResponse var err error defer close(result) response, err = client.ModifyPushAllTask(request) callback(response, err) result <- 1 }) if err != nil { defer close(result) callback(nil, err) result <- 0 } return result } // ModifyPushAllTaskRequest is the request struct for api ModifyPushAllTask type ModifyPushAllTaskRequest struct { *requests.RpcRequest SourceIp string `position:"Query" name:"SourceIp"` Tasks string `position:"Query" name:"Tasks"` Uuids string `position:"Query" name:"Uuids"` } // ModifyPushAllTaskResponse is the response struct for api ModifyPushAllTask type ModifyPushAllTaskResponse struct { *responses.BaseResponse RequestId string `json:"RequestId" xml:"RequestId"` PushTaskRsp PushTaskRsp `json:"PushTaskRsp" xml:"PushTaskRsp"` } // CreateModifyPushAllTaskRequest creates a request to invoke ModifyPushAllTask API func CreateModifyPushAllTaskRequest() (request *ModifyPushAllTaskRequest) { request = &ModifyPushAllTaskRequest{ RpcRequest: &requests.RpcRequest{}, } request.InitWithApiInfo("Sas", "2018-12-03", "ModifyPushAllTask", "sas", "openAPI") return } // CreateModifyPushAllTaskResponse creates a response to parse from ModifyPushAllTask response func CreateModifyPushAllTaskResponse() (response *ModifyPushAllTaskResponse)
{ response = &ModifyPushAllTaskResponse{ BaseResponse: &responses.BaseResponse{}, } return }
tasks.py
from time import sleep from urllib.request import urlopen, Request from bs4 import BeautifulSoup from celery.schedules import crontab from celery.task import periodic_task from crypto.models import Cryptocurrency # @shared_task @periodic_task( run_every=(crontab(minute="*/15")), name="create_cryptocurrency", # ignore_result=True ) def create_cryptocurrency(): print("Crawling data and creating objects in database ..") req = Request("https://coinranking.com", headers={"User-Agent": "Mozilla/5.0"}) html = urlopen(req).read() bs = BeautifulSoup(html, "html.parser") # Find first 5 table rows rows = bs.find("tbody", class_="table__body").find_all("tr", class_="table__row")[ 0:5 ] for row in rows: cryptocurrency = ( row.find("span", class_="profile__name") .get_text() .strip() .replace("\n", "") ) values = row.find_all("div", class_="valuta") price = values[0].get_text().strip().replace("\n", "") market_cap = values[1].get_text().strip().replace("\n", "") change = ( row.find("div", class_="change") .find("span") .get_text() .strip() .replace("\n", "") ) print( { "cryptocurrency": cryptocurrency, "price": price, "market_cap": market_cap, "change": change, } ) # Create object in database from crawled data Cryptocurrency.objects.create( cryptocurrency=cryptocurrency, price=price, market_cap=market_cap, change=change, ) # Sleep 3 seconds to avoid any errors sleep(3) # @shared_task
run_every=(crontab(minute="*/15")), name="update_cryptocurrency", ) def update_cryptocurrency(): print("Updating data ..") req = Request("https://coinranking.com", headers={"User-Agent": "Mozilla/5.0"}) html = urlopen(req).read() bs = BeautifulSoup(html, "html.parser") rows = bs.find("tbody", class_="table__body").find_all("tr", class_="table__row")[ 0:5 ] for row in rows: cryptocurrency = ( row.find("span", class_="profile__name") .get_text() .strip() .replace("\n", "") ) values = row.find_all("div", class_="valuta") price = values[0].get_text().strip().replace("\n", "") market_cap = values[1].get_text().strip().replace("\n", "") change = ( row.find("div", class_="change") .find("span") .get_text() .strip() .replace("\n", "") ) print( { "cryptocurrency": cryptocurrency, "price": price, "market_cap": market_cap, "change": change, } ) data = { "cryptocurrency": cryptocurrency, "price": price, "market_cap": market_cap, "change": change, } Cryptocurrency.objects.filter(cryptocurrency=cryptocurrency).update(**data) sleep(3)
@periodic_task(
karma.conf.js
// Karma configuration file, see link for more information // https://karma-runner.github.io/1.0/config/configuration-file.html module.exports = function (config) {
frameworks: ['jasmine', '@angular-devkit/build-angular'], plugins: [ require('karma-jasmine'), require('karma-chrome-launcher'), require('karma-jasmine-html-reporter'), require('karma-coverage-istanbul-reporter'), require('@angular-devkit/build-angular/plugins/karma') ], client: { clearContext: false // leave Jasmine Spec Runner output visible in browser }, coverageIstanbulReporter: { dir: require('path').join(__dirname, './coverage/lazer-fcamara'), reports: ['html', 'lcovonly', 'text-summary'], fixWebpackSourcePaths: true }, reporters: ['progress', 'kjhtml'], port: 9876, colors: true, logLevel: config.LOG_INFO, autoWatch: true, browsers: ['Chrome'], singleRun: false, restartOnFileChange: true }); };
config.set({ basePath: '',
logical_type_test.go
// Copyright [2019] LinkedIn Corp. Licensed under the Apache License, Version // 2.0 (the "License"); you may not use this file except in compliance with the // License. You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. package goavro import ( "fmt" "math/big" "testing" "time" ) func TestSchemaLogicalType(t *testing.T) { testSchemaValid(t, `{"type": "long", "logicalType": "timestamp-millis"}`) testSchemaInvalid(t, `{"type": "bytes", "logicalType": "decimal"}`, "precision") testSchemaInvalid(t, `{"type": "fixed", "size": 16, "logicalType": "decimal"}`, "precision") } func TestStringLogicalTypeFallback(t *testing.T) { schema := `{"type": "string", "logicalType": "this_logical_type_does_not_exist"}` testSchemaValid(t, schema) testBinaryCodecPass(t, schema, "test string", []byte("\x16\x74\x65\x73\x74\x20\x73\x74\x72\x69\x6e\x67")) } func TestLongLogicalTypeFallback(t *testing.T) { schema := `{"type": "long", "logicalType": "this_logical_type_does_not_exist"}` testSchemaValid(t, schema) testBinaryCodecPass(t, schema, 12345, []byte("\xf2\xc0\x01")) } func TestTimeStampMillisLogicalTypeEncode(t *testing.T) { schema := `{"type": "long", "logicalType": "timestamp-millis"}` testBinaryDecodeFail(t, schema, []byte(""), "short buffer") testBinaryEncodeFail(t, schema, "test", "cannot transform binary timestamp-millis, expected time.Time") testBinaryCodecPass(t, schema, time.Date(2006, 1, 2, 15, 04, 05, 565000000, time.UTC), []byte("\xfa\x82\xac\xba\x91\x42")) } func TestTimeStampMillisLogicalTypeUnionEncode(t *testing.T) { schema := `{"type": ["null", {"type": "long", "logicalType": "timestamp-millis"}]}` testBinaryEncodeFail(t, schema, "test", "cannot transform binary timestamp-millis, expected time.Time, received string") 
testBinaryCodecPass(t, schema, nil, []byte("\x00")) testBinaryCodecPass(t, schema, time.Date(2006, 1, 2, 15, 04, 05, 565000000, time.UTC), []byte("\x02\xfa\x82\xac\xba\x91\x42")) } func TestTimeStampMicrosLogicalTypeEncode(t *testing.T) { schema := `{"type": "long", "logicalType": "timestamp-micros"}` testBinaryDecodeFail(t, schema, []byte(""), "short buffer") testBinaryEncodeFail(t, schema, "test", "cannot transform binary timestamp-micros, expected time.Time") testBinaryCodecPass(t, schema, time.Date(2006, 1, 2, 15, 04, 05, 565283000, time.UTC), []byte("\xc6\x8d\xf7\xe7\xaf\xd8\x84\x04")) } func TestTimeStampMicrosLogicalTypeUnionEncode(t *testing.T) { schema := `{"type": ["null", {"type": "long", "logicalType": "timestamp-micros"}]}` testBinaryEncodeFail(t, schema, "test", "cannot transform binary timestamp-micros, expected time.Time, received string") testBinaryCodecPass(t, schema, time.Date(2006, 1, 2, 15, 04, 05, 565283000, time.UTC), []byte("\x02\xc6\x8d\xf7\xe7\xaf\xd8\x84\x04")) } func TestTimeMillisLogicalTypeEncode(t *testing.T) { schema := `{"type": "int", "logicalType": "time-millis"}` testBinaryDecodeFail(t, schema, []byte(""), "short buffer") testBinaryEncodeFail(t, schema, "test", "cannot transform to binary time-millis, expected time.Duration") testBinaryCodecPass(t, schema, 66904022*time.Millisecond, []byte("\xac\xff\xe6\x3f")) } func TestTimeMillisLogicalTypeUnionEncode(t *testing.T) { schema := `{"type": ["null", {"type": "int", "logicalType": "time-millis"}]}` testBinaryEncodeFail(t, schema, "test", "cannot transform to binary time-millis, expected time.Duration, received string") testBinaryCodecPass(t, schema, 66904022*time.Millisecond, []byte("\x02\xac\xff\xe6\x3f")) } func TestTimeMicrosLogicalTypeEncode(t *testing.T) { schema := `{"type": "long", "logicalType": "time-micros"}` testBinaryDecodeFail(t, schema, []byte(""), "short buffer") testBinaryEncodeFail(t, schema, "test", "cannot transform to binary time-micros, expected time.Duration") 
testBinaryCodecPass(t, schema, 66904022566*time.Microsecond, []byte("\xcc\xf8\xd2\xbc\xf2\x03")) } func
(t *testing.T) { schema := `{"type": ["null", {"type": "long", "logicalType": "time-micros"}]}` testBinaryEncodeFail(t, schema, "test", "cannot transform to binary time-micros, expected time.Duration, received string") testBinaryCodecPass(t, schema, 66904022566*time.Microsecond, []byte("\x02\xcc\xf8\xd2\xbc\xf2\x03")) } func TestDateLogicalTypeEncode(t *testing.T) { schema := `{"type": "int", "logicalType": "date"}` testBinaryDecodeFail(t, schema, []byte(""), "short buffer") testBinaryEncodeFail(t, schema, "test", "cannot transform to binary date, expected time.Time, received string") testBinaryCodecPass(t, schema, time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC), []byte("\xbc\xcd\x01")) } func TestDecimalBytesLogicalTypeEncode(t *testing.T) { schema := `{"type": "bytes", "logicalType": "decimal", "precision": 4, "scale": 2}` testBinaryCodecPass(t, schema, big.NewRat(617, 50), []byte("\x04\x04\xd2")) testBinaryCodecPass(t, schema, big.NewRat(-617, 50), []byte("\x04\xfb\x2e")) testBinaryCodecPass(t, schema, big.NewRat(0, 1), []byte("\x02\x00")) } func TestDecimalFixedLogicalTypeEncode(t *testing.T) { schema := `{"type": "fixed", "size": 12, "logicalType": "decimal", "precision": 4, "scale": 2}` testBinaryCodecPass(t, schema, big.NewRat(617, 50), []byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\xd2")) testBinaryCodecPass(t, schema, big.NewRat(-617, 50), []byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfb\x2e")) testBinaryCodecPass(t, schema, big.NewRat(25, 4), []byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x71")) testBinaryCodecPass(t, schema, big.NewRat(33, 100), []byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x21")) schema0scale := `{"type": "fixed", "size": 12, "logicalType": "decimal", "precision": 4, "scale": 0}` // Encodes to 12 due to scale: 0 testBinaryEncodePass(t, schema0scale, big.NewRat(617, 50), []byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c")) testBinaryDecodePass(t, schema0scale, big.NewRat(12, 1), 
[]byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0c")) } func TestDecimalBytesLogicalTypeInRecordEncode(t *testing.T) { schema := `{"type": "record", "name": "myrecord", "fields" : [ {"name": "mydecimal", "type": "bytes", "logicalType": "decimal", "precision": 4, "scale": 2}]}` testBinaryCodecPass(t, schema, map[string]interface{}{"mydecimal": big.NewRat(617, 50)}, []byte("\x04\x04\xd2")) } func ExampleUnion_logicalType() { // Supported logical types and their native go types: // * timestamp-millis - time.Time // * timestamp-micros - time.Time // * time-millis - time.Duration // * time-micros - time.Duration // * date - int // * decimal - big.Rat codec, err := NewCodec(`["null", {"type": "long", "logicalType": "timestamp-millis"}]`) if err != nil { fmt.Println(err) } // Note the usage of type.logicalType i.e. `long.timestamp-millis` to denote the type in a union. This is due to the single string naming format // used by goavro. Decimal can be both bytes.decimal or fixed.decimal bytes, err := codec.BinaryFromNative(nil, time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC)) if err != nil { fmt.Println(err) } decoded, _, err := codec.NativeFromBinary(bytes) if err != nil { fmt.Println(err) } fmt.Printf("%#v\n", decoded.(time.Time).String()) // Output: "2006-01-02 15:04:05 +0000 UTC" }
TestTimeMicrosLogicalTypeUnionEncode
test_battery.py
ADD_PATH() import unittest import psutil from battery import Battery class TestBattery(unittest.TestCase): """ Test battry module """ def test_Battery_constructor(self): if not (has_battery := psutil.sensors_battery()): with self.assertRaises(Exception): Battery() else: self.assertTrue(has_battery.percent > 0) def test_create_details_text(self): if not psutil.sensors_battery(): pass else: self.assertTrue(isinstance(Batter().create_details_text(), str)) if __name__ == '__main__': unittest.main()
# add path to the main package and test battery.py if __name__ == '__main__': from __access import ADD_PATH
log.py
from django import template from django.contrib.admin.models import LogEntry register = template.Library() class AdminLogNode(template.Node): def __init__(self, limit, varname, user): self.limit, self.varname, self.user = limit, varname, user def __repr__(self): return "<GetAdminLog Node>" def render(self, context): if self.user is None: context[self.varname] = LogEntry.objects.all().select_related('content_type', 'user')[:self.limit] else: user_id = self.user if not user_id.isdigit(): user_id = context[self.user].pk context[self.varname] = LogEntry.objects.filter(user__pk__exact=user_id).select_related('content_type', 'user')[:int(self.limit)] return '' @register.tag def get_admin_log(parser, token): """ Populates a template variable with the admin log for the given criteria. Usage:: {% get_admin_log [limit] as [varname] for_user [context_var_containing_user_obj] %} Examples:: {% get_admin_log 10 as admin_log for_user 23 %} {% get_admin_log 10 as admin_log for_user user %} {% get_admin_log 10 as admin_log %} Note that ``context_var_containing_user_obj`` can be a hard-coded integer (user ID) or the name of a template context variable containing the user object whose ID you want. """ tokens = token.contents.split() if len(tokens) < 4: raise template.TemplateSyntaxError( "'get_admin_log' statements require two arguments") if not tokens[1].isdigit(): raise template.TemplateSyntaxError( "First argument to 'get_admin_log' must be an integer") if tokens[2] != 'as':
if len(tokens) > 4: if tokens[4] != 'for_user': raise template.TemplateSyntaxError( "Fourth argument to 'get_admin_log' must be 'for_user'") return AdminLogNode(limit=tokens[1], varname=tokens[3], user=(len(tokens) > 5 and tokens[5] or None))
raise template.TemplateSyntaxError( "Second argument to 'get_admin_log' must be 'as'")
listWmflabsdotorgRecordsets.py
import yaml from keystoneclient.session import Session as KeystoneSession from keystoneclient.auth.identity.v3 import Password as KeystonePassword from keystoneclient.v3 import Client as KeystoneClient from designateclient.v2 import client as designateclient def get_keystone_session(project): return KeystoneSession(auth=KeystonePassword( auth_url="http://cloudcontrol1003.wikimedia.org:5000/v3", username="novaobserver", password=open('novaobserver_password').read(), project_name=project, user_domain_name='default',
client = designateclient.Client(session=get_keystone_session('wmflabsdotorg')) zone = client.zones.get('wmflabs.org.') for recordset in client.recordsets.list(zone['id']): if recordset['type'] != 'A' or recordset['records'] != ['185.15.56.49']: print('|' + recordset['name'] + '|' + recordset['type'] + '|' + repr(recordset['records']) + '|' + repr(recordset['description']) + '|')
project_domain_name='default' ))
c.go
package main // github.com/EndlessCheng/codeforces-go func
(a [][]int) []int { n, m := len(a), len(a[0]) ds := make([][]int, n+1) // 主对角线前缀和 as := make([][]int, n+1) // 反对角线前缀和 for i := range ds { ds[i] = make([]int, m+1) as[i] = make([]int, m+1) } for i, r := range a { for j, v := range r { ds[i+1][j+1] = ds[i][j] + v // ↘ as[i+1][j] = as[i][j+1] + v // ↙ } } // 从 x,y 开始,向 ↘,连续的 k 个数的和 queryDiagonal := func(x, y, k int) int { return ds[x+k][y+k] - ds[x][y] } // 从 x,y 开始,向 ↙,连续的 k 个数的和 queryAntiDiagonal := func(x, y, k int) int { return as[x+k][y+1-k] - as[x][y+1] } var x, y, z int // 最大,次大,第三大 update := func(v int) { if v > x { x, y, z = v, x, y } else if v < x && v > y { y, z = v, y } else if v < y && v > z { z = v } } for i, r := range a { for j, v := range r { update(v) for k := 1; k <= i && i+k < n && k <= j && j+k < m; k++ { a := queryDiagonal(i-k, j, k) // 菱形右上 b := queryAntiDiagonal(i-k+1, j-1, k-1) // 菱形左上 c := queryDiagonal(i, j-k, k) // 菱形左下 d := queryAntiDiagonal(i, j+k, k+1) // 菱形右下 update(a + b + c + d) } } } ans := []int{x, y, z} for ans[len(ans)-1] == 0 { ans = ans[:len(ans)-1] } return ans }
getBiggestThree
artifact.go
// Copyright 2021 Red Hat, Inc. and/or its affiliates
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v1beta1 // Artifact contains override information for building the Maven artifact. // + optional // +operator-sdk:csv:customresourcedefinitions:displayName="Final Artifact" type Artifact struct { //Indicates the unique identifier of the organization or group that created the project. // + optional GroupID string `json:"groupId,omitempty"` //Indicates the unique base name of the primary artifact being generated. // + optional ArtifactID string `json:"artifactId,omitempty"` //Indicates the version of the artifact generated by the project. // + optional Version string `json:"version,omitempty"` } // GetGroupID ... func (a *Artifact) GetGroupID() string { return a.GroupID } // SetGroupID ... func (a *Artifact) SetGroupID(groupID string) { a.GroupID = groupID } // GetArtifactID ... func (a *Artifact) GetArtifactID() string { return a.ArtifactID } // SetArtifactID ... func (a *Artifact) SetArtifactID(artifactID string) { a.ArtifactID = artifactID } // GetVersion ... func (a *Artifact) GetVersion() string { return a.Version } // SetVersion ... func (a *Artifact) SetVersion(version string) { a.Version = version }
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
carstate.py
from cereal import car from common.conversions import Conversions as CV from opendbc.can.parser import CANParser from opendbc.can.can_define import CANDefine from selfdrive.car.interfaces import CarStateBase from selfdrive.car.chrysler.values import DBC, STEER_THRESHOLD class CarState(CarStateBase): def __init__(self, CP): super().__init__(CP) can_define = CANDefine(DBC[CP.carFingerprint]["pt"]) self.shifter_values = can_define.dv["GEAR"]["PRNDL"] def update(self, cp, cp_cam): ret = car.CarState.new_message() self.frame = int(cp.vl["EPS_STATUS"]["COUNTER"]) ret.doorOpen = any([cp.vl["BCM_1"]["DOOR_OPEN_FL"], cp.vl["BCM_1"]["DOOR_OPEN_FR"], cp.vl["BCM_1"]["DOOR_OPEN_RL"], cp.vl["BCM_1"]["DOOR_OPEN_RR"]]) ret.seatbeltUnlatched = cp.vl["SEATBELT_STATUS"]["SEATBELT_DRIVER_UNLATCHED"] == 1 # brake pedal ret.brake = 0 ret.brakePressed = cp.vl["ESP_1"]['Brake_Pedal_State'] == 1 # Physical brake pedal switch # gas pedal ret.gas = cp.vl["ECM_5"]["Accelerator_Position"] ret.gasPressed = ret.gas > 1e-5 ret.espDisabled = (cp.vl["TRACTION_BUTTON"]["TRACTION_OFF"] == 1) ret.wheelSpeeds = self.get_wheel_speeds( cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_FL"], cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_FR"], cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_RL"], cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_RR"], unit=1, ) ret.vEgoRaw = (cp.vl["SPEED_1"]["SPEED_LEFT"] + cp.vl["SPEED_1"]["SPEED_RIGHT"]) / 2. 
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw) ret.standstill = not ret.vEgoRaw > 0.001 ret.leftBlinker = cp.vl["STEERING_LEVERS"]["TURN_SIGNALS"] == 1 ret.rightBlinker = cp.vl["STEERING_LEVERS"]["TURN_SIGNALS"] == 2 ret.steeringAngleDeg = cp.vl["STEERING"]["STEER_ANGLE"] ret.steeringRateDeg = cp.vl["STEERING"]["STEERING_RATE"] ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(cp.vl["GEAR"]["PRNDL"], None)) ret.cruiseState.available = cp.vl["DAS_3"]["ACC_AVAILABLE"] == 1 # ACC is white ret.cruiseState.enabled = cp.vl["DAS_3"]["ACC_ACTIVE"] == 1 # ACC is green ret.cruiseState.speed = cp.vl["DASHBOARD"]["ACC_SPEED_CONFIG_KPH"] * CV.KPH_TO_MS # CRUISE_STATE is a three bit msg, 0 is off, 1 and 2 are Non-ACC mode, 3 and 4 are ACC mode, find if there are other states too ret.cruiseState.nonAdaptive = cp.vl["DASHBOARD"]["CRUISE_STATE"] in (1, 2) ret.accFaulted = cp.vl["DAS_3"]["ACC_FAULTED"] != 0 ret.steeringTorque = cp.vl["EPS_STATUS"]["TORQUE_DRIVER"] ret.steeringTorqueEps = cp.vl["EPS_STATUS"]["TORQUE_MOTOR"] ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD steer_state = cp.vl["EPS_STATUS"]["LKAS_STATE"] ret.steerFaultPermanent = steer_state == 4 or (steer_state == 0 and ret.vEgo > self.CP.minSteerSpeed) ret.genericToggle = bool(cp.vl["STEERING_LEVERS"]["HIGH_BEAM_FLASH"]) if self.CP.enableBsm: ret.leftBlindspot = cp.vl["BLIND_SPOT_WARNINGS"]["BLIND_SPOT_LEFT"] == 1 ret.rightBlindspot = cp.vl["BLIND_SPOT_WARNINGS"]["BLIND_SPOT_RIGHT"] == 1 self.lkas_counter = cp_cam.vl["LKAS_COMMAND"]["COUNTER"] self.lkas_car_model = cp_cam.vl["LKAS_HUD"]["CAR_MODEL"] self.lkas_status_ok = cp_cam.vl["LKAS_HEARTBIT"]["LKAS_STATUS_OK"] self.button_counter = cp.vl["WHEEL_BUTTONS"]["COUNTER"] return ret @staticmethod def get_can_parser(CP): signals = [ # sig_name, sig_address ("PRNDL", "GEAR"), ("DOOR_OPEN_FL", "BCM_1"), ("DOOR_OPEN_FR", "BCM_1"), ("DOOR_OPEN_RL", "BCM_1"), ("DOOR_OPEN_RR", "BCM_1"), ("Brake_Pedal_State", "ESP_1"), 
("Accelerator_Position", "ECM_5"), ("SPEED_LEFT", "SPEED_1"), ("SPEED_RIGHT", "SPEED_1"), ("WHEEL_SPEED_FL", "WHEEL_SPEEDS"), ("WHEEL_SPEED_RR", "WHEEL_SPEEDS"), ("WHEEL_SPEED_RL", "WHEEL_SPEEDS"), ("WHEEL_SPEED_FR", "WHEEL_SPEEDS"), ("STEER_ANGLE", "STEERING"), ("STEERING_RATE", "STEERING"), ("TURN_SIGNALS", "STEERING_LEVERS"), ("ACC_AVAILABLE", "DAS_3"), ("ACC_ACTIVE", "DAS_3"), ("ACC_FAULTED", "DAS_3"), ("HIGH_BEAM_FLASH", "STEERING_LEVERS"), ("ACC_SPEED_CONFIG_KPH", "DASHBOARD"), ("CRUISE_STATE", "DASHBOARD"), ("TORQUE_DRIVER", "EPS_STATUS"), ("TORQUE_MOTOR", "EPS_STATUS"), ("LKAS_STATE", "EPS_STATUS"), ("COUNTER", "EPS_STATUS",), ("TRACTION_OFF", "TRACTION_BUTTON"), ("SEATBELT_DRIVER_UNLATCHED", "SEATBELT_STATUS"), ("COUNTER", "WHEEL_BUTTONS"), ] checks = [ # sig_address, frequency ("ESP_1", 50), ("EPS_STATUS", 100), ("SPEED_1", 100), ("WHEEL_SPEEDS", 50), ("STEERING", 100), ("DAS_3", 50), ("GEAR", 50), ("ECM_5", 50), ("WHEEL_BUTTONS", 50), ("DASHBOARD", 15), ("STEERING_LEVERS", 10), ("SEATBELT_STATUS", 2), ("BCM_1", 1), ("TRACTION_BUTTON", 1), ] if CP.enableBsm: signals += [ ("BLIND_SPOT_RIGHT", "BLIND_SPOT_WARNINGS"), ("BLIND_SPOT_LEFT", "BLIND_SPOT_WARNINGS"), ] checks.append(("BLIND_SPOT_WARNINGS", 2)) return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 0) @staticmethod def get_cam_can_parser(CP):
signals = [ # sig_name, sig_address ("COUNTER", "LKAS_COMMAND"), ("CAR_MODEL", "LKAS_HUD"), ("LKAS_STATUS_OK", "LKAS_HEARTBIT") ] checks = [ ("LKAS_COMMAND", 100), ("LKAS_HEARTBIT", 10), ("LKAS_HUD", 4), ] return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 2)
code.py
from numpy import pi
def do_something(): π = pi print(π)# this will make it much easier in future problems to see that something is actually happening
trace_listener.rs
use std::rc::Weak; use parser::BaseParser; pub struct TraceListener { parser: Box<BaseParser>, } impl TraceListener { fn
(parser: Box<BaseParser>) -> * TraceListener { unimplemented!() } fn visit_error_node(&self, _: ErrorNode) { unimplemented!() } fn enter_every_rule(&self, ctx: ParserRuleContext) { unimplemented!() } fn visit_terminal(&self, node: TerminalNode) { unimplemented!() } fn exit_every_rule(&self, ctx: ParserRuleContext) { unimplemented!() } }
new_trace_listener
crypto.go
package calendar import ( "crypto/rand" ) // randomBytes returns securely-generated random bytes. It will return an error // if the system's secure random number generator fails to function correctly,
b := make([]byte, n) _, err := rand.Read(b) if err != nil { return nil, err } return b, nil } // randomStringURLSafe returns a securely-generated URL-safe random string. // It will return an error if the system's secure random number generator // fails to function correctly, in which case the caller should not continue. func randomStringURLSafe(n int) (string, error) { bytes, err := randomBytes(n) if err != nil { return "", err } // noinspection ALL const symbols = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-" for i, b := range bytes { bytes[i] = symbols[b%byte(len(symbols))] } return string(bytes), nil }
// in which case the caller should not continue. func randomBytes(n int) ([]byte, error) {
DesktopAccessDisabledRounded.js
"use strict"; var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault"); Object.defineProperty(exports, "__esModule", { value: true }); exports.default = void 0; var _createSvgIcon = _interopRequireDefault(require("./utils/createSvgIcon")); var _jsxRuntime = require("react/jsx-runtime");
var _default = (0, _createSvgIcon.default)( /*#__PURE__*/(0, _jsxRuntime.jsx)("path", { d: "M.31 2c-.39.39-.39 1.02 0 1.41l.69.68V16c0 1.1.9 2 2 2h7v2H9c-.55 0-1 .45-1 1s.45 1 1 1h6c.55 0 1-.45 1-1s-.45-1-1-1h-1v-2h.9l5.29 5.29c.39.39 1.02.39 1.41 0 .39-.39.39-1.02 0-1.41L1.72 2A.9959.9959 0 0 0 .31 2zm2.68 13V6.09L12.9 16H3.99c-.55 0-1-.45-1-1zM4.55 2l2 2H20c.55 0 1 .45 1 1v10c0 .55-.45 1-1 1h-1.45l2 2h.44c1.1 0 2-.9 2-2V4c0-1.1-.9-2-2-2H4.55z" }), 'DesktopAccessDisabledRounded'); exports.default = _default;
5416.js
/** * @param {string} sentence * @param {string} searchWord * @return {number} */ var isPrefixOfWord = function(sentence, searchWord) { const reg = new RegExp(`^${searchWord}`) const arr = sentence.split(' ') for (let i = 0; i < arr.length; ++i) { if (reg.test(arr[i])) { return i
} } return -1 };
lib.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ fat_type::{FatStructType, FatType}, resolver::Resolver, }; use anyhow::{anyhow, bail, Result}; use diem_state_view::StateView; use diem_types::{ access_path::AccessPath, account_address::AccountAddress, account_state::AccountState, contract_event::ContractEvent, }; use move_binary_format::{ errors::{Location, PartialVMError, PartialVMResult, VMResult}, file_format::{Ability, AbilitySet}, }; use move_core_types::{ identifier::Identifier, language_storage::{ModuleId, StructTag, TypeTag}, value::{MoveStruct, MoveValue}, }; use move_vm_runtime::data_cache::MoveStorage; use std::{ collections::btree_map::BTreeMap, convert::TryInto, fmt::{Display, Formatter}, }; mod fat_type; mod module_cache; mod resolver; #[derive(Debug)] pub struct AnnotatedAccountStateBlob(BTreeMap<StructTag, AnnotatedMoveStruct>); #[derive(Clone, Debug)] pub struct AnnotatedMoveStruct { pub abilities: AbilitySet, pub type_: StructTag, pub value: Vec<(Identifier, AnnotatedMoveValue)>, } /// AnnotatedMoveValue is a fully expanded version of on chain Move data. This should only be used /// for debugging/client purpose right now and just for a better visualization of on chain data. In /// the long run, we would like to transform this struct to a Json value so that we can have a cross /// platform interpretation of the on chain data. 
#[derive(Clone, Debug)] pub enum AnnotatedMoveValue { U8(u8), U64(u64), U128(u128), Bool(bool), Address(AccountAddress), Vector(TypeTag, Vec<AnnotatedMoveValue>), Bytes(Vec<u8>), Struct(AnnotatedMoveStruct), } impl AnnotatedMoveValue { pub fn get_type(&self) -> TypeTag { use AnnotatedMoveValue::*; match self { U8(_) => TypeTag::U8, U64(_) => TypeTag::U64, U128(_) => TypeTag::U128, Bool(_) => TypeTag::Bool, Address(_) => TypeTag::Address, Vector(t, _) => t.clone(), Bytes(_) => TypeTag::Vector(Box::new(TypeTag::U8)), Struct(s) => TypeTag::Struct(s.type_.clone()), } } } pub struct MoveValueAnnotator<'a> { cache: Resolver<'a>, _data_view: &'a dyn MoveStorage, } impl<'a> MoveValueAnnotator<'a> { pub fn new(view: &'a dyn MoveStorage) -> Self { Self { cache: Resolver::new(view, true), _data_view: view, } } pub fn new_no_stdlib(view: &'a dyn MoveStorage) -> Self { Self { cache: Resolver::new(view, false), _data_view: view, } } pub fn get_resource_bytes(&self, addr: &AccountAddress, tag: &StructTag) -> Option<Vec<u8>> { self.cache .state .get_resource(addr, tag) .map_err(|e: PartialVMError| e.finish(Location::Undefined).into_vm_status()) .ok()? 
} pub fn view_access_path( &self, access_path: AccessPath, blob: &[u8], ) -> Result<AnnotatedMoveStruct> { match access_path.get_struct_tag() { Some(tag) => self.view_resource(&tag, blob), None => bail!("Bad resource access path"), } } pub fn view_resource(&self, tag: &StructTag, blob: &[u8]) -> Result<AnnotatedMoveStruct> { let ty = self.cache.resolve_struct(tag)?; let struct_def = (&ty) .try_into() .map_err(|e: PartialVMError| e.finish(Location::Undefined).into_vm_status())?; let move_struct = MoveStruct::simple_deserialize(blob, &struct_def)?; self.annotate_struct(&move_struct, &ty) } pub fn view_contract_event(&self, event: &ContractEvent) -> Result<AnnotatedMoveValue> { let ty = self.cache.resolve_type(event.type_tag())?; let move_ty = (&ty) .try_into() .map_err(|e: PartialVMError| e.finish(Location::Undefined).into_vm_status())?; let move_value = MoveValue::simple_deserialize(event.event_data(), &move_ty)?; self.annotate_value(&move_value, &ty) } pub fn view_account_state(&self, state: &AccountState) -> Result<AnnotatedAccountStateBlob> { let mut output = BTreeMap::new(); for (k, v) in state.iter() { let tag = match AccessPath::new(AccountAddress::random(), k.to_vec()).get_struct_tag() { Some(t) => t, None => { println!("Uncached AccessPath: {:?}", k); continue; } }; let ty = self.cache.resolve_struct(&tag)?; let struct_def = (&ty) .try_into() .map_err(|e: PartialVMError| e.finish(Location::Undefined).into_vm_status())?; let move_struct = MoveStruct::simple_deserialize(v.as_slice(), &struct_def)?; output.insert( ty.struct_tag() .map_err(|e| e.finish(Location::Undefined).into_vm_status()) .unwrap(), self.annotate_struct(&move_struct, &ty)?, ); } Ok(AnnotatedAccountStateBlob(output)) } fn annotate_struct( &self, move_struct: &MoveStruct, ty: &FatStructType, ) -> Result<AnnotatedMoveStruct> { let struct_tag = ty .struct_tag() .map_err(|e| e.finish(Location::Undefined).into_vm_status())?; let field_names = self.cache.get_field_names(ty)?; let mut annotated_fields 
= vec![]; for (ty, v) in ty.layout.iter().zip(move_struct.fields().iter()) { annotated_fields.push(self.annotate_value(v, ty)?); } Ok(AnnotatedMoveStruct { abilities: ty.abilities.0, type_: struct_tag, value: field_names .into_iter() .zip(annotated_fields.into_iter()) .collect(), }) } fn annotate_value(&self, value: &MoveValue, ty: &FatType) -> Result<AnnotatedMoveValue> { Ok(match (value, ty) { (MoveValue::Bool(b), FatType::Bool) => AnnotatedMoveValue::Bool(*b), (MoveValue::U8(i), FatType::U8) => AnnotatedMoveValue::U8(*i), (MoveValue::U64(i), FatType::U64) => AnnotatedMoveValue::U64(*i), (MoveValue::U128(i), FatType::U128) => AnnotatedMoveValue::U128(*i), (MoveValue::Address(a), FatType::Address) => AnnotatedMoveValue::Address(*a), (MoveValue::Vector(a), FatType::Vector(ty)) => match ty.as_ref() { FatType::U8 => AnnotatedMoveValue::Bytes( a.iter() .map(|v| match v { MoveValue::U8(i) => Ok(*i), _ => Err(anyhow!("unexpected value type")), }) .collect::<Result<_>>()?, ), _ => AnnotatedMoveValue::Vector( ty.type_tag().unwrap(), a.iter() .map(|v| self.annotate_value(v, ty.as_ref())) .collect::<Result<_>>()?, ), }, (MoveValue::Struct(s), FatType::Struct(ty)) => { AnnotatedMoveValue::Struct(self.annotate_struct(s, ty.as_ref())?) 
} _ => { return Err(anyhow!( "Cannot annotate value {:?} with type {:?}", value, ty )) } }) } } fn write_indent(f: &mut Formatter, indent: u64) -> std::fmt::Result { for _i in 0..indent { write!(f, " ")?; } Ok(()) } fn pretty_print_value( f: &mut Formatter, value: &AnnotatedMoveValue, indent: u64, ) -> std::fmt::Result { match value { AnnotatedMoveValue::Bool(b) => write!(f, "{}", b), AnnotatedMoveValue::U8(v) => write!(f, "{}u8", v), AnnotatedMoveValue::U64(v) => write!(f, "{}", v), AnnotatedMoveValue::U128(v) => write!(f, "{}u128", v), AnnotatedMoveValue::Address(a) => write!(f, "{}", a.short_str_lossless()), AnnotatedMoveValue::Vector(_, v) => { writeln!(f, "[")?; for value in v.iter() { write_indent(f, indent + 4)?; pretty_print_value(f, value, indent + 4)?; writeln!(f, ",")?; } write_indent(f, indent)?; write!(f, "]") } AnnotatedMoveValue::Bytes(v) => write!(f, "{}", hex::encode(&v)), AnnotatedMoveValue::Struct(s) => pretty_print_struct(f, s, indent), } } fn pretty_print_struct( f: &mut Formatter, value: &AnnotatedMoveStruct, indent: u64, ) -> std::fmt::Result {
write!(f, "{}: ", field_name)?; pretty_print_value(f, v, indent + 4)?; writeln!(f)?; } write_indent(f, indent)?; write!(f, "}}") } fn pretty_print_ability_modifiers(f: &mut Formatter, abilities: AbilitySet) -> std::fmt::Result { for ability in abilities { match ability { Ability::Copy => write!(f, "copy ")?, Ability::Drop => write!(f, "drop ")?, Ability::Store => write!(f, "store ")?, Ability::Key => write!(f, "key ")?, } } Ok(()) } impl Display for AnnotatedMoveValue { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { pretty_print_value(f, self, 0) } } impl Display for AnnotatedMoveStruct { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { pretty_print_struct(f, self, 0) } } impl Display for AnnotatedAccountStateBlob { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { writeln!(f, "{{")?; for v in self.0.values() { write!(f, "{}", v)?; writeln!(f, ",")?; } writeln!(f, "}}") } } #[derive(Default)] pub struct NullStateView(); impl StateView for NullStateView { fn get(&self, _access_path: &AccessPath) -> Result<Option<Vec<u8>>> { Err(anyhow!("No data")) } fn is_genesis(&self) -> bool { false } } impl MoveStorage for NullStateView { fn get_module(&self, _module_id: &ModuleId) -> VMResult<Option<Vec<u8>>> { Ok(None) } fn get_resource( &self, _address: &AccountAddress, _tag: &StructTag, ) -> PartialVMResult<Option<Vec<u8>>> { Ok(None) } }
pretty_print_ability_modifiers(f, value.abilities)?; writeln!(f, "{} {{", value.type_)?; for (field_name, v) in value.value.iter() { write_indent(f, indent + 4)?;
basic_class.py
class Presenter:
    """Minimal demo class: stores a display name and greets with it."""

    def __init__(self, name):
        # Constructor: store the presenter's display name.
        self.name = name

    def say_hello(self):
        # Method: greet using the current (possibly reassigned) name.
        print('Hello, ' + self.name)


# Demo: attributes are public, so `name` can be reassigned after
# construction and say_hello() picks up the new value.
presenter = Presenter('Chris')
presenter.name = 'Christopher'
presenter.say_hello()
create_pdp_context_request.rs
use super::{ MessageTraits, MessageType, }; use super::information_elements::{InformationElementTraits, InformationElement}; pub struct Message { /* --------------------------------------------|---------------------------|------------------------- Information Element | Presence requirement | Reference --------------------------------------------|---------------------------|------------------------- IMSI | Conditional | 7.7.2 Routeing Area Identity (RAI) | Optional | 7.7.3 Recovery | Optional | 7.7.11 Selection mode | Conditional | 7.7.12 Tunnel Endpoint Identifier Data I | Mandatory | 7.7.13 Tunnel Endpoint Identifier Control Plane | Conditional | 7.7.14 NSAPI | Mandatory | 7.7.17 Linked NSAPI | Conditional | 7.7.17 Charging Characteristics | Conditional | 7.7.23 Trace Reference | Optional | 7.7.24 Trace Type | Optional | 7.7.25 End User Address | Conditional | 7.7.27 Access Point Name | Conditional | 7.7.30 Protocol Configuration Options | Optional | 7.7.31 SGSN Address for signalling | Mandatory | GSN Address 7.7.32 SGSN Address for user traffic | Mandatory | GSN Address 7.7.32 MSISDN | Conditional | 7.7.33 Quality of Service Profile | Mandatory | 7.7.34 TFT | Conditional | 7.7.36 Trigger Id | Optional | 7.7.41 OMC Identity | Optional | 7.7.42 Common Flags | Optional | 7.7.48 APN Restriction | Optional | 7.7.49 RAT Type | Optional | 7.7.50 User Location Information | Optional | 7.7.51 MS Time Zone | Optional | 7.7.52 IMEI(SV) | Conditional | 7.7.53 CAMEL Charging Information Container | Optional | 7.7.54 Additional Trace Info | Optional | 7.7.62 Correlation-ID | Optional | 7.7.82 Evolved Allocation/Retention Priority I | Optional | 7.7.91 Extended Common Flags | Optional | 7.7.93 User CSG Information | Optional | 7.7.94 APN-AMBR | Optional | 7.7.98 Signalling Priority Indication | Optional | 7.7.103 CN Operator Selection Entity | Optional | 7.7.116 Mapped UE Usage Type | Optional | 7.7.123 UP Function Selection Indication Flags | Optional | 7.7.124 Private 
Extension | Optional | 7.7.46 --------------------------------------------|---------------------------|------------------------- */ pub information_elements: Vec<InformationElement> } impl Message { pub fn new() -> Self { Message { information_elements: Vec::new() } } pub fn parse(_buffer: &[u8]) -> Option<(Self, usize)> { None } } impl MessageTraits for Message { fn push_ie(&mut self, ie: InformationElement) { // TODO: Check here that the ie we are adding is allowed for this message self.information_elements.push(ie); } fn pop_ie(&mut self) -> Option<InformationElement> { self.information_elements.pop() } fn message_type(&self) -> MessageType { MessageType::CreatePDPContextRequest } fn length(&self) -> u16 { let mut length = 0; for ie in self.information_elements.iter() { length = length + ie.length(); } length } fn generate(&self, buffer: &mut[u8]) -> usize { // NOTE: The list should be sorted by IE Type. We assume here they have been added in the correct order let mut pos = 0; for ie in self.information_elements.iter() { let ie_size = ie.generate(&mut buffer[pos..]); pos = pos + ie_size; } pos } } #[cfg(test)] mod tests { use super::*; use crate::MTU; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use crate::gtp_v1::packet::messages::{ MessageTraits }; use crate::gtp_v1::packet::messages::information_elements; use crate::gtp_v1::packet::messages::information_elements::InformationElementType; #[test] fn test_generate() { let mut buffer = [0; MTU]; let mut m = Message::new(); m.information_elements.push( InformationElement::TeidDataI(information_elements::teid_data_i::InformationElement::new(0x12345678)) ); let nsapi = information_elements::nsapi::InformationElement::new(0xF); if let Ok(nsapi) = nsapi
m.information_elements.push( InformationElement::GsnAddress( information_elements::gsn_address::InformationElement::new( IpAddr::V4( Ipv4Addr::new(192,168,0,1) ) ) ) ); m.information_elements.push( InformationElement::GsnAddress( information_elements::gsn_address::InformationElement::new( IpAddr::V6( Ipv6Addr::new(0xFADE, 0xDEAD, 0xBEEF, 0xCAFE, 0xFEED, 0xDEAF, 0xBEAD, 0xFACE) ) ) ) ); m.information_elements.push( InformationElement::QoSProfile( information_elements::qos_profile::InformationElement::new( 8, information_elements::qos_profile::DelayClass::BestEffort, information_elements::qos_profile::ReliabilityClass::UnAckGTPUnAckLLCUnAckRLCUnProtectedData, information_elements::qos_profile::PeakThroughput::UpTo1000OctetsPerSecond, information_elements::qos_profile::PrecedenceClass::NormalPriority, information_elements::qos_profile::MeanThroughput::BestEffort, ) ) ); let pos = m.generate(&mut buffer); let expected = [ InformationElementType::TeidDataI as u8, 0x12, 0x34, 0x56, 0x78, InformationElementType::Nsapi as u8, 0xF, InformationElementType::GsnAddress as u8, 0, 4, 192, 168, 0, 1, InformationElementType::GsnAddress as u8, 0, 16, 0xFA, 0xDE, 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xFE, 0xED, 0xDE, 0xAF, 0xBE, 0xAD, 0xFA, 0xCE, InformationElementType::QoSProfile as u8, 0, 4, 8, 0b0010_0101, 0b0001_0010, 0x1F, ]; for i in 0..pos { if buffer[i] != expected[i] { println!("{} (actual) != {} (expected) at byte {}", buffer[i], expected[i], i); assert!(false); } } } #[test] fn test_length() { let mut m = Message::new(); m.information_elements.push( InformationElement::TeidDataI( information_elements::teid_data_i::InformationElement::new(0x12345678) ) ); let nsapi = information_elements::nsapi::InformationElement::new(0xF); if let Ok(nsapi) = nsapi { m.information_elements.push( InformationElement::Nsapi(nsapi) ); } m.information_elements.push( InformationElement::GsnAddress( information_elements::gsn_address::InformationElement::new( IpAddr::V4( Ipv4Addr::new(192,168,0,1) 
) ) ) ); m.information_elements.push( InformationElement::GsnAddress( information_elements::gsn_address::InformationElement::new( IpAddr::V6( Ipv6Addr::new(0xFADE, 0xDEAD, 0xBEEF, 0xCAFE, 0xFEED, 0xDEAF, 0xBEAD, 0xFACE) ) ) ) ); m.information_elements.push( InformationElement::QoSProfile( information_elements::qos_profile::InformationElement::new( 8, information_elements::qos_profile::DelayClass::BestEffort, information_elements::qos_profile::ReliabilityClass::UnAckGTPUnAckLLCUnAckRLCUnProtectedData, information_elements::qos_profile::PeakThroughput::UpTo1000OctetsPerSecond, information_elements::qos_profile::PrecedenceClass::NormalPriority, information_elements::qos_profile::MeanThroughput::BestEffort, ) ) ); assert_eq!(m.length(), 40); } #[test] fn test_message_type() { let m = Message::new(); assert_eq!(m.message_type() as u8, MessageType::CreatePDPContextRequest as u8) } #[test] fn test_message_parse() { assert_eq!(1, 1) } }
{ m.information_elements.push( InformationElement::Nsapi(nsapi) ); }
position.rs
//! 1-based position. mod sequence_index; pub use self::sequence_index::SequenceIndex; use std::{ fmt, num::{self, NonZeroUsize}, str::FromStr, }; /// A 1-based position. #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Position(NonZeroUsize); impl Position { /// The minimum value of a position. pub const MIN: Self = match Self::new(1) { Some(position) => position, None => panic!("position cannot be non-zero"), }; /// Creates a position if the given value is not zero. /// /// # Examples /// /// ``` /// use noodles_core::Position; /// assert!(Position::new(8).is_some()); /// assert!(Position::new(0).is_none()); /// ``` pub const fn new(n: usize) -> Option<Self> { if let Some(m) = NonZeroUsize::new(n) { Some(Self(m)) } else { None } } /// Adds an unsigned integer to a 1-based position. /// /// This returns `None` if the operation overflowed. /// /// # Examples /// /// ``` /// use noodles_core::Position; /// let position = Position::try_from(8)?; /// assert_eq!(position.checked_add(5), Position::new(13)); /// # Ok::<_, noodles_core::position::TryFromIntError>(()) /// ``` pub fn checked_add(self, other: usize) -> Option<Self> { usize::from(self).checked_add(other).and_then(Self::new) } } impl fmt::Display for Position { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } /// An error returned when a position fails to parse. pub type ParseError = num::ParseIntError; impl FromStr for Position { type Err = ParseError; fn
(s: &str) -> Result<Self, Self::Err> { s.parse().map(Self) } } /// An error returned when a raw position fails to convert. pub type TryFromIntError = num::TryFromIntError; impl TryFrom<usize> for Position { type Error = TryFromIntError; fn try_from(n: usize) -> Result<Self, Self::Error> { NonZeroUsize::try_from(n).map(Position) } } impl From<Position> for usize { fn from(position: Position) -> Self { position.0.get() } }
from_str
index.js
import styled from '@emotion/styled';

// Styled <main> layout wrapper. Top margin clears a fixed header and the
// horizontal padding collapses on larger viewports; `will-change: padding`
// pairs with the padding transition below. Theme supplies the font family
// and the media-query strings.
export default styled.main`
  font-family: ${({ theme }) => theme.typography.fontFamily};
  margin-top: 10rem;
  padding: 40px 3.375rem;
  transition: padding 0.4s ease;
  width: 100%;
  will-change: padding;

  @media ${({ theme }) => theme.mediaQueries.atLeastTablet} {
    margin-top: 7rem;
    padding: 0;
  }

  @media ${({ theme }) => theme.mediaQueries.phone} {
    margin-top: 88px;
  }
`;
configmaps.go
package elasticsearch import ( "bytes" "context" "crypto/sha256" "html/template" "io" "runtime" "strconv" "github.com/ViaQ/logerr/kverrors" "github.com/openshift/elasticsearch-operator/internal/manifests/configmap" v1 "k8s.io/api/core/v1" ) const ( esConfig = "elasticsearch.yml" log4jConfig = "log4j2.properties" indexSettingsConfig = "index_settings" ) // esYmlStruct is used to render esYmlTmpl to a proper elasticsearch.yml format
KibanaIndexMode string EsUnicastHost string NodeQuorum string RecoverExpectedNodes string SystemCallFilter string } type log4j2PropertiesStruct struct { RootLogger string LogLevel string SecurityLogLevel string } type indexSettingsStruct struct { PrimaryShards string ReplicaShards string } // CreateOrUpdateConfigMaps ensures the existence of ConfigMaps with Elasticsearch configuration func (er *ElasticsearchRequest) CreateOrUpdateConfigMaps() error { dpl := er.cluster kibanaIndexMode, err := kibanaIndexMode("") if err != nil { return err } dataNodeCount := int(GetDataCount(dpl)) masterNodeCount := int(getMasterCount(dpl)) logConfig := getLogConfig(dpl.GetAnnotations()) cm := newConfigMap( dpl.Name, dpl.Namespace, dpl.Labels, kibanaIndexMode, esUnicastHost(dpl.Name, dpl.Namespace), strconv.Itoa(masterNodeCount/2+1), strconv.Itoa(dataNodeCount), strconv.Itoa(CalculatePrimaryCount(dpl)), strconv.Itoa(CalculateReplicaCount(dpl)), strconv.FormatBool(runtime.GOARCH == "amd64"), logConfig, ) dpl.AddOwnerRefTo(cm) updated, err := configmap.CreateOrUpdate(context.TODO(), er.client, cm, configMapContentEqual, configmap.MutateDataOnly) if err != nil { return kverrors.Wrap(err, "failed to create or update elasticsearch configmap", "cluster", er.cluster.Name, "namespace", er.cluster.Namespace, ) } if updated { // Cluster settings has changed, make sure it doesnt go unnoticed if err := updateConditionWithRetry(dpl, v1.ConditionTrue, updateUpdatingSettingsCondition, er.client); err != nil { return err } } else { if err := updateConditionWithRetry(dpl, v1.ConditionFalse, updateUpdatingSettingsCondition, er.client); err != nil { return err } } return nil } func renderData(kibanaIndexMode, esUnicastHost, nodeQuorum, recoverExpectedNodes, primaryShardsCount, replicaShardsCount, systemCallFilter string, logConfig LogConfig) (map[string]string, error) { data := map[string]string{} buf := &bytes.Buffer{} if err := renderEsYml(buf, kibanaIndexMode, esUnicastHost, nodeQuorum, 
recoverExpectedNodes, systemCallFilter); err != nil { return data, err } data[esConfig] = buf.String() buf = &bytes.Buffer{} if err := renderLog4j2Properties(buf, logConfig); err != nil { return data, err } data[log4jConfig] = buf.String() buf = &bytes.Buffer{} if err := renderIndexSettings(buf, primaryShardsCount, replicaShardsCount); err != nil { return data, err } data[indexSettingsConfig] = buf.String() return data, nil } // newConfigMap returns a v1.ConfigMap object func newConfigMap(configMapName, namespace string, labels map[string]string, kibanaIndexMode, esUnicastHost, nodeQuorum, recoverExpectedNodes, primaryShardsCount, replicaShardsCount, systemCallFilter string, logConfig LogConfig) *v1.ConfigMap { data, err := renderData(kibanaIndexMode, esUnicastHost, nodeQuorum, recoverExpectedNodes, primaryShardsCount, replicaShardsCount, systemCallFilter, logConfig) if err != nil { return nil } return configmap.New(configMapName, namespace, labels, data) } func configMapContentEqual(old, new *v1.ConfigMap) bool { oldEsConfigSum := sha256.Sum256([]byte(old.Data[esConfig])) newEsConfigSum := sha256.Sum256([]byte(new.Data[esConfig])) if oldEsConfigSum != newEsConfigSum { return false } oldLog4jConfig := sha256.Sum256([]byte(old.Data[log4jConfig])) newLog4jConfig := sha256.Sum256([]byte(new.Data[log4jConfig])) if oldLog4jConfig != newLog4jConfig { return false } oldIndexSettingsConfig := sha256.Sum256([]byte(old.Data[indexSettingsConfig])) newIndexSettingsConfig := sha256.Sum256([]byte(new.Data[indexSettingsConfig])) if oldIndexSettingsConfig != newIndexSettingsConfig { return false } return true } func renderEsYml(w io.Writer, kibanaIndexMode, esUnicastHost, nodeQuorum, recoverExpectedNodes, systemCallFilter string) error { t := template.New("elasticsearch.yml") config := esYmlTmpl t, err := t.Parse(config) if err != nil { return err } esy := esYmlStruct{ KibanaIndexMode: kibanaIndexMode, EsUnicastHost: esUnicastHost, NodeQuorum: nodeQuorum, RecoverExpectedNodes: 
recoverExpectedNodes, SystemCallFilter: systemCallFilter, } return t.Execute(w, esy) } func renderLog4j2Properties(w io.Writer, logConfig LogConfig) error { t := template.New("log4j2.properties") t, err := t.Parse(log4j2PropertiesTmpl) if err != nil { return err } log4jProp := log4j2PropertiesStruct{ RootLogger: logConfig.ServerAppender, LogLevel: logConfig.ServerLoglevel, SecurityLogLevel: logConfig.LogLevel, } return t.Execute(w, log4jProp) } func renderIndexSettings(w io.Writer, primaryShardsCount, replicaShardsCount string) error { t := template.New("index_settings") t, err := t.Parse(indexSettingsTmpl) if err != nil { return err } indexSettings := indexSettingsStruct{ PrimaryShards: primaryShardsCount, ReplicaShards: replicaShardsCount, } return t.Execute(w, indexSettings) }
type esYmlStruct struct {
tcpstream_keyboard.rs
use std::{thread}; use std::result::Result::Ok; use log::*; use std::net::{TcpListener, TcpStream, Shutdown}; use std::io::Read; use std::io::Write; use std::sync::mpsc::{channel, Sender, Receiver}; pub fn bind_keyboard(port: u32) -> Receiver<u8> { let (tx, rx) = channel(); thread::spawn(move|| { let bind_string = format!("0.0.0.0:{}", port); info!("Binding to {}", bind_string); let listener = TcpListener::bind(bind_string).unwrap(); listener.set_nonblocking(true).expect("Cannot set non-blocking"); info!("Creating communication channel"); for stream in listener.incoming() { match stream { Ok(stream) => { let tx_owned = tx.clone(); thread::spawn(move|| { info!("Keyabord connection from client succeeded"); handle_client(stream, tx_owned) }); } Err(_e) => { } } } }); rx } fn handle_client(mut stream: TcpStream, tx: Sender<u8>)
{ let mut data = [0 as u8; 256]; // using 50 byte buffer while match stream.read(&mut data) { Ok(size) => { // echo everything! stream.write(&data[0..size]).unwrap(); for n in 0..size { info!("Sending to queue: {}", data[n]); tx.send(data[n]).unwrap(); } true }, Err(_) => { error!("An error occurred, terminating connection with {}", stream.peer_addr().unwrap()); stream.shutdown(Shutdown::Both).unwrap(); false } } {} }
discovery.py
import hubspot.crm.extensions.cards as api_client

from ....discovery_base import DiscoveryBase


class Discovery(DiscoveryBase):
    """Discovery entry point for the HubSpot CRM extensions "cards" API group."""

    @property
    def cards_api(self) -> api_client.CardsApi:
        # Delegates construction/configuration of the CardsApi client to the
        # shared DiscoveryBase helper (looked up by class name within the
        # api_client module).
        return self._configure_api_client(api_client, "CardsApi")
transferQueueStandbyProcessor.go
// The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package history

import (
	"context"

	enumsspb "go.temporal.io/server/api/enums/v1"
	"go.temporal.io/server/api/historyservice/v1"
	"go.temporal.io/server/api/matchingservice/v1"
	"go.temporal.io/server/client"
	"go.temporal.io/server/common/log"
	"go.temporal.io/server/common/log/tag"
	"go.temporal.io/server/common/metrics"
	"go.temporal.io/server/common/namespace"
	"go.temporal.io/server/common/quotas"
	"go.temporal.io/server/common/xdc"
	"go.temporal.io/server/service/history/queues"
	"go.temporal.io/server/service/history/shard"
	"go.temporal.io/server/service/history/tasks"
	"go.temporal.io/server/service/history/workflow"
	"go.temporal.io/server/service/worker/archiver"
)

type (
	// transferQueueStandbyProcessorImpl processes transfer-queue tasks for a
	// non-active ("standby") cluster. It composes the transfer-queue base,
	// the generic queue-processor base, and a queue ack manager.
	transferQueueStandbyProcessorImpl struct {
		*transferQueueProcessorBase
		*queueProcessorBase
		queueAckMgr

		// this is the scheduler owned by this standby queue processor;
		// set only when no scheduler was injected (see constructor), in
		// which case Start/Stop manage its lifecycle.
		ownedScheduler queues.Scheduler
	}
)

// newTransferQueueStandbyProcessor wires together the filter, executor,
// scheduler, rescheduler, ack manager, and base processor for a standby
// transfer queue. If `scheduler` is nil a new one is created and owned by
// the returned processor.
func newTransferQueueStandbyProcessor(
	clusterName string,
	shard shard.Context,
	scheduler queues.Scheduler,
	workflowCache workflow.Cache,
	archivalClient archiver.Client,
	taskAllocator taskAllocator,
	clientBean client.Bean,
	rateLimiter quotas.RateLimiter,
	logger log.Logger,
	metricProvider metrics.MetricProvider,
	matchingClient matchingservice.MatchingServiceClient,
) *transferQueueStandbyProcessorImpl {
	config := shard.GetConfig()
	// Polling/ack/reschedule knobs all come from the shard's dynamic config.
	options := &QueueProcessorOptions{
		BatchSize:                           config.TransferTaskBatchSize,
		MaxPollInterval:                     config.TransferProcessorMaxPollInterval,
		MaxPollIntervalJitterCoefficient:    config.TransferProcessorMaxPollIntervalJitterCoefficient,
		UpdateAckInterval:                   config.TransferProcessorUpdateAckInterval,
		UpdateAckIntervalJitterCoefficient:  config.TransferProcessorUpdateAckIntervalJitterCoefficient,
		RescheduleInterval:                  config.TransferProcessorRescheduleInterval,
		RescheduleIntervalJitterCoefficient: config.TransferProcessorRescheduleIntervalJitterCoefficient,
		MaxReschdulerSize:                   config.TransferProcessorMaxReschedulerSize,
		PollBackoffInterval:                 config.TransferProcessorPollBackoffInterval,
		MetricScope:                         metrics.TransferStandbyQueueProcessorScope,
	}
	logger = log.With(logger, tag.ClusterName(clusterName))

	// Decides which transfer tasks this standby processor handles.
	transferTaskFilter := func(task tasks.Task) bool {
		switch task.GetType() {
		case enumsspb.TASK_TYPE_TRANSFER_RESET_WORKFLOW:
			// no reset needed for standby
			return false
		case enumsspb.TASK_TYPE_TRANSFER_CLOSE_EXECUTION,
			enumsspb.TASK_TYPE_TRANSFER_DELETE_EXECUTION:
			// close/delete are always processed on standby
			return true
		default:
			// everything else is subject to per-namespace standby ownership
			return taskAllocator.verifyStandbyTask(clusterName, namespace.ID(task.GetNamespaceID()), task)
		}
	}
	maxReadAckLevel := func() int64 {
		return shard.GetQueueMaxReadLevel(tasks.CategoryTransfer, clusterName).TaskID
	}
	updateClusterAckLevel := func(ackLevel int64) error {
		return shard.UpdateQueueClusterAckLevel(tasks.CategoryTransfer, clusterName, tasks.NewImmediateKey(ackLevel))
	}
	// No special shutdown work for the standby queue.
	transferQueueShutdown := func() error {
		return nil
	}

	processor := &transferQueueStandbyProcessorImpl{
		transferQueueProcessorBase: newTransferQueueProcessorBase(
			shard,
			options,
			maxReadAckLevel,
			updateClusterAckLevel,
			transferQueueShutdown,
			logger,
		),
	}

	// The standby executor re-fetches missing history from the active
	// cluster via the NDC history resender before applying events locally.
	taskExecutor := newTransferQueueStandbyTaskExecutor(
		shard,
		workflowCache,
		archivalClient,
		xdc.NewNDCHistoryResender(
			shard.GetNamespaceRegistry(),
			clientBean,
			func(ctx context.Context, request *historyservice.ReplicateEventsV2Request) error {
				engine, err := shard.GetEngine()
				if err != nil {
					return err
				}
				return engine.ReplicateEventsV2(ctx, request)
			},
			shard.GetPayloadSerializer(),
			config.StandbyTaskReReplicationContextTimeout,
			logger,
		),
		logger,
		metricProvider,
		clusterName,
		matchingClient,
	)

	if scheduler == nil {
		// No shared scheduler injected: create one and take ownership of
		// its lifecycle (see Start/Stop below).
		scheduler = newTransferTaskScheduler(shard, logger)
		processor.ownedScheduler = scheduler
	}

	rescheduler := queues.NewRescheduler(
		scheduler,
		shard.GetTimeSource(),
		metricProvider.WithTags(metrics.OperationTag(queues.OperationTransferStandbyQueueProcessor)),
	)

	// The ack manager turns raw tasks into executables bound to this
	// processor's filter, executor, scheduler, and rescheduler.
	queueAckMgr := newQueueAckMgr(
		shard,
		options,
		processor,
		shard.GetQueueClusterAckLevel(tasks.CategoryTransfer, clusterName).TaskID,
		logger,
		func(t tasks.Task) queues.Executable {
			return queues.NewExecutable(
				t,
				transferTaskFilter,
				taskExecutor,
				scheduler,
				rescheduler,
				shard.GetTimeSource(),
				logger,
				shard.GetConfig().TransferTaskMaxRetryCount,
				queues.QueueTypeStandbyTransfer,
				shard.GetConfig().NamespaceCacheRefreshInterval,
			)
		},
	)

	queueProcessorBase := newQueueProcessorBase(
		clusterName,
		shard,
		options,
		processor,
		queueAckMgr,
		workflowCache,
		scheduler,
		rescheduler,
		rateLimiter,
		logger,
		shard.GetMetricsClient().Scope(metrics.TransferStandbyQueueProcessorScope),
	)
	processor.queueAckMgr = queueAckMgr
	processor.queueProcessorBase = queueProcessorBase

	return processor
}

// notifyNewTask wakes the base processor's polling loop.
func (t *transferQueueStandbyProcessorImpl) notifyNewTask() {
	t.queueProcessorBase.notifyNewTask()
}

// Start launches the owned scheduler (if any) before the base processor so
// executables always have a running scheduler to submit to.
func (t *transferQueueStandbyProcessorImpl) Start() {
	if t.ownedScheduler != nil {
		t.ownedScheduler.Start()
	}
	t.queueProcessorBase.Start()
}

// Stop shuts the base processor down before the owned scheduler — the
// mirror of Start's ordering.
func (t *transferQueueStandbyProcessorImpl) Stop() {
	t.queueProcessorBase.Stop()
	if t.ownedScheduler != nil {
		t.ownedScheduler.Stop()
	}
}
trait_example.rs
struct Sheep { naked: bool, name: &'static str, } trait Animal { // Static method signature; `Self` refers to the implementor type fn new(name: &'static str) -> Self; // Instance method signatures; these will return a string. fn name(&self) -> &'static str; fn noise(&self) -> &'static str; // Traits can provide default method definitions fn talk(&self) { println!("{} says {}", self.name(), self.noise()) } } impl Sheep { fn is_naked(&self) -> bool { self.naked } fn shear(&mut self) { if self.is_naked() { // Implementor methods can use the implementor's trait methods. println!("{} is already naked", self.name()) } else { println!("{} gets a haircut!", self.name); self.naked = true; } } } // Implement the `Animal` trait for `Sheep`. impl Animal for Sheep { // `Self` is the implementor type: `Sheep` fn new(name: &'static str) -> Self { Sheep { name, naked: false } } fn
(&self) -> &'static str { self.name } fn noise(&self) -> &'static str { if self.is_naked() { "baaaaah?" } else { "baaaaah!" } } // Default trait methods can be overridden fn talk(&self) { // For example, we can add some quiet contemplation println!("{} pauses briefly... {}", self.name, self.noise()); } } #[cfg(test)] mod tests { use super::*; #[test] fn test_case1() { // Type annotation is necessary in this case. let mut dolly: Sheep = Animal::new("Dolly"); dolly.talk(); dolly.shear(); dolly.talk(); } }
name
App.js
import React from 'react'; import {Questions} from './question/Questions' function
() { return ( <div className="App"> <Questions/> </div> ); } export default App;
App
provision_test.go
package sparta import ( "context" "testing" gocf "github.com/mweagle/go-cloudformation" "github.com/pkg/errors" "github.com/rs/zerolog" ) type cloudFormationProvisionTestResource struct { gocf.CloudFormationCustomResource ServiceToken string TestKey interface{} } func customResourceTestProvider(resourceType string) gocf.ResourceProperties { switch resourceType { case "Custom::ProvisionTestEmpty": { return &cloudFormationProvisionTestResource{} } default: return nil } } func init() { gocf.RegisterCustomResourceProvider(customResourceTestProvider) } func TestProvision(t *testing.T) { testProvision(t, testLambdaData(), nil) } func templateDecorator(ctx context.Context, serviceName string, lambdaResourceName string, lambdaResource gocf.LambdaFunction, resourceMetadata map[string]interface{}, lambdaFunctionCode *gocf.LambdaFunctionCode, buildID string, cfTemplate *gocf.Template, logger *zerolog.Logger) (context.Context, error)
func TestDecorateProvision(t *testing.T) { lambdas := testLambdaData() lambdas[0].Decorator = templateDecorator testProvision(t, lambdas, nil) }
{ // Add an empty resource newResource, err := newCloudFormationResource("Custom::ProvisionTestEmpty", logger) if nil != err { return ctx, errors.Wrapf(err, "Failed to create test resource") } customResource := newResource.(*cloudFormationProvisionTestResource) customResource.ServiceToken = "arn:aws:sns:us-east-1:84969EXAMPLE:CRTest" customResource.TestKey = "Hello World" cfTemplate.AddResource("ProvisionTestResource", customResource) // Add an output cfTemplate.Outputs["OutputDecorationTest"] = &gocf.Output{ Description: "Information about the value", Value: gocf.String("My key"), } return ctx, nil }
gen_plots.py
# Copyright 2020 Adap GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Generate plots for Fashion-MNIST results.""" from typing import List, Tuple import numpy as np from flwr_experimental.baseline.plot import bar_chart, line_chart RESULTS = { "fedavg-t10": [ (0, 0.03759999945759773), (1, 0.03759999945759773), (2, 0.03759999945759773), (3, 0.03759999945759773), (4, 0.03759999945759773), (5, 0.03759999945759773), (6, 0.03759999945759773), (7, 0.03759999945759773), (8, 0.03759999945759773), (9, 0.03759999945759773), (10, 0.03759999945759773), (11, 0.03759999945759773), (12, 0.03759999945759773), (13, 0.03759999945759773), (14, 0.03759999945759773), (15, 0.03759999945759773), (16, 0.03759999945759773), (17, 0.03759999945759773), (18, 0.03759999945759773), (19, 0.03759999945759773), (20, 0.03759999945759773), ], "fedavg-t12": [ (0, 0.03759999945759773), (1, 0.03759999945759773), (2, 0.03759999945759773), (3, 0.03759999945759773), (4, 0.03759999945759773), (5, 0.03759999945759773), (6, 0.03759999945759773), (7, 0.03759999945759773), (8, 0.03759999945759773), (9, 0.03759999945759773), (10, 0.03759999945759773), (11, 0.03759999945759773), (12, 0.03759999945759773), (13, 0.03759999945759773), (14, 0.03759999945759773), (15, 0.03759999945759773), (16, 0.03759999945759773), (17, 0.03759999945759773), (18, 0.03759999945759773), (19, 0.03759999945759773), (20, 0.03759999945759773), ], 
"fedavg-t14": [ (0, 0.03759999945759773), (1, 0.03759999945759773), (2, 0.6743999719619751), (3, 0.6802999973297119), (4, 0.6802999973297119), (5, 0.6802999973297119), (6, 0.6802999973297119), (7, 0.7853999733924866), (8, 0.7853999733924866), (9, 0.7876999974250793), (10, 0.7642999887466431), (11, 0.8054999709129333), (12, 0.8181999921798706), (13, 0.8108999729156494), (14, 0.7907000184059143), (15, 0.763700008392334), (16, 0.8091999888420105), (17, 0.8296999931335449), (18, 0.8123999834060669), (19, 0.8123999834060669), (20, 0.8101999759674072), ], "fedavg-t16": [ (0, 0.03759999945759773), (1, 0.7197999954223633), (2, 0.7720999717712402), (3, 0.7900999784469604), (4, 0.7811999917030334), (5, 0.7724000215530396), (6, 0.8023999929428101), (7, 0.8043000102043152), (8, 0.8230999708175659), (9, 0.8327999711036682), (10, 0.8299000263214111), (11, 0.8402000069618225), (12, 0.853600025177002), (13, 0.8370000123977661), (14, 0.83160001039505), (15, 0.8424000144004822), (16, 0.830299973487854), (17, 0.8476999998092651), (18, 0.8632000088691711), (19, 0.8636999726295471), (20, 0.8657000064849854), ], "fedfs-t10": [ (0, 0.03759999945759773), (1, 0.7343000173568726), (2, 0.7664999961853027), (3, 0.7900000214576721), (4, 0.805899977684021), (5, 0.8237000107765198), (6, 0.8406999707221985), (7, 0.8263000249862671), (8, 0.8442999720573425), (9, 0.8564000129699707), (10, 0.8651999831199646), (11, 0.8375999927520752), (12, 0.8646000027656555), (13, 0.8669999837875366), (14, 0.861299991607666), (15, 0.8773999810218811), (16, 0.800599992275238), (17, 0.8676999807357788), (18, 0.8763999938964844), (19, 0.8695999979972839), (20, 0.873199999332428), ], "fedfs-t12": [ (0, 0.03759999945759773), (1, 0.7153000235557556), (2, 0.7835999727249146), (3, 0.8083999752998352), (4, 0.816100001335144), (5, 0.8215000033378601), (6, 0.8429999947547913), (7, 0.8464000225067139), (8, 0.8603000044822693), (9, 0.8482999801635742), (10, 0.8450000286102295), (11, 0.866599977016449), (12, 0.863099992275238), 
(13, 0.8709999918937683), (14, 0.873199999332428), (15, 0.8701000213623047), (16, 0.8600000143051147), (17, 0.8766999840736389), (18, 0.8697999715805054), (19, 0.8795999884605408), (20, 0.8830999732017517), ], "fedfs-t14": [ (0, 0.03759999945759773), (1, 0.7245000004768372), (2, 0.7972000241279602), (3, 0.8059999942779541), (4, 0.8252999782562256), (5, 0.8334000110626221), (6, 0.8560000061988831), (7, 0.8510000109672546), (8, 0.8650000095367432), (9, 0.8621000051498413), (10, 0.866599977016449), (11, 0.8615999817848206), (12, 0.8636999726295471), (13, 0.8740000128746033), (14, 0.866100013256073), (15, 0.867900013923645), (16, 0.83160001039505), (17, 0.8741999864578247), (18, 0.8736000061035156), (19, 0.8810999989509583), (20, 0.8762000203132629), ], "fedfs-t16": [ (0, 0.03759999945759773), (1, 0.7476999759674072), (2, 0.7982000112533569), (3, 0.8276000022888184), (4, 0.8256999850273132), (5, 0.8312000036239624), (6, 0.8536999821662903), (7, 0.8483999967575073), (8, 0.85589998960495), (9, 0.8687000274658203), (10, 0.8664000034332275), (11, 0.8586999773979187), (12, 0.8662999868392944), (13, 0.8754000067710876), (14, 0.878600001335144), (15, 0.8763999938964844), (16, 0.748199999332428), (17, 0.8806999921798706), (18, 0.8794000148773193), (19, 0.8813999891281128), (20, 0.8708000183105469), ], } RESULTS_WALL_CLOCK_TIME = { "fedavg-14": 218.49, "fedfs-14": 61.16, "fedavg-16": 153.56, "fedfs-16": 66.84, } def accuracy_t10() -> None: """Generate plots.""" lines = [ ("FedAvg, t=10", RESULTS["fedavg-t10"]), ("FedFS, t=10", RESULTS["fedfs-t10"]), ] plot(lines, "fmnist-progress-t10") def accuracy_t12() -> None: """Generate plots.""" lines = [ ("FedAvg, t=12", RESULTS["fedavg-t12"]), ("FedFS, t=12", RESULTS["fedfs-t12"]), ] plot(lines, "fmnist-progress-t12") def accuracy_t14() -> None: """Generate plots.""" lines = [ ("FedAvg, t=14", RESULTS["fedavg-t14"]), ("FedFS, t=14", RESULTS["fedfs-t14"]), ] plot(lines, "fmnist-progress-t14") def
() -> None: """Generate plots.""" lines = [ ("FedAvg, t=16", RESULTS["fedavg-t16"]), ("FedFS, t=16", RESULTS["fedfs-t16"]), ] plot(lines, "fmnist-progress-t16") def accuracy_fedavg_vs_fedfs() -> None: """Comparision of FedAvg vs FedFS.""" fedavg = [ RESULTS["fedavg-t10"][-1][1], RESULTS["fedavg-t12"][-1][1], RESULTS["fedavg-t14"][-1][1], RESULTS["fedavg-t16"][-1][1], ] fedfs = [ RESULTS["fedfs-t10"][-1][1], RESULTS["fedfs-t12"][-1][1], RESULTS["fedfs-t14"][-1][1], RESULTS["fedfs-t16"][-1][1], ] bar_chart( y_values=[ np.array([x * 100 for x in fedavg]), np.array([x * 100 for x in fedfs]), ], bar_labels=["FedAvg", "FedFS"], x_label="Timeout", x_tick_labels=["T=10", "T=12", "T=14", "T=16"], y_label="Accuracy", filename="fmnist-accuracy_fedavg_vs_fedfs", ) def wall_clock_time_fedavg_vs_fedfs() -> None: """Comparision of FedAvg vs FedFS.""" bar_chart( y_values=[ np.array( [ RESULTS_WALL_CLOCK_TIME["fedavg-14"], RESULTS_WALL_CLOCK_TIME["fedavg-16"], ] ), np.array( [ RESULTS_WALL_CLOCK_TIME["fedfs-t14"], RESULTS_WALL_CLOCK_TIME["fedfs-16"], ] ), ], bar_labels=["FedAvg", "FedFS"], x_label="Timeout", x_tick_labels=["T=14", "T=16"], y_label="Completion time", filename="fmnist-time_fedavg_vs_fedfs", ) def plot(lines: List[Tuple[str, List[Tuple[int, float]]]], filename: str) -> None: """Plot a single line chart.""" values = [np.array([x * 100 for _, x in val]) for _, val in lines] labels = [label for label, _ in lines] line_chart( values, labels, "Round", "Accuracy", filename=filename, y_floor=0, y_ceil=100, ) def main() -> None: """Call all plot functions.""" accuracy_t10() accuracy_t12() accuracy_t14() accuracy_t16() accuracy_fedavg_vs_fedfs() wall_clock_time_fedavg_vs_fedfs() if __name__ == "__main__": main()
accuracy_t16
beertasting_test.go
package beertasting import ( "appengine/aetest" "appengine/datastore" "encoding/json" "github.com/stretchr/testify/assert" httptest "github.com/stretchr/testify/http" "net/http" "testing" ) func Test_configKey(t *testing.T) { c, err := aetest.NewContext(nil) if err != nil { t.Fatal(err) } defer c.Close() assert.NotNil(t, configKey(c)) } type mockResponseWriter struct { httptest.TestResponseWriter t *testing.T } func (m *mockResponseWriter) WriteJson(v interface{}) error { b, err := m.EncodeJson(v) if err != nil { panic("NOT IMPLEMENTED") } _, err = m.Write(b) return err } func (m *mockResponseWriter) EncodeJson(v interface{}) ([]byte, error) { return json.Marshal(v) } func
(t *testing.T) { s := struct{ int }{4} var w = mockResponseWriter{t: t} writeJson(&w, &s) assert.Equal(t, "{}", w.Output) } func mustNewContext(t *testing.T) aetest.Context { c, err := aetest.NewContext(nil) if err != nil { t.Fatal(err) } return c } func mustPut(t *testing.T, c aetest.Context, v interface{}) { key := configKey(c) if _, err := datastore.Put(c, key, v); err != nil { t.Fatal(err) } } func Test_datastoreRestGet(t *testing.T) { c := mustNewContext(nil) defer c.Close() var cfg Config status, err := datastoreRestGet(c, configKey(c), &cfg) assert.Equal(t, http.StatusInternalServerError, status) assert.Error(t, err) cfg.ClientSecret = "foo" mustPut(t, c, &cfg) status, err = datastoreRestGet(c, configKey(c), &cfg) assert.Equal(t, http.StatusOK, status) assert.NoError(t, err) }
Test_writeJson
describe.py
# Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """resources describe command.""" from apitools.base.py import exceptions as apitools_exceptions from googlecloudsdk.api_lib.deployment_manager import dm_api_util from googlecloudsdk.api_lib.deployment_manager import dm_base from googlecloudsdk.calliope import base from googlecloudsdk.calliope import exceptions from googlecloudsdk.command_lib.deployment_manager import dm_v2_base class Describe(base.DescribeCommand): """Provide information about a resource. This command prints out all available details about a resource. """ detailed_help = { 'EXAMPLES': """\ To display information about a resource, run: $ {command} --deployment my-deployment my-resource-name """, } @staticmethod def Args(parser): """Args is called by calliope to gather arguments for this command. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed. """ parser.add_argument('resource', help='Resource name.') def Run(self, args):
"""Run 'resources describe'. Args: args: argparse.Namespace, The arguments that this command was invoked with. Returns: The requested resource. Raises: HttpException: An http error response was received while executing api request. """ try: return dm_v2_base.GetClient().resources.Get( dm_v2_base.GetMessages().DeploymentmanagerResourcesGetRequest( project=dm_base.GetProject(), deployment=args.deployment, resource=args.resource ) ) except apitools_exceptions.HttpError as error: raise exceptions.HttpException(error, dm_api_util.HTTP_ERROR_FORMAT)
luy_test.go
package luy import ( "testing" "time" "github.com/gohugoio/locales" "github.com/gohugoio/locales/currency" ) func TestLocale(t *testing.T) { trans := New() expected := "luy" if trans.Locale() != expected { t.Errorf("Expected '%s' Got '%s'", expected, trans.Locale()) } } func TestPluralsRange(t *testing.T) { trans := New() tests := []struct { expected locales.PluralRule }{ // { // expected: locales.PluralRuleOther, // }, } rules := trans.PluralsRange() // expected := 1 // if len(rules) != expected { // t.Errorf("Expected '%d' Got '%d'", expected, len(rules)) // } for _, tt := range tests { r := locales.PluralRuleUnknown for i := 0; i < len(rules); i++ { if rules[i] == tt.expected { r = rules[i] break } } if r == locales.PluralRuleUnknown { t.Errorf("Expected '%s' Got '%s'", tt.expected, r) } } } func TestPluralsOrdinal(t *testing.T) { trans := New() tests := []struct { expected locales.PluralRule }{ // { // expected: locales.PluralRuleOne, // }, // { // expected: locales.PluralRuleTwo, // }, // { // expected: locales.PluralRuleFew, // }, // { // expected: locales.PluralRuleOther, // }, } rules := trans.PluralsOrdinal() // expected := 4 // if len(rules) != expected { // t.Errorf("Expected '%d' Got '%d'", expected, len(rules)) // } for _, tt := range tests { r := locales.PluralRuleUnknown for i := 0; i < len(rules); i++ { if rules[i] == tt.expected { r = rules[i] break } } if r == locales.PluralRuleUnknown { t.Errorf("Expected '%s' Got '%s'", tt.expected, r) } } } func TestPluralsCardinal(t *testing.T) { trans := New() tests := []struct { expected locales.PluralRule }{ // { // expected: locales.PluralRuleOne, // }, // { // expected: locales.PluralRuleOther, // }, } rules := trans.PluralsCardinal() // expected := 2 // if len(rules) != expected { // t.Errorf("Expected '%d' Got '%d'", expected, len(rules)) // } for _, tt := range tests { r := locales.PluralRuleUnknown for i := 0; i < len(rules); i++ { if rules[i] == tt.expected { r = rules[i] break } } if r == 
locales.PluralRuleUnknown { t.Errorf("Expected '%s' Got '%s'", tt.expected, r) } } } func TestRangePlurals(t *testing.T) { trans := New() tests := []struct { num1 float64 v1 uint64 num2 float64 v2 uint64 expected locales.PluralRule }{ // { // num1: 1, // v1: 1, // num2: 2, // v2: 2, // expected: locales.PluralRuleOther, // }, } for _, tt := range tests { rule := trans.RangePluralRule(tt.num1, tt.v1, tt.num2, tt.v2) if rule != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, rule) } } } func TestOrdinalPlurals(t *testing.T) { trans := New() tests := []struct { num float64 v uint64 expected locales.PluralRule }{ // { // num: 1, // v: 0, // expected: locales.PluralRuleOne, // }, // { // num: 2, // v: 0, // expected: locales.PluralRuleTwo, // }, // { // num: 3, // v: 0, // expected: locales.PluralRuleFew, // }, // { // num: 4, // v: 0, // expected: locales.PluralRuleOther, // }, } for _, tt := range tests { rule := trans.OrdinalPluralRule(tt.num, tt.v) if rule != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, rule) } } } func TestCardinalPlurals(t *testing.T) { trans := New() tests := []struct { num float64 v uint64 expected locales.PluralRule }{ // { // num: 1, // v: 0, // expected: locales.PluralRuleOne, // }, // { // num: 4, // v: 0, // expected: locales.PluralRuleOther, // }, } for _, tt := range tests { rule := trans.CardinalPluralRule(tt.num, tt.v) if rule != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, rule) } } } func TestDaysAbbreviated(t *testing.T) { trans := New() days := trans.WeekdaysAbbreviated() for i, day := range days { s := trans.WeekdayAbbreviated(time.Weekday(i)) if s != day { t.Errorf("Expected '%s' Got '%s'", day, s) } } tests := []struct { idx int expected string }{ // { // idx: 0, // expected: "Sun", // }, // { // idx: 1, // expected: "Mon", // }, // { // idx: 2, // expected: "Tue", // }, // { // idx: 3, // expected: "Wed", // }, // { // idx: 4, // expected: "Thu", // }, // { // idx: 5, // expected: 
"Fri", // }, // { // idx: 6, // expected: "Sat", // }, } for _, tt := range tests { s := trans.WeekdayAbbreviated(time.Weekday(tt.idx)) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestDaysNarrow(t *testing.T) { trans := New() days := trans.WeekdaysNarrow() for i, day := range days { s := trans.WeekdayNarrow(time.Weekday(i)) if s != day { t.Errorf("Expected '%s' Got '%s'", string(day), s) } } tests := []struct { idx int expected string }{ // { // idx: 0, // expected: "S", // }, // { // idx: 1, // expected: "M", // }, // { // idx: 2, // expected: "T", // }, // { // idx: 3, // expected: "W", // }, // { // idx: 4, // expected: "T", // }, // { // idx: 5, // expected: "F", // }, // { // idx: 6, // expected: "S", // }, } for _, tt := range tests { s := trans.WeekdayNarrow(time.Weekday(tt.idx)) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestDaysShort(t *testing.T) { trans := New() days := trans.WeekdaysShort() for i, day := range days { s := trans.WeekdayShort(time.Weekday(i)) if s != day { t.Errorf("Expected '%s' Got '%s'", day, s) } } tests := []struct { idx int expected string }{ // { // idx: 0, // expected: "Su", // }, // { // idx: 1, // expected: "Mo", // }, // { // idx: 2, // expected: "Tu", // }, // { // idx: 3, // expected: "We", // }, // { // idx: 4, // expected: "Th", // }, // { // idx: 5, // expected: "Fr", // }, // { // idx: 6, // expected: "Sa", // }, } for _, tt := range tests { s := trans.WeekdayShort(time.Weekday(tt.idx)) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestDaysWide(t *testing.T) { trans := New() days := trans.WeekdaysWide() for i, day := range days { s := trans.WeekdayWide(time.Weekday(i)) if s != day { t.Errorf("Expected '%s' Got '%s'", day, s) } } tests := []struct { idx int expected string }{ // { // idx: 0, // expected: "Sunday", // }, // { // idx: 1, // expected: "Monday", // }, // { // idx: 2, // expected: "Tuesday", // 
}, // { // idx: 3, // expected: "Wednesday", // }, // { // idx: 4, // expected: "Thursday", // }, // { // idx: 5, // expected: "Friday", // }, // { // idx: 6, // expected: "Saturday", // }, } for _, tt := range tests { s := trans.WeekdayWide(time.Weekday(tt.idx)) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestMonthsAbbreviated(t *testing.T) { trans := New() months := trans.MonthsAbbreviated() for i, month := range months { s := trans.MonthAbbreviated(time.Month(i + 1)) if s != month { t.Errorf("Expected '%s' Got '%s'", month, s) } } tests := []struct { idx int expected string }{ // { // idx: 1, // expected: "Jan", // }, // { // idx: 2, // expected: "Feb", // }, // { // idx: 3, // expected: "Mar", // }, // { // idx: 4, // expected: "Apr", // }, // { // idx: 5, // expected: "May", // }, // { // idx: 6, // expected: "Jun", // }, // { // idx: 7, // expected: "Jul", // }, // { // idx: 8, // expected: "Aug", // }, // { // idx: 9, // expected: "Sep", // }, // { // idx: 10, // expected: "Oct", // }, // { // idx: 11, // expected: "Nov", // }, // { // idx: 12, // expected: "Dec", // }, } for _, tt := range tests { s := trans.MonthAbbreviated(time.Month(tt.idx)) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestMonthsNarrow(t *testing.T) { trans := New() months := trans.MonthsNarrow() for i, month := range months { s := trans.MonthNarrow(time.Month(i + 1)) if s != month { t.Errorf("Expected '%s' Got '%s'", month, s) } } tests := []struct { idx int expected string }{ // { // idx: 1, // expected: "J", // }, // { // idx: 2, // expected: "F", // }, // { // idx: 3, // expected: "M", // }, // { // idx: 4, // expected: "A", // }, // { // idx: 5, // expected: "M", // }, // { // idx: 6, // expected: "J", // }, // { // idx: 7, // expected: "J", // }, // { // idx: 8, // expected: "A", // }, // { // idx: 9, // expected: "S", // }, // { // idx: 10, // expected: "O", // }, // { // idx: 11, // expected: "N", // 
}, // { // idx: 12, // expected: "D", // }, } for _, tt := range tests { s := trans.MonthNarrow(time.Month(tt.idx)) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestMonthsWide(t *testing.T) { trans := New() months := trans.MonthsWide() for i, month := range months { s := trans.MonthWide(time.Month(i + 1)) if s != month
} tests := []struct { idx int expected string }{ // { // idx: 1, // expected: "January", // }, // { // idx: 2, // expected: "February", // }, // { // idx: 3, // expected: "March", // }, // { // idx: 4, // expected: "April", // }, // { // idx: 5, // expected: "May", // }, // { // idx: 6, // expected: "June", // }, // { // idx: 7, // expected: "July", // }, // { // idx: 8, // expected: "August", // }, // { // idx: 9, // expected: "September", // }, // { // idx: 10, // expected: "October", // }, // { // idx: 11, // expected: "November", // }, // { // idx: 12, // expected: "December", // }, } for _, tt := range tests { s := string(trans.MonthWide(time.Month(tt.idx))) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestFmtTimeFull(t *testing.T) { // loc, err := time.LoadLocation("America/Toronto") // if err != nil { // t.Errorf("Expected '<nil>' Got '%s'", err) // } // fixed := time.FixedZone("OTHER", -4) tests := []struct { t time.Time expected string }{ // { // t: time.Date(2016, 02, 03, 9, 5, 1, 0, loc), // expected: "9:05:01 am Eastern Standard Time", // }, // { // t: time.Date(2016, 02, 03, 20, 5, 1, 0, fixed), // expected: "8:05:01 pm OTHER", // }, } trans := New() for _, tt := range tests { s := trans.FmtTimeFull(tt.t) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestFmtTimeLong(t *testing.T) { // loc, err := time.LoadLocation("America/Toronto") // if err != nil { // t.Errorf("Expected '<nil>' Got '%s'", err) // } tests := []struct { t time.Time expected string }{ // { // t: time.Date(2016, 02, 03, 9, 5, 1, 0, loc), // expected: "9:05:01 am EST", // }, // { // t: time.Date(2016, 02, 03, 20, 5, 1, 0, loc), // expected: "8:05:01 pm EST", // }, } trans := New() for _, tt := range tests { s := trans.FmtTimeLong(tt.t) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestFmtTimeMedium(t *testing.T) { tests := []struct { t time.Time expected string }{ // { // t: 
time.Date(2016, 02, 03, 9, 5, 1, 0, time.UTC), // expected: "9:05:01 am", // }, // { // t: time.Date(2016, 02, 03, 20, 5, 1, 0, time.UTC), // expected: "8:05:01 pm", // }, } trans := New() for _, tt := range tests { s := trans.FmtTimeMedium(tt.t) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestFmtTimeShort(t *testing.T) { tests := []struct { t time.Time expected string }{ // { // t: time.Date(2016, 02, 03, 9, 5, 1, 0, time.UTC), // expected: "9:05 am", // }, // { // t: time.Date(2016, 02, 03, 20, 5, 1, 0, time.UTC), // expected: "8:05 pm", // }, } trans := New() for _, tt := range tests { s := trans.FmtTimeShort(tt.t) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestFmtDateFull(t *testing.T) { tests := []struct { t time.Time expected string }{ // { // t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC), // expected: "Wednesday, February 3, 2016", // }, } trans := New() for _, tt := range tests { s := trans.FmtDateFull(tt.t) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestFmtDateLong(t *testing.T) { tests := []struct { t time.Time expected string }{ // { // t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC), // expected: "February 3, 2016", // }, } trans := New() for _, tt := range tests { s := trans.FmtDateLong(tt.t) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestFmtDateMedium(t *testing.T) { tests := []struct { t time.Time expected string }{ // { // t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC), // expected: "Feb 3, 2016", // }, } trans := New() for _, tt := range tests { s := trans.FmtDateMedium(tt.t) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestFmtDateShort(t *testing.T) { tests := []struct { t time.Time expected string }{ // { // t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC), // expected: "2/3/16", // }, // { // t: time.Date(-500, 02, 03, 9, 0, 1, 0, time.UTC), 
// expected: "2/3/500", // }, } trans := New() for _, tt := range tests { s := trans.FmtDateShort(tt.t) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestFmtNumber(t *testing.T) { tests := []struct { num float64 v uint64 expected string }{ // { // num: 1123456.5643, // v: 2, // expected: "1,123,456.56", // }, // { // num: 1123456.5643, // v: 1, // expected: "1,123,456.6", // }, // { // num: 221123456.5643, // v: 3, // expected: "221,123,456.564", // }, // { // num: -221123456.5643, // v: 3, // expected: "-221,123,456.564", // }, // { // num: -221123456.5643, // v: 3, // expected: "-221,123,456.564", // }, // { // num: 0, // v: 2, // expected: "0.00", // }, // { // num: -0, // v: 2, // expected: "0.00", // }, // { // num: -0, // v: 2, // expected: "0.00", // }, } trans := New() for _, tt := range tests { s := trans.FmtNumber(tt.num, tt.v) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestFmtCurrency(t *testing.T) { tests := []struct { num float64 v uint64 currency currency.Type expected string }{ // { // num: 1123456.5643, // v: 2, // currency: currency.USD, // expected: "$1,123,456.56", // }, // { // num: 1123456.5643, // v: 1, // currency: currency.USD, // expected: "$1,123,456.60", // }, // { // num: 221123456.5643, // v: 3, // currency: currency.USD, // expected: "$221,123,456.564", // }, // { // num: -221123456.5643, // v: 3, // currency: currency.USD, // expected: "-$221,123,456.564", // }, // { // num: -221123456.5643, // v: 3, // currency: currency.CAD, // expected: "-CAD 221,123,456.564", // }, // { // num: 0, // v: 2, // currency: currency.USD, // expected: "$0.00", // }, // { // num: -0, // v: 2, // currency: currency.USD, // expected: "$0.00", // }, // { // num: -0, // v: 2, // currency: currency.CAD, // expected: "CAD 0.00", // }, // { // num: 1.23, // v: 0, // currency: currency.USD, // expected: "$1.00", // }, } trans := New() for _, tt := range tests { s := 
trans.FmtCurrency(tt.num, tt.v, tt.currency) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestFmtAccounting(t *testing.T) { tests := []struct { num float64 v uint64 currency currency.Type expected string }{ // { // num: 1123456.5643, // v: 2, // currency: currency.USD, // expected: "$1,123,456.56", // }, // { // num: 1123456.5643, // v: 1, // currency: currency.USD, // expected: "$1,123,456.60", // }, // { // num: 221123456.5643, // v: 3, // currency: currency.USD, // expected: "$221,123,456.564", // }, // { // num: -221123456.5643, // v: 3, // currency: currency.USD, // expected: "($221,123,456.564)", // }, // { // num: -221123456.5643, // v: 3, // currency: currency.CAD, // expected: "(CAD 221,123,456.564)", // }, // { // num: -0, // v: 2, // currency: currency.USD, // expected: "$0.00", // }, // { // num: -0, // v: 2, // currency: currency.CAD, // expected: "CAD 0.00", // }, // { // num: 1.23, // v: 0, // currency: currency.USD, // expected: "$1.00", // }, } trans := New() for _, tt := range tests { s := trans.FmtAccounting(tt.num, tt.v, tt.currency) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } } func TestFmtPercent(t *testing.T) { tests := []struct { num float64 v uint64 expected string }{ // { // num: 15, // v: 0, // expected: "15%", // }, // { // num: 15, // v: 2, // expected: "15.00%", // }, // { // num: 434.45, // v: 0, // expected: "434%", // }, // { // num: 34.4, // v: 2, // expected: "34.40%", // }, // { // num: -34, // v: 0, // expected: "-34%", // }, } trans := New() for _, tt := range tests { s := trans.FmtPercent(tt.num, tt.v) if s != tt.expected { t.Errorf("Expected '%s' Got '%s'", tt.expected, s) } } }
{ t.Errorf("Expected '%s' Got '%s'", month, s) }
single.rs
use idgenerator::*; use std::time::Instant; fn main() -> Result<(), OptionError> { let mut new_id: i64 = 0; let mut times = 500000; // Setup the option for the id generator instance. let options = IdGeneratorOptions::new().worker_id(1).worker_id_bit_len(6); // Initialize the id generator instance with the option. // Other options not set will be given the default value. let _ = IdInstance::init(options)?; // Get the option from the id generator instance. let options = IdInstance::get_options(); println!("First setting: {:?}", options);
// Other options will not change if not set. // If new options are not compatible with the old options, it will return an error. let _ = IdInstance::set_options(options)?; // Get the option from the id generator instance to see what have change and what remains the same as you set first time. let options = IdInstance::get_options(); println!("Second setting: {:?}", options); println!("Start to generate new unique id"); let start = Instant::now(); while times > 0 { // Call `next_id` to generate a new unique id. new_id = IdInstance::next_id(); times -= 1; } let duration = start.elapsed(); println!( "Program finished after {} seconds! Last id {}", duration.as_secs(), new_id ); Ok(()) }
// Setup another option let options = IdGeneratorOptions::new().seq_bit_len(12); // Use `set_options` will only change the options you have set.
fetchCountyASMState.ts
import { endpoint } from 'corla/config'; import createFetchAction from 'corla/action/createFetchAction';
const url = endpoint('county-asm-state'); export default createFetchAction({ failType: 'FETCH_COUNTY_ASM_STATE_FAIL', networkFailType: 'FETCH_COUNTY_ASM_STATE_NETWORK_FAIL', okType: 'FETCH_COUNTY_ASM_STATE_OK', sendType: 'FETCH_COUNTY_ASM_STATE_SEND', url, });
flying-shuttle.ts
import { Sema } from 'async-sema' import crypto from 'crypto' import fs from 'fs' import mkdirpModule from 'mkdirp' import { CHUNK_GRAPH_MANIFEST, PRERENDER_MANIFEST } from 'next-server/constants' import { EOL } from 'os' import path from 'path' import { promisify } from 'util' import { recursiveDelete } from '../lib/recursive-delete' import { fileExists } from '../lib/file-exists' import * as Log from './output/log' import { PageInfo } from './utils' import { PrerenderRoute } from '.' const FILE_BUILD_ID = 'HEAD_BUILD_ID' const FILE_UPDATED_AT = 'UPDATED_AT' const DIR_FILES_NAME = 'files' const MAX_SHUTTLES = 3 const SAVED_MANIFESTS = [ 'serverless/pages-manifest.json', 'prerender-manifest.json', ] const mkdirp = promisify(mkdirpModule) const fsReadFile = promisify(fs.readFile) const fsWriteFile = promisify(fs.writeFile) const fsCopyFile = promisify(fs.copyFile) const fsReadDir = promisify(fs.readdir) const fsLstat = promisify(fs.lstat) type ChunkGraphManifest = { sharedFiles: string[] | undefined pages: { [page: string]: string[] } pageChunks: { [page: string]: string[] } chunks: { [page: string]: string[] } hashes: { [page: string]: string } } async function findCachedShuttles(apexShuttleDirectory: string) { return (await Promise.all( await fsReadDir(apexShuttleDirectory).then(shuttleFiles => shuttleFiles.map(async f => ({ file: f, stats: await fsLstat(path.join(apexShuttleDirectory, f)), })) ) )) .filter(({ stats }) => stats.isDirectory()) .map(({ file }) => file) } async function pruneShuttles(apexShuttleDirectory: string) { const allShuttles = await findCachedShuttles(apexShuttleDirectory) if (allShuttles.length <= MAX_SHUTTLES) { return } const datedShuttles: { updatedAt: Date; shuttleDirectory: string }[] = [] for (const shuttleId of allShuttles) { const shuttleDirectory = path.join(apexShuttleDirectory, shuttleId) const updatedAtPath = path.join(shuttleDirectory, FILE_UPDATED_AT) let updatedAt: Date try { updatedAt = new Date((await 
fsReadFile(updatedAtPath, 'utf8')).trim()) } catch (err) { if (err.code === 'ENOENT') { await recursiveDelete(shuttleDirectory) continue } throw err } datedShuttles.push({ updatedAt, shuttleDirectory }) } const sortedShuttles = datedShuttles.sort((a, b) => Math.sign(b.updatedAt.valueOf() - a.updatedAt.valueOf()) ) let prunedShuttles = 0 while (sortedShuttles.length > MAX_SHUTTLES) { const shuttleDirectory = sortedShuttles.pop() await recursiveDelete(shuttleDirectory!.shuttleDirectory) ++prunedShuttles } if (prunedShuttles) { Log.info( `decommissioned ${prunedShuttles} old shuttle${ prunedShuttles > 1 ? 's' : '' }` ) } } function isShuttleValid({ manifestPath, pagesDirectory, parentCacheIdentifier, }: { manifestPath: string pagesDirectory: string parentCacheIdentifier: string }) { const manifest = require(manifestPath) as ChunkGraphManifest const { sharedFiles, hashes } = manifest if (!sharedFiles) { return false } return !sharedFiles .map(file => { const filePath = path.join(path.dirname(pagesDirectory), file) const exists = fs.existsSync(filePath) if (!exists) { return true } const hash = crypto .createHash('sha1') .update(parentCacheIdentifier) .update(fs.readFileSync(filePath)) .digest('hex') return hash !== hashes[file] }) .some(Boolean) } export class FlyingShuttle { private apexShuttleDirectory: string private flyingShuttleId: string private buildId: string private pagesDirectory: string private distDirectory: string private parentCacheIdentifier: string private _shuttleBuildId: string | undefined private _restoreSema = new Sema(1) private _recalledManifest: ChunkGraphManifest = { sharedFiles: [], pages: {}, pageChunks: {}, chunks: {}, hashes: {}, }
buildId, pagesDirectory, distDirectory, cacheIdentifier, }: { buildId: string pagesDirectory: string distDirectory: string cacheIdentifier: string }) { mkdirpModule.sync( (this.apexShuttleDirectory = path.join( distDirectory, 'cache', 'next-flying-shuttle' )) ) this.flyingShuttleId = crypto.randomBytes(16).toString('hex') this.buildId = buildId this.pagesDirectory = pagesDirectory this.distDirectory = distDirectory this.parentCacheIdentifier = cacheIdentifier } get shuttleDirectory() { return path.join(this.apexShuttleDirectory, this.flyingShuttleId) } private findShuttleId = async () => { const shuttles = await findCachedShuttles(this.apexShuttleDirectory) return shuttles.find(shuttleId => { try { const manifestPath = path.join( this.apexShuttleDirectory, shuttleId, CHUNK_GRAPH_MANIFEST ) return isShuttleValid({ manifestPath, pagesDirectory: this.pagesDirectory, parentCacheIdentifier: this.parentCacheIdentifier, }) } catch (_) {} return false }) } hasShuttle = async () => { const existingFlyingShuttleId = await this.findShuttleId() this.flyingShuttleId = existingFlyingShuttleId || this.flyingShuttleId const found = this.shuttleBuildId && (await fileExists(path.join(this.shuttleDirectory, CHUNK_GRAPH_MANIFEST))) if (found) { Log.info('flying shuttle is docked') } return found } get shuttleBuildId() { if (this._shuttleBuildId) { return this._shuttleBuildId } const headBuildIdPath = path.join(this.shuttleDirectory, FILE_BUILD_ID) if (!fs.existsSync(headBuildIdPath)) { return (this._shuttleBuildId = undefined) } const contents = fs.readFileSync(headBuildIdPath, 'utf8').trim() return (this._shuttleBuildId = contents) } getPageInfos = async (): Promise<Map<string, PageInfo>> => { const pageInfos: Map<string, PageInfo> = new Map() const pagesManifest = JSON.parse( await fsReadFile( path.join( this.shuttleDirectory, DIR_FILES_NAME, 'serverless/pages-manifest.json' ), 'utf8' ) ) Object.keys(pagesManifest).forEach(pg => { const path = pagesManifest[pg] const isStatic: 
boolean = path.endsWith('html') let isAmp = Boolean(pagesManifest[pg + '.amp']) if (pg === '/') isAmp = Boolean(pagesManifest['/index.amp']) pageInfos.set(pg, { isAmp, size: 0, static: isStatic, serverBundle: path, }) }) return pageInfos } getUnchangedPages = async () => { const manifestPath = path.join(this.shuttleDirectory, CHUNK_GRAPH_MANIFEST) const manifest = require(manifestPath) as ChunkGraphManifest const { sharedFiles, pages: pageFileDictionary, hashes } = manifest const pageNames = Object.keys(pageFileDictionary) const allFiles = new Set(sharedFiles) pageNames.forEach(pageName => pageFileDictionary[pageName].forEach(file => allFiles.add(file)) ) const fileChanged = new Map() await Promise.all( [...allFiles].map(async file => { const filePath = path.join(path.dirname(this.pagesDirectory), file) const exists = await fileExists(filePath) if (!exists) { fileChanged.set(file, true) return } const hash = crypto .createHash('sha1') .update(this.parentCacheIdentifier) .update(await fsReadFile(filePath)) .digest('hex') fileChanged.set(file, hash !== hashes[file]) }) ) const unchangedPages = (sharedFiles || []) .map(f => fileChanged.get(f)) .some(Boolean) ? [] : pageNames .filter( p => !pageFileDictionary[p].map(f => fileChanged.get(f)).some(Boolean) ) .filter( pageName => pageName !== '/_app' && pageName !== '/_error' && pageName !== '/_document' ) if (unchangedPages.length) { const u = unchangedPages.length const c = pageNames.length - u Log.info(`found ${c} changed and ${u} unchanged page${u > 1 ? 
's' : ''}`) } else { Log.warn( `flying shuttle is going to perform a full rebuild due to changes across all pages` ) } return unchangedPages } mergeManifests = async (): Promise<void> => { for (const manifestPath of SAVED_MANIFESTS) { const savedPagesManifest = path.join( this.shuttleDirectory, DIR_FILES_NAME, manifestPath ) if (!(await fileExists(savedPagesManifest))) return const saved = JSON.parse(await fsReadFile(savedPagesManifest, 'utf8')) const currentPagesManifest = path.join(this.distDirectory, manifestPath) const current = JSON.parse(await fsReadFile(currentPagesManifest, 'utf8')) if (manifestPath === PRERENDER_MANIFEST) { const prerenderRoutes = new Map<string, PrerenderRoute>() if (Array.isArray(saved.prerenderRoutes)) { saved.prerenderRoutes.forEach((route: PrerenderRoute) => { prerenderRoutes.set(route.path, route) }) } if (Array.isArray(current.prerenderRoutes)) { current.prerenderRoutes.forEach((route: PrerenderRoute) => { prerenderRoutes.set(route.path, route) }) } await fsWriteFile( currentPagesManifest, JSON.stringify({ prerenderRoutes: [...prerenderRoutes.values()], }) ) } else { await fsWriteFile( currentPagesManifest, JSON.stringify({ ...saved, ...current, }) ) } } } restorePage = async ( page: string, pageInfo: PageInfo = {} as PageInfo ): Promise<boolean> => { await this._restoreSema.acquire() try { const manifestPath = path.join( this.shuttleDirectory, CHUNK_GRAPH_MANIFEST ) const manifest = require(manifestPath) as ChunkGraphManifest const { pages, pageChunks, hashes } = manifest if (!(pages.hasOwnProperty(page) && pageChunks.hasOwnProperty(page))) { Log.warn(`unable to find ${page} in shuttle`) return false } const serverless = path.join( 'serverless/pages', `${page === '/' ? 'index' : page}.${pageInfo.static ? 
'html' : 'js'}` ) const files = [serverless, ...pageChunks[page]] const filesExists = await Promise.all( files .map(f => path.join(this.shuttleDirectory, DIR_FILES_NAME, f)) .map(f => fileExists(f)) ) if (!filesExists.every(Boolean)) { Log.warn(`unable to locate files for ${page} in shuttle`) return false } const rewriteRegex = new RegExp(`${this.shuttleBuildId}[\\/\\\\]`) const movedPageChunks: string[] = [] await Promise.all( files.map(async recallFileName => { if (!rewriteRegex.test(recallFileName)) { const recallPath = path.join(this.distDirectory, recallFileName) const recallPathExists = await fileExists(recallPath) if (!recallPathExists) { await mkdirp(path.dirname(recallPath)) await fsCopyFile( path.join( this.shuttleDirectory, DIR_FILES_NAME, recallFileName ), recallPath ) } movedPageChunks.push(recallFileName) return } const newFileName = recallFileName.replace( rewriteRegex, `${this.buildId}/` ) const recallPath = path.join(this.distDirectory, newFileName) const recallPathExists = await fileExists(recallPath) if (!recallPathExists) { await mkdirp(path.dirname(recallPath)) await fsCopyFile( path.join(this.shuttleDirectory, DIR_FILES_NAME, recallFileName), recallPath ) } movedPageChunks.push(newFileName) }) ) this._recalledManifest.pages[page] = pages[page] this._recalledManifest.pageChunks[page] = movedPageChunks.filter( f => f !== serverless ) this._recalledManifest.hashes = Object.assign( {}, this._recalledManifest.hashes, pages[page].reduce( (acc, cur) => Object.assign(acc, { [cur]: hashes[cur] }), {} ) ) return true } finally { this._restoreSema.release() } } save = async (staticPages: Set<string>, pageInfos: Map<string, PageInfo>) => { Log.wait('docking flying shuttle') await recursiveDelete(this.shuttleDirectory) await mkdirp(this.shuttleDirectory) const nextManifestPath = path.join(this.distDirectory, CHUNK_GRAPH_MANIFEST) if (!(await fileExists(nextManifestPath))) { Log.warn('could not find shuttle payload :: shuttle will not be docked') return } 
const nextManifest = JSON.parse( await fsReadFile(nextManifestPath, 'utf8') ) as ChunkGraphManifest const storeManifest: ChunkGraphManifest = { // Intentionally does not merge with the recalled manifest sharedFiles: nextManifest.sharedFiles, pages: Object.assign( {}, this._recalledManifest.pages, nextManifest.pages ), pageChunks: Object.assign( {}, this._recalledManifest.pageChunks, nextManifest.pageChunks ), chunks: Object.assign( {}, this._recalledManifest.chunks, nextManifest.chunks ), hashes: Object.assign( {}, this._recalledManifest.hashes, nextManifest.hashes ), } await fsWriteFile( path.join(this.shuttleDirectory, FILE_BUILD_ID), this.buildId ) await fsWriteFile( path.join(this.shuttleDirectory, FILE_UPDATED_AT), new Date().toISOString() ) const usedChunks = new Set<string>() const pages = Object.keys(storeManifest.pageChunks) pages.forEach(page => { const info = pageInfos.get(page) || ({} as PageInfo) storeManifest.pageChunks[page].forEach((file, idx) => { if (info.isAmp) { // AMP pages don't have client bundles storeManifest.pageChunks[page] = [] return } usedChunks.add(file) }) usedChunks.add( path.join( 'serverless/pages', `${page === '/' ? 'index' : page}.${ staticPages.has(page) ? 'html' : 'js' }` ) ) const ampPage = (page === '/' ? 
'/index' : page) + '.amp' if (staticPages.has(ampPage)) { storeManifest.pages[ampPage] = [] storeManifest.pageChunks[ampPage] = [] usedChunks.add(path.join('serverless/pages', `${ampPage}.html`)) } }) await fsWriteFile( path.join(this.shuttleDirectory, CHUNK_GRAPH_MANIFEST), JSON.stringify(storeManifest, null, 2) + EOL ) await Promise.all( [...usedChunks].map(async usedChunk => { const target = path.join( this.shuttleDirectory, DIR_FILES_NAME, usedChunk ) await mkdirp(path.dirname(target)) return fsCopyFile(path.join(this.distDirectory, usedChunk), target) }) ) for (const manifestPath of SAVED_MANIFESTS) { await fsCopyFile( path.join(this.distDirectory, manifestPath), path.join(this.shuttleDirectory, DIR_FILES_NAME, manifestPath) ) } Log.info(`flying shuttle payload: ${usedChunks.size + 2} files`) Log.ready('flying shuttle docked') try { await pruneShuttles(this.apexShuttleDirectory) } catch (e) { Log.error('failed to prune old shuttles: ' + e) } } }
constructor({
mod.rs
#![allow(dead_code)] use assert_cmd::assert::OutputAssertExt; use jormungandr_lib::crypto::hash::Hash; use jormungandr_lib::interfaces::{ AccountState, CommitteeIdDef, FragmentLog, FragmentStatus, LeadershipLog, SettingsDto, StakePoolStats, UTxOInfo, UTxOOutputInfo, }; pub mod certificate; pub mod jcli_commands; pub mod jcli_transaction_wrapper; pub use jcli_transaction_wrapper::JCLITransactionWrapper; use super::configuration; use crate::common::jormungandr::JormungandrProcess; use assert_fs::prelude::*; use assert_fs::{fixture::ChildPath, NamedTempFile}; use chain_addr::Discrimination; use jormungandr_testing_utils::testing::process::ProcessOutput as _; use jortestkit::process::{ output_extensions::ProcessOutput, run_process_until_response_matches, Wait, }; use serde_json::Value; use std::collections::BTreeMap; use std::path::Path; use thiserror::Error; #[derive(Debug, Error)] pub enum Error { #[error("transaction {transaction_id} is not in block. message log: {message_log}. Jormungandr log: {log_content}")] TransactionNotInBlock { message_log: String, transaction_id: Hash, log_content: String, }, #[error("at least one transaction is not in block. message log: {message_log}. 
Jormungandr log: {log_content}")] TransactionsNotInBlock { message_log: String, log_content: String, }, } pub fn assert_genesis_encode(genesis_yaml_file_path: &Path, output_file: &ChildPath) { jcli_commands::get_genesis_encode_command(genesis_yaml_file_path, output_file.path()) .assert() .success(); output_file.assert(crate::predicate::file_exists_and_not_empty()); } pub fn assert_genesis_decode(genesis_yaml_file_path: &Path, output_file: &ChildPath) { jcli_commands::get_genesis_decode_command(genesis_yaml_file_path, output_file.path()) .assert() .success(); output_file.assert(crate::predicate::file_exists_and_not_empty()); } pub fn assert_genesis_encode_fails( genesis_yaml_file_path: &Path, output_file: &ChildPath, expected_msg: &str, ) { jcli_commands::get_genesis_encode_command(genesis_yaml_file_path, output_file.path()) .assert() .failure() .stderr(predicates::str::contains(expected_msg)); } pub fn assert_genesis_hash(path_to_output_block: &Path) -> String { jcli_commands::get_genesis_hash_command(&path_to_output_block) .assert() .success() .get_output() .as_single_line() } pub fn assert_genesis_hash_fails(path_to_output_block: &Path, expected_msg: &str) { jcli_commands::get_genesis_hash_command(&path_to_output_block) .assert() .failure() .stderr(predicates::str::contains(expected_msg)); } pub fn assert_rest_stats(host: &str) -> BTreeMap<String, String> { jcli_commands::get_rest_stats_command(&host) .assert() .success() .get_output() .as_single_node_yaml() } pub fn assert_rest_utxo_get_returns_same_utxo(host: &str, utxo: &UTxOInfo) { let rest_utxo = assert_rest_utxo_get_by_utxo(host, utxo); assert_eq!(utxo, &rest_utxo, "UTxO returned from REST is invalid"); } pub fn assert_rest_utxo_get_by_utxo(host: &str, utxo: &UTxOInfo) -> UTxOInfo { assert_rest_utxo_get( host, &utxo.transaction_id().to_string(), utxo.index_in_transaction(), ) } pub fn assert_rest_utxo_get(host: &str, fragment_id_bech32: &str, output_index: u8) -> UTxOInfo { let content = 
jcli_commands::get_rest_utxo_get_command(&host, fragment_id_bech32, output_index) .assert() .success() .get_output() .as_lossy_string(); let fragment_id = fragment_id_bech32 .parse() .expect("UTxO fragment ID is not a valid hex value"); serde_yaml::from_str::<UTxOOutputInfo>(&content) .expect("JCLI returned malformed UTxO") .into_utxo_info(fragment_id, output_index) } pub fn assert_rest_utxo_get_by_utxo_not_found(host: &str, utxo: &UTxOInfo) { assert_rest_utxo_get_not_found( host, &utxo.transaction_id().to_string(), utxo.index_in_transaction(), ) } pub fn assert_rest_utxo_get_not_found(host: &str, fragment_id_bech32: &str, output_index: u8) { jcli_commands::get_rest_utxo_get_command(&host, fragment_id_bech32, output_index) .assert() .failure() .stderr(predicates::str::contains("404 Not Found")); } pub fn assert_get_address_info(address: &str) -> BTreeMap<String, String> { jcli_commands::get_address_info_command_default(&address) .assert() .success() .get_output() .as_single_node_yaml() } pub fn assert_get_address_info_fails(adress: &str, expected_msg: &str) { jcli_commands::get_address_info_command_default(&adress) .assert() .failure() .stderr(predicates::str::contains(expected_msg)); } pub fn assert_genesis_init() -> String { jcli_commands::get_genesis_init_command() .assert() .success() .get_output() .as_lossy_string() } pub fn assert_address_single(public_key: &str, discrimination: Discrimination) -> String { jcli_commands::get_address_single_command(&public_key, discrimination) .assert() .success() .get_output() .as_single_line() } pub fn assert_address_delegation( public_key: &str, delegation_key: &str, discrimination: Discrimination, ) -> String { jcli_commands::get_address_delegation_command(&public_key, &delegation_key, discrimination) .assert() .success() .get_output() .as_single_line() } pub fn assert_address_account(public_key: &str, discrimination: Discrimination) -> String { jcli_commands::get_address_account_command(&public_key, discrimination) 
.assert() .success() .get_output() .as_single_line() } pub fn assert_post_transaction(transactions_message: &str, host: &str) -> Hash { let transaction_file = NamedTempFile::new("transaction.hash").unwrap(); transaction_file.write_str(transactions_message).unwrap(); jcli_commands::get_post_transaction_command(transaction_file.path(), host) .assert() .success() .get_output() .as_hash() } pub fn assert_transaction_post_accepted(transactions_message: &str, host: &str) { let node_stats = assert_rest_stats(&host); let before: i32 = node_stats.get("txRecvCnt").unwrap().parse().unwrap(); assert_post_transaction(&transactions_message, &host); let node_stats = assert_rest_stats(&host); let after: i32 = node_stats.get("txRecvCnt").unwrap().parse().unwrap(); assert_eq!( before + 1, after, "Transaction was NOT accepted by node: \ txRecvCnt counter wasn't incremented after post" ); } pub fn assert_transaction_post_failed(transactions_message: &str, host: &str) { let node_stats = assert_rest_stats(&host); let before: i32 = node_stats.get("txRecvCnt").unwrap().parse().unwrap(); assert_post_transaction(&transactions_message, &host); let node_stats = assert_rest_stats(&host); let after: i32 = node_stats.get("txRecvCnt").unwrap().parse().unwrap(); assert_eq!( before, after, "Transaction was accepted by node while it should not be: \ txRecvCnt counter was incremented after post" ); } pub fn assert_get_active_voting_committees(host: &str) -> Vec<CommitteeIdDef> { let content = jcli_commands::get_rest_active_committes(host) .assert() .success() .get_output() .as_lossy_string(); serde_yaml::from_str(&content).expect("JCLI returned malformed CommitteeIdDef") } pub fn assert_get_active_vote_plans(host: &str) -> Vec<Value> { let content = jcli_commands::get_rest_active_vote_plans(host) .assert() .success() .get_output() .as_lossy_string(); serde_yaml::from_str(&content).expect("JCLI returned malformed VotePlan") } pub fn assert_key_generate_default() -> String
pub fn assert_key_generate(key_type: &str) -> String { jcli_commands::get_key_generate_command(&key_type) .assert() .success() .get_output() .as_single_line() } pub fn assert_key_with_seed_generate(key_type: &str, seed: &str) -> String { jcli_commands::get_key_generate_with_seed_command(&key_type, &seed) .assert() .success() .get_output() .as_single_line() } pub fn assert_key_to_public_default(private_key: &str) -> String { let input_file = NamedTempFile::new("key_to_public.input").unwrap(); input_file.write_str(private_key).unwrap(); jcli_commands::get_key_to_public_command(input_file.path()) .assert() .success() .get_output() .as_single_line() } pub fn assert_key_to_public_fails(private_key: &str, expected_msg: &str) { let input_file = NamedTempFile::new("key_to_public.input").unwrap(); input_file.write_str(private_key).unwrap(); jcli_commands::get_key_to_public_command(input_file.path()) .assert() .failure() .stderr(predicates::str::contains(expected_msg)); } pub fn assert_key_to_bytes(private_key: &str, path_to_output_file: &Path) { let input_file = NamedTempFile::new("key_to_bytes.input").unwrap(); input_file.write_str(private_key).unwrap(); jcli_commands::get_key_to_bytes_command(input_file.path(), &path_to_output_file) .assert() .success(); } pub fn assert_key_from_bytes(path_to_input_file: &Path, key_type: &str) -> String { jcli_commands::get_key_from_bytes_command(&path_to_input_file, &key_type) .assert() .success() .get_output() .as_single_line() } pub fn assert_key_from_bytes_fails(path_to_input_file: &Path, key_type: &str, expected_msg: &str) { jcli_commands::get_key_from_bytes_command(&path_to_input_file, &key_type) .assert() .failure() .stderr(predicates::str::contains(expected_msg)); } pub fn assert_key_to_bytes_fails( input_file: &Path, path_to_output_file: &Path, expected_msg: &str, ) { jcli_commands::get_key_to_bytes_command(&input_file, &path_to_output_file) .assert() .failure() .stderr(predicates::str::contains(expected_msg)); } pub fn 
assert_rest_get_leadership_log(host: &str) -> Vec<LeadershipLog> { let content = jcli_commands::get_rest_leaders_logs_command(&host) .assert() .success() .get_output() .as_lossy_string(); serde_yaml::from_str(&content).unwrap() } pub fn assert_rest_get_block_tip(host: &str) -> String { jcli_commands::get_rest_block_tip_command(&host) .assert() .success() .get_output() .as_single_line() } pub fn assert_rest_account_get_stats(address: &str, host: &str) -> AccountState { let content = jcli_commands::get_rest_account_stats_command(&address, &host) .assert() .success() .get_output() .as_lossy_string(); serde_yaml::from_str(&content).unwrap() } pub fn assert_rest_shutdown(host: &str) { jcli_commands::get_rest_shutdown_command(&host) .assert() .success(); } pub fn assert_rest_get_block_by_id(block_id: &str, host: &str) -> String { jcli_commands::get_rest_get_block_command(&block_id, &host) .assert() .success() .get_output() .as_single_line() } pub fn assert_rest_get_next_block_id(block_id: &str, id_count: i32, host: &str) -> Hash { jcli_commands::get_rest_get_next_block_id_command(&block_id, id_count, &host) .assert() .success() .get_output() .as_hash() } pub fn assert_transaction_in_block( transaction_message: &str, jormungandr: &JormungandrProcess, ) -> Hash { let fragment_id = assert_post_transaction(&transaction_message, &jormungandr.rest_uri()); let wait: Wait = Default::default(); wait_until_transaction_processed(fragment_id, jormungandr, &wait).unwrap(); assert_transaction_log_shows_in_block(fragment_id, jormungandr); fragment_id } pub fn assert_transaction_in_block_with_wait( transaction_message: &str, jormungandr: &JormungandrProcess, wait: &Wait, ) -> Hash { let fragment_id = assert_post_transaction(&transaction_message, &jormungandr.rest_uri()); wait_until_transaction_processed(fragment_id, jormungandr, wait).unwrap(); assert_transaction_log_shows_in_block(fragment_id, jormungandr); fragment_id } pub fn assert_transaction_rejected( transaction_message: &str, 
jormungandr: &JormungandrProcess, expected_reason: &str, ) { let fragment_id = assert_post_transaction(&transaction_message, &jormungandr.rest_uri()); let wait: Wait = Default::default(); wait_until_transaction_processed(fragment_id, jormungandr, &wait).unwrap(); assert_transaction_log_shows_rejected(fragment_id, jormungandr, &expected_reason); } pub fn wait_until_transaction_processed( fragment_id: Hash, jormungandr: &JormungandrProcess, wait: &Wait, ) -> Result<(), Error> { run_process_until_response_matches( jcli_commands::get_rest_message_log_command(&jormungandr.rest_uri()), |output| { let content = output.as_lossy_string(); let fragments: Vec<FragmentLog> = serde_yaml::from_str(&content).expect("Cannot parse fragment logs"); match fragments.iter().find(|x| *x.fragment_id() == fragment_id) { Some(x) => { println!("Transaction found in mempool. {:?}", x); !x.is_pending() } None => { println!("Transaction with hash {} not found in mempool", fragment_id); false } } }, wait.sleep_duration().as_secs(), wait.attempts(), &format!( "Waiting for transaction: '{}' to be inBlock or rejected", fragment_id ), &format!( "transaction: '{}' is pending for too long, Logs: {:?}", fragment_id, jormungandr.logger.get_log_content() ), ) .map_err(|_| Error::TransactionNotInBlock { message_log: format!("{:?}", assert_get_rest_message_log(&jormungandr.rest_uri())), transaction_id: fragment_id, log_content: jormungandr.logger.get_log_content(), }) } pub fn assert_transaction_log_shows_in_block(fragment_id: Hash, jormungandr: &JormungandrProcess) { let fragments = assert_get_rest_message_log(&jormungandr.rest_uri()); match fragments.iter().find(|x| *x.fragment_id() == fragment_id) { Some(x) => assert!( x.is_in_a_block(), "Fragment should be in block, actual: {:?}. Logs: {:?}", &x, jormungandr.logger.get_log_content() ), None => panic!( "cannot find any fragment in rest message log, output: {:?}. 
Node log: {:?}", &fragments, jormungandr.logger.get_log_content() ), } } pub fn assert_transaction_log_shows_rejected( fragment_id: Hash, jormungandr: &JormungandrProcess, expected_msg: &str, ) { let fragments = assert_get_rest_message_log(&jormungandr.rest_uri()); match fragments.iter().find(|x| *x.fragment_id() == fragment_id) { Some(x) => { assert!( x.is_rejected(), "Fragment should be rejected, actual: {:?}. Logs: {:?}", &x, jormungandr.logger.get_log_content() ); match x.status() { FragmentStatus::Rejected { reason } => assert!(reason.contains(&expected_msg)), _ => panic!("Non expected state for for rejected log"), } } None => panic!( "cannot find any fragment in rest message log, output: {:?}. Logs: {:?}", &fragments, jormungandr.logger.get_log_content() ), } } pub fn send_transactions_and_wait_until_in_block( transactions_messages: &[String], jormungandr: &JormungandrProcess, ) -> Result<(), Error> { for transactions_message in transactions_messages.iter() { assert_post_transaction(&transactions_message, &jormungandr.rest_uri()); } wait_until_all_transactions_processed(&jormungandr)?; check_all_transaction_log_shows_in_block(&jormungandr) } pub fn wait_until_all_transactions_processed( jormungandr: &JormungandrProcess, ) -> Result<(), Error> { run_process_until_response_matches( jcli_commands::get_rest_message_log_command(&jormungandr.rest_uri()), |output| { let content = output.as_lossy_string(); let fragments: Vec<FragmentLog> = serde_yaml::from_str(&content).expect("Cannot parse fragment logs"); let at_least_one_pending = fragments.iter().any(|x| x.is_pending()); !at_least_one_pending }, 1, 5, "Waiting for last transaction to be inBlock or rejected", "transaction is pending for too long", ) .map_err(|_| Error::TransactionsNotInBlock { message_log: format!("{:?}", assert_get_rest_message_log(&jormungandr.rest_uri())), log_content: jormungandr.logger.get_log_content(), }) } pub fn check_all_transaction_log_shows_in_block( jormungandr: &JormungandrProcess, ) 
-> Result<(), Error> { let fragments = assert_get_rest_message_log(&jormungandr.rest_uri()); for fragment in fragments.iter() { if !fragment.is_in_a_block() { return Err(Error::TransactionNotInBlock { message_log: format!("{:?}", fragments.clone()), transaction_id: *fragment.fragment_id(), log_content: jormungandr.logger.get_log_content(), }); } } Ok(()) } pub fn assert_get_rest_message_log(host: &str) -> Vec<FragmentLog> { let content = jcli_commands::get_rest_message_log_command(&host) .assert() .success() .get_output() .as_lossy_string(); serde_yaml::from_str(&content).expect("Failed to parse fragment log") } pub fn assert_get_rest_settings(host: &str) -> SettingsDto { let content = jcli_commands::get_rest_settings_command(&host) .assert() .success() .get_output() .as_lossy_string(); serde_yaml::from_str(&content).expect("Failed to parse settings") } pub fn assert_rest_get_stake_pools(host: &str) -> Vec<String> { let content = jcli_commands::get_stake_pools_command(&host) .assert() .success() .get_output() .as_lossy_string(); serde_yaml::from_str(&content).expect("Failed to parse stake poools collection") } pub fn assert_rest_get_stake_pool(stake_pool_id: &str, host: &str) -> StakePoolStats { let content = jcli_commands::get_stake_pool_command(&stake_pool_id, &host) .assert() .success() .get_output() .as_lossy_string(); serde_yaml::from_str(&content).expect("Failed to parse stak pool stats") }
{ jcli_commands::get_key_generate_command_default() .assert() .success() .get_output() .as_single_line() }
output.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateTemplateSyncConfigOutput { /// <p>The template sync configuration detail data that's returned by Proton.</p> pub template_sync_config: std::option::Option<crate::model::TemplateSyncConfig>, } impl CreateTemplateSyncConfigOutput { /// <p>The template sync configuration detail data that's returned by Proton.</p> pub fn template_sync_config(&self) -> std::option::Option<&crate::model::TemplateSyncConfig> { self.template_sync_config.as_ref() } } impl std::fmt::Debug for CreateTemplateSyncConfigOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateTemplateSyncConfigOutput"); formatter.field("template_sync_config", &self.template_sync_config); formatter.finish() } } /// See [`CreateTemplateSyncConfigOutput`](crate::output::CreateTemplateSyncConfigOutput) pub mod create_template_sync_config_output { /// A builder for [`CreateTemplateSyncConfigOutput`](crate::output::CreateTemplateSyncConfigOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) template_sync_config: std::option::Option<crate::model::TemplateSyncConfig>, } impl Builder { /// <p>The template sync configuration detail data that's returned by Proton.</p> pub fn template_sync_config(mut self, input: crate::model::TemplateSyncConfig) -> Self { self.template_sync_config = Some(input); self } /// <p>The template sync configuration detail data that's returned by Proton.</p> pub fn set_template_sync_config( mut self, input: std::option::Option<crate::model::TemplateSyncConfig>, ) -> Self { self.template_sync_config = input; self } /// Consumes the builder and constructs a 
[`CreateTemplateSyncConfigOutput`](crate::output::CreateTemplateSyncConfigOutput) pub fn build(self) -> crate::output::CreateTemplateSyncConfigOutput { crate::output::CreateTemplateSyncConfigOutput { template_sync_config: self.template_sync_config, } } } } impl CreateTemplateSyncConfigOutput { /// Creates a new builder-style object to manufacture [`CreateTemplateSyncConfigOutput`](crate::output::CreateTemplateSyncConfigOutput) pub fn builder() -> crate::output::create_template_sync_config_output::Builder { crate::output::create_template_sync_config_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeleteTemplateSyncConfigOutput { /// <p>The template sync configuration detail data that's returned by Proton.</p> pub template_sync_config: std::option::Option<crate::model::TemplateSyncConfig>, } impl DeleteTemplateSyncConfigOutput { /// <p>The template sync configuration detail data that's returned by Proton.</p> pub fn template_sync_config(&self) -> std::option::Option<&crate::model::TemplateSyncConfig> { self.template_sync_config.as_ref() } } impl std::fmt::Debug for DeleteTemplateSyncConfigOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeleteTemplateSyncConfigOutput"); formatter.field("template_sync_config", &self.template_sync_config); formatter.finish() } } /// See [`DeleteTemplateSyncConfigOutput`](crate::output::DeleteTemplateSyncConfigOutput) pub mod delete_template_sync_config_output { /// A builder for [`DeleteTemplateSyncConfigOutput`](crate::output::DeleteTemplateSyncConfigOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) template_sync_config: std::option::Option<crate::model::TemplateSyncConfig>, } impl Builder { /// <p>The template sync configuration detail data that's returned 
by Proton.</p> pub fn template_sync_config(mut self, input: crate::model::TemplateSyncConfig) -> Self { self.template_sync_config = Some(input); self } /// <p>The template sync configuration detail data that's returned by Proton.</p> pub fn set_template_sync_config( mut self, input: std::option::Option<crate::model::TemplateSyncConfig>, ) -> Self { self.template_sync_config = input; self } /// Consumes the builder and constructs a [`DeleteTemplateSyncConfigOutput`](crate::output::DeleteTemplateSyncConfigOutput) pub fn build(self) -> crate::output::DeleteTemplateSyncConfigOutput { crate::output::DeleteTemplateSyncConfigOutput { template_sync_config: self.template_sync_config, } } } } impl DeleteTemplateSyncConfigOutput { /// Creates a new builder-style object to manufacture [`DeleteTemplateSyncConfigOutput`](crate::output::DeleteTemplateSyncConfigOutput) pub fn builder() -> crate::output::delete_template_sync_config_output::Builder { crate::output::delete_template_sync_config_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateTemplateSyncConfigOutput { /// <p>The template sync configuration detail data that's returned by Proton.</p> pub template_sync_config: std::option::Option<crate::model::TemplateSyncConfig>, } impl UpdateTemplateSyncConfigOutput { /// <p>The template sync configuration detail data that's returned by Proton.</p> pub fn template_sync_config(&self) -> std::option::Option<&crate::model::TemplateSyncConfig> { self.template_sync_config.as_ref() } } impl std::fmt::Debug for UpdateTemplateSyncConfigOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateTemplateSyncConfigOutput"); formatter.field("template_sync_config", &self.template_sync_config); formatter.finish() } } /// See [`UpdateTemplateSyncConfigOutput`](crate::output::UpdateTemplateSyncConfigOutput) pub mod 
update_template_sync_config_output { /// A builder for [`UpdateTemplateSyncConfigOutput`](crate::output::UpdateTemplateSyncConfigOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) template_sync_config: std::option::Option<crate::model::TemplateSyncConfig>, } impl Builder { /// <p>The template sync configuration detail data that's returned by Proton.</p> pub fn template_sync_config(mut self, input: crate::model::TemplateSyncConfig) -> Self { self.template_sync_config = Some(input); self } /// <p>The template sync configuration detail data that's returned by Proton.</p> pub fn set_template_sync_config( mut self, input: std::option::Option<crate::model::TemplateSyncConfig>, ) -> Self { self.template_sync_config = input; self } /// Consumes the builder and constructs a [`UpdateTemplateSyncConfigOutput`](crate::output::UpdateTemplateSyncConfigOutput) pub fn build(self) -> crate::output::UpdateTemplateSyncConfigOutput { crate::output::UpdateTemplateSyncConfigOutput { template_sync_config: self.template_sync_config, } } } } impl UpdateTemplateSyncConfigOutput { /// Creates a new builder-style object to manufacture [`UpdateTemplateSyncConfigOutput`](crate::output::UpdateTemplateSyncConfigOutput) pub fn builder() -> crate::output::update_template_sync_config_output::Builder { crate::output::update_template_sync_config_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct GetTemplateSyncConfigOutput { /// <p>The template sync configuration detail data that's returned by Proton.</p> pub template_sync_config: std::option::Option<crate::model::TemplateSyncConfig>, } impl GetTemplateSyncConfigOutput { /// <p>The template sync configuration detail data that's returned by Proton.</p> pub fn template_sync_config(&self) -> std::option::Option<&crate::model::TemplateSyncConfig> 
{ self.template_sync_config.as_ref() } } impl std::fmt::Debug for GetTemplateSyncConfigOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetTemplateSyncConfigOutput"); formatter.field("template_sync_config", &self.template_sync_config); formatter.finish() } } /// See [`GetTemplateSyncConfigOutput`](crate::output::GetTemplateSyncConfigOutput) pub mod get_template_sync_config_output { /// A builder for [`GetTemplateSyncConfigOutput`](crate::output::GetTemplateSyncConfigOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) template_sync_config: std::option::Option<crate::model::TemplateSyncConfig>, } impl Builder { /// <p>The template sync configuration detail data that's returned by Proton.</p> pub fn template_sync_config(mut self, input: crate::model::TemplateSyncConfig) -> Self { self.template_sync_config = Some(input); self } /// <p>The template sync configuration detail data that's returned by Proton.</p> pub fn set_template_sync_config( mut self, input: std::option::Option<crate::model::TemplateSyncConfig>, ) -> Self { self.template_sync_config = input; self } /// Consumes the builder and constructs a [`GetTemplateSyncConfigOutput`](crate::output::GetTemplateSyncConfigOutput) pub fn build(self) -> crate::output::GetTemplateSyncConfigOutput { crate::output::GetTemplateSyncConfigOutput { template_sync_config: self.template_sync_config, } } } } impl GetTemplateSyncConfigOutput { /// Creates a new builder-style object to manufacture [`GetTemplateSyncConfigOutput`](crate::output::GetTemplateSyncConfigOutput) pub fn builder() -> crate::output::get_template_sync_config_output::Builder { crate::output::get_template_sync_config_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct 
ListServiceTemplateVersionsOutput { /// <p>A token to indicate the location of the next major or minor version in the array of major or minor versions of a service template, after the /// current requested list of service major or minor versions.</p> pub next_token: std::option::Option<std::string::String>, /// <p>An array of major or minor versions of a service template with detail data.</p> pub template_versions: std::option::Option<std::vec::Vec<crate::model::ServiceTemplateVersionSummary>>, } impl ListServiceTemplateVersionsOutput { /// <p>A token to indicate the location of the next major or minor version in the array of major or minor versions of a service template, after the /// current requested list of service major or minor versions.</p> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } /// <p>An array of major or minor versions of a service template with detail data.</p> pub fn template_versions( &self, ) -> std::option::Option<&[crate::model::ServiceTemplateVersionSummary]> { self.template_versions.as_deref() } } impl std::fmt::Debug for ListServiceTemplateVersionsOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListServiceTemplateVersionsOutput"); formatter.field("next_token", &self.next_token); formatter.field("template_versions", &self.template_versions); formatter.finish() } } /// See [`ListServiceTemplateVersionsOutput`](crate::output::ListServiceTemplateVersionsOutput) pub mod list_service_template_versions_output { /// A builder for [`ListServiceTemplateVersionsOutput`](crate::output::ListServiceTemplateVersionsOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) next_token: std::option::Option<std::string::String>, pub(crate) template_versions: std::option::Option<std::vec::Vec<crate::model::ServiceTemplateVersionSummary>>, } impl Builder { /// <p>A 
token to indicate the location of the next major or minor version in the array of major or minor versions of a service template, after the /// current requested list of service major or minor versions.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>A token to indicate the location of the next major or minor version in the array of major or minor versions of a service template, after the /// current requested list of service major or minor versions.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Appends an item to `template_versions`. /// /// To override the contents of this collection use [`set_template_versions`](Self::set_template_versions). /// /// <p>An array of major or minor versions of a service template with detail data.</p> pub fn template_versions( mut self, input: impl Into<crate::model::ServiceTemplateVersionSummary>, ) -> Self { let mut v = self.template_versions.unwrap_or_default(); v.push(input.into()); self.template_versions = Some(v); self } /// <p>An array of major or minor versions of a service template with detail data.</p> pub fn set_template_versions( mut self, input: std::option::Option<std::vec::Vec<crate::model::ServiceTemplateVersionSummary>>, ) -> Self { self.template_versions = input; self } /// Consumes the builder and constructs a [`ListServiceTemplateVersionsOutput`](crate::output::ListServiceTemplateVersionsOutput) pub fn build(self) -> crate::output::ListServiceTemplateVersionsOutput { crate::output::ListServiceTemplateVersionsOutput { next_token: self.next_token, template_versions: self.template_versions, } } } } impl ListServiceTemplateVersionsOutput { /// Creates a new builder-style object to manufacture [`ListServiceTemplateVersionsOutput`](crate::output::ListServiceTemplateVersionsOutput) pub fn builder() -> 
crate::output::list_service_template_versions_output::Builder { crate::output::list_service_template_versions_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateServiceTemplateVersionOutput { /// <p>The service template version summary of detail data that's returned by Proton.</p> pub service_template_version: std::option::Option<crate::model::ServiceTemplateVersion>, } impl CreateServiceTemplateVersionOutput { /// <p>The service template version summary of detail data that's returned by Proton.</p> pub fn service_template_version( &self, ) -> std::option::Option<&crate::model::ServiceTemplateVersion> { self.service_template_version.as_ref() } } impl std::fmt::Debug for CreateServiceTemplateVersionOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateServiceTemplateVersionOutput"); formatter.field("service_template_version", &self.service_template_version); formatter.finish() } } /// See [`CreateServiceTemplateVersionOutput`](crate::output::CreateServiceTemplateVersionOutput) pub mod create_service_template_version_output { /// A builder for [`CreateServiceTemplateVersionOutput`](crate::output::CreateServiceTemplateVersionOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) service_template_version: std::option::Option<crate::model::ServiceTemplateVersion>, } impl Builder { /// <p>The service template version summary of detail data that's returned by Proton.</p> pub fn service_template_version( mut self, input: crate::model::ServiceTemplateVersion, ) -> Self { self.service_template_version = Some(input); self } /// <p>The service template version summary of detail data that's returned by Proton.</p> pub fn set_service_template_version( mut self, input: 
std::option::Option<crate::model::ServiceTemplateVersion>, ) -> Self { self.service_template_version = input; self } /// Consumes the builder and constructs a [`CreateServiceTemplateVersionOutput`](crate::output::CreateServiceTemplateVersionOutput) pub fn build(self) -> crate::output::CreateServiceTemplateVersionOutput { crate::output::CreateServiceTemplateVersionOutput { service_template_version: self.service_template_version, } } } } impl CreateServiceTemplateVersionOutput { /// Creates a new builder-style object to manufacture [`CreateServiceTemplateVersionOutput`](crate::output::CreateServiceTemplateVersionOutput) pub fn builder() -> crate::output::create_service_template_version_output::Builder { crate::output::create_service_template_version_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeleteServiceTemplateVersionOutput { /// <p>The service template version detail data that's returned by Proton.</p> pub service_template_version: std::option::Option<crate::model::ServiceTemplateVersion>, } impl DeleteServiceTemplateVersionOutput { /// <p>The service template version detail data that's returned by Proton.</p> pub fn service_template_version( &self, ) -> std::option::Option<&crate::model::ServiceTemplateVersion> { self.service_template_version.as_ref() } } impl std::fmt::Debug for DeleteServiceTemplateVersionOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeleteServiceTemplateVersionOutput"); formatter.field("service_template_version", &self.service_template_version); formatter.finish() } } /// See [`DeleteServiceTemplateVersionOutput`](crate::output::DeleteServiceTemplateVersionOutput) pub mod delete_service_template_version_output { /// A builder for [`DeleteServiceTemplateVersionOutput`](crate::output::DeleteServiceTemplateVersionOutput) #[non_exhaustive] 
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) service_template_version: std::option::Option<crate::model::ServiceTemplateVersion>, } impl Builder { /// <p>The service template version detail data that's returned by Proton.</p> pub fn service_template_version( mut self, input: crate::model::ServiceTemplateVersion, ) -> Self { self.service_template_version = Some(input); self } /// <p>The service template version detail data that's returned by Proton.</p> pub fn set_service_template_version( mut self, input: std::option::Option<crate::model::ServiceTemplateVersion>, ) -> Self { self.service_template_version = input; self } /// Consumes the builder and constructs a [`DeleteServiceTemplateVersionOutput`](crate::output::DeleteServiceTemplateVersionOutput) pub fn build(self) -> crate::output::DeleteServiceTemplateVersionOutput { crate::output::DeleteServiceTemplateVersionOutput { service_template_version: self.service_template_version, } } } } impl DeleteServiceTemplateVersionOutput { /// Creates a new builder-style object to manufacture [`DeleteServiceTemplateVersionOutput`](crate::output::DeleteServiceTemplateVersionOutput) pub fn builder() -> crate::output::delete_service_template_version_output::Builder { crate::output::delete_service_template_version_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateServiceTemplateVersionOutput { /// <p>The service template version detail data that's returned by Proton.</p> pub service_template_version: std::option::Option<crate::model::ServiceTemplateVersion>, } impl UpdateServiceTemplateVersionOutput { /// <p>The service template version detail data that's returned by Proton.</p> pub fn service_template_version( &self, ) -> std::option::Option<&crate::model::ServiceTemplateVersion> { self.service_template_version.as_ref() } } impl 
std::fmt::Debug for UpdateServiceTemplateVersionOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateServiceTemplateVersionOutput"); formatter.field("service_template_version", &self.service_template_version); formatter.finish() } } /// See [`UpdateServiceTemplateVersionOutput`](crate::output::UpdateServiceTemplateVersionOutput) pub mod update_service_template_version_output { /// A builder for [`UpdateServiceTemplateVersionOutput`](crate::output::UpdateServiceTemplateVersionOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) service_template_version: std::option::Option<crate::model::ServiceTemplateVersion>, } impl Builder { /// <p>The service template version detail data that's returned by Proton.</p> pub fn service_template_version( mut self, input: crate::model::ServiceTemplateVersion, ) -> Self { self.service_template_version = Some(input); self } /// <p>The service template version detail data that's returned by Proton.</p> pub fn set_service_template_version( mut self, input: std::option::Option<crate::model::ServiceTemplateVersion>, ) -> Self { self.service_template_version = input; self } /// Consumes the builder and constructs a [`UpdateServiceTemplateVersionOutput`](crate::output::UpdateServiceTemplateVersionOutput) pub fn build(self) -> crate::output::UpdateServiceTemplateVersionOutput { crate::output::UpdateServiceTemplateVersionOutput { service_template_version: self.service_template_version, } } } } impl UpdateServiceTemplateVersionOutput { /// Creates a new builder-style object to manufacture [`UpdateServiceTemplateVersionOutput`](crate::output::UpdateServiceTemplateVersionOutput) pub fn builder() -> crate::output::update_service_template_version_output::Builder { crate::output::update_service_template_version_output::Builder::default() } } #[allow(missing_docs)] // documentation missing 
in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct GetServiceTemplateVersionOutput { /// <p>The service template version detail data that's returned by Proton.</p> pub service_template_version: std::option::Option<crate::model::ServiceTemplateVersion>, } impl GetServiceTemplateVersionOutput { /// <p>The service template version detail data that's returned by Proton.</p> pub fn service_template_version( &self, ) -> std::option::Option<&crate::model::ServiceTemplateVersion> { self.service_template_version.as_ref() } } impl std::fmt::Debug for GetServiceTemplateVersionOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetServiceTemplateVersionOutput"); formatter.field("service_template_version", &self.service_template_version); formatter.finish() } } /// See [`GetServiceTemplateVersionOutput`](crate::output::GetServiceTemplateVersionOutput) pub mod get_service_template_version_output { /// A builder for [`GetServiceTemplateVersionOutput`](crate::output::GetServiceTemplateVersionOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) service_template_version: std::option::Option<crate::model::ServiceTemplateVersion>, } impl Builder { /// <p>The service template version detail data that's returned by Proton.</p> pub fn service_template_version( mut self, input: crate::model::ServiceTemplateVersion, ) -> Self { self.service_template_version = Some(input); self } /// <p>The service template version detail data that's returned by Proton.</p> pub fn set_service_template_version( mut self, input: std::option::Option<crate::model::ServiceTemplateVersion>, ) -> Self { self.service_template_version = input; self } /// Consumes the builder and constructs a [`GetServiceTemplateVersionOutput`](crate::output::GetServiceTemplateVersionOutput) pub fn build(self) -> 
crate::output::GetServiceTemplateVersionOutput { crate::output::GetServiceTemplateVersionOutput { service_template_version: self.service_template_version, } } } } impl GetServiceTemplateVersionOutput { /// Creates a new builder-style object to manufacture [`GetServiceTemplateVersionOutput`](crate::output::GetServiceTemplateVersionOutput) pub fn builder() -> crate::output::get_service_template_version_output::Builder { crate::output::get_service_template_version_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListServiceTemplatesOutput { /// <p>A token to indicate the location of the next service template in the array of service templates, after the current requested list of service /// templates.</p> pub next_token: std::option::Option<std::string::String>, /// <p>An array of service templates with detail data.</p> pub templates: std::option::Option<std::vec::Vec<crate::model::ServiceTemplateSummary>>, } impl ListServiceTemplatesOutput { /// <p>A token to indicate the location of the next service template in the array of service templates, after the current requested list of service /// templates.</p> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } /// <p>An array of service templates with detail data.</p> pub fn templates(&self) -> std::option::Option<&[crate::model::ServiceTemplateSummary]> { self.templates.as_deref() } } impl std::fmt::Debug for ListServiceTemplatesOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListServiceTemplatesOutput"); formatter.field("next_token", &self.next_token); formatter.field("templates", &self.templates); formatter.finish() } } /// See [`ListServiceTemplatesOutput`](crate::output::ListServiceTemplatesOutput) pub mod list_service_templates_output { /// A builder for 
[`ListServiceTemplatesOutput`](crate::output::ListServiceTemplatesOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) next_token: std::option::Option<std::string::String>, pub(crate) templates: std::option::Option<std::vec::Vec<crate::model::ServiceTemplateSummary>>, } impl Builder { /// <p>A token to indicate the location of the next service template in the array of service templates, after the current requested list of service /// templates.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>A token to indicate the location of the next service template in the array of service templates, after the current requested list of service /// templates.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Appends an item to `templates`. /// /// To override the contents of this collection use [`set_templates`](Self::set_templates). 
/// /// <p>An array of service templates with detail data.</p> pub fn templates(mut self, input: impl Into<crate::model::ServiceTemplateSummary>) -> Self { let mut v = self.templates.unwrap_or_default(); v.push(input.into()); self.templates = Some(v); self } /// <p>An array of service templates with detail data.</p> pub fn set_templates( mut self, input: std::option::Option<std::vec::Vec<crate::model::ServiceTemplateSummary>>, ) -> Self { self.templates = input; self } /// Consumes the builder and constructs a [`ListServiceTemplatesOutput`](crate::output::ListServiceTemplatesOutput) pub fn build(self) -> crate::output::ListServiceTemplatesOutput { crate::output::ListServiceTemplatesOutput { next_token: self.next_token, templates: self.templates, } } } } impl ListServiceTemplatesOutput { /// Creates a new builder-style object to manufacture [`ListServiceTemplatesOutput`](crate::output::ListServiceTemplatesOutput) pub fn builder() -> crate::output::list_service_templates_output::Builder { crate::output::list_service_templates_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateServiceTemplateOutput { /// <p>The service template detail data that's returned by Proton.</p> pub service_template: std::option::Option<crate::model::ServiceTemplate>, } impl CreateServiceTemplateOutput { /// <p>The service template detail data that's returned by Proton.</p> pub fn service_template(&self) -> std::option::Option<&crate::model::ServiceTemplate> { self.service_template.as_ref() } } impl std::fmt::Debug for CreateServiceTemplateOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateServiceTemplateOutput"); formatter.field("service_template", &self.service_template); formatter.finish() } } /// See [`CreateServiceTemplateOutput`](crate::output::CreateServiceTemplateOutput) pub mod 
create_service_template_output { /// A builder for [`CreateServiceTemplateOutput`](crate::output::CreateServiceTemplateOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) service_template: std::option::Option<crate::model::ServiceTemplate>, } impl Builder { /// <p>The service template detail data that's returned by Proton.</p> pub fn service_template(mut self, input: crate::model::ServiceTemplate) -> Self { self.service_template = Some(input); self } /// <p>The service template detail data that's returned by Proton.</p> pub fn set_service_template( mut self, input: std::option::Option<crate::model::ServiceTemplate>, ) -> Self { self.service_template = input; self } /// Consumes the builder and constructs a [`CreateServiceTemplateOutput`](crate::output::CreateServiceTemplateOutput) pub fn build(self) -> crate::output::CreateServiceTemplateOutput { crate::output::CreateServiceTemplateOutput { service_template: self.service_template, } } } } impl CreateServiceTemplateOutput { /// Creates a new builder-style object to manufacture [`CreateServiceTemplateOutput`](crate::output::CreateServiceTemplateOutput) pub fn builder() -> crate::output::create_service_template_output::Builder { crate::output::create_service_template_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeleteServiceTemplateOutput { /// <p>The service template detail data that's returned by Proton.</p> pub service_template: std::option::Option<crate::model::ServiceTemplate>, } impl DeleteServiceTemplateOutput { /// <p>The service template detail data that's returned by Proton.</p> pub fn service_template(&self) -> std::option::Option<&crate::model::ServiceTemplate> { self.service_template.as_ref() } } impl std::fmt::Debug for DeleteServiceTemplateOutput { fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeleteServiceTemplateOutput"); formatter.field("service_template", &self.service_template); formatter.finish() } } /// See [`DeleteServiceTemplateOutput`](crate::output::DeleteServiceTemplateOutput) pub mod delete_service_template_output { /// A builder for [`DeleteServiceTemplateOutput`](crate::output::DeleteServiceTemplateOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) service_template: std::option::Option<crate::model::ServiceTemplate>, } impl Builder { /// <p>The service template detail data that's returned by Proton.</p> pub fn service_template(mut self, input: crate::model::ServiceTemplate) -> Self { self.service_template = Some(input); self } /// <p>The service template detail data that's returned by Proton.</p> pub fn set_service_template( mut self, input: std::option::Option<crate::model::ServiceTemplate>, ) -> Self { self.service_template = input; self } /// Consumes the builder and constructs a [`DeleteServiceTemplateOutput`](crate::output::DeleteServiceTemplateOutput) pub fn build(self) -> crate::output::DeleteServiceTemplateOutput { crate::output::DeleteServiceTemplateOutput { service_template: self.service_template, } } } } impl DeleteServiceTemplateOutput { /// Creates a new builder-style object to manufacture [`DeleteServiceTemplateOutput`](crate::output::DeleteServiceTemplateOutput) pub fn builder() -> crate::output::delete_service_template_output::Builder { crate::output::delete_service_template_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateServiceTemplateOutput { /// <p>The service template detail data that's returned by Proton.</p> pub service_template: std::option::Option<crate::model::ServiceTemplate>, } impl UpdateServiceTemplateOutput { 
    /// <p>The service template detail data that's returned by Proton.</p>
    pub fn service_template(&self) -> std::option::Option<&crate::model::ServiceTemplate> {
        self.service_template.as_ref()
    }
}
// Manual Debug impl: the struct deliberately does not derive Debug.
impl std::fmt::Debug for UpdateServiceTemplateOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("UpdateServiceTemplateOutput");
        formatter.field("service_template", &self.service_template);
        formatter.finish()
    }
}
/// See [`UpdateServiceTemplateOutput`](crate::output::UpdateServiceTemplateOutput)
pub mod update_service_template_output {
    /// A builder for [`UpdateServiceTemplateOutput`](crate::output::UpdateServiceTemplateOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) service_template: std::option::Option<crate::model::ServiceTemplate>,
    }
    impl Builder {
        /// <p>The service template detail data that's returned by Proton.</p>
        pub fn service_template(mut self, input: crate::model::ServiceTemplate) -> Self {
            self.service_template = Some(input);
            self
        }
        /// <p>The service template detail data that's returned by Proton.</p>
        pub fn set_service_template(
            mut self,
            input: std::option::Option<crate::model::ServiceTemplate>,
        ) -> Self {
            self.service_template = input;
            self
        }
        /// Consumes the builder and constructs a [`UpdateServiceTemplateOutput`](crate::output::UpdateServiceTemplateOutput)
        pub fn build(self) -> crate::output::UpdateServiceTemplateOutput {
            crate::output::UpdateServiceTemplateOutput {
                service_template: self.service_template,
            }
        }
    }
}
impl UpdateServiceTemplateOutput {
    /// Creates a new builder-style object to manufacture [`UpdateServiceTemplateOutput`](crate::output::UpdateServiceTemplateOutput)
    pub fn builder() -> crate::output::update_service_template_output::Builder {
        crate::output::update_service_template_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output type for the `GetServiceTemplate` operation.
pub struct GetServiceTemplateOutput {
    /// <p>The service template detail data that's returned by Proton.</p>
    pub service_template: std::option::Option<crate::model::ServiceTemplate>,
}
impl GetServiceTemplateOutput {
    /// <p>The service template detail data that's returned by Proton.</p>
    pub fn service_template(&self) -> std::option::Option<&crate::model::ServiceTemplate> {
        self.service_template.as_ref()
    }
}
impl std::fmt::Debug for GetServiceTemplateOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("GetServiceTemplateOutput");
        formatter.field("service_template", &self.service_template);
        formatter.finish()
    }
}
/// See [`GetServiceTemplateOutput`](crate::output::GetServiceTemplateOutput)
pub mod get_service_template_output {
    /// A builder for [`GetServiceTemplateOutput`](crate::output::GetServiceTemplateOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) service_template: std::option::Option<crate::model::ServiceTemplate>,
    }
    impl Builder {
        /// <p>The service template detail data that's returned by Proton.</p>
        pub fn service_template(mut self, input: crate::model::ServiceTemplate) -> Self {
            self.service_template = Some(input);
            self
        }
        /// <p>The service template detail data that's returned by Proton.</p>
        pub fn set_service_template(
            mut self,
            input: std::option::Option<crate::model::ServiceTemplate>,
        ) -> Self {
            self.service_template = input;
            self
        }
        /// Consumes the builder and constructs a [`GetServiceTemplateOutput`](crate::output::GetServiceTemplateOutput)
        pub fn build(self) -> crate::output::GetServiceTemplateOutput {
            crate::output::GetServiceTemplateOutput {
                service_template: self.service_template,
            }
        }
    }
}
impl GetServiceTemplateOutput {
    /// Creates a new builder-style object to manufacture [`GetServiceTemplateOutput`](crate::output::GetServiceTemplateOutput)
    pub fn builder() -> crate::output::get_service_template_output::Builder {
        crate::output::get_service_template_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output type for the `ListServices` operation (paginated).
pub struct ListServicesOutput {
    /// <p>A token to indicate the location of the next service in the array of services, after the current requested list of services.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>An array of services with summaries of detail data.</p>
    pub services: std::option::Option<std::vec::Vec<crate::model::ServiceSummary>>,
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
    /// <p>An array of services with summaries of detail data.</p>
    pub fn services(&self) -> std::option::Option<&[crate::model::ServiceSummary]> {
        self.services.as_deref()
    }
}
impl std::fmt::Debug for ListServicesOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListServicesOutput");
        formatter.field("next_token", &self.next_token);
        formatter.field("services", &self.services);
        formatter.finish()
    }
}
/// See [`ListServicesOutput`](crate::output::ListServicesOutput)
pub mod list_services_output {
    /// A builder for [`ListServicesOutput`](crate::output::ListServicesOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) services: std::option::Option<std::vec::Vec<crate::model::ServiceSummary>>,
    }
    impl Builder {
        /// <p>A token to indicate the location of the next service in the array of services, after the current requested list of services.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// <p>A token to indicate the location of the next service in the array of services, after the current requested list of services.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `services`.
        ///
        /// To override the contents of this collection use [`set_services`](Self::set_services).
        ///
        /// <p>An array of services with summaries of detail data.</p>
        pub fn services(mut self, input: impl Into<crate::model::ServiceSummary>) -> Self {
            // Lazily materializes the Vec on first append.
            let mut v = self.services.unwrap_or_default();
            v.push(input.into());
            self.services = Some(v);
            self
        }
        /// <p>An array of services with summaries of detail data.</p>
        pub fn set_services(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::ServiceSummary>>,
        ) -> Self {
            self.services = input;
            self
        }
        /// Consumes the builder and constructs a [`ListServicesOutput`](crate::output::ListServicesOutput)
        pub fn build(self) -> crate::output::ListServicesOutput {
            crate::output::ListServicesOutput {
                next_token: self.next_token,
                services: self.services,
            }
        }
    }
}
impl ListServicesOutput {
    /// Creates a new builder-style object to manufacture [`ListServicesOutput`](crate::output::ListServicesOutput)
    pub fn builder() -> crate::output::list_services_output::Builder {
        crate::output::list_services_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output type for the `CreateService` operation.
pub struct CreateServiceOutput {
    /// <p>The service detail data that's returned by Proton.</p>
    pub service: std::option::Option<crate::model::Service>,
}
impl CreateServiceOutput {
    /// <p>The service detail data that's returned by Proton.</p>
    pub fn service(&self) -> std::option::Option<&crate::model::Service> {
        self.service.as_ref()
    }
}
impl std::fmt::Debug for CreateServiceOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("CreateServiceOutput");
        formatter.field("service", &self.service);
        formatter.finish()
    }
}
/// See [`CreateServiceOutput`](crate::output::CreateServiceOutput)
pub mod create_service_output {
    /// A builder for [`CreateServiceOutput`](crate::output::CreateServiceOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) service: std::option::Option<crate::model::Service>,
    }
    impl Builder {
        /// <p>The service detail data that's returned by Proton.</p>
        pub fn service(mut self, input: crate::model::Service) -> Self {
            self.service = Some(input);
            self
        }
        /// <p>The service detail data that's returned by Proton.</p>
        pub fn set_service(mut self, input: std::option::Option<crate::model::Service>) -> Self {
            self.service = input;
            self
        }
        /// Consumes the builder and constructs a [`CreateServiceOutput`](crate::output::CreateServiceOutput)
        pub fn build(self) -> crate::output::CreateServiceOutput {
            crate::output::CreateServiceOutput {
                service: self.service,
            }
        }
    }
}
impl CreateServiceOutput {
    /// Creates a new builder-style object to manufacture [`CreateServiceOutput`](crate::output::CreateServiceOutput)
    pub fn builder() -> crate::output::create_service_output::Builder {
        crate::output::create_service_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output type for the `DeleteService` operation.
pub struct DeleteServiceOutput {
    /// <p>The service detail data that's returned by Proton.</p>
    pub service: std::option::Option<crate::model::Service>,
}
impl DeleteServiceOutput {
    /// <p>The service detail data that's returned by Proton.</p>
    pub fn service(&self) -> std::option::Option<&crate::model::Service> {
        self.service.as_ref()
    }
}
impl std::fmt::Debug for DeleteServiceOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("DeleteServiceOutput");
        formatter.field("service", &self.service);
        formatter.finish()
    }
}
/// See [`DeleteServiceOutput`](crate::output::DeleteServiceOutput)
pub mod delete_service_output {
    /// A builder for [`DeleteServiceOutput`](crate::output::DeleteServiceOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) service: std::option::Option<crate::model::Service>,
    }
    impl Builder {
        /// <p>The service detail data that's returned by Proton.</p>
        pub fn service(mut self, input: crate::model::Service) -> Self {
            self.service = Some(input);
            self
        }
        /// <p>The service detail data that's returned by Proton.</p>
        pub fn set_service(mut self, input: std::option::Option<crate::model::Service>) -> Self {
            self.service = input;
            self
        }
        /// Consumes the builder and constructs a [`DeleteServiceOutput`](crate::output::DeleteServiceOutput)
        pub fn build(self) -> crate::output::DeleteServiceOutput {
            crate::output::DeleteServiceOutput {
                service: self.service,
            }
        }
    }
}
impl DeleteServiceOutput {
    /// Creates a new builder-style object to manufacture [`DeleteServiceOutput`](crate::output::DeleteServiceOutput)
    pub fn builder() -> crate::output::delete_service_output::Builder {
        crate::output::delete_service_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output type for the `UpdateService` operation.
pub struct UpdateServiceOutput {
    /// <p>The service detail data that's returned by Proton.</p>
    pub service: std::option::Option<crate::model::Service>,
}
impl UpdateServiceOutput {
    /// <p>The service detail data that's returned by Proton.</p>
    pub fn service(&self) -> std::option::Option<&crate::model::Service> {
        self.service.as_ref()
    }
}
impl std::fmt::Debug for UpdateServiceOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("UpdateServiceOutput");
        formatter.field("service", &self.service);
        formatter.finish()
    }
}
/// See [`UpdateServiceOutput`](crate::output::UpdateServiceOutput)
pub mod update_service_output {
    /// A builder for [`UpdateServiceOutput`](crate::output::UpdateServiceOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) service: std::option::Option<crate::model::Service>,
    }
    impl Builder {
        /// <p>The service detail data that's returned by Proton.</p>
        pub fn service(mut self, input: crate::model::Service) -> Self {
            self.service = Some(input);
            self
        }
        /// <p>The service detail data that's returned by Proton.</p>
        pub fn set_service(mut self, input: std::option::Option<crate::model::Service>) -> Self {
            self.service = input;
            self
        }
        /// Consumes the builder and constructs a [`UpdateServiceOutput`](crate::output::UpdateServiceOutput)
        pub fn build(self) -> crate::output::UpdateServiceOutput {
            crate::output::UpdateServiceOutput {
                service: self.service,
            }
        }
    }
}
impl UpdateServiceOutput {
    /// Creates a new builder-style object to manufacture [`UpdateServiceOutput`](crate::output::UpdateServiceOutput)
    pub fn builder() -> crate::output::update_service_output::Builder {
        crate::output::update_service_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output type for the `GetService` operation.
pub struct GetServiceOutput {
    /// <p>The service detail data that's returned by Proton.</p>
    pub service: std::option::Option<crate::model::Service>,
}
impl GetServiceOutput {
    /// <p>The service detail data that's returned by Proton.</p>
    pub fn service(&self) -> std::option::Option<&crate::model::Service> {
        self.service.as_ref()
    }
}
impl std::fmt::Debug for GetServiceOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("GetServiceOutput");
        formatter.field("service", &self.service);
        formatter.finish()
    }
}
/// See [`GetServiceOutput`](crate::output::GetServiceOutput)
pub mod get_service_output {
    /// A builder for [`GetServiceOutput`](crate::output::GetServiceOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) service: std::option::Option<crate::model::Service>,
    }
    impl Builder {
        /// <p>The service detail data that's returned by Proton.</p>
        pub fn service(mut self, input: crate::model::Service) -> Self {
            self.service = Some(input);
            self
        }
        /// <p>The service detail data that's returned by Proton.</p>
        pub fn set_service(mut self, input: std::option::Option<crate::model::Service>) -> Self {
            self.service = input;
            self
        }
        /// Consumes the builder and constructs a [`GetServiceOutput`](crate::output::GetServiceOutput)
        pub fn build(self) -> crate::output::GetServiceOutput {
            crate::output::GetServiceOutput {
                service: self.service,
            }
        }
    }
}
impl GetServiceOutput {
    /// Creates a new builder-style object to manufacture [`GetServiceOutput`](crate::output::GetServiceOutput)
    pub fn builder() -> crate::output::get_service_output::Builder {
        crate::output::get_service_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output type for the `UpdateServicePipeline` operation.
pub struct UpdateServicePipelineOutput {
    /// <p>The pipeline details returned by Proton.</p>
    pub pipeline: std::option::Option<crate::model::ServicePipeline>,
}
impl UpdateServicePipelineOutput {
    /// <p>The pipeline details returned by Proton.</p>
    pub fn pipeline(&self) -> std::option::Option<&crate::model::ServicePipeline> {
        self.pipeline.as_ref()
    }
}
impl std::fmt::Debug for UpdateServicePipelineOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("UpdateServicePipelineOutput");
        formatter.field("pipeline", &self.pipeline);
        formatter.finish()
    }
}
/// See [`UpdateServicePipelineOutput`](crate::output::UpdateServicePipelineOutput)
pub mod update_service_pipeline_output {
    /// A builder for [`UpdateServicePipelineOutput`](crate::output::UpdateServicePipelineOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) pipeline: std::option::Option<crate::model::ServicePipeline>,
    }
    impl Builder {
        /// <p>The pipeline details returned by Proton.</p>
        pub fn pipeline(mut self, input: crate::model::ServicePipeline) -> Self {
            self.pipeline = Some(input);
            self
        }
        /// <p>The pipeline details returned by Proton.</p>
        pub fn set_pipeline(
            mut self,
            input: std::option::Option<crate::model::ServicePipeline>,
        ) -> Self {
            self.pipeline = input;
            self
        }
        /// Consumes the builder and constructs a [`UpdateServicePipelineOutput`](crate::output::UpdateServicePipelineOutput)
        pub fn build(self) -> crate::output::UpdateServicePipelineOutput {
            crate::output::UpdateServicePipelineOutput {
                pipeline: self.pipeline,
            }
        }
    }
}
impl UpdateServicePipelineOutput {
    /// Creates a new builder-style object to manufacture [`UpdateServicePipelineOutput`](crate::output::UpdateServicePipelineOutput)
    pub fn builder() -> crate::output::update_service_pipeline_output::Builder {
        crate::output::update_service_pipeline_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output type for the `ListServicePipelineProvisionedResources` operation (paginated).
pub struct ListServicePipelineProvisionedResourcesOutput {
    /// <p>A token to indicate the location of the next provisioned resource in the array of provisioned resources, after the current requested list of
    /// provisioned resources.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>An array of provisioned resources for a service and pipeline.</p>
    pub provisioned_resources:
        std::option::Option<std::vec::Vec<crate::model::ProvisionedResource>>,
}
impl ListServicePipelineProvisionedResourcesOutput {
    /// <p>A token to indicate the location of the next provisioned resource in the array of provisioned resources, after the current requested list of
    /// provisioned resources.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
    /// <p>An array of provisioned resources for a service and pipeline.</p>
    pub fn provisioned_resources(
        &self,
    ) -> std::option::Option<&[crate::model::ProvisionedResource]> {
        self.provisioned_resources.as_deref()
    }
}
impl std::fmt::Debug for ListServicePipelineProvisionedResourcesOutput {
    fn
    fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListServicePipelineProvisionedResourcesOutput");
        formatter.field("next_token", &self.next_token);
        formatter.field("provisioned_resources", &self.provisioned_resources);
        formatter.finish()
    }
}
/// See [`ListServicePipelineProvisionedResourcesOutput`](crate::output::ListServicePipelineProvisionedResourcesOutput)
pub mod list_service_pipeline_provisioned_resources_output {
    /// A builder for [`ListServicePipelineProvisionedResourcesOutput`](crate::output::ListServicePipelineProvisionedResourcesOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) provisioned_resources:
            std::option::Option<std::vec::Vec<crate::model::ProvisionedResource>>,
    }
    impl Builder {
        /// <p>A token to indicate the location of the next provisioned resource in the array of provisioned resources, after the current requested list of
        /// provisioned resources.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// <p>A token to indicate the location of the next provisioned resource in the array of provisioned resources, after the current requested list of
        /// provisioned resources.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `provisioned_resources`.
        ///
        /// To override the contents of this collection use [`set_provisioned_resources`](Self::set_provisioned_resources).
        ///
        /// <p>An array of provisioned resources for a service and pipeline.</p>
        pub fn provisioned_resources(
            mut self,
            input: impl Into<crate::model::ProvisionedResource>,
        ) -> Self {
            // Lazily materializes the Vec on first append.
            let mut v = self.provisioned_resources.unwrap_or_default();
            v.push(input.into());
            self.provisioned_resources = Some(v);
            self
        }
        /// <p>An array of provisioned resources for a service and pipeline.</p>
        pub fn set_provisioned_resources(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::ProvisionedResource>>,
        ) -> Self {
            self.provisioned_resources = input;
            self
        }
        /// Consumes the builder and constructs a [`ListServicePipelineProvisionedResourcesOutput`](crate::output::ListServicePipelineProvisionedResourcesOutput)
        pub fn build(self) -> crate::output::ListServicePipelineProvisionedResourcesOutput {
            crate::output::ListServicePipelineProvisionedResourcesOutput {
                next_token: self.next_token,
                provisioned_resources: self.provisioned_resources,
            }
        }
    }
}
impl ListServicePipelineProvisionedResourcesOutput {
    /// Creates a new builder-style object to manufacture [`ListServicePipelineProvisionedResourcesOutput`](crate::output::ListServicePipelineProvisionedResourcesOutput)
    pub fn builder() -> crate::output::list_service_pipeline_provisioned_resources_output::Builder {
        crate::output::list_service_pipeline_provisioned_resources_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output type for the `ListServicePipelineOutputs` operation (paginated).
pub struct ListServicePipelineOutputsOutput {
    /// <p>A token to indicate the location of the next output in the array of outputs, after the current requested list of outputs.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>An array of outputs.</p>
    pub outputs: std::option::Option<std::vec::Vec<crate::model::Output>>,
}
impl ListServicePipelineOutputsOutput {
    /// <p>A token to indicate the location of the next output in the array of outputs, after the current requested list of outputs.</p>
    pub
    fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
    /// <p>An array of outputs.</p>
    pub fn outputs(&self) -> std::option::Option<&[crate::model::Output]> {
        self.outputs.as_deref()
    }
}
impl std::fmt::Debug for ListServicePipelineOutputsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListServicePipelineOutputsOutput");
        formatter.field("next_token", &self.next_token);
        formatter.field("outputs", &self.outputs);
        formatter.finish()
    }
}
/// See [`ListServicePipelineOutputsOutput`](crate::output::ListServicePipelineOutputsOutput)
pub mod list_service_pipeline_outputs_output {
    /// A builder for [`ListServicePipelineOutputsOutput`](crate::output::ListServicePipelineOutputsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) outputs: std::option::Option<std::vec::Vec<crate::model::Output>>,
    }
    impl Builder {
        /// <p>A token to indicate the location of the next output in the array of outputs, after the current requested list of outputs.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// <p>A token to indicate the location of the next output in the array of outputs, after the current requested list of outputs.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `outputs`.
        ///
        /// To override the contents of this collection use [`set_outputs`](Self::set_outputs).
        ///
        /// <p>An array of outputs.</p>
        pub fn outputs(mut self, input: impl Into<crate::model::Output>) -> Self {
            // Lazily materializes the Vec on first append.
            let mut v = self.outputs.unwrap_or_default();
            v.push(input.into());
            self.outputs = Some(v);
            self
        }
        /// <p>An array of outputs.</p>
        pub fn set_outputs(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Output>>,
        ) -> Self {
            self.outputs = input;
            self
        }
        /// Consumes the builder and constructs a [`ListServicePipelineOutputsOutput`](crate::output::ListServicePipelineOutputsOutput)
        pub fn build(self) -> crate::output::ListServicePipelineOutputsOutput {
            crate::output::ListServicePipelineOutputsOutput {
                next_token: self.next_token,
                outputs: self.outputs,
            }
        }
    }
}
impl ListServicePipelineOutputsOutput {
    /// Creates a new builder-style object to manufacture [`ListServicePipelineOutputsOutput`](crate::output::ListServicePipelineOutputsOutput)
    pub fn builder() -> crate::output::list_service_pipeline_outputs_output::Builder {
        crate::output::list_service_pipeline_outputs_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
/// Output type for the `ListServiceInstances` operation (paginated).
pub struct ListServiceInstancesOutput {
    /// <p>A token to indicate the location of the next service instance in the array of service instances, after the current requested list of service
    /// instances.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>An array of service instances with summaries of detail data.</p>
    pub service_instances:
        std::option::Option<std::vec::Vec<crate::model::ServiceInstanceSummary>>,
}
impl ListServiceInstancesOutput {
    /// <p>A token to indicate the location of the next service instance in the array of service instances, after the current requested list of service
    /// instances.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
    /// <p>An array of service instances with summaries of detail data.</p>
    pub fn service_instances(
        &self,
    ) ->
std::option::Option<&[crate::model::ServiceInstanceSummary]> { self.service_instances.as_deref() } } impl std::fmt::Debug for ListServiceInstancesOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListServiceInstancesOutput"); formatter.field("next_token", &self.next_token); formatter.field("service_instances", &self.service_instances); formatter.finish() } } /// See [`ListServiceInstancesOutput`](crate::output::ListServiceInstancesOutput) pub mod list_service_instances_output { /// A builder for [`ListServiceInstancesOutput`](crate::output::ListServiceInstancesOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) next_token: std::option::Option<std::string::String>, pub(crate) service_instances: std::option::Option<std::vec::Vec<crate::model::ServiceInstanceSummary>>, } impl Builder { /// <p>A token to indicate the location of the next service instance in the array of service instances, after the current requested list of service /// instances.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>A token to indicate the location of the next service instance in the array of service instances, after the current requested list of service /// instances.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Appends an item to `service_instances`. /// /// To override the contents of this collection use [`set_service_instances`](Self::set_service_instances). 
        ///
        /// <p>An array of service instances with summaries of detail data.</p>
        pub fn service_instances(
            mut self,
            input: impl Into<crate::model::ServiceInstanceSummary>,
        ) -> Self {
            let mut v = self.service_instances.unwrap_or_default();
            v.push(input.into());
            self.service_instances = Some(v);
            self
        }
        /// <p>An array of service instances with summaries of detail data.</p>
        pub fn set_service_instances(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::ServiceInstanceSummary>>,
        ) -> Self {
            self.service_instances = input;
            self
        }
        /// Consumes the builder and constructs a [`ListServiceInstancesOutput`](crate::output::ListServiceInstancesOutput)
        pub fn build(self) -> crate::output::ListServiceInstancesOutput {
            crate::output::ListServiceInstancesOutput {
                next_token: self.next_token,
                service_instances: self.service_instances,
            }
        }
    }
}
impl ListServiceInstancesOutput {
    /// Creates a new builder-style object to manufacture [`ListServiceInstancesOutput`](crate::output::ListServiceInstancesOutput)
    pub fn builder() -> crate::output::list_service_instances_output::Builder {
        crate::output::list_service_instances_output::Builder::default()
    }
}
// Output shape for the UpdateServiceInstance operation: wraps the updated
// service instance, if one was returned.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateServiceInstanceOutput {
    /// <p>The service instance summary data returned by Proton.</p>
    pub service_instance: std::option::Option<crate::model::ServiceInstance>,
}
impl UpdateServiceInstanceOutput {
    /// <p>The service instance summary data returned by Proton.</p>
    pub fn service_instance(&self) -> std::option::Option<&crate::model::ServiceInstance> {
        self.service_instance.as_ref()
    }
}
impl std::fmt::Debug for UpdateServiceInstanceOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("UpdateServiceInstanceOutput");
        formatter.field("service_instance", &self.service_instance);
        formatter.finish()
    }
}
/// See [`UpdateServiceInstanceOutput`](crate::output::UpdateServiceInstanceOutput)
pub mod update_service_instance_output {
    /// A builder for [`UpdateServiceInstanceOutput`](crate::output::UpdateServiceInstanceOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) service_instance: std::option::Option<crate::model::ServiceInstance>,
    }
    impl Builder {
        /// <p>The service instance summary data returned by Proton.</p>
        pub fn service_instance(mut self, input: crate::model::ServiceInstance) -> Self {
            self.service_instance = Some(input);
            self
        }
        /// <p>The service instance summary data returned by Proton.</p>
        pub fn set_service_instance(
            mut self,
            input: std::option::Option<crate::model::ServiceInstance>,
        ) -> Self {
            self.service_instance = input;
            self
        }
        /// Consumes the builder and constructs a [`UpdateServiceInstanceOutput`](crate::output::UpdateServiceInstanceOutput)
        pub fn build(self) -> crate::output::UpdateServiceInstanceOutput {
            crate::output::UpdateServiceInstanceOutput {
                service_instance: self.service_instance,
            }
        }
    }
}
impl UpdateServiceInstanceOutput {
    /// Creates a new builder-style object to manufacture [`UpdateServiceInstanceOutput`](crate::output::UpdateServiceInstanceOutput)
    pub fn builder() -> crate::output::update_service_instance_output::Builder {
        crate::output::update_service_instance_output::Builder::default()
    }
}
// Output shape for the GetServiceInstance operation.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetServiceInstanceOutput {
    /// <p>The service instance detail data that's returned by Proton.</p>
    pub service_instance: std::option::Option<crate::model::ServiceInstance>,
}
impl GetServiceInstanceOutput {
    /// <p>The service instance detail data that's returned by Proton.</p>
    pub fn service_instance(&self) -> std::option::Option<&crate::model::ServiceInstance> {
        self.service_instance.as_ref()
    }
}
impl std::fmt::Debug for GetServiceInstanceOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("GetServiceInstanceOutput");
        formatter.field("service_instance", &self.service_instance);
        formatter.finish()
    }
}
/// See [`GetServiceInstanceOutput`](crate::output::GetServiceInstanceOutput)
pub mod get_service_instance_output {
    /// A builder for [`GetServiceInstanceOutput`](crate::output::GetServiceInstanceOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) service_instance: std::option::Option<crate::model::ServiceInstance>,
    }
    impl Builder {
        /// <p>The service instance detail data that's returned by Proton.</p>
        pub fn service_instance(mut self, input: crate::model::ServiceInstance) -> Self {
            self.service_instance = Some(input);
            self
        }
        /// <p>The service instance detail data that's returned by Proton.</p>
        pub fn set_service_instance(
            mut self,
            input: std::option::Option<crate::model::ServiceInstance>,
        ) -> Self {
            self.service_instance = input;
            self
        }
        /// Consumes the builder and constructs a [`GetServiceInstanceOutput`](crate::output::GetServiceInstanceOutput)
        pub fn build(self) -> crate::output::GetServiceInstanceOutput {
            crate::output::GetServiceInstanceOutput {
                service_instance: self.service_instance,
            }
        }
    }
}
impl GetServiceInstanceOutput {
    /// Creates a new builder-style object to manufacture [`GetServiceInstanceOutput`](crate::output::GetServiceInstanceOutput)
    pub fn builder() -> crate::output::get_service_instance_output::Builder {
        crate::output::get_service_instance_output::Builder::default()
    }
}
// Output shape for the ListServiceInstanceProvisionedResources operation.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListServiceInstanceProvisionedResourcesOutput {
    /// <p>A token to indicate the location of the next provisioned resource in the array of provisioned resources, after the current requested list of
    /// provisioned resources.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>An array of provisioned resources for a service instance.</p>
    pub provisioned_resources:
        std::option::Option<std::vec::Vec<crate::model::ProvisionedResource>>,
}
impl ListServiceInstanceProvisionedResourcesOutput {
    /// <p>A token to indicate the location of the next provisioned resource in the array of provisioned resources, after the current requested list of
    /// provisioned resources.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
    /// <p>An array of provisioned resources for a service instance.</p>
    pub fn provisioned_resources(
        &self,
    ) -> std::option::Option<&[crate::model::ProvisionedResource]> {
        self.provisioned_resources.as_deref()
    }
}
impl std::fmt::Debug for ListServiceInstanceProvisionedResourcesOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListServiceInstanceProvisionedResourcesOutput");
        formatter.field("next_token", &self.next_token);
        formatter.field("provisioned_resources", &self.provisioned_resources);
        formatter.finish()
    }
}
/// See [`ListServiceInstanceProvisionedResourcesOutput`](crate::output::ListServiceInstanceProvisionedResourcesOutput)
pub mod list_service_instance_provisioned_resources_output {
    /// A builder for [`ListServiceInstanceProvisionedResourcesOutput`](crate::output::ListServiceInstanceProvisionedResourcesOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) provisioned_resources:
            std::option::Option<std::vec::Vec<crate::model::ProvisionedResource>>,
    }
    impl Builder {
        /// <p>A token to indicate the location of the next provisioned resource in the array of provisioned resources, after the current requested list of
        /// provisioned resources.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// <p>A token to indicate the location of the next provisioned resource in the array of provisioned resources, after the current requested list of
        /// provisioned resources.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `provisioned_resources`.
        ///
        /// To override the contents of this collection use [`set_provisioned_resources`](Self::set_provisioned_resources).
        ///
        /// <p>An array of provisioned resources for a service instance.</p>
        pub fn provisioned_resources(
            mut self,
            input: impl Into<crate::model::ProvisionedResource>,
        ) -> Self {
            let mut v = self.provisioned_resources.unwrap_or_default();
            v.push(input.into());
            self.provisioned_resources = Some(v);
            self
        }
        /// <p>An array of provisioned resources for a service instance.</p>
        pub fn set_provisioned_resources(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::ProvisionedResource>>,
        ) -> Self {
            self.provisioned_resources = input;
            self
        }
        /// Consumes the builder and constructs a [`ListServiceInstanceProvisionedResourcesOutput`](crate::output::ListServiceInstanceProvisionedResourcesOutput)
        pub fn build(self) -> crate::output::ListServiceInstanceProvisionedResourcesOutput {
            crate::output::ListServiceInstanceProvisionedResourcesOutput {
                next_token: self.next_token,
                provisioned_resources: self.provisioned_resources,
            }
        }
    }
}
impl ListServiceInstanceProvisionedResourcesOutput {
    /// Creates a new builder-style object to manufacture [`ListServiceInstanceProvisionedResourcesOutput`](crate::output::ListServiceInstanceProvisionedResourcesOutput)
    pub fn builder() -> crate::output::list_service_instance_provisioned_resources_output::Builder {
        crate::output::list_service_instance_provisioned_resources_output::Builder::default()
    }
}
// Output shape for the ListServiceInstanceOutputs operation.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListServiceInstanceOutputsOutput {
    /// <p>A token to indicate the location of the next output in the array of outputs, after the current requested list of outputs.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>An array of service instance infrastructure as code outputs.</p>
    pub outputs: std::option::Option<std::vec::Vec<crate::model::Output>>,
}
impl ListServiceInstanceOutputsOutput {
    /// <p>A token to indicate the location of the next output in the array of outputs, after the current requested list of outputs.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
    /// <p>An array of service instance infrastructure as code outputs.</p>
    pub fn outputs(&self) -> std::option::Option<&[crate::model::Output]> {
        self.outputs.as_deref()
    }
}
impl std::fmt::Debug for ListServiceInstanceOutputsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListServiceInstanceOutputsOutput");
        formatter.field("next_token", &self.next_token);
        formatter.field("outputs", &self.outputs);
        formatter.finish()
    }
}
/// See [`ListServiceInstanceOutputsOutput`](crate::output::ListServiceInstanceOutputsOutput)
pub mod list_service_instance_outputs_output {
    /// A builder for [`ListServiceInstanceOutputsOutput`](crate::output::ListServiceInstanceOutputsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) outputs: std::option::Option<std::vec::Vec<crate::model::Output>>,
    }
    impl Builder {
        /// <p>A token to indicate the location of the next output in the array of outputs, after the current requested list of outputs.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// <p>A token to indicate the
        /// location of the next output in the array of outputs, after the current requested list of outputs.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `outputs`.
        ///
        /// To override the contents of this collection use [`set_outputs`](Self::set_outputs).
        ///
        /// <p>An array of service instance infrastructure as code outputs.</p>
        pub fn outputs(mut self, input: impl Into<crate::model::Output>) -> Self {
            let mut v = self.outputs.unwrap_or_default();
            v.push(input.into());
            self.outputs = Some(v);
            self
        }
        /// <p>An array of service instance infrastructure as code outputs.</p>
        pub fn set_outputs(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Output>>,
        ) -> Self {
            self.outputs = input;
            self
        }
        /// Consumes the builder and constructs a [`ListServiceInstanceOutputsOutput`](crate::output::ListServiceInstanceOutputsOutput)
        pub fn build(self) -> crate::output::ListServiceInstanceOutputsOutput {
            crate::output::ListServiceInstanceOutputsOutput {
                next_token: self.next_token,
                outputs: self.outputs,
            }
        }
    }
}
impl ListServiceInstanceOutputsOutput {
    /// Creates a new builder-style object to manufacture [`ListServiceInstanceOutputsOutput`](crate::output::ListServiceInstanceOutputsOutput)
    pub fn builder() -> crate::output::list_service_instance_outputs_output::Builder {
        crate::output::list_service_instance_outputs_output::Builder::default()
    }
}
// Output shape for the ListRepositories operation.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListRepositoriesOutput {
    /// <p>A token to indicate the location of the next repository in the array of repositories, after the current requested list of repositories.
    /// </p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>An array of repositories.</p>
    pub repositories: std::option::Option<std::vec::Vec<crate::model::RepositorySummary>>,
}
impl ListRepositoriesOutput {
    /// <p>A token to indicate the location of the next repository in the array of repositories, after the current requested list of repositories.
    /// </p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
    /// <p>An array of repositories.</p>
    pub fn repositories(&self) -> std::option::Option<&[crate::model::RepositorySummary]> {
        self.repositories.as_deref()
    }
}
impl std::fmt::Debug for ListRepositoriesOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListRepositoriesOutput");
        formatter.field("next_token", &self.next_token);
        formatter.field("repositories", &self.repositories);
        formatter.finish()
    }
}
/// See [`ListRepositoriesOutput`](crate::output::ListRepositoriesOutput)
pub mod list_repositories_output {
    /// A builder for [`ListRepositoriesOutput`](crate::output::ListRepositoriesOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) repositories:
            std::option::Option<std::vec::Vec<crate::model::RepositorySummary>>,
    }
    impl Builder {
        /// <p>A token to indicate the location of the next repository in the array of repositories, after the current requested list of repositories.
        /// </p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// <p>A token to indicate the location of the next repository in the array of repositories, after the current requested list of repositories.
        /// </p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `repositories`.
        ///
        /// To override the contents of this collection use [`set_repositories`](Self::set_repositories).
        ///
        /// <p>An array of repositories.</p>
        pub fn repositories(mut self, input: impl Into<crate::model::RepositorySummary>) -> Self {
            let mut v = self.repositories.unwrap_or_default();
            v.push(input.into());
            self.repositories = Some(v);
            self
        }
        /// <p>An array of repositories.</p>
        pub fn set_repositories(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::RepositorySummary>>,
        ) -> Self {
            self.repositories = input;
            self
        }
        /// Consumes the builder and constructs a [`ListRepositoriesOutput`](crate::output::ListRepositoriesOutput)
        pub fn build(self) -> crate::output::ListRepositoriesOutput {
            crate::output::ListRepositoriesOutput {
                next_token: self.next_token,
                repositories: self.repositories,
            }
        }
    }
}
impl ListRepositoriesOutput {
    /// Creates a new builder-style object to manufacture [`ListRepositoriesOutput`](crate::output::ListRepositoriesOutput)
    pub fn builder() -> crate::output::list_repositories_output::Builder {
        crate::output::list_repositories_output::Builder::default()
    }
}
// Output shape for the CreateRepository operation.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateRepositoryOutput {
    /// <p>The repository detail data that's returned by Proton.</p>
    pub repository: std::option::Option<crate::model::Repository>,
}
impl CreateRepositoryOutput {
    /// <p>The repository detail data that's returned by Proton.</p>
    pub fn repository(&self) -> std::option::Option<&crate::model::Repository> {
        self.repository.as_ref()
    }
}
impl std::fmt::Debug for CreateRepositoryOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("CreateRepositoryOutput");
        formatter.field("repository", &self.repository);
        formatter.finish()
    }
}
/// See [`CreateRepositoryOutput`](crate::output::CreateRepositoryOutput)
pub mod create_repository_output {
    /// A builder for [`CreateRepositoryOutput`](crate::output::CreateRepositoryOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) repository: std::option::Option<crate::model::Repository>,
    }
    impl Builder {
        /// <p>The repository detail data that's returned by Proton.</p>
        pub fn repository(mut self, input: crate::model::Repository) -> Self {
            self.repository = Some(input);
            self
        }
        /// <p>The repository detail data that's returned by Proton.</p>
        pub fn set_repository(
            mut self,
            input: std::option::Option<crate::model::Repository>,
        ) -> Self {
            self.repository = input;
            self
        }
        /// Consumes the builder and constructs a [`CreateRepositoryOutput`](crate::output::CreateRepositoryOutput)
        pub fn build(self) -> crate::output::CreateRepositoryOutput {
            crate::output::CreateRepositoryOutput {
                repository: self.repository,
            }
        }
    }
}
impl CreateRepositoryOutput {
    /// Creates a new builder-style object to manufacture [`CreateRepositoryOutput`](crate::output::CreateRepositoryOutput)
    pub fn builder() -> crate::output::create_repository_output::Builder {
        crate::output::create_repository_output::Builder::default()
    }
}
// Output shape for the DeleteRepository operation.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteRepositoryOutput {
    /// <p>The repository detail data that's returned by Proton.</p>
    pub repository: std::option::Option<crate::model::Repository>,
}
impl DeleteRepositoryOutput {
    /// <p>The repository detail data that's returned by Proton.</p>
    pub fn repository(&self) -> std::option::Option<&crate::model::Repository> {
        self.repository.as_ref()
    }
}
impl std::fmt::Debug for DeleteRepositoryOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("DeleteRepositoryOutput");
        formatter.field("repository", &self.repository);
        formatter.finish()
    }
}
/// See [`DeleteRepositoryOutput`](crate::output::DeleteRepositoryOutput)
pub mod delete_repository_output {
    /// A builder for [`DeleteRepositoryOutput`](crate::output::DeleteRepositoryOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) repository: std::option::Option<crate::model::Repository>,
    }
    impl Builder {
        /// <p>The repository detail data that's returned by Proton.</p>
        pub fn repository(mut self, input: crate::model::Repository) -> Self {
            self.repository = Some(input);
            self
        }
        /// <p>The repository detail data that's returned by Proton.</p>
        pub fn set_repository(
            mut self,
            input: std::option::Option<crate::model::Repository>,
        ) -> Self {
            self.repository = input;
            self
        }
        /// Consumes the builder and constructs a [`DeleteRepositoryOutput`](crate::output::DeleteRepositoryOutput)
        pub fn build(self) -> crate::output::DeleteRepositoryOutput {
            crate::output::DeleteRepositoryOutput {
                repository: self.repository,
            }
        }
    }
}
impl DeleteRepositoryOutput {
    /// Creates a new builder-style object to manufacture [`DeleteRepositoryOutput`](crate::output::DeleteRepositoryOutput)
    pub fn builder() -> crate::output::delete_repository_output::Builder {
        crate::output::delete_repository_output::Builder::default()
    }
}
// Output shape for the GetRepository operation.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetRepositoryOutput {
    /// <p>The repository detail data that's returned by Proton.</p>
    pub repository: std::option::Option<crate::model::Repository>,
}
impl GetRepositoryOutput {
    /// <p>The repository detail data that's returned by Proton.</p>
    pub fn repository(&self) -> std::option::Option<&crate::model::Repository> {
        self.repository.as_ref()
    }
}
impl std::fmt::Debug for GetRepositoryOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("GetRepositoryOutput");
        formatter.field("repository", &self.repository);
        formatter.finish()
    }
}
/// See [`GetRepositoryOutput`](crate::output::GetRepositoryOutput)
pub mod get_repository_output {
    /// A builder for [`GetRepositoryOutput`](crate::output::GetRepositoryOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) repository: std::option::Option<crate::model::Repository>,
    }
    impl Builder {
        /// <p>The repository detail data that's returned by Proton.</p>
        pub fn repository(mut self, input: crate::model::Repository) -> Self {
            self.repository = Some(input);
            self
        }
        /// <p>The repository detail data that's returned by Proton.</p>
        pub fn set_repository(
            mut self,
            input: std::option::Option<crate::model::Repository>,
        ) -> Self {
            self.repository = input;
            self
        }
        /// Consumes the builder and constructs a [`GetRepositoryOutput`](crate::output::GetRepositoryOutput)
        pub fn build(self) -> crate::output::GetRepositoryOutput {
            crate::output::GetRepositoryOutput {
                repository: self.repository,
            }
        }
    }
}
impl GetRepositoryOutput {
    /// Creates a new builder-style object to manufacture [`GetRepositoryOutput`](crate::output::GetRepositoryOutput)
    pub fn builder() -> crate::output::get_repository_output::Builder {
        crate::output::get_repository_output::Builder::default()
    }
}
// Output shape for the ListEnvironmentTemplateVersions operation.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListEnvironmentTemplateVersionsOutput {
    /// <p>A token to indicate the location of the next major or minor version in the array of major or minor versions of an environment template, after
    /// the list of major or minor versions that was previously requested.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>An array of major or minor versions of an environment template detail data.</p>
    pub
    template_versions:
        std::option::Option<std::vec::Vec<crate::model::EnvironmentTemplateVersionSummary>>,
}
impl ListEnvironmentTemplateVersionsOutput {
    /// <p>A token to indicate the location of the next major or minor version in the array of major or minor versions of an environment template, after
    /// the list of major or minor versions that was previously requested.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
    /// <p>An array of major or minor versions of an environment template detail data.</p>
    pub fn template_versions(
        &self,
    ) -> std::option::Option<&[crate::model::EnvironmentTemplateVersionSummary]> {
        self.template_versions.as_deref()
    }
}
impl std::fmt::Debug for ListEnvironmentTemplateVersionsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListEnvironmentTemplateVersionsOutput");
        formatter.field("next_token", &self.next_token);
        formatter.field("template_versions", &self.template_versions);
        formatter.finish()
    }
}
/// See [`ListEnvironmentTemplateVersionsOutput`](crate::output::ListEnvironmentTemplateVersionsOutput)
pub mod list_environment_template_versions_output {
    /// A builder for [`ListEnvironmentTemplateVersionsOutput`](crate::output::ListEnvironmentTemplateVersionsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) template_versions:
            std::option::Option<std::vec::Vec<crate::model::EnvironmentTemplateVersionSummary>>,
    }
    impl Builder {
        /// <p>A token to indicate the location of the next major or minor version in the array of major or minor versions of an environment template, after
        /// the list of major or minor versions that was previously requested.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// <p>A token to indicate the location of the next major or minor version in the array of major or minor versions of an environment template, after
        /// the list of major or minor versions that was previously requested.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `template_versions`.
        ///
        /// To override the contents of this collection use [`set_template_versions`](Self::set_template_versions).
        ///
        /// <p>An array of major or minor versions of an environment template detail data.</p>
        pub fn template_versions(
            mut self,
            input: impl Into<crate::model::EnvironmentTemplateVersionSummary>,
        ) -> Self {
            let mut v = self.template_versions.unwrap_or_default();
            v.push(input.into());
            self.template_versions = Some(v);
            self
        }
        /// <p>An array of major or minor versions of an environment template detail data.</p>
        pub fn set_template_versions(
            mut self,
            input: std::option::Option<
                std::vec::Vec<crate::model::EnvironmentTemplateVersionSummary>,
            >,
        ) -> Self {
            self.template_versions = input;
            self
        }
        /// Consumes the builder and constructs a [`ListEnvironmentTemplateVersionsOutput`](crate::output::ListEnvironmentTemplateVersionsOutput)
        pub fn build(self) -> crate::output::ListEnvironmentTemplateVersionsOutput {
            crate::output::ListEnvironmentTemplateVersionsOutput {
                next_token: self.next_token,
                template_versions: self.template_versions,
            }
        }
    }
}
impl ListEnvironmentTemplateVersionsOutput {
    /// Creates a new builder-style object to manufacture [`ListEnvironmentTemplateVersionsOutput`](crate::output::ListEnvironmentTemplateVersionsOutput)
    pub fn builder() -> crate::output::list_environment_template_versions_output::Builder {
        crate::output::list_environment_template_versions_output::Builder::default()
    }
}
// Output shape for the CreateEnvironmentTemplateVersion operation.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateEnvironmentTemplateVersionOutput {
    /// <p>The environment template detail data that's returned by Proton.</p>
    pub environment_template_version: std::option::Option<crate::model::EnvironmentTemplateVersion>,
}
impl CreateEnvironmentTemplateVersionOutput {
    /// <p>The environment template detail data that's returned by Proton.</p>
    pub fn environment_template_version(
        &self,
    ) -> std::option::Option<&crate::model::EnvironmentTemplateVersion> {
        self.environment_template_version.as_ref()
    }
}
impl std::fmt::Debug for CreateEnvironmentTemplateVersionOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("CreateEnvironmentTemplateVersionOutput");
        formatter.field(
            "environment_template_version",
            &self.environment_template_version,
        );
        formatter.finish()
    }
}
/// See [`CreateEnvironmentTemplateVersionOutput`](crate::output::CreateEnvironmentTemplateVersionOutput)
pub mod create_environment_template_version_output {
    /// A builder for [`CreateEnvironmentTemplateVersionOutput`](crate::output::CreateEnvironmentTemplateVersionOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) environment_template_version:
            std::option::Option<crate::model::EnvironmentTemplateVersion>,
    }
    impl Builder {
        /// <p>The environment template detail data that's returned by Proton.</p>
        pub fn environment_template_version(
            mut self,
            input: crate::model::EnvironmentTemplateVersion,
        ) -> Self {
            self.environment_template_version = Some(input);
            self
        }
        /// <p>The environment template detail data that's returned by Proton.</p>
        pub fn set_environment_template_version(
            mut self,
            input: std::option::Option<crate::model::EnvironmentTemplateVersion>,
        ) -> Self {
            self.environment_template_version = input;
            self
        }
        /// Consumes the builder and constructs a [`CreateEnvironmentTemplateVersionOutput`](crate::output::CreateEnvironmentTemplateVersionOutput)
        pub fn build(self) -> crate::output::CreateEnvironmentTemplateVersionOutput {
            crate::output::CreateEnvironmentTemplateVersionOutput {
                environment_template_version: self.environment_template_version,
            }
        }
    }
}
impl CreateEnvironmentTemplateVersionOutput {
    /// Creates a new builder-style object to manufacture [`CreateEnvironmentTemplateVersionOutput`](crate::output::CreateEnvironmentTemplateVersionOutput)
    pub fn builder() -> crate::output::create_environment_template_version_output::Builder {
        crate::output::create_environment_template_version_output::Builder::default()
    }
}
// Output shape for the DeleteEnvironmentTemplateVersion operation.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteEnvironmentTemplateVersionOutput {
    /// <p>The environment template version detail data that's returned by Proton.</p>
    pub environment_template_version: std::option::Option<crate::model::EnvironmentTemplateVersion>,
}
impl DeleteEnvironmentTemplateVersionOutput {
    /// <p>The environment template version detail data that's returned by Proton.</p>
    pub fn environment_template_version(
        &self,
    ) -> std::option::Option<&crate::model::EnvironmentTemplateVersion> {
        self.environment_template_version.as_ref()
    }
}
impl std::fmt::Debug for DeleteEnvironmentTemplateVersionOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("DeleteEnvironmentTemplateVersionOutput");
        formatter.field(
            "environment_template_version",
            &self.environment_template_version,
        );
        formatter.finish()
    }
}
/// See [`DeleteEnvironmentTemplateVersionOutput`](crate::output::DeleteEnvironmentTemplateVersionOutput)
pub mod delete_environment_template_version_output {
    /// A builder for [`DeleteEnvironmentTemplateVersionOutput`](crate::output::DeleteEnvironmentTemplateVersionOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) environment_template_version:
            std::option::Option<crate::model::EnvironmentTemplateVersion>,
    }
    impl Builder {
<p>The environment template version detail data that's returned by Proton.</p> pub fn environment_template_version( mut self, input: crate::model::EnvironmentTemplateVersion, ) -> Self { self.environment_template_version = Some(input); self } /// <p>The environment template version detail data that's returned by Proton.</p> pub fn set_environment_template_version( mut self, input: std::option::Option<crate::model::EnvironmentTemplateVersion>, ) -> Self { self.environment_template_version = input; self } /// Consumes the builder and constructs a [`DeleteEnvironmentTemplateVersionOutput`](crate::output::DeleteEnvironmentTemplateVersionOutput) pub fn build(self) -> crate::output::DeleteEnvironmentTemplateVersionOutput { crate::output::DeleteEnvironmentTemplateVersionOutput { environment_template_version: self.environment_template_version, } } } } impl DeleteEnvironmentTemplateVersionOutput { /// Creates a new builder-style object to manufacture [`DeleteEnvironmentTemplateVersionOutput`](crate::output::DeleteEnvironmentTemplateVersionOutput) pub fn builder() -> crate::output::delete_environment_template_version_output::Builder { crate::output::delete_environment_template_version_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateEnvironmentTemplateVersionOutput { /// <p>The environment template version detail data that's returned by Proton.</p> pub environment_template_version: std::option::Option<crate::model::EnvironmentTemplateVersion>, } impl UpdateEnvironmentTemplateVersionOutput { /// <p>The environment template version detail data that's returned by Proton.</p> pub fn environment_template_version( &self, ) -> std::option::Option<&crate::model::EnvironmentTemplateVersion> { self.environment_template_version.as_ref() } } impl std::fmt::Debug for UpdateEnvironmentTemplateVersionOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { let mut formatter = f.debug_struct("UpdateEnvironmentTemplateVersionOutput"); formatter.field( "environment_template_version", &self.environment_template_version, ); formatter.finish() } } /// See [`UpdateEnvironmentTemplateVersionOutput`](crate::output::UpdateEnvironmentTemplateVersionOutput) pub mod update_environment_template_version_output { /// A builder for [`UpdateEnvironmentTemplateVersionOutput`](crate::output::UpdateEnvironmentTemplateVersionOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) environment_template_version: std::option::Option<crate::model::EnvironmentTemplateVersion>, } impl Builder { /// <p>The environment template version detail data that's returned by Proton.</p> pub fn environment_template_version( mut self, input: crate::model::EnvironmentTemplateVersion, ) -> Self { self.environment_template_version = Some(input); self } /// <p>The environment template version detail data that's returned by Proton.</p> pub fn set_environment_template_version( mut self, input: std::option::Option<crate::model::EnvironmentTemplateVersion>, ) -> Self { self.environment_template_version = input; self } /// Consumes the builder and constructs a [`UpdateEnvironmentTemplateVersionOutput`](crate::output::UpdateEnvironmentTemplateVersionOutput) pub fn build(self) -> crate::output::UpdateEnvironmentTemplateVersionOutput { crate::output::UpdateEnvironmentTemplateVersionOutput { environment_template_version: self.environment_template_version, } } } } impl UpdateEnvironmentTemplateVersionOutput { /// Creates a new builder-style object to manufacture [`UpdateEnvironmentTemplateVersionOutput`](crate::output::UpdateEnvironmentTemplateVersionOutput) pub fn builder() -> crate::output::update_environment_template_version_output::Builder { crate::output::update_environment_template_version_output::Builder::default() } } #[allow(missing_docs)] // 
documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct GetEnvironmentTemplateVersionOutput { /// <p>The environment template version detail data that's returned by Proton.</p> pub environment_template_version: std::option::Option<crate::model::EnvironmentTemplateVersion>, } impl GetEnvironmentTemplateVersionOutput { /// <p>The environment template version detail data that's returned by Proton.</p> pub fn environment_template_version( &self, ) -> std::option::Option<&crate::model::EnvironmentTemplateVersion> { self.environment_template_version.as_ref() } } impl std::fmt::Debug for GetEnvironmentTemplateVersionOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetEnvironmentTemplateVersionOutput"); formatter.field( "environment_template_version", &self.environment_template_version, ); formatter.finish() } } /// See [`GetEnvironmentTemplateVersionOutput`](crate::output::GetEnvironmentTemplateVersionOutput) pub mod get_environment_template_version_output { /// A builder for [`GetEnvironmentTemplateVersionOutput`](crate::output::GetEnvironmentTemplateVersionOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) environment_template_version: std::option::Option<crate::model::EnvironmentTemplateVersion>, } impl Builder { /// <p>The environment template version detail data that's returned by Proton.</p> pub fn environment_template_version( mut self, input: crate::model::EnvironmentTemplateVersion, ) -> Self { self.environment_template_version = Some(input); self } /// <p>The environment template version detail data that's returned by Proton.</p> pub fn set_environment_template_version( mut self, input: std::option::Option<crate::model::EnvironmentTemplateVersion>, ) -> Self { self.environment_template_version = input; self } /// Consumes the builder and constructs a 
[`GetEnvironmentTemplateVersionOutput`](crate::output::GetEnvironmentTemplateVersionOutput) pub fn build(self) -> crate::output::GetEnvironmentTemplateVersionOutput { crate::output::GetEnvironmentTemplateVersionOutput { environment_template_version: self.environment_template_version, } } } } impl GetEnvironmentTemplateVersionOutput { /// Creates a new builder-style object to manufacture [`GetEnvironmentTemplateVersionOutput`](crate::output::GetEnvironmentTemplateVersionOutput) pub fn builder() -> crate::output::get_environment_template_version_output::Builder { crate::output::get_environment_template_version_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListEnvironmentTemplatesOutput { /// <p>A token to indicate the location of the next environment template in the array of environment templates, after the current requested list of /// environment templates.</p> pub next_token: std::option::Option<std::string::String>, /// <p>An array of environment templates with detail data.</p> pub templates: std::option::Option<std::vec::Vec<crate::model::EnvironmentTemplateSummary>>, } impl ListEnvironmentTemplatesOutput { /// <p>A token to indicate the location of the next environment template in the array of environment templates, after the current requested list of /// environment templates.</p> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } /// <p>An array of environment templates with detail data.</p> pub fn templates(&self) -> std::option::Option<&[crate::model::EnvironmentTemplateSummary]> { self.templates.as_deref() } } impl std::fmt::Debug for ListEnvironmentTemplatesOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListEnvironmentTemplatesOutput"); formatter.field("next_token", &self.next_token); formatter.field("templates", &self.templates); 
formatter.finish() } } /// See [`ListEnvironmentTemplatesOutput`](crate::output::ListEnvironmentTemplatesOutput) pub mod list_environment_templates_output { /// A builder for [`ListEnvironmentTemplatesOutput`](crate::output::ListEnvironmentTemplatesOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) next_token: std::option::Option<std::string::String>, pub(crate) templates: std::option::Option<std::vec::Vec<crate::model::EnvironmentTemplateSummary>>, } impl Builder { /// <p>A token to indicate the location of the next environment template in the array of environment templates, after the current requested list of /// environment templates.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>A token to indicate the location of the next environment template in the array of environment templates, after the current requested list of /// environment templates.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Appends an item to `templates`. /// /// To override the contents of this collection use [`set_templates`](Self::set_templates). 
/// /// <p>An array of environment templates with detail data.</p> pub fn templates( mut self, input: impl Into<crate::model::EnvironmentTemplateSummary>, ) -> Self { let mut v = self.templates.unwrap_or_default(); v.push(input.into()); self.templates = Some(v); self } /// <p>An array of environment templates with detail data.</p> pub fn set_templates( mut self, input: std::option::Option<std::vec::Vec<crate::model::EnvironmentTemplateSummary>>, ) -> Self { self.templates = input; self } /// Consumes the builder and constructs a [`ListEnvironmentTemplatesOutput`](crate::output::ListEnvironmentTemplatesOutput) pub fn build(self) -> crate::output::ListEnvironmentTemplatesOutput { crate::output::ListEnvironmentTemplatesOutput { next_token: self.next_token, templates: self.templates, } } } } impl ListEnvironmentTemplatesOutput { /// Creates a new builder-style object to manufacture [`ListEnvironmentTemplatesOutput`](crate::output::ListEnvironmentTemplatesOutput) pub fn builder() -> crate::output::list_environment_templates_output::Builder { crate::output::list_environment_templates_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateEnvironmentTemplateOutput { /// <p>The environment template detail data that's returned by Proton.</p> pub environment_template: std::option::Option<crate::model::EnvironmentTemplate>, } impl CreateEnvironmentTemplateOutput { /// <p>The environment template detail data that's returned by Proton.</p> pub fn environment_template(&self) -> std::option::Option<&crate::model::EnvironmentTemplate> { self.environment_template.as_ref() } } impl std::fmt::Debug for CreateEnvironmentTemplateOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateEnvironmentTemplateOutput"); formatter.field("environment_template", &self.environment_template); formatter.finish() } } /// 
See [`CreateEnvironmentTemplateOutput`](crate::output::CreateEnvironmentTemplateOutput) pub mod create_environment_template_output { /// A builder for [`CreateEnvironmentTemplateOutput`](crate::output::CreateEnvironmentTemplateOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) environment_template: std::option::Option<crate::model::EnvironmentTemplate>, } impl Builder { /// <p>The environment template detail data that's returned by Proton.</p> pub fn environment_template(mut self, input: crate::model::EnvironmentTemplate) -> Self { self.environment_template = Some(input); self } /// <p>The environment template detail data that's returned by Proton.</p> pub fn set_environment_template( mut self, input: std::option::Option<crate::model::EnvironmentTemplate>, ) -> Self { self.environment_template = input; self } /// Consumes the builder and constructs a [`CreateEnvironmentTemplateOutput`](crate::output::CreateEnvironmentTemplateOutput) pub fn build(self) -> crate::output::CreateEnvironmentTemplateOutput { crate::output::CreateEnvironmentTemplateOutput { environment_template: self.environment_template, } } } } impl CreateEnvironmentTemplateOutput { /// Creates a new builder-style object to manufacture [`CreateEnvironmentTemplateOutput`](crate::output::CreateEnvironmentTemplateOutput) pub fn builder() -> crate::output::create_environment_template_output::Builder { crate::output::create_environment_template_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeleteEnvironmentTemplateOutput { /// <p>The environment template detail data that's returned by Proton.</p> pub environment_template: std::option::Option<crate::model::EnvironmentTemplate>, } impl DeleteEnvironmentTemplateOutput { /// <p>The environment template detail data that's returned by Proton.</p> pub 
fn environment_template(&self) -> std::option::Option<&crate::model::EnvironmentTemplate> { self.environment_template.as_ref() } } impl std::fmt::Debug for DeleteEnvironmentTemplateOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeleteEnvironmentTemplateOutput"); formatter.field("environment_template", &self.environment_template); formatter.finish() } } /// See [`DeleteEnvironmentTemplateOutput`](crate::output::DeleteEnvironmentTemplateOutput) pub mod delete_environment_template_output { /// A builder for [`DeleteEnvironmentTemplateOutput`](crate::output::DeleteEnvironmentTemplateOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) environment_template: std::option::Option<crate::model::EnvironmentTemplate>, } impl Builder { /// <p>The environment template detail data that's returned by Proton.</p> pub fn environment_template(mut self, input: crate::model::EnvironmentTemplate) -> Self { self.environment_template = Some(input); self } /// <p>The environment template detail data that's returned by Proton.</p> pub fn set_environment_template( mut self, input: std::option::Option<crate::model::EnvironmentTemplate>, ) -> Self { self.environment_template = input; self } /// Consumes the builder and constructs a [`DeleteEnvironmentTemplateOutput`](crate::output::DeleteEnvironmentTemplateOutput) pub fn build(self) -> crate::output::DeleteEnvironmentTemplateOutput { crate::output::DeleteEnvironmentTemplateOutput { environment_template: self.environment_template, } } } } impl DeleteEnvironmentTemplateOutput { /// Creates a new builder-style object to manufacture [`DeleteEnvironmentTemplateOutput`](crate::output::DeleteEnvironmentTemplateOutput) pub fn builder() -> crate::output::delete_environment_template_output::Builder { crate::output::delete_environment_template_output::Builder::default() } } 
#[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateEnvironmentTemplateOutput { /// <p>The environment template detail data that's returned by Proton.</p> pub environment_template: std::option::Option<crate::model::EnvironmentTemplate>, } impl UpdateEnvironmentTemplateOutput { /// <p>The environment template detail data that's returned by Proton.</p> pub fn environment_template(&self) -> std::option::Option<&crate::model::EnvironmentTemplate> { self.environment_template.as_ref() } } impl std::fmt::Debug for UpdateEnvironmentTemplateOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateEnvironmentTemplateOutput"); formatter.field("environment_template", &self.environment_template); formatter.finish() } } /// See [`UpdateEnvironmentTemplateOutput`](crate::output::UpdateEnvironmentTemplateOutput) pub mod update_environment_template_output { /// A builder for [`UpdateEnvironmentTemplateOutput`](crate::output::UpdateEnvironmentTemplateOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) environment_template: std::option::Option<crate::model::EnvironmentTemplate>, } impl Builder { /// <p>The environment template detail data that's returned by Proton.</p> pub fn environment_template(mut self, input: crate::model::EnvironmentTemplate) -> Self { self.environment_template = Some(input); self } /// <p>The environment template detail data that's returned by Proton.</p> pub fn set_environment_template( mut self, input: std::option::Option<crate::model::EnvironmentTemplate>, ) -> Self { self.environment_template = input; self } /// Consumes the builder and constructs a [`UpdateEnvironmentTemplateOutput`](crate::output::UpdateEnvironmentTemplateOutput) pub fn build(self) -> crate::output::UpdateEnvironmentTemplateOutput { 
crate::output::UpdateEnvironmentTemplateOutput { environment_template: self.environment_template, } } } } impl UpdateEnvironmentTemplateOutput { /// Creates a new builder-style object to manufacture [`UpdateEnvironmentTemplateOutput`](crate::output::UpdateEnvironmentTemplateOutput) pub fn builder() -> crate::output::update_environment_template_output::Builder { crate::output::update_environment_template_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct GetEnvironmentTemplateOutput { /// <p>The environment template detail data that's returned by Proton.</p> pub environment_template: std::option::Option<crate::model::EnvironmentTemplate>, } impl GetEnvironmentTemplateOutput { /// <p>The environment template detail data that's returned by Proton.</p> pub fn environment_template(&self) -> std::option::Option<&crate::model::EnvironmentTemplate> { self.environment_template.as_ref() } } impl std::fmt::Debug for GetEnvironmentTemplateOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetEnvironmentTemplateOutput"); formatter.field("environment_template", &self.environment_template); formatter.finish() } } /// See [`GetEnvironmentTemplateOutput`](crate::output::GetEnvironmentTemplateOutput) pub mod get_environment_template_output { /// A builder for [`GetEnvironmentTemplateOutput`](crate::output::GetEnvironmentTemplateOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) environment_template: std::option::Option<crate::model::EnvironmentTemplate>, } impl Builder { /// <p>The environment template detail data that's returned by Proton.</p> pub fn environment_template(mut self, input: crate::model::EnvironmentTemplate) -> Self { self.environment_template = Some(input); self } /// <p>The environment template 
        /// detail data that's returned by Proton.</p>
        pub fn set_environment_template(
            mut self,
            input: std::option::Option<crate::model::EnvironmentTemplate>,
        ) -> Self {
            self.environment_template = input;
            self
        }
        /// Consumes the builder and constructs a [`GetEnvironmentTemplateOutput`](crate::output::GetEnvironmentTemplateOutput)
        pub fn build(self) -> crate::output::GetEnvironmentTemplateOutput {
            crate::output::GetEnvironmentTemplateOutput {
                environment_template: self.environment_template,
            }
        }
    }
}
impl GetEnvironmentTemplateOutput {
    /// Creates a new builder-style object to manufacture [`GetEnvironmentTemplateOutput`](crate::output::GetEnvironmentTemplateOutput)
    pub fn builder() -> crate::output::get_environment_template_output::Builder {
        crate::output::get_environment_template_output::Builder::default()
    }
}
// NOTE(review): the repeated output/builder pattern below looks like smithy-rs
// codegen output — if so, regenerate from the model rather than hand-editing.
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListEnvironmentsOutput {
    /// <p>A token to indicate the location of the next environment in the array of environments, after the current requested list of
    /// environments.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>An array of environment detail data summaries.</p>
    pub environments: std::option::Option<std::vec::Vec<crate::model::EnvironmentSummary>>,
}
impl ListEnvironmentsOutput {
    /// <p>A token to indicate the location of the next environment in the array of environments, after the current requested list of
    /// environments.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
    /// <p>An array of environment detail data summaries.</p>
    pub fn environments(&self) -> std::option::Option<&[crate::model::EnvironmentSummary]> {
        self.environments.as_deref()
    }
}
impl std::fmt::Debug for ListEnvironmentsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListEnvironmentsOutput");
        formatter.field("next_token", &self.next_token);
        formatter.field("environments", &self.environments);
        formatter.finish()
    }
}
/// See [`ListEnvironmentsOutput`](crate::output::ListEnvironmentsOutput)
pub mod list_environments_output {
    /// A builder for [`ListEnvironmentsOutput`](crate::output::ListEnvironmentsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) environments:
            std::option::Option<std::vec::Vec<crate::model::EnvironmentSummary>>,
    }
    impl Builder {
        /// <p>A token to indicate the location of the next environment in the array of environments, after the current requested list of
        /// environments.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// <p>A token to indicate the location of the next environment in the array of environments, after the current requested list of
        /// environments.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `environments`.
        ///
        /// To override the contents of this collection use [`set_environments`](Self::set_environments).
        ///
        /// <p>An array of environment detail data summaries.</p>
        pub fn environments(mut self, input: impl Into<crate::model::EnvironmentSummary>) -> Self {
            let mut v = self.environments.unwrap_or_default();
            v.push(input.into());
            self.environments = Some(v);
            self
        }
        /// <p>An array of environment detail data summaries.</p>
        pub fn set_environments(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::EnvironmentSummary>>,
        ) -> Self {
            self.environments = input;
            self
        }
        /// Consumes the builder and constructs a [`ListEnvironmentsOutput`](crate::output::ListEnvironmentsOutput)
        pub fn build(self) -> crate::output::ListEnvironmentsOutput {
            crate::output::ListEnvironmentsOutput {
                next_token: self.next_token,
                environments: self.environments,
            }
        }
    }
}
impl ListEnvironmentsOutput {
    /// Creates a new builder-style object to manufacture [`ListEnvironmentsOutput`](crate::output::ListEnvironmentsOutput)
    pub fn builder() -> crate::output::list_environments_output::Builder {
        crate::output::list_environments_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateEnvironmentOutput {
    /// <p>The environment detail data that's returned by Proton.</p>
    pub environment: std::option::Option<crate::model::Environment>,
}
impl CreateEnvironmentOutput {
    /// <p>The environment detail data that's returned by Proton.</p>
    pub fn environment(&self) -> std::option::Option<&crate::model::Environment> {
        self.environment.as_ref()
    }
}
impl std::fmt::Debug for CreateEnvironmentOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("CreateEnvironmentOutput");
        formatter.field("environment", &self.environment);
        formatter.finish()
    }
}
/// See [`CreateEnvironmentOutput`](crate::output::CreateEnvironmentOutput)
pub mod create_environment_output {
    /// A builder for [`CreateEnvironmentOutput`](crate::output::CreateEnvironmentOutput)
    // All builder fields are Option and simply moved into the output by `build`.
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) environment: std::option::Option<crate::model::Environment>,
    }
    impl Builder {
        /// <p>The environment detail data that's returned by Proton.</p>
        pub fn environment(mut self, input: crate::model::Environment) -> Self {
            self.environment = Some(input);
            self
        }
        /// <p>The environment detail data that's returned by Proton.</p>
        pub fn set_environment(
            mut self,
            input: std::option::Option<crate::model::Environment>,
        ) -> Self {
            self.environment = input;
            self
        }
        /// Consumes the builder and constructs a [`CreateEnvironmentOutput`](crate::output::CreateEnvironmentOutput)
        pub fn build(self) -> crate::output::CreateEnvironmentOutput {
            crate::output::CreateEnvironmentOutput {
                environment: self.environment,
            }
        }
    }
}
impl CreateEnvironmentOutput {
    /// Creates a new builder-style object to manufacture [`CreateEnvironmentOutput`](crate::output::CreateEnvironmentOutput)
    pub fn builder() -> crate::output::create_environment_output::Builder {
        crate::output::create_environment_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteEnvironmentOutput {
    /// <p>The environment detail data that's returned by Proton.</p>
    pub environment: std::option::Option<crate::model::Environment>,
}
impl DeleteEnvironmentOutput {
    /// <p>The environment detail data that's returned by Proton.</p>
    pub fn environment(&self) -> std::option::Option<&crate::model::Environment> {
        self.environment.as_ref()
    }
}
impl std::fmt::Debug for DeleteEnvironmentOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("DeleteEnvironmentOutput");
        formatter.field("environment", &self.environment);
        formatter.finish()
    }
}
/// See [`DeleteEnvironmentOutput`](crate::output::DeleteEnvironmentOutput)
pub mod delete_environment_output {
    /// A builder for [`DeleteEnvironmentOutput`](crate::output::DeleteEnvironmentOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) environment: std::option::Option<crate::model::Environment>,
    }
    impl Builder {
        /// <p>The environment detail data that's returned by Proton.</p>
        pub fn environment(mut self, input: crate::model::Environment) -> Self {
            self.environment = Some(input);
            self
        }
        /// <p>The environment detail data that's returned by Proton.</p>
        pub fn set_environment(
            mut self,
            input: std::option::Option<crate::model::Environment>,
        ) -> Self {
            self.environment = input;
            self
        }
        /// Consumes the builder and constructs a [`DeleteEnvironmentOutput`](crate::output::DeleteEnvironmentOutput)
        pub fn build(self) -> crate::output::DeleteEnvironmentOutput {
            crate::output::DeleteEnvironmentOutput {
                environment: self.environment,
            }
        }
    }
}
impl DeleteEnvironmentOutput {
    /// Creates a new builder-style object to manufacture [`DeleteEnvironmentOutput`](crate::output::DeleteEnvironmentOutput)
    pub fn builder() -> crate::output::delete_environment_output::Builder {
        crate::output::delete_environment_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateEnvironmentOutput {
    /// <p>The environment detail data that's returned by Proton.</p>
    pub environment: std::option::Option<crate::model::Environment>,
}
impl UpdateEnvironmentOutput {
    /// <p>The environment detail data that's returned by Proton.</p>
    pub fn environment(&self) -> std::option::Option<&crate::model::Environment> {
        self.environment.as_ref()
    }
}
impl std::fmt::Debug for UpdateEnvironmentOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("UpdateEnvironmentOutput");
        formatter.field("environment", &self.environment);
        formatter.finish()
    }
}
/// See [`UpdateEnvironmentOutput`](crate::output::UpdateEnvironmentOutput)
pub mod update_environment_output {
    /// A builder for [`UpdateEnvironmentOutput`](crate::output::UpdateEnvironmentOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) environment: std::option::Option<crate::model::Environment>,
    }
    impl Builder {
        /// <p>The environment detail data that's returned by Proton.</p>
        pub fn environment(mut self, input: crate::model::Environment) -> Self {
            self.environment = Some(input);
            self
        }
        /// <p>The environment detail data that's returned by Proton.</p>
        pub fn set_environment(
            mut self,
            input: std::option::Option<crate::model::Environment>,
        ) -> Self {
            self.environment = input;
            self
        }
        /// Consumes the builder and constructs a [`UpdateEnvironmentOutput`](crate::output::UpdateEnvironmentOutput)
        pub fn build(self) -> crate::output::UpdateEnvironmentOutput {
            crate::output::UpdateEnvironmentOutput {
                environment: self.environment,
            }
        }
    }
}
impl UpdateEnvironmentOutput {
    /// Creates a new builder-style object to manufacture [`UpdateEnvironmentOutput`](crate::output::UpdateEnvironmentOutput)
    pub fn builder() -> crate::output::update_environment_output::Builder {
        crate::output::update_environment_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetEnvironmentOutput {
    /// <p>The environment detail data that's returned by Proton.</p>
    pub environment: std::option::Option<crate::model::Environment>,
}
impl GetEnvironmentOutput {
    /// <p>The environment detail data that's returned by Proton.</p>
    pub fn environment(&self) -> std::option::Option<&crate::model::Environment> {
        self.environment.as_ref()
    }
}
impl std::fmt::Debug for GetEnvironmentOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("GetEnvironmentOutput");
        formatter.field("environment", &self.environment);
        formatter.finish()
    }
}
/// See [`GetEnvironmentOutput`](crate::output::GetEnvironmentOutput)
pub mod get_environment_output {
    /// A builder for [`GetEnvironmentOutput`](crate::output::GetEnvironmentOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) environment: std::option::Option<crate::model::Environment>,
    }
    impl Builder {
        /// <p>The environment detail data that's returned by Proton.</p>
        pub fn environment(mut self, input: crate::model::Environment) -> Self {
            self.environment = Some(input);
            self
        }
        /// <p>The environment detail data that's returned by Proton.</p>
        pub fn set_environment(
            mut self,
            input: std::option::Option<crate::model::Environment>,
        ) -> Self {
            self.environment = input;
            self
        }
        /// Consumes the builder and constructs a [`GetEnvironmentOutput`](crate::output::GetEnvironmentOutput)
        pub fn build(self) -> crate::output::GetEnvironmentOutput {
            crate::output::GetEnvironmentOutput {
                environment: self.environment,
            }
        }
    }
}
impl GetEnvironmentOutput {
    /// Creates a new builder-style object to manufacture [`GetEnvironmentOutput`](crate::output::GetEnvironmentOutput)
    pub fn builder() -> crate::output::get_environment_output::Builder {
        crate::output::get_environment_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListEnvironmentProvisionedResourcesOutput {
    /// <p>A token to indicate the location of the next environment provisioned resource in the array of provisioned resources, after the current
    /// requested list of environment provisioned resources.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>An array of environment provisioned resources.</p>
    pub provisioned_resources:
        std::option::Option<std::vec::Vec<crate::model::ProvisionedResource>>,
}
impl ListEnvironmentProvisionedResourcesOutput {
    /// <p>A token to indicate the location of the next environment provisioned resource in the array of provisioned resources, after the current
    /// requested list of environment provisioned resources.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
    /// <p>An array of environment provisioned resources.</p>
    pub fn provisioned_resources(
        &self,
    ) -> std::option::Option<&[crate::model::ProvisionedResource]> {
        self.provisioned_resources.as_deref()
    }
}
impl std::fmt::Debug for ListEnvironmentProvisionedResourcesOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListEnvironmentProvisionedResourcesOutput");
        formatter.field("next_token", &self.next_token);
        formatter.field("provisioned_resources", &self.provisioned_resources);
        formatter.finish()
    }
}
/// See [`ListEnvironmentProvisionedResourcesOutput`](crate::output::ListEnvironmentProvisionedResourcesOutput)
pub mod list_environment_provisioned_resources_output {
    /// A builder for [`ListEnvironmentProvisionedResourcesOutput`](crate::output::ListEnvironmentProvisionedResourcesOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) provisioned_resources:
            std::option::Option<std::vec::Vec<crate::model::ProvisionedResource>>,
    }
    impl Builder {
        /// <p>A token to indicate the location of the next environment provisioned resource in the array of provisioned resources, after the current
        /// requested list of environment provisioned resources.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// <p>A token to indicate the location of the next environment provisioned resource in the
        /// array of provisioned resources, after the current
        /// requested list of environment provisioned resources.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `provisioned_resources`.
        ///
        /// To override the contents of this collection use [`set_provisioned_resources`](Self::set_provisioned_resources).
        ///
        /// <p>An array of environment provisioned resources.</p>
        pub fn provisioned_resources(
            mut self,
            input: impl Into<crate::model::ProvisionedResource>,
        ) -> Self {
            let mut v = self.provisioned_resources.unwrap_or_default();
            v.push(input.into());
            self.provisioned_resources = Some(v);
            self
        }
        /// <p>An array of environment provisioned resources.</p>
        pub fn set_provisioned_resources(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::ProvisionedResource>>,
        ) -> Self {
            self.provisioned_resources = input;
            self
        }
        /// Consumes the builder and constructs a [`ListEnvironmentProvisionedResourcesOutput`](crate::output::ListEnvironmentProvisionedResourcesOutput)
        pub fn build(self) -> crate::output::ListEnvironmentProvisionedResourcesOutput {
            crate::output::ListEnvironmentProvisionedResourcesOutput {
                next_token: self.next_token,
                provisioned_resources: self.provisioned_resources,
            }
        }
    }
}
impl ListEnvironmentProvisionedResourcesOutput {
    /// Creates a new builder-style object to manufacture [`ListEnvironmentProvisionedResourcesOutput`](crate::output::ListEnvironmentProvisionedResourcesOutput)
    pub fn builder() -> crate::output::list_environment_provisioned_resources_output::Builder {
        crate::output::list_environment_provisioned_resources_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListEnvironmentOutputsOutput {
    /// <p>A token to indicate the location of the next environment output in the array of environment outputs, after the current requested list of
    /// environment outputs.</p>
    pub next_token: std::option::Option<std::string::String>,
    /// <p>An array of environment outputs with detail data.</p>
    pub outputs: std::option::Option<std::vec::Vec<crate::model::Output>>,
}
impl ListEnvironmentOutputsOutput {
    /// <p>A token to indicate the location of the next environment output in the array of environment outputs, after the current requested list of
    /// environment outputs.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
    /// <p>An array of environment outputs with detail data.</p>
    pub fn outputs(&self) -> std::option::Option<&[crate::model::Output]> {
        self.outputs.as_deref()
    }
}
impl std::fmt::Debug for ListEnvironmentOutputsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListEnvironmentOutputsOutput");
        formatter.field("next_token", &self.next_token);
        formatter.field("outputs", &self.outputs);
        formatter.finish()
    }
}
/// See [`ListEnvironmentOutputsOutput`](crate::output::ListEnvironmentOutputsOutput)
pub mod list_environment_outputs_output {
    /// A builder for [`ListEnvironmentOutputsOutput`](crate::output::ListEnvironmentOutputsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) next_token: std::option::Option<std::string::String>,
        pub(crate) outputs: std::option::Option<std::vec::Vec<crate::model::Output>>,
    }
    impl Builder {
        /// <p>A token to indicate the location of the next environment output in the array of environment outputs, after the current requested list of
        /// environment outputs.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// <p>A token to indicate the location of the next environment output in the array of environment outputs, after the current requested list of
        /// environment outputs.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Appends an item to `outputs`.
        ///
        /// To override the contents of this collection use [`set_outputs`](Self::set_outputs).
        ///
        /// <p>An array of environment outputs with detail data.</p>
        pub fn outputs(mut self, input: impl Into<crate::model::Output>) -> Self {
            let mut v = self.outputs.unwrap_or_default();
            v.push(input.into());
            self.outputs = Some(v);
            self
        }
        /// <p>An array of environment outputs with detail data.</p>
        pub fn set_outputs(
            mut self,
            input: std::option::Option<std::vec::Vec<crate::model::Output>>,
        ) -> Self {
            self.outputs = input;
            self
        }
        /// Consumes the builder and constructs a [`ListEnvironmentOutputsOutput`](crate::output::ListEnvironmentOutputsOutput)
        pub fn build(self) -> crate::output::ListEnvironmentOutputsOutput {
            crate::output::ListEnvironmentOutputsOutput {
                next_token: self.next_token,
                outputs: self.outputs,
            }
        }
    }
}
impl ListEnvironmentOutputsOutput {
    /// Creates a new builder-style object to manufacture [`ListEnvironmentOutputsOutput`](crate::output::ListEnvironmentOutputsOutput)
    pub fn builder() -> crate::output::list_environment_outputs_output::Builder {
        crate::output::list_environment_outputs_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct RejectEnvironmentAccountConnectionOutput {
    /// <p>The environment connection account detail data that's returned by Proton.</p>
    pub environment_account_connection:
        std::option::Option<crate::model::EnvironmentAccountConnection>,
}
impl RejectEnvironmentAccountConnectionOutput {
    /// <p>The environment connection account detail data that's returned by Proton.</p>
    pub fn environment_account_connection(
        &self,
    ) -> std::option::Option<&crate::model::EnvironmentAccountConnection> {
        self.environment_account_connection.as_ref()
    }
}
impl std::fmt::Debug for RejectEnvironmentAccountConnectionOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("RejectEnvironmentAccountConnectionOutput");
        formatter.field(
            "environment_account_connection",
            &self.environment_account_connection,
        );
        formatter.finish()
    }
}
/// See [`RejectEnvironmentAccountConnectionOutput`](crate::output::RejectEnvironmentAccountConnectionOutput)
pub mod reject_environment_account_connection_output {
    /// A builder for [`RejectEnvironmentAccountConnectionOutput`](crate::output::RejectEnvironmentAccountConnectionOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) environment_account_connection:
            std::option::Option<crate::model::EnvironmentAccountConnection>,
    }
    impl Builder {
        /// <p>The environment connection account detail data that's returned by Proton.</p>
        pub fn environment_account_connection(
            mut self,
            input: crate::model::EnvironmentAccountConnection,
        ) -> Self {
            self.environment_account_connection = Some(input);
            self
        }
        /// <p>The environment connection account detail data that's returned by Proton.</p>
        pub fn set_environment_account_connection(
            mut self,
            input: std::option::Option<crate::model::EnvironmentAccountConnection>,
        ) -> Self {
            self.environment_account_connection = input;
            self
        }
        /// Consumes the builder and constructs a [`RejectEnvironmentAccountConnectionOutput`](crate::output::RejectEnvironmentAccountConnectionOutput)
        pub fn build(self) -> crate::output::RejectEnvironmentAccountConnectionOutput {
            crate::output::RejectEnvironmentAccountConnectionOutput {
                environment_account_connection: self.environment_account_connection,
            }
        }
    }
}
impl RejectEnvironmentAccountConnectionOutput {
    /// Creates a new builder-style object to manufacture [`RejectEnvironmentAccountConnectionOutput`](crate::output::RejectEnvironmentAccountConnectionOutput)
    pub fn builder() -> crate::output::reject_environment_account_connection_output::Builder {
        crate::output::reject_environment_account_connection_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct AcceptEnvironmentAccountConnectionOutput {
    /// <p>The environment account connection data that's returned by Proton.</p>
    pub environment_account_connection:
        std::option::Option<crate::model::EnvironmentAccountConnection>,
}
impl AcceptEnvironmentAccountConnectionOutput {
    /// <p>The environment account connection data that's returned by Proton.</p>
    pub fn environment_account_connection(
        &self,
    ) -> std::option::Option<&crate::model::EnvironmentAccountConnection> {
        self.environment_account_connection.as_ref()
    }
}
impl std::fmt::Debug for AcceptEnvironmentAccountConnectionOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("AcceptEnvironmentAccountConnectionOutput");
        formatter.field(
            "environment_account_connection",
            &self.environment_account_connection,
        );
        formatter.finish()
    }
}
/// See [`AcceptEnvironmentAccountConnectionOutput`](crate::output::AcceptEnvironmentAccountConnectionOutput)
pub mod accept_environment_account_connection_output {
    /// A builder for [`AcceptEnvironmentAccountConnectionOutput`](crate::output::AcceptEnvironmentAccountConnectionOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) environment_account_connection:
            std::option::Option<crate::model::EnvironmentAccountConnection>,
    }
    impl Builder {
        /// <p>The environment account connection data that's returned by Proton.</p>
        pub fn environment_account_connection(
            mut self,
            input: crate::model::EnvironmentAccountConnection,
        ) -> Self {
            self.environment_account_connection = Some(input);
            self
        }
        /// <p>The environment account connection data that's returned by Proton.</p>
        pub fn set_environment_account_connection(
            mut self,
            input:
                std::option::Option<crate::model::EnvironmentAccountConnection>,
        ) -> Self {
            self.environment_account_connection = input;
            self
        }
        /// Consumes the builder and constructs a [`AcceptEnvironmentAccountConnectionOutput`](crate::output::AcceptEnvironmentAccountConnectionOutput)
        pub fn build(self) -> crate::output::AcceptEnvironmentAccountConnectionOutput {
            crate::output::AcceptEnvironmentAccountConnectionOutput {
                environment_account_connection: self.environment_account_connection,
            }
        }
    }
}
impl AcceptEnvironmentAccountConnectionOutput {
    /// Creates a new builder-style object to manufacture [`AcceptEnvironmentAccountConnectionOutput`](crate::output::AcceptEnvironmentAccountConnectionOutput)
    pub fn builder() -> crate::output::accept_environment_account_connection_output::Builder {
        crate::output::accept_environment_account_connection_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListEnvironmentAccountConnectionsOutput {
    /// <p>An array of environment account connections with details that's returned by Proton.
    /// </p>
    pub environment_account_connections:
        std::option::Option<std::vec::Vec<crate::model::EnvironmentAccountConnectionSummary>>,
    /// <p>A token to indicate the location of the next environment account connection in the array of environment account connections, after the current
    /// requested list of environment account connections.</p>
    pub next_token: std::option::Option<std::string::String>,
}
impl ListEnvironmentAccountConnectionsOutput {
    /// <p>An array of environment account connections with details that's returned by Proton.
    /// </p>
    pub fn environment_account_connections(
        &self,
    ) -> std::option::Option<&[crate::model::EnvironmentAccountConnectionSummary]> {
        self.environment_account_connections.as_deref()
    }
    /// <p>A token to indicate the location of the next environment account connection in the array of environment account connections, after the current
    /// requested list of environment account connections.</p>
    pub fn next_token(&self) -> std::option::Option<&str> {
        self.next_token.as_deref()
    }
}
impl std::fmt::Debug for ListEnvironmentAccountConnectionsOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("ListEnvironmentAccountConnectionsOutput");
        formatter.field(
            "environment_account_connections",
            &self.environment_account_connections,
        );
        formatter.field("next_token", &self.next_token);
        formatter.finish()
    }
}
/// See [`ListEnvironmentAccountConnectionsOutput`](crate::output::ListEnvironmentAccountConnectionsOutput)
pub mod list_environment_account_connections_output {
    /// A builder for [`ListEnvironmentAccountConnectionsOutput`](crate::output::ListEnvironmentAccountConnectionsOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) environment_account_connections:
            std::option::Option<std::vec::Vec<crate::model::EnvironmentAccountConnectionSummary>>,
        pub(crate) next_token: std::option::Option<std::string::String>,
    }
    impl Builder {
        /// Appends an item to `environment_account_connections`.
        ///
        /// To override the contents of this collection use [`set_environment_account_connections`](Self::set_environment_account_connections).
        ///
        /// <p>An array of environment account connections with details that's returned by Proton.
        /// </p>
        pub fn environment_account_connections(
            mut self,
            input: impl Into<crate::model::EnvironmentAccountConnectionSummary>,
        ) -> Self {
            let mut v = self.environment_account_connections.unwrap_or_default();
            v.push(input.into());
            self.environment_account_connections = Some(v);
            self
        }
        /// <p>An array of environment account connections with details that's returned by Proton.
        /// </p>
        pub fn set_environment_account_connections(
            mut self,
            input: std::option::Option<
                std::vec::Vec<crate::model::EnvironmentAccountConnectionSummary>,
            >,
        ) -> Self {
            self.environment_account_connections = input;
            self
        }
        /// <p>A token to indicate the location of the next environment account connection in the array of environment account connections, after the current
        /// requested list of environment account connections.</p>
        pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
            self.next_token = Some(input.into());
            self
        }
        /// <p>A token to indicate the location of the next environment account connection in the array of environment account connections, after the current
        /// requested list of environment account connections.</p>
        pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.next_token = input;
            self
        }
        /// Consumes the builder and constructs a [`ListEnvironmentAccountConnectionsOutput`](crate::output::ListEnvironmentAccountConnectionsOutput)
        pub fn build(self) -> crate::output::ListEnvironmentAccountConnectionsOutput {
            crate::output::ListEnvironmentAccountConnectionsOutput {
                environment_account_connections: self.environment_account_connections,
                next_token: self.next_token,
            }
        }
    }
}
impl ListEnvironmentAccountConnectionsOutput {
    /// Creates a new builder-style object to manufacture [`ListEnvironmentAccountConnectionsOutput`](crate::output::ListEnvironmentAccountConnectionsOutput)
    pub fn builder() -> crate::output::list_environment_account_connections_output::Builder {
        crate::output::list_environment_account_connections_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct CreateEnvironmentAccountConnectionOutput {
    /// <p>The environment account connection detail data that's returned by Proton.</p>
    pub environment_account_connection:
        std::option::Option<crate::model::EnvironmentAccountConnection>,
}
impl CreateEnvironmentAccountConnectionOutput {
    /// <p>The environment account connection detail data that's returned by Proton.</p>
    pub fn environment_account_connection(
        &self,
    ) -> std::option::Option<&crate::model::EnvironmentAccountConnection> {
        self.environment_account_connection.as_ref()
    }
}
impl std::fmt::Debug for CreateEnvironmentAccountConnectionOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("CreateEnvironmentAccountConnectionOutput");
        formatter.field(
            "environment_account_connection",
            &self.environment_account_connection,
        );
        formatter.finish()
    }
}
/// See [`CreateEnvironmentAccountConnectionOutput`](crate::output::CreateEnvironmentAccountConnectionOutput)
pub mod create_environment_account_connection_output {
    /// A builder for [`CreateEnvironmentAccountConnectionOutput`](crate::output::CreateEnvironmentAccountConnectionOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) environment_account_connection:
            std::option::Option<crate::model::EnvironmentAccountConnection>,
    }
    impl Builder {
        /// <p>The environment account connection detail data that's returned by Proton.</p>
        pub fn environment_account_connection(
            mut self,
            input: crate::model::EnvironmentAccountConnection,
        ) -> Self {
            self.environment_account_connection = Some(input);
            self
        }
        /// <p>The environment account connection detail data that's returned by Proton.</p>
        pub fn set_environment_account_connection(
            mut self,
            input: std::option::Option<crate::model::EnvironmentAccountConnection>,
        ) -> Self {
            self.environment_account_connection = input;
            self
        }
        /// Consumes the builder and constructs a [`CreateEnvironmentAccountConnectionOutput`](crate::output::CreateEnvironmentAccountConnectionOutput)
        pub fn build(self) -> crate::output::CreateEnvironmentAccountConnectionOutput {
            crate::output::CreateEnvironmentAccountConnectionOutput {
                environment_account_connection: self.environment_account_connection,
            }
        }
    }
}
impl CreateEnvironmentAccountConnectionOutput {
    /// Creates a new builder-style object to manufacture [`CreateEnvironmentAccountConnectionOutput`](crate::output::CreateEnvironmentAccountConnectionOutput)
    pub fn builder() -> crate::output::create_environment_account_connection_output::Builder {
        crate::output::create_environment_account_connection_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteEnvironmentAccountConnectionOutput {
    /// <p>The environment account connection detail data that's returned by Proton.</p>
    pub environment_account_connection:
        std::option::Option<crate::model::EnvironmentAccountConnection>,
}
impl DeleteEnvironmentAccountConnectionOutput {
    /// <p>The environment account connection detail data that's returned by Proton.</p>
    pub fn environment_account_connection(
        &self,
    ) -> std::option::Option<&crate::model::EnvironmentAccountConnection> {
        self.environment_account_connection.as_ref()
    }
}
impl std::fmt::Debug for DeleteEnvironmentAccountConnectionOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("DeleteEnvironmentAccountConnectionOutput");
        formatter.field(
            "environment_account_connection",
            &self.environment_account_connection,
        );
        formatter.finish()
    }
}
/// See [`DeleteEnvironmentAccountConnectionOutput`](crate::output::DeleteEnvironmentAccountConnectionOutput)
pub mod delete_environment_account_connection_output {
    /// A builder for [`DeleteEnvironmentAccountConnectionOutput`](crate::output::DeleteEnvironmentAccountConnectionOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) environment_account_connection:
            std::option::Option<crate::model::EnvironmentAccountConnection>,
    }
    impl Builder {
        /// <p>The environment account connection detail data that's returned by Proton.</p>
        pub fn environment_account_connection(
            mut self,
            input: crate::model::EnvironmentAccountConnection,
        ) -> Self {
            self.environment_account_connection = Some(input);
            self
        }
        /// <p>The environment account connection detail data that's returned by Proton.</p>
        pub fn set_environment_account_connection(
            mut self,
            input: std::option::Option<crate::model::EnvironmentAccountConnection>,
        ) -> Self {
            self.environment_account_connection = input;
            self
        }
        /// Consumes the builder and constructs a [`DeleteEnvironmentAccountConnectionOutput`](crate::output::DeleteEnvironmentAccountConnectionOutput)
        pub fn build(self) -> crate::output::DeleteEnvironmentAccountConnectionOutput {
            crate::output::DeleteEnvironmentAccountConnectionOutput {
                environment_account_connection: self.environment_account_connection,
            }
        }
    }
}
impl DeleteEnvironmentAccountConnectionOutput {
    /// Creates a new builder-style object to manufacture [`DeleteEnvironmentAccountConnectionOutput`](crate::output::DeleteEnvironmentAccountConnectionOutput)
    pub fn builder() -> crate::output::delete_environment_account_connection_output::Builder {
        crate::output::delete_environment_account_connection_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UpdateEnvironmentAccountConnectionOutput {
    /// <p>The environment account connection detail data that's returned by Proton.</p>
    pub environment_account_connection:
        std::option::Option<crate::model::EnvironmentAccountConnection>,
}
impl UpdateEnvironmentAccountConnectionOutput {
    /// <p>The environment account connection detail data that's returned by Proton.</p>
    pub fn environment_account_connection(
        &self,
    ) -> std::option::Option<&crate::model::EnvironmentAccountConnection> {
        self.environment_account_connection.as_ref()
    }
}
impl std::fmt::Debug for UpdateEnvironmentAccountConnectionOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("UpdateEnvironmentAccountConnectionOutput");
        formatter.field(
            "environment_account_connection",
            &self.environment_account_connection,
        );
        formatter.finish()
    }
}
/// See [`UpdateEnvironmentAccountConnectionOutput`](crate::output::UpdateEnvironmentAccountConnectionOutput)
pub mod update_environment_account_connection_output {
    /// A builder for [`UpdateEnvironmentAccountConnectionOutput`](crate::output::UpdateEnvironmentAccountConnectionOutput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) environment_account_connection:
            std::option::Option<crate::model::EnvironmentAccountConnection>,
    }
    impl Builder {
        /// <p>The environment account connection detail data that's returned by Proton.</p>
        pub fn environment_account_connection(
            mut self,
            input: crate::model::EnvironmentAccountConnection,
        ) -> Self {
            self.environment_account_connection = Some(input);
            self
        }
        /// <p>The environment account connection detail data that's returned by Proton.</p>
        pub fn set_environment_account_connection(
            mut self,
            input: std::option::Option<crate::model::EnvironmentAccountConnection>,
        ) -> Self {
            self.environment_account_connection = input;
            self
        }
        /// Consumes the builder and constructs a [`UpdateEnvironmentAccountConnectionOutput`](crate::output::UpdateEnvironmentAccountConnectionOutput)
        pub fn build(self) -> crate::output::UpdateEnvironmentAccountConnectionOutput {
            crate::output::UpdateEnvironmentAccountConnectionOutput {
                environment_account_connection: self.environment_account_connection,
            }
        }
    }
}
impl UpdateEnvironmentAccountConnectionOutput {
    /// Creates a new builder-style object to manufacture [`UpdateEnvironmentAccountConnectionOutput`](crate::output::UpdateEnvironmentAccountConnectionOutput)
    pub fn builder() -> crate::output::update_environment_account_connection_output::Builder {
        crate::output::update_environment_account_connection_output::Builder::default()
    }
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetEnvironmentAccountConnectionOutput {
    /// <p>The environment account connection detail data that's returned by Proton.</p>
    pub environment_account_connection:
        std::option::Option<crate::model::EnvironmentAccountConnection>,
}
impl GetEnvironmentAccountConnectionOutput {
    /// <p>The environment account connection detail data that's returned by Proton.</p>
    pub fn environment_account_connection(
        &self,
    ) -> std::option::Option<&crate::model::EnvironmentAccountConnection> {
        self.environment_account_connection.as_ref()
    }
}
impl std::fmt::Debug for GetEnvironmentAccountConnectionOutput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("GetEnvironmentAccountConnectionOutput");
        formatter.field(
            "environment_account_connection",
            &self.environment_account_connection,
        );
        formatter.finish()
    }
}
/// See [`GetEnvironmentAccountConnectionOutput`](crate::output::GetEnvironmentAccountConnectionOutput)
pub mod get_environment_account_connection_output {
    /// A builder for [`GetEnvironmentAccountConnectionOutput`](crate::output::GetEnvironmentAccountConnectionOutput)
    #[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) environment_account_connection: std::option::Option<crate::model::EnvironmentAccountConnection>, } impl Builder { /// <p>The environment account connection detail data that's returned by Proton.</p> pub fn environment_account_connection( mut self, input: crate::model::EnvironmentAccountConnection, ) -> Self { self.environment_account_connection = Some(input); self } /// <p>The environment account connection detail data that's returned by Proton.</p> pub fn set_environment_account_connection( mut self, input: std::option::Option<crate::model::EnvironmentAccountConnection>, ) -> Self { self.environment_account_connection = input; self } /// Consumes the builder and constructs a [`GetEnvironmentAccountConnectionOutput`](crate::output::GetEnvironmentAccountConnectionOutput) pub fn build(self) -> crate::output::GetEnvironmentAccountConnectionOutput { crate::output::GetEnvironmentAccountConnectionOutput { environment_account_connection: self.environment_account_connection, } } } } impl GetEnvironmentAccountConnectionOutput { /// Creates a new builder-style object to manufacture [`GetEnvironmentAccountConnectionOutput`](crate::output::GetEnvironmentAccountConnectionOutput) pub fn builder() -> crate::output::get_environment_account_connection_output::Builder { crate::output::get_environment_account_connection_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateAccountSettingsOutput { /// <p>The Proton pipeline service role repository detail data that's returned by Proton.</p> pub account_settings: std::option::Option<crate::model::AccountSettings>, } impl UpdateAccountSettingsOutput { /// <p>The Proton pipeline service role repository detail data that's returned by Proton.</p> pub fn account_settings(&self) -> 
std::option::Option<&crate::model::AccountSettings> { self.account_settings.as_ref() } } impl std::fmt::Debug for UpdateAccountSettingsOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateAccountSettingsOutput"); formatter.field("account_settings", &self.account_settings); formatter.finish() } } /// See [`UpdateAccountSettingsOutput`](crate::output::UpdateAccountSettingsOutput) pub mod update_account_settings_output { /// A builder for [`UpdateAccountSettingsOutput`](crate::output::UpdateAccountSettingsOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) account_settings: std::option::Option<crate::model::AccountSettings>, } impl Builder { /// <p>The Proton pipeline service role repository detail data that's returned by Proton.</p> pub fn account_settings(mut self, input: crate::model::AccountSettings) -> Self { self.account_settings = Some(input); self } /// <p>The Proton pipeline service role repository detail data that's returned by Proton.</p> pub fn set_account_settings( mut self, input: std::option::Option<crate::model::AccountSettings>, ) -> Self { self.account_settings = input; self } /// Consumes the builder and constructs a [`UpdateAccountSettingsOutput`](crate::output::UpdateAccountSettingsOutput) pub fn build(self) -> crate::output::UpdateAccountSettingsOutput { crate::output::UpdateAccountSettingsOutput { account_settings: self.account_settings, } } } } impl UpdateAccountSettingsOutput { /// Creates a new builder-style object to manufacture [`UpdateAccountSettingsOutput`](crate::output::UpdateAccountSettingsOutput) pub fn builder() -> crate::output::update_account_settings_output::Builder { crate::output::update_account_settings_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct 
GetAccountSettingsOutput { /// <p>The Proton pipeline service role detail data that's returned by Proton.</p> pub account_settings: std::option::Option<crate::model::AccountSettings>, } impl GetAccountSettingsOutput { /// <p>The Proton pipeline service role detail data that's returned by Proton.</p> pub fn account_settings(&self) -> std::option::Option<&crate::model::AccountSettings> { self.account_settings.as_ref() } } impl std::fmt::Debug for GetAccountSettingsOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetAccountSettingsOutput"); formatter.field("account_settings", &self.account_settings); formatter.finish() } } /// See [`GetAccountSettingsOutput`](crate::output::GetAccountSettingsOutput) pub mod get_account_settings_output { /// A builder for [`GetAccountSettingsOutput`](crate::output::GetAccountSettingsOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) account_settings: std::option::Option<crate::model::AccountSettings>, } impl Builder { /// <p>The Proton pipeline service role detail data that's returned by Proton.</p> pub fn account_settings(mut self, input: crate::model::AccountSettings) -> Self { self.account_settings = Some(input); self } /// <p>The Proton pipeline service role detail data that's returned by Proton.</p> pub fn set_account_settings( mut self, input: std::option::Option<crate::model::AccountSettings>, ) -> Self { self.account_settings = input; self } /// Consumes the builder and constructs a [`GetAccountSettingsOutput`](crate::output::GetAccountSettingsOutput) pub fn build(self) -> crate::output::GetAccountSettingsOutput { crate::output::GetAccountSettingsOutput { account_settings: self.account_settings, } } } } impl GetAccountSettingsOutput { /// Creates a new builder-style object to manufacture [`GetAccountSettingsOutput`](crate::output::GetAccountSettingsOutput) pub fn 
builder() -> crate::output::get_account_settings_output::Builder { crate::output::get_account_settings_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UntagResourceOutput {} impl std::fmt::Debug for UntagResourceOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UntagResourceOutput"); formatter.finish() } } /// See [`UntagResourceOutput`](crate::output::UntagResourceOutput) pub mod untag_resource_output { /// A builder for [`UntagResourceOutput`](crate::output::UntagResourceOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`UntagResourceOutput`](crate::output::UntagResourceOutput) pub fn build(self) -> crate::output::UntagResourceOutput { crate::output::UntagResourceOutput {} } } } impl UntagResourceOutput { /// Creates a new builder-style object to manufacture [`UntagResourceOutput`](crate::output::UntagResourceOutput) pub fn builder() -> crate::output::untag_resource_output::Builder { crate::output::untag_resource_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct TagResourceOutput {} impl std::fmt::Debug for TagResourceOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("TagResourceOutput"); formatter.finish() } } /// See [`TagResourceOutput`](crate::output::TagResourceOutput) pub mod tag_resource_output { /// A builder for [`TagResourceOutput`](crate::output::TagResourceOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a 
[`TagResourceOutput`](crate::output::TagResourceOutput) pub fn build(self) -> crate::output::TagResourceOutput { crate::output::TagResourceOutput {} } } } impl TagResourceOutput { /// Creates a new builder-style object to manufacture [`TagResourceOutput`](crate::output::TagResourceOutput) pub fn builder() -> crate::output::tag_resource_output::Builder { crate::output::tag_resource_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct NotifyResourceDeploymentStatusChangeOutput {} impl std::fmt::Debug for NotifyResourceDeploymentStatusChangeOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("NotifyResourceDeploymentStatusChangeOutput"); formatter.finish() } } /// See [`NotifyResourceDeploymentStatusChangeOutput`](crate::output::NotifyResourceDeploymentStatusChangeOutput) pub mod notify_resource_deployment_status_change_output { /// A builder for [`NotifyResourceDeploymentStatusChangeOutput`](crate::output::NotifyResourceDeploymentStatusChangeOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder {} impl Builder { /// Consumes the builder and constructs a [`NotifyResourceDeploymentStatusChangeOutput`](crate::output::NotifyResourceDeploymentStatusChangeOutput) pub fn build(self) -> crate::output::NotifyResourceDeploymentStatusChangeOutput { crate::output::NotifyResourceDeploymentStatusChangeOutput {} } } } impl NotifyResourceDeploymentStatusChangeOutput { /// Creates a new builder-style object to manufacture [`NotifyResourceDeploymentStatusChangeOutput`](crate::output::NotifyResourceDeploymentStatusChangeOutput) pub fn builder() -> crate::output::notify_resource_deployment_status_change_output::Builder { crate::output::notify_resource_deployment_status_change_output::Builder::default() } } #[allow(missing_docs)] // 
documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListTagsForResourceOutput { /// <p>An array of resource tags with detail data.</p> pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>, /// <p>A token to indicate the location of the next resource tag in the array of resource tags, after the current requested list of resource /// tags.</p> pub next_token: std::option::Option<std::string::String>, } impl ListTagsForResourceOutput { /// <p>An array of resource tags with detail data.</p> pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> { self.tags.as_deref() } /// <p>A token to indicate the location of the next resource tag in the array of resource tags, after the current requested list of resource /// tags.</p> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } } impl std::fmt::Debug for ListTagsForResourceOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListTagsForResourceOutput"); formatter.field("tags", &self.tags); formatter.field("next_token", &self.next_token); formatter.finish() } } /// See [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput) pub mod list_tags_for_resource_output { /// A builder for [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { /// Appends an item to `tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). 
/// /// <p>An array of resource tags with detail data.</p> pub fn tags(mut self, input: impl Into<crate::model::Tag>) -> Self { let mut v = self.tags.unwrap_or_default(); v.push(input.into()); self.tags = Some(v); self } /// <p>An array of resource tags with detail data.</p> pub fn set_tags( mut self, input: std::option::Option<std::vec::Vec<crate::model::Tag>>, ) -> Self { self.tags = input; self } /// <p>A token to indicate the location of the next resource tag in the array of resource tags, after the current requested list of resource /// tags.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>A token to indicate the location of the next resource tag in the array of resource tags, after the current requested list of resource /// tags.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput) pub fn build(self) -> crate::output::ListTagsForResourceOutput { crate::output::ListTagsForResourceOutput { tags: self.tags, next_token: self.next_token, } } } } impl ListTagsForResourceOutput { /// Creates a new builder-style object to manufacture [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput) pub fn builder() -> crate::output::list_tags_for_resource_output::Builder { crate::output::list_tags_for_resource_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListRepositorySyncDefinitionsOutput { /// <p>A token to indicate the location of the next repository sync definition in the array of repository sync definitions, after the current /// requested list of repository sync definitions.</p> pub next_token: std::option::Option<std::string::String>, /// <p>An array of repository 
sync definitions.</p> pub sync_definitions: std::option::Option<std::vec::Vec<crate::model::RepositorySyncDefinition>>, } impl ListRepositorySyncDefinitionsOutput { /// <p>A token to indicate the location of the next repository sync definition in the array of repository sync definitions, after the current /// requested list of repository sync definitions.</p> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } /// <p>An array of repository sync definitions.</p> pub fn sync_definitions( &self, ) -> std::option::Option<&[crate::model::RepositorySyncDefinition]> { self.sync_definitions.as_deref() } } impl std::fmt::Debug for ListRepositorySyncDefinitionsOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListRepositorySyncDefinitionsOutput"); formatter.field("next_token", &self.next_token); formatter.field("sync_definitions", &self.sync_definitions); formatter.finish() } } /// See [`ListRepositorySyncDefinitionsOutput`](crate::output::ListRepositorySyncDefinitionsOutput) pub mod list_repository_sync_definitions_output { /// A builder for [`ListRepositorySyncDefinitionsOutput`](crate::output::ListRepositorySyncDefinitionsOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) next_token: std::option::Option<std::string::String>, pub(crate) sync_definitions: std::option::Option<std::vec::Vec<crate::model::RepositorySyncDefinition>>, } impl Builder { /// <p>A token to indicate the location of the next repository sync definition in the array of repository sync definitions, after the current /// requested list of repository sync definitions.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>A token to indicate the location of the next repository sync definition in the array of repository sync definitions, 
after the current /// requested list of repository sync definitions.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Appends an item to `sync_definitions`. /// /// To override the contents of this collection use [`set_sync_definitions`](Self::set_sync_definitions). /// /// <p>An array of repository sync definitions.</p> pub fn sync_definitions( mut self, input: impl Into<crate::model::RepositorySyncDefinition>, ) -> Self { let mut v = self.sync_definitions.unwrap_or_default(); v.push(input.into()); self.sync_definitions = Some(v); self } /// <p>An array of repository sync definitions.</p> pub fn set_sync_definitions( mut self, input: std::option::Option<std::vec::Vec<crate::model::RepositorySyncDefinition>>, ) -> Self { self.sync_definitions = input; self } /// Consumes the builder and constructs a [`ListRepositorySyncDefinitionsOutput`](crate::output::ListRepositorySyncDefinitionsOutput) pub fn build(self) -> crate::output::ListRepositorySyncDefinitionsOutput { crate::output::ListRepositorySyncDefinitionsOutput { next_token: self.next_token, sync_definitions: self.sync_definitions, } } } } impl ListRepositorySyncDefinitionsOutput { /// Creates a new builder-style object to manufacture [`ListRepositorySyncDefinitionsOutput`](crate::output::ListRepositorySyncDefinitionsOutput) pub fn builder() -> crate::output::list_repository_sync_definitions_output::Builder { crate::output::list_repository_sync_definitions_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct GetTemplateSyncStatusOutput { /// <p>The details of the last sync that's returned by Proton.</p> pub latest_sync: std::option::Option<crate::model::ResourceSyncAttempt>, /// <p>The details of the last successful sync that's returned by Proton.</p> pub latest_successful_sync: 
std::option::Option<crate::model::ResourceSyncAttempt>, /// <p>The template sync desired state that's returned by Proton.</p> pub desired_state: std::option::Option<crate::model::Revision>, } impl GetTemplateSyncStatusOutput { /// <p>The details of the last sync that's returned by Proton.</p> pub fn latest_sync(&self) -> std::option::Option<&crate::model::ResourceSyncAttempt> { self.latest_sync.as_ref() } /// <p>The details of the last successful sync that's returned by Proton.</p> pub fn latest_successful_sync( &self, ) -> std::option::Option<&crate::model::ResourceSyncAttempt> { self.latest_successful_sync.as_ref() } /// <p>The template sync desired state that's returned by Proton.</p> pub fn desired_state(&self) -> std::option::Option<&crate::model::Revision> { self.desired_state.as_ref() } } impl std::fmt::Debug for GetTemplateSyncStatusOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetTemplateSyncStatusOutput"); formatter.field("latest_sync", &self.latest_sync); formatter.field("latest_successful_sync", &self.latest_successful_sync); formatter.field("desired_state", &self.desired_state); formatter.finish() } } /// See [`GetTemplateSyncStatusOutput`](crate::output::GetTemplateSyncStatusOutput) pub mod get_template_sync_status_output { /// A builder for [`GetTemplateSyncStatusOutput`](crate::output::GetTemplateSyncStatusOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) latest_sync: std::option::Option<crate::model::ResourceSyncAttempt>, pub(crate) latest_successful_sync: std::option::Option<crate::model::ResourceSyncAttempt>, pub(crate) desired_state: std::option::Option<crate::model::Revision>, } impl Builder { /// <p>The details of the last sync that's returned by Proton.</p> pub fn latest_sync(mut self, input: crate::model::ResourceSyncAttempt) -> Self { self.latest_sync = Some(input); self } /// 
<p>The details of the last sync that's returned by Proton.</p> pub fn set_latest_sync( mut self, input: std::option::Option<crate::model::ResourceSyncAttempt>, ) -> Self { self.latest_sync = input; self } /// <p>The details of the last successful sync that's returned by Proton.</p> pub fn latest_successful_sync(mut self, input: crate::model::ResourceSyncAttempt) -> Self { self.latest_successful_sync = Some(input); self } /// <p>The details of the last successful sync that's returned by Proton.</p> pub fn set_latest_successful_sync( mut self, input: std::option::Option<crate::model::ResourceSyncAttempt>, ) -> Self { self.latest_successful_sync = input; self } /// <p>The template sync desired state that's returned by Proton.</p> pub fn desired_state(mut self, input: crate::model::Revision) -> Self { self.desired_state = Some(input); self } /// <p>The template sync desired state that's returned by Proton.</p> pub fn set_desired_state( mut self, input: std::option::Option<crate::model::Revision>, ) -> Self { self.desired_state = input; self } /// Consumes the builder and constructs a [`GetTemplateSyncStatusOutput`](crate::output::GetTemplateSyncStatusOutput) pub fn build(self) -> crate::output::GetTemplateSyncStatusOutput { crate::output::GetTemplateSyncStatusOutput { latest_sync: self.latest_sync, latest_successful_sync: self.latest_successful_sync, desired_state: self.desired_state, } } } } impl GetTemplateSyncStatusOutput { /// Creates a new builder-style object to manufacture [`GetTemplateSyncStatusOutput`](crate::output::GetTemplateSyncStatusOutput) pub fn builder() -> crate::output::get_template_sync_status_output::Builder { crate::output::get_template_sync_status_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct GetRepositorySyncStatusOutput { /// <p>The repository sync status detail data that's returned by Proton.</p> pub latest_sync: 
std::option::Option<crate::model::RepositorySyncAttempt>, } impl GetRepositorySyncStatusOutput { /// <p>The repository sync status detail data that's returned by Proton.</p> pub fn latest_sync(&self) -> std::option::Option<&crate::model::RepositorySyncAttempt> { self.latest_sync.as_ref() } } impl std::fmt::Debug for GetRepositorySyncStatusOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetRepositorySyncStatusOutput"); formatter.field("latest_sync", &self.latest_sync); formatter.finish() } } /// See [`GetRepositorySyncStatusOutput`](crate::output::GetRepositorySyncStatusOutput) pub mod get_repository_sync_status_output { /// A builder for [`GetRepositorySyncStatusOutput`](crate::output::GetRepositorySyncStatusOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) latest_sync: std::option::Option<crate::model::RepositorySyncAttempt>, } impl Builder { /// <p>The repository sync status detail data that's returned by Proton.</p> pub fn latest_sync(mut self, input: crate::model::RepositorySyncAttempt) -> Self { self.latest_sync = Some(input); self } /// <p>The repository sync status detail data that's returned by Proton.</p> pub fn set_latest_sync( mut self, input: std::option::Option<crate::model::RepositorySyncAttempt>, ) -> Self { self.latest_sync = input; self } /// Consumes the builder and constructs a [`GetRepositorySyncStatusOutput`](crate::output::GetRepositorySyncStatusOutput) pub fn build(self) -> crate::output::GetRepositorySyncStatusOutput { crate::output::GetRepositorySyncStatusOutput { latest_sync: self.latest_sync, } } } } impl GetRepositorySyncStatusOutput { /// Creates a new builder-style object to manufacture [`GetRepositorySyncStatusOutput`](crate::output::GetRepositorySyncStatusOutput) pub fn builder() -> crate::output::get_repository_sync_status_output::Builder { 
crate::output::get_repository_sync_status_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CancelServicePipelineDeploymentOutput { /// <p>The service pipeline detail data that's returned by Proton.</p> pub pipeline: std::option::Option<crate::model::ServicePipeline>, } impl CancelServicePipelineDeploymentOutput { /// <p>The service pipeline detail data that's returned by Proton.</p> pub fn pipeline(&self) -> std::option::Option<&crate::model::ServicePipeline> { self.pipeline.as_ref() } } impl std::fmt::Debug for CancelServicePipelineDeploymentOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CancelServicePipelineDeploymentOutput"); formatter.field("pipeline", &self.pipeline); formatter.finish() } } /// See [`CancelServicePipelineDeploymentOutput`](crate::output::CancelServicePipelineDeploymentOutput) pub mod cancel_service_pipeline_deployment_output { /// A builder for [`CancelServicePipelineDeploymentOutput`](crate::output::CancelServicePipelineDeploymentOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) pipeline: std::option::Option<crate::model::ServicePipeline>, } impl Builder { /// <p>The service pipeline detail data that's returned by Proton.</p> pub fn pipeline(mut self, input: crate::model::ServicePipeline) -> Self { self.pipeline = Some(input); self } /// <p>The service pipeline detail data that's returned by Proton.</p> pub fn set_pipeline( mut self, input: std::option::Option<crate::model::ServicePipeline>, ) -> Self { self.pipeline = input; self } /// Consumes the builder and constructs a [`CancelServicePipelineDeploymentOutput`](crate::output::CancelServicePipelineDeploymentOutput) pub fn build(self) -> crate::output::CancelServicePipelineDeploymentOutput { 
crate::output::CancelServicePipelineDeploymentOutput { pipeline: self.pipeline, } } } } impl CancelServicePipelineDeploymentOutput { /// Creates a new builder-style object to manufacture [`CancelServicePipelineDeploymentOutput`](crate::output::CancelServicePipelineDeploymentOutput) pub fn builder() -> crate::output::cancel_service_pipeline_deployment_output::Builder { crate::output::cancel_service_pipeline_deployment_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CancelServiceInstanceDeploymentOutput { /// <p>The service instance summary data that's returned by Proton.</p> pub service_instance: std::option::Option<crate::model::ServiceInstance>, } impl CancelServiceInstanceDeploymentOutput { /// <p>The service instance summary data that's returned by Proton.</p> pub fn service_instance(&self) -> std::option::Option<&crate::model::ServiceInstance> { self.service_instance.as_ref() } } impl std::fmt::Debug for CancelServiceInstanceDeploymentOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CancelServiceInstanceDeploymentOutput"); formatter.field("service_instance", &self.service_instance); formatter.finish() } } /// See [`CancelServiceInstanceDeploymentOutput`](crate::output::CancelServiceInstanceDeploymentOutput) pub mod cancel_service_instance_deployment_output { /// A builder for [`CancelServiceInstanceDeploymentOutput`](crate::output::CancelServiceInstanceDeploymentOutput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) service_instance: std::option::Option<crate::model::ServiceInstance>, } impl Builder { /// <p>The service instance summary data that's returned by Proton.</p> pub fn service_instance(mut self, input: crate::model::ServiceInstance) -> Self { self.service_instance = Some(input); 
self } /// <p>The service instance summary data that's returned by Proton.</p> pub fn set_service_instance( mut self, input: std::option::Option<crate::model::ServiceInstance>, ) -> Self { self.service_instance = input; self } /// Consumes the builder and constructs a [`CancelServiceInstanceDeploymentOutput`](crate::output::CancelServiceInstanceDeploymentOutput) pub fn build(self) -> crate::output::CancelServiceInstanceDeploymentOutput { crate::output::CancelServiceInstanceDeploymentOutput { service_instance: self.service_instance, } } } } impl CancelServiceInstanceDeploymentOutput { /// Creates a new builder-style object to manufacture [`CancelServiceInstanceDeploymentOutput`](crate::output::CancelServiceInstanceDeploymentOutput) pub fn builder() -> crate::output::cancel_service_instance_deployment_output::Builder { crate::output::cancel_service_instance_deployment_output::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CancelEnvironmentDeploymentOutput { /// <p>The environment summary data that's returned by Proton.</p> pub environment: std::option::Option<crate::model::Environment>, } impl CancelEnvironmentDeploymentOutput { /// <p>The environment summary data that's returned by Proton.</p> pub fn environment(&self) -> std::option::Option<&crate::model::Environment> { self.environment.as_ref() } } impl std::fmt::Debug for CancelEnvironmentDeploymentOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CancelEnvironmentDeploymentOutput"); formatter.field("environment", &self.environment); formatter.finish() } } /// See [`CancelEnvironmentDeploymentOutput`](crate::output::CancelEnvironmentDeploymentOutput) pub mod cancel_environment_deployment_output { /// A builder for [`CancelEnvironmentDeploymentOutput`](crate::output::CancelEnvironmentDeploymentOutput) #[non_exhaustive] 
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) environment: std::option::Option<crate::model::Environment>, } impl Builder { /// <p>The environment summary data that's returned by Proton.</p> pub fn environment(mut self, input: crate::model::Environment) -> Self { self.environment = Some(input); self } /// <p>The environment summary data that's returned by Proton.</p> pub fn set_environment( mut self, input: std::option::Option<crate::model::Environment>, ) -> Self { self.environment = input; self } /// Consumes the builder and constructs a [`CancelEnvironmentDeploymentOutput`](crate::output::CancelEnvironmentDeploymentOutput) pub fn build(self) -> crate::output::CancelEnvironmentDeploymentOutput { crate::output::CancelEnvironmentDeploymentOutput { environment: self.environment, } } } } impl CancelEnvironmentDeploymentOutput { /// Creates a new builder-style object to manufacture [`CancelEnvironmentDeploymentOutput`](crate::output::CancelEnvironmentDeploymentOutput) pub fn builder() -> crate::output::cancel_environment_deployment_output::Builder { crate::output::cancel_environment_deployment_output::Builder::default() } }
} impl ListServicesOutput { /// <p>A token to indicate the location of the next service in the array of services, after the current requested list of services.</p>
d4c798575877_create_favorites.py
"""empty message Revision ID: d4c798575877 Revises: 1daa601d3ae5 Create Date: 2018-05-09 10:28:22.931442 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'd4c798575877' down_revision = '1daa601d3ae5' branch_labels = None depends_on = None def upgrade(): op.create_table('favorites', sa.Column('updated_at', sa.DateTime(timezone=True), nullable=False), sa.Column('created_at', sa.DateTime(timezone=True), nullable=False), sa.Column('id', sa.Integer(), nullable=False), sa.Column('object_type', sa.Unicode(length=255), nullable=False), sa.Column('object_id', sa.Integer(), nullable=False), sa.Column('user_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['user_id'], ['users.id'], ), sa.PrimaryKeyConstraint('id') ) def
(): op.drop_table('favorites')
downgrade
mod.rs
pub mod ap; pub(crate) mod communication_interface; pub mod component; pub(crate) mod core; pub mod dp; pub mod memory;
ApInformation, ArmChipInfo, ArmCommunicationInterface, DAPAccess, DapError, MemoryApInformation, }; pub use communication_interface::{PortType, Register}; pub use swo::{SwoAccess, SwoConfig, SwoMode}; pub use self::core::m0; pub use self::core::m33; pub use self::core::m4; pub use self::core::CortexDump; pub use communication_interface::ArmProbeInterface;
pub mod swo; pub use communication_interface::{
script.py
import sys, clr import ConfigParser from os.path import expanduser # Set system path home = expanduser("~") cfgfile = open(home + "\\STVTools.ini", 'r') config = ConfigParser.ConfigParser() config.read(home + "\\STVTools.ini")
syspath2 = config.get('SysDir','SecondaryPackage') sys.path.append(syspath2) import Selection clr.AddReference('System') from Autodesk.Revit.DB import Document, FilteredElementCollector, GraphicsStyle, Transaction, BuiltInCategory,\ RevitLinkInstance, UV, XYZ, SpatialElementBoundaryOptions, CurveArray, ElementId, View, RevitLinkType, WorksetTable,\ Workset, FilteredWorksetCollector, WorksetKind, RevitLinkType, RevitLinkInstance, View3D, ViewType,ElementClassFilter,\ ViewFamilyType, ViewFamily, BuiltInParameter, IndependentTag, Reference, TagMode, TagOrientation from pyrevit import revit, DB, forms clr. AddReferenceByPartialName('PresentationCore') clr.AddReferenceByPartialName('PresentationFramework') clr.AddReferenceByPartialName('System.Windows.Forms') import System.Windows.Forms uidoc = __revit__.ActiveUIDocument doc = __revit__.ActiveUIDocument.Document t = Transaction(doc, 'Tag Selected') t.Start() selection = Selection.get_selected_elements(doc) for a in selection: location = a.Location IndependentTag.Create(doc, doc.ActiveView.Id, Reference(a), True, TagMode.TM_ADDBY_MULTICATEGORY, TagOrientation.Horizontal, location.Point) print(location.Point) t.Commit()
# Master Path syspath1 = config.get('SysDir','MasterPackage') sys.path.append(syspath1) # Built Path
exceptions.py
from ..common.exceptions import DoesNotExist class
(DoesNotExist): entity_name = "User" class EmailAlreadyExists(Exception): def __init__(self, email: str) -> None: super().__init__(f"Email already exists: {email!r}") class LoginFailed(Exception): pass
UserDoesNotExist
outofband_sdk_steps.go
/* Copyright SecureKey Technologies Inc. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package outofband import ( "encoding/base64" "errors" "fmt" "strings" "time" "github.com/cucumber/godog" "github.com/google/uuid" didexClient "github.com/hyperledger/aries-framework-go/pkg/client/didexchange" "github.com/hyperledger/aries-framework-go/pkg/client/outofband" "github.com/hyperledger/aries-framework-go/pkg/client/outofbandv2" "github.com/hyperledger/aries-framework-go/pkg/didcomm/common/service" "github.com/hyperledger/aries-framework-go/pkg/didcomm/protocol/decorator" "github.com/hyperledger/aries-framework-go/pkg/didcomm/protocol/issuecredential" oobv2 "github.com/hyperledger/aries-framework-go/pkg/didcomm/protocol/outofbandv2" "github.com/hyperledger/aries-framework-go/pkg/didcomm/protocol/presentproof" "github.com/hyperledger/aries-framework-go/pkg/didcomm/transport" "github.com/hyperledger/aries-framework-go/test/bdd/pkg/context" bddDIDExchange "github.com/hyperledger/aries-framework-go/test/bdd/pkg/didexchange" "github.com/hyperledger/aries-framework-go/test/bdd/pkg/didresolver" bddIssueCred "github.com/hyperledger/aries-framework-go/test/bdd/pkg/issuecredential" ) // SDKSteps for the out-of-band protocol. type SDKSteps struct { context *context.BDDContext pendingInvitations map[string]*outofband.Invitation pendingV2Invites map[string]*oobv2.Invitation connectionIDs map[string]string bddDIDExchSDK *bddDIDExchange.SDKSteps bddIssueCredSDK *bddIssueCred.SDKSteps nextAction map[string]chan interface{} credName string accept string } // NewOutOfBandSDKSteps returns the out-of-band protocol's BDD steps using the SDK binding. 
func NewOutOfBandSDKSteps() *SDKSteps { return &SDKSteps{ pendingInvitations: make(map[string]*outofband.Invitation), pendingV2Invites: make(map[string]*oobv2.Invitation), connectionIDs: make(map[string]string), bddDIDExchSDK: bddDIDExchange.NewDIDExchangeSDKSteps(), bddIssueCredSDK: bddIssueCred.NewIssueCredentialSDKSteps(), nextAction: make(map[string]chan interface{}), } } // SetContext is called before every scenario is run with a fresh new context. func (sdk *SDKSteps) SetContext(ctx *context.BDDContext) { sdk.context = ctx sdk.bddDIDExchSDK = bddDIDExchange.NewDIDExchangeSDKSteps() sdk.bddDIDExchSDK.SetContext(ctx) sdk.bddIssueCredSDK = bddIssueCred.NewIssueCredentialSDKSteps() sdk.bddIssueCredSDK.SetContext(ctx) } func (sdk *SDKSteps) scenario(accept string) error { sdk.accept = accept return nil } // RegisterSteps registers the BDD steps on the suite. func (sdk *SDKSteps) RegisterSteps(suite *godog.Suite) { suite.Step(`^"([^"]*)" creates an out-of-band invitation$`, sdk.createOOBInvitation) suite.Step(`^options ""([^"]*)""$`, sdk.scenario) suite.Step( `^"([^"]*)" sends the invitation to "([^"]*)" through an out-of-band channel$`, sdk.sendInvitationThruOOBChannel) suite.Step(`^"([^"]*)" accepts the invitation and connects with "([^"]*)"$`, sdk.acceptInvitationAndConnect) suite.Step(`^"([^"]*)" and "([^"]*)" confirm their connection is "([^"]*)"$`, sdk.ConfirmConnections) suite.Step(`^"([^"]*)" creates an out-of-band invitation with a public DID$`, sdk.createOOBInvitationWithPubDID) suite.Step(`^"([^"]*)" connects with "([^"]*)" using the invitation$`, sdk.connectAndConfirmConnection) suite.Step(`^"([^"]*)" creates another out-of-band invitation with the same public DID$`, sdk.CreateInvitationWithDID) suite.Step(`^"([^"]*)" accepts the invitation from "([^"]*)" and both agents opt to reuse their connections$`, sdk.acceptInvitationAndConnectWithReuse) suite.Step(`^"([^"]*)" and "([^"]*)" confirm they reused their connections$`, sdk.confirmConnectionReuse) 
suite.Step(`^"([^"]*)" creates an out-of-band invitation with an attached offer-credential message$`, sdk.createOOBInvitationWithOfferCredential) suite.Step(`^"([^"]*)" accepts the offer-credential message from "([^"]*)"$`, sdk.acceptCredentialOffer) suite.Step(`^"([^"]*)" is issued the credential$`, sdk.confirmCredentialReceived) suite.Step( `^"([^"]*)" creates another out-of-band invitation with the same public DID and an attached `+ `offer-credential message$`, sdk.createOOBInvitationReusePubDIDAndOfferCredential) suite.Step(`^"([^"]*)" creates an out-of-band-v2 invitation with embedded present proof v3 request`+ ` as target service$`, sdk.createOOBV2WithPresentProof) suite.Step(`^"([^"]*)" creates an out-of-band-v2 invitation$`, sdk.createOOBV2) suite.Step(`^"([^"]*)" sends the request to "([^"]*)" and he accepts it by processing both OOBv2 and the `+ `embedded present proof v3 request$`, sdk.acceptOOBV2Invitation) suite.Step(`^"([^"]*)" sends the request to "([^"]*)", which accepts it$`, sdk.acceptOOBV2Invitation) } func (sdk *SDKSteps) createOOBInvitation(agentID string) error { err := sdk.registerClients(agentID) if err != nil { return fmt.Errorf("failed to register outofband client : %w", err) } inv, err := sdk.newInvitation(agentID) if err != nil { return err } sdk.pendingInvitations[agentID] = inv return nil } func (sdk *SDKSteps) createOOBInvitationWithPubDID(agentID string) error { err := sdk.registerClients(agentID) if err != nil { return fmt.Errorf("'%s' failed to create an OOB client: %w", agentID, err) } err = didresolver.CreateDIDDocument(sdk.context, agentID, "") if err != nil { return fmt.Errorf("'%s' failed to create a public DID: %w", agentID, err) } err = sdk.bddDIDExchSDK.WaitForPublicDID(agentID, 10) if err != nil { return fmt.Errorf("'%s' timed out waiting for their public DID to be ready: %w", agentID, err) } err = sdk.CreateInvitationWithDID(agentID) if err != nil { return fmt.Errorf("'%s' failed to create invitation with public DID: %w", 
agentID, err) } return nil } func (sdk *SDKSteps) createOOBInvitationWithOfferCredential(agent string) error { err := sdk.registerClients(agent) if err != nil { return fmt.Errorf("'%s' failed to register oob client: %w", agent, err) } inv, err := sdk.newInvitation(agent, &issuecredential.OfferCredentialV2{ Type: issuecredential.OfferCredentialMsgTypeV2, Comment: "test", }) if err != nil { return fmt.Errorf("'%s' failed to create invitation: %w", agent, err) } sdk.pendingInvitations[agent] = inv return nil } func (sdk *SDKSteps) createOOBInvitationReusePubDIDAndOfferCredential(agent string) error { err := sdk.registerClients(agent) if err != nil { return fmt.Errorf("'%s' failed to create an OOB client: %w", agent, err) } did, found := sdk.context.PublicDIDDocs[agent] if !found { return fmt.Errorf("no public did found for %s", agent) } client, found := sdk.context.OutOfBandClients[agent] if !found { return fmt.Errorf("no oob client found for %s", agent) } inv, err := client.CreateInvitation( []interface{}{did.ID}, outofband.WithLabel(agent), outofband.WithAttachments(&decorator.Attachment{ ID: uuid.New().String(), MimeType: "application/json", Data: decorator.AttachmentData{ JSON: &issuecredential.OfferCredentialV2{ Type: issuecredential.OfferCredentialMsgTypeV2, Comment: "test", }, }, }), ) if err != nil { return fmt.Errorf("failed to create oob invitation for %s : %w", agent, err) } sdk.pendingInvitations[agent] = inv return nil } func (sdk *SDKSteps) acceptCredentialOffer(holder, issuer string) error { err := sdk.bddIssueCredSDK.AcceptOffer(holder) if err != nil { return fmt.Errorf("'%s' failed to accept credential offer: %w", holder, err) } err = sdk.bddIssueCredSDK.AcceptRequest(issuer) if err != nil { return fmt.Errorf("'%s' failed to accept the request for credential: %w", issuer, err) } sdk.credName = uuid.New().String() err = sdk.bddIssueCredSDK.AcceptCredential(holder, sdk.credName, false) if err != nil { return fmt.Errorf("'%s' failed to accept the 
credential: %w", holder, err) } return nil } func (sdk *SDKSteps) confirmCredentialReceived(holder string) error { err := sdk.bddIssueCredSDK.CheckCredential(holder, sdk.credName) if err != nil { return fmt.Errorf( "'%s' failed to confirm they are holding a credential with name '%s': %w", holder, sdk.credName, err, ) } return nil } func (sdk *SDKSteps) sendInvitationThruOOBChannel(sender, receiver string) error { err := sdk.registerClients(sender, receiver) if err != nil { return fmt.Errorf("failed to register framework clients : %w", err) } inv, found := sdk.pendingInvitations[sender] if !found { return fmt.Errorf("no invitation found for %s", sender) } sdk.pendingInvitations[receiver] = inv return nil } func (sdk *SDKSteps) connectAndConfirmConnection(receiver, sender string) error { err := sdk.registerClients(receiver, sender) if err != nil { return fmt.Errorf("'%s' and '%s' failed to create an OOB client: %w", receiver, sender, err) } err = sdk.sendInvitationThruOOBChannel(sender, receiver) if err != nil { return fmt.Errorf("'%s' failed to send invitation to '%s': %w", sender, receiver, err) } err = sdk.acceptInvitationAndConnect(receiver, sender) if err != nil { return fmt.Errorf("'%s' and '%s' failed to connect: %w", receiver, sender, err) } err = sdk.ConfirmConnections(sender, receiver, "completed") if err != nil { return fmt.Errorf( "failed to confirm status 'completed' for connection b/w '%s' and '%s': %w", receiver, sender, err, ) } return nil } func (sdk *SDKSteps) acceptInvitationAndConnect(receiverID, senderID string) error { invitation, found := sdk.pendingInvitations[receiverID] if !found { return fmt.Errorf("no pending invitations found for %s", receiverID) } return sdk.acceptAndConnect(receiverID, senderID, func(client *outofband.Client) error { var err error sdk.connectionIDs[receiverID], err = client.AcceptInvitation(invitation, receiverID) if err != nil { return fmt.Errorf("%s failed to accept out-of-band invitation : %w", receiverID, err) } 
return nil }) } func (sdk *SDKSteps) acceptInvitationAndConnectWithReuse(receiver, sender string) error { invitation, found := sdk.pendingInvitations[receiver] if !found { return fmt.Errorf("no pending invitations found for %s", receiver) } var pubDID string for i := range invitation.Services { if s, ok := invitation.Services[i].(string); ok { pubDID = s break } } if pubDID == "" { return fmt.Errorf("no public DID in the invitation from '%s'", sender) } var err error sdk.connectionIDs[receiver], err = sdk.context.OutOfBandClients[receiver].AcceptInvitation( invitation, receiver, outofband.ReuseConnection(pubDID), ) if err != nil { return fmt.Errorf("%s failed to accept out-of-band invitation : %w", receiver, err) } sdk.ApproveHandshakeReuse(sender, nil) return nil } func (sdk *SDKSteps) acceptAndConnect( // nolint:gocyclo receiverID, senderID string, accept func(receiver *outofband.Client) error) error { receiver, found := sdk.context.OutOfBandClients[receiverID] if !found { return fmt.Errorf("no registered outofband client for %s", receiverID) } err := sdk.bddDIDExchSDK.RegisterPostMsgEvent(strings.Join([]string{senderID, receiverID}, ","), "completed") if err != nil { return fmt.Errorf("failed to register agents for didexchange post msg events : %w", err) } states := make(chan service.StateMsg) err = sdk.context.DIDExchangeClients[senderID].RegisterMsgEvent(states) if err != nil { return err } err = accept(receiver) if err != nil { return fmt.Errorf("failed to accept invite: %w", err) } var event service.StateMsg select { case event = <-states: err = sdk.context.DIDExchangeClients[senderID].UnregisterMsgEvent(states) if err != nil { return err } case <-time.After(time.Second): return fmt.Errorf("'%s' timed out waiting for state events", senderID) } conn, err := sdk.context.DIDExchangeClients[senderID].GetConnection(event.Properties.All()["connectionID"].(string)) if err != nil { return err } if strings.TrimSpace(conn.TheirLabel) == "" { return errors.New("their 
label is empty") } err = sdk.bddDIDExchSDK.ApproveRequest(senderID) if err != nil { return fmt.Errorf("failed to approve invitation for %s : %w", senderID, err) } return nil } // ConfirmConnections confirms the connection between the sender and receiver is at the given status. func (sdk *SDKSteps) ConfirmConnections(senderID, receiverID, status string) error { err := sdk.bddDIDExchSDK.WaitForPostEvent(strings.Join([]string{senderID, receiverID}, ","), status) if err != nil { return fmt.Errorf("failed to wait for post events : %w", err) } connSender, err := sdk.GetConnection(senderID, receiverID) if err != nil { return err } if connSender.State != status { return fmt.Errorf( "%s's connection with %s is in state %s but expected %s", senderID, receiverID, connSender.State, status, ) } connReceiver, err := sdk.GetConnection(receiverID, senderID) if err != nil { return err } if connReceiver.State != status { return fmt.Errorf( "%s's connection with %s is in state %s but expected %s", receiverID, senderID, connSender.State, status, ) } return nil } func (sdk *SDKSteps) confirmConnectionReuse(alice, bob string) error { err := sdk.verifyConnectionsCount(alice, bob, 1) if err != nil { return err } return sdk.verifyConnectionsCount(bob, alice, 1) } func (sdk *SDKSteps) verifyConnectionsCount(agentA, agentB string, expected int) error { agentAClient, err := didexClient.New(sdk.context.AgentCtx[agentA]) if err != nil { return fmt.Errorf("failed to create didexchange client for %s: %w", agentA, err) } records, err := agentAClient.QueryConnections(&didexClient.QueryConnectionsParams{}) if err != nil { return fmt.Errorf("failed to fetch %s'sconnection records: %w", agentA, err) } count := 0 for i := range records { r := records[i] if r.TheirLabel == agentB { count++ } } if count != expected { return fmt.Errorf("'%s' expected %d connection record with '%s' but has %d", agentA, expected, agentB, count) } return nil } // GetConnection returns connection between agents. 
func (sdk *SDKSteps) GetConnection(from, to string) (*didexClient.Connection, error) { connections, err := sdk.context.DIDExchangeClients[from].QueryConnections(&didexClient.QueryConnectionsParams{}) if err != nil { return nil, fmt.Errorf("%s failed to fetch their connections : %w", from, err) } for _, c := range connections { if c.TheirLabel == to { return c, nil } } return nil, fmt.Errorf("no connection %s -> %s", from, to) } func (sdk *SDKSteps) registerClients(agentIDs ...string) error { for _, agent := range agentIDs { err := sdk.CreateClients(agent) if err != nil { return fmt.Errorf("'%s' failed to create an outofband client: %w", agent, err) } err = sdk.bddDIDExchSDK.CreateDIDExchangeClient(agent) if err != nil { return fmt.Errorf("'%s' failed to create new didexchange client: %w", agent, err) } err = sdk.bddIssueCredSDK.CreateClient(agent) if err != nil { return fmt.Errorf("'%s' failed to create new issuecredential client: %w", agent, err) } } return nil } func (sdk *SDKSteps) newInvitation(agentID string, attachments ...interface{}) (*outofband.Invitation, error) { agent, found := sdk.context.OutOfBandClients[agentID] if !found { return nil, fmt.Errorf("no agent for %s was found", agentID) } var attachDecorators []*decorator.Attachment for i := range attachments { attachDecorators = append(attachDecorators, &decorator.Attachment{ ID: uuid.New().String(), Data: decorator.AttachmentData{ JSON: attachments[i], }, }) } opts := []outofband.MessageOption{ outofband.WithLabel(agentID), outofband.WithAttachments(attachDecorators...), } if sdk.accept != "" { opts = append(opts, outofband.WithAccept(sdk.accept)) } inv, err := agent.CreateInvitation( nil, opts..., ) if err != nil { return nil, fmt.Errorf("failed to create invitation for %s : %w", agentID, err) } return inv, nil } // CreateClients creates out-of-band clients for the given agents. // 'agents' is a comma-separated string of agent identifiers. 
// The out-of-band clients are registered in the BDD context under their respective identifier. func (sdk *SDKSteps) CreateClients(agents string) error { for _, agent := range strings.Split(agents, ",") { if _, exists := sdk.context.OutOfBandClients[agent]; exists { continue } client, err := outofband.New(sdk.context.AgentCtx[agent]) if err != nil { return fmt.Errorf("failed to create new oob client for %s : %w", agent, err) } actions := make(chan service.DIDCommAction) err = client.RegisterActionEvent(actions) if err != nil { return fmt.Errorf("failed to register %s to listen for oob action events : %w", agent, err) } sdk.context.OutOfBandClients[agent] = client sdk.nextAction[agent] = make(chan interface{}) go sdk.autoExecuteActionEvent(agent, actions) } return nil } // CreateOOBV2Clients creates out-of-band v2 clients for the given agents. // 'agents' is a comma-separated string of agent identifiers. // The out-of-band clients are registered in the BDD context under their respective identifier. func (sdk *SDKSteps) CreateOOBV2Clients(agents string) error { for _, agent := range strings.Split(agents, ",") { if _, exists := sdk.context.OutOfBandV2Clients[agent]; exists { continue } clientV2, err := outofbandv2.New(sdk.context.AgentCtx[agent]) if err != nil { return fmt.Errorf("failed to create new oobv2 client for %s : %w", agent, err) } sdk.context.OutOfBandV2Clients[agent] = clientV2 } return nil } func (sdk *SDKSteps) autoExecuteActionEvent(agentID string, ch <-chan service.DIDCommAction) { for e := range ch { // waits for the signal to approve this event e.Continue(<-sdk.nextAction[agentID]) } } // ApproveOOBInvitation approves an out-of-band request for this agent. func (sdk *SDKSteps) ApproveOOBInvitation(agentID string, args interface{}) { // sends the signal which automatically handles events sdk.nextAction[agentID] <- args } // ApproveHandshakeReuse makes the given agent approve a handshake-reuse message. 
func (sdk *SDKSteps) ApproveHandshakeReuse(agentID string, args interface{}) { sdk.nextAction[agentID] <- args } // ApproveDIDExchangeRequest approves a didexchange request for this agent. func (sdk *SDKSteps) ApproveDIDExchangeRequest(agentID string) error { return sdk.bddDIDExchSDK.ApproveRequest(agentID) } // CreateInvitationWithDID creates an out-of-band request message and sets its 'service' to a single // entry containing a public DID registered in the BDD context. // The request is registered internally. func (sdk *SDKSteps) CreateInvitationWithDID(agent string) error { did, found := sdk.context.PublicDIDDocs[agent] if !found { return fmt.Errorf("no public did found for %s", agent) } client, found := sdk.context.OutOfBandClients[agent] if !found { return fmt.Errorf("no oob client found for %s", agent) } mtps := sdk.context.AgentCtx[agent].MediaTypeProfiles() didCommV2 := false for _, mtp := range mtps { switch mtp { case transport.MediaTypeDIDCommV2Profile, transport.MediaTypeAIP2RFC0587Profile: didCommV2 = true } if didCommV2 { break } } if !didCommV2 && len(mtps) == 0 { mtps = []string{transport.MediaTypeAIP2RFC0019Profile} } inv, err := client.CreateInvitation( []interface{}{did.ID}, outofband.WithLabel(agent), outofband.WithAccept(mtps...), ) if err != nil { return fmt.Errorf("failed to create oob invitation for %s : %w", agent, err) } sdk.pendingInvitations[agent] = inv return nil } // ReceiveInvitation makes 'to' accept a pre-registered out-of-band invitation created by 'from'. 
func (sdk *SDKSteps) ReceiveInvitation(to, from string) error { inv, found := sdk.pendingInvitations[from] if !found { return fmt.Errorf("%s does not have a pending request", from) } receiver, found := sdk.context.OutOfBandClients[to] if !found { return fmt.Errorf("%s does not have a registered oob client", to) } connID, err := receiver.AcceptInvitation(inv, to) if err != nil { return fmt.Errorf("%s failed to accept invitation from %s : %w", to, from, err) } sdk.connectionIDs[to] = connID return nil } // ConnectAll connects all agents to each other. // 'agents' is a comma-separated string of agent identifiers. func (sdk *SDKSteps) ConnectAll(agents string) error { err := sdk.CreateClients(agents) if err != nil { return err } err = sdk.bddDIDExchSDK.CreateDIDExchangeClient(agents) if err != nil { return err } all := strings.Split(agents, ",") for i := 0; i < len(all)-1; i++ { inviter := all[i] err = sdk.createOOBInvitation(inviter) if err != nil { return err } for j := i + 1; j < len(all); j++ { invitee := all[j] // send outofband invitation to invitee err = sdk.sendInvitationThruOOBChannel(inviter, invitee) if err != nil { return err } // invitee accepts outofband invitation err = sdk.acceptInvitationAndConnect(invitee, inviter) if err != nil { return err } err = sdk.ConfirmConnections(inviter, invitee, "completed") if err != nil { return err } } } return nil } const ( vpStr = ` { "@context": [ "https://www.w3.org/2018/credentials/v1", "https://www.w3.org/2018/credentials/examples/v1" ], "id": "urn:uuid:3978344f-8596-4c3a-a978-8fcaba3903c5", "type": [ "VerifiablePresentation", "UniversityDegreeCredential" ], "verifiableCredential": [ { "@context": [ "https://www.w3.org/2018/credentials/v1", "https://www.w3.org/2018/credentials/examples/v1" ], "credentialSchema": [], "credentialSubject": { "degree": { "type": "BachelorDegree", "university": "MIT" }, "id": "%s", "name": "Jayden Doe", "spouse": "did:example:c276e12ec21ebfeb1f712ebc6f1" }, "expirationDate": 
"2025-01-01T19:23:24Z", "id": "http://example.edu/credentials/1872", "issuanceDate": "2010-01-01T19:23:24Z", "issuer": { "id": "did:example:76e12ec712ebc6f1c221ebfeb1f", "name": "Example University" }, "referenceNumber": 83294847, "type": [ "VerifiableCredential", "UniversityDegreeCredential" ] } ], "holder": "%s" } ` ppfGoal = "present-proof/3.0/request-presentation" ppfGoalCode = "https://didcomm.org/present-proof/3.0/request-presentation" ) func (sdk *SDKSteps) createOOBV2WithPresentProof(agent1 string) error { err := sdk.CreateOOBV2Clients(agent1) if err != nil { return fmt.Errorf("send OOBV2 failed to register %s client: %w", agent1, err) } oobv2Client1, ok := sdk.context.OutOfBandV2Clients[agent1] if !ok { return fmt.Errorf("missing oobv2 client for %s", agent1) } agentDIDDoc, ok := sdk.context.PublicDIDDocs[agent1] if !ok { return fmt.Errorf("oobv2: missing DID Doc for %s", agent1) } ppfv3Req := service.NewDIDCommMsgMap(presentproof.PresentationV3{ Type: presentproof.RequestPresentationMsgTypeV3, Attachments: []decorator.AttachmentV2{{ Data: decorator.AttachmentData{ Base64: base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(vpStr, agentDIDDoc.ID, agentDIDDoc.ID))), }, }}, }) ppfv3Attachment := []*decorator.AttachmentV2{{ ID: uuid.New().String(), Description: "PresentProof V3 propose presentation request", FileName: "presentproofv3.json", MediaType: "application/json", LastModTime: time.Time{}, Data: decorator.AttachmentData{ JSON: ppfv3Req, }, }} inv, err := oobv2Client1.CreateInvitation( outofbandv2.WithGoal(ppfGoal, ppfGoalCode), outofbandv2.WithAttachments(ppfv3Attachment...), outofbandv2.WithFrom(agentDIDDoc.ID), ) if err != nil { return fmt.Errorf("failed to create invitation: %w", err)
} sdk.pendingV2Invites[agent1] = inv return nil } func (sdk *SDKSteps) createOOBV2(agent1 string) error { err := sdk.CreateOOBV2Clients(agent1) if err != nil { return fmt.Errorf("send OOBV2 failed to register %s client: %w", agent1, err) } oobv2Client1, ok := sdk.context.OutOfBandV2Clients[agent1] if !ok { return fmt.Errorf("missing oobv2 client for %s", agent1) } agentDIDDoc, ok := sdk.context.PublicDIDDocs[agent1] if !ok { return fmt.Errorf("oobv2: missing DID Doc for %s", agent1) } inv, err := oobv2Client1.CreateInvitation( outofbandv2.WithGoal(ppfGoal, ppfGoalCode), outofbandv2.WithFrom(agentDIDDoc.ID), ) if err != nil { return fmt.Errorf("failed to create invitation: %w", err) } sdk.pendingV2Invites[agent1] = inv return nil } func (sdk *SDKSteps) acceptOOBV2Invitation(agent1, agent2 string) error { err := sdk.CreateOOBV2Clients(agent2) if err != nil { return fmt.Errorf("send OOBV2 failed to register %s client: %w", agent2, err) } oobv2Client2, ok := sdk.context.OutOfBandV2Clients[agent2] if !ok { return fmt.Errorf("missing oobv2 client for %s", agent2) } inv := sdk.pendingV2Invites[agent1] connID, err := oobv2Client2.AcceptInvitation(inv) if err != nil { return fmt.Errorf("failed to accept oobv2 invitation for %s : %w", agent1, err) } sdk.context.SaveConnectionID(agent2, agent1, connID) return nil }
conn_id.rs
use { crate :: { import::* }, super :: { unique_id::UniqueID }, }; /// A unique identifier for a service that is exposed to other processes. This will allow /// identifying the type to which the payload needs to be deserialized and the actor to which /// this message is to be delivered. /// /// Ideally we want to use 128 bits here to have globally unique identifiers with little chance /// of collision, but we use xxhash which for the moment only supports 64 bit. // #[ derive( Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize ) ] // pub struct ConnID { inner: UniqueID, } impl ConnID { /// Generate a random ID // pub fn random() -> Self { Self { inner: UniqueID::random() } } /// And empty ConnID. Can be used to signify the abscence of an id, would usually be all /// zero bytes. // pub fn null() -> Self { Self{ inner: UniqueID::null() } } /// Predicate for null values (all bytes are 0). // pub fn is_null( &self ) -> bool { self.inner.is_null() } } /// Internally is also represented as Bytes, so you just get a copy. // impl From< ConnID > for u64 { fn from( cid: ConnID ) -> u64 { cid.inner.into() } } /// The object will just keep the bytes as internal representation, no copies will be made // impl From< u64 > for ConnID { fn from( id: u64 ) -> Self { Self { inner: UniqueID::from( id ) } } } impl fmt::Display for ConnID { fn fmt( &self, f: &mut fmt::Formatter<'_> ) -> fmt::Result
} impl fmt::Debug for ConnID { fn fmt( &self, f: &mut fmt::Formatter<'_> ) -> fmt::Result { self.inner.fmt( f ) } } impl fmt::LowerHex for ConnID { fn fmt( &self, f: &mut fmt::Formatter<'_> ) -> fmt::Result { fmt::LowerHex::fmt( &self.inner, f ) } }
{ write!( f, "{:?}", self ) }
file_uploader.js
import { getOffset, getWidth } from '../core/utils/size';
import $ from '../core/renderer';
import Guid from '../core/guid';
import { getWindow } from '../core/utils/window';
import eventsEngine from '../events/core/events_engine';
import registerComponent from '../core/component_registrator';
import Callbacks from '../core/utils/callbacks';
import { isDefined, isFunction, isNumeric } from '../core/utils/type';
import { each } from '../core/utils/iterator';
import { extend } from '../core/utils/extend';
import { Deferred, fromPromise } from '../core/utils/deferred';
import ajax from '../core/utils/ajax';
import Editor from './editor/editor';
import Button from './button';
import ProgressBar from './progress_bar';
import devices from '../core/devices';
import { addNamespace, isTouchEvent } from '../events/utils/index';
import { name as clickEventName } from '../events/click';
import messageLocalization from '../localization/message';
import { isMaterial } from './themes';
import domAdapter from '../core/dom_adapter';

// STYLE fileUploader

const window = getWindow();

// CSS class names the widget attaches to its generated markup.
const FILEUPLOADER_CLASS = 'dx-fileuploader';
const FILEUPLOADER_EMPTY_CLASS = 'dx-fileuploader-empty';
const FILEUPLOADER_SHOW_FILE_LIST_CLASS = 'dx-fileuploader-show-file-list';
const FILEUPLOADER_DRAGOVER_CLASS = 'dx-fileuploader-dragover';
const FILEUPLOADER_WRAPPER_CLASS = 'dx-fileuploader-wrapper';
const FILEUPLOADER_CONTAINER_CLASS = 'dx-fileuploader-container';
const FILEUPLOADER_CONTENT_CLASS = 'dx-fileuploader-content';
const FILEUPLOADER_INPUT_WRAPPER_CLASS = 'dx-fileuploader-input-wrapper';
const FILEUPLOADER_INPUT_CONTAINER_CLASS = 'dx-fileuploader-input-container';
const FILEUPLOADER_INPUT_LABEL_CLASS = 'dx-fileuploader-input-label';
const FILEUPLOADER_INPUT_CLASS = 'dx-fileuploader-input';
const FILEUPLOADER_FILES_CONTAINER_CLASS = 'dx-fileuploader-files-container';
const FILEUPLOADER_FILE_CONTAINER_CLASS = 'dx-fileuploader-file-container';
const FILEUPLOADER_FILE_INFO_CLASS = 'dx-fileuploader-file-info';
const FILEUPLOADER_FILE_STATUS_MESSAGE_CLASS = 'dx-fileuploader-file-status-message';
const FILEUPLOADER_FILE_CLASS = 'dx-fileuploader-file';
const FILEUPLOADER_FILE_NAME_CLASS = 'dx-fileuploader-file-name';
const FILEUPLOADER_FILE_SIZE_CLASS = 'dx-fileuploader-file-size';
const FILEUPLOADER_BUTTON_CLASS = 'dx-fileuploader-button';
const FILEUPLOADER_BUTTON_CONTAINER_CLASS = 'dx-fileuploader-button-container';
const FILEUPLOADER_CANCEL_BUTTON_CLASS = 'dx-fileuploader-cancel-button';
const FILEUPLOADER_UPLOAD_BUTTON_CLASS = 'dx-fileuploader-upload-button';
const FILEUPLOADER_INVALID_CLASS = 'dx-fileuploader-invalid';

// Delay (ms) before post-upload UI updates (status message, hiding buttons).
const FILEUPLOADER_AFTER_LOAD_DELAY = 400;
const FILEUPLOADER_CHUNK_META_DATA_NAME = 'chunkMetadata';

// Overridable factory (see FileUploader.__internals) used by tests.
let renderFileUploaderInput = () => $('<input>').attr('type', 'file');

const isFormDataSupported = () => !!window.FormData;

class FileUploader extends Editor {
    // Space/Enter on the widget trigger the "select file" button click.
    _supportedKeys() {
        const click = e => {
            e.preventDefault();
            const $selectButton = this._selectButton.$element();
            eventsEngine.trigger($selectButton, clickEventName);
        };

        return extend(super._supportedKeys(), {
            space: click,
            enter: click
        });
    }

    _setOptionsByReference() {
        super._setOptionsByReference();

        extend(this._optionsByReference, {
            value: true
        });
    }

    _getDefaultOptions() {
        return extend(super._getDefaultOptions(), {
            chunkSize: 0,
            value: [],
            selectButtonText: messageLocalization.format('dxFileUploader-selectFile'),
            uploadButtonText: messageLocalization.format('dxFileUploader-upload'),
            labelText: messageLocalization.format('dxFileUploader-dropFile'),
            name: 'files[]',
            multiple: false,
            accept: '',
            uploadUrl: '/',
            allowCanceling: true,
            showFileList: true,
            progress: 0,
            dialogTrigger: undefined,
            dropZone: undefined,
            readyToUploadMessage: messageLocalization.format('dxFileUploader-readyToUpload'),
            uploadedMessage: messageLocalization.format('dxFileUploader-uploaded'),
            uploadFailedMessage: messageLocalization.format('dxFileUploader-uploadFailedMessage'),
            uploadAbortedMessage: messageLocalization.format('dxFileUploader-uploadAbortedMessage'),
            uploadMode: 'instantly',
            uploadMethod: 'POST',
            uploadHeaders: {},
            uploadCustomData: {},
            onBeforeSend: null,
            onUploadStarted: null,
            onUploaded: null,
            onFilesUploaded: null,
            onProgress: null,
            onUploadError: null,
            onUploadAborted: null,
            onDropZoneEnter: null,
            onDropZoneLeave: null,
            allowedFileExtensions: [],
            maxFileSize: 0,
            minFileSize: 0,
            inputAttr: {},
            invalidFileExtensionMessage: messageLocalization.format('dxFileUploader-invalidFileExtension'),
            invalidMaxFileSizeMessage: messageLocalization.format('dxFileUploader-invalidMaxFileSize'),
            invalidMinFileSizeMessage: messageLocalization.format('dxFileUploader-invalidMinFileSize'),
            /**
            * @name dxFileUploaderOptions.extendSelection
            * @type boolean
            * @default true
            * @hidden
            */
            extendSelection: true,
            /**
            * @name dxFileUploaderOptions.validationMessageMode
            * @hidden
            */
            validationMessageMode: 'always',
            uploadFile: null,
            uploadChunk: null,
            abortUpload: null,
            validationMessageOffset: { h: 0, v: 0 },
            hoverStateEnabled: true,
            useNativeInputClick: false,
            useDragOver: true,
            nativeDropSupported: true,
            _uploadButtonType: 'normal'
        });
    }

    _defaultOptionsRules() {
        return super._defaultOptionsRules().concat([
            {
                device: () => devices.real().deviceType === 'desktop' && !devices.isSimulator(),
                options: {
                    focusStateEnabled: true
                }
            },
            {
                device: [{ platform: 'android' }],
                options: {
                    validationMessageOffset: { v: 0 }
                }
            },
            {
                device: () => devices.real().deviceType !== 'desktop',
                options: {
                    useDragOver: false
                }
            },
            {
                // No FormData means XHR uploads are impossible; fall back to form submit.
                device: () => !isFormDataSupported(),
                options: {
                    uploadMode: 'useForm'
                }
            },
            {
                device: () => devices.real().deviceType !== 'desktop',
                options: {
                    nativeDropSupported: false
                }
            },
            {
                device: () => isMaterial(),
                options: {
                    _uploadButtonType: 'default'
                }
            }
        ]);
    }

    _initOptions(options) {
        const isLabelTextDefined = 'labelText' in options;

        super._initOptions(options);

        // Without drag&drop the drop-zone label is pointless; clear it unless
        // the user supplied one explicitly.
        if(!isLabelTextDefined && !this._shouldDragOverBeRendered()) {
            this.option('labelText', '');
        }
    }

    _init() {
        super._init();

        this._initFileInput();
        this._initLabel();
        this._setUploadStrategy();
        this._createFiles();
        this._createBeforeSendAction();
        this._createUploadStartedAction();
        this._createUploadedAction();
        this._createFilesUploadedAction();
        this._createProgressAction();
        this._createUploadErrorAction();
        this._createUploadAbortedAction();
        this._createDropZoneEnterAction();
        this._createDropZoneLeaveAction();
    }

    // Chooses chunked vs whole-file transfer, and custom vs default transport,
    // based on the 'chunkSize', 'uploadChunk' and 'uploadFile' options.
    _setUploadStrategy() {
        let strategy = null;

        if(this.option('chunkSize') > 0) {
            const uploadChunk = this.option('uploadChunk');
            strategy = uploadChunk && isFunction(uploadChunk) ? new CustomChunksFileUploadStrategy(this) : new DefaultChunksFileUploadStrategy(this);
        } else {
            const uploadFile = this.option('uploadFile');
            strategy = uploadFile && isFunction(uploadFile) ? new CustomWholeFileUploadStrategy(this) : new DefaultWholeFileUploadStrategy(this);
        }

        this._uploadStrategy = strategy;
    }

    _initFileInput() {
        this._isCustomClickEvent = false;

        if(!this._$fileInput) {
            this._$fileInput = renderFileUploaderInput();

            eventsEngine.on(this._$fileInput, 'change', this._inputChangeHandler.bind(this));
            eventsEngine.on(this._$fileInput, 'click', e => {
                e.stopPropagation();
                this._resetInputValue();
                // Allow the native click only when explicitly enabled or when
                // it originates from _selectButtonClickHandler.
                return this.option('useNativeInputClick') || this._isCustomClickEvent;
            });
        }

        this._$fileInput.prop({
            multiple: this.option('multiple'),
            accept: this.option('accept'),
            tabIndex: -1
        });
    }

    _inputChangeHandler() {
        if(this._doPreventInputChange) {
            return;
        }

        // Strip any path prefix from the value reported by the input element.
        const fileName = this._$fileInput.val().replace(/^.*\\/, '');
        const files = this._$fileInput.prop('files');

        if(files && !files.length && this.option('uploadMode') !== 'useForm') {
            return;
        }

        const value = files ? this._getFiles(files) : [{ name: fileName }];
        this._changeValue(value);

        if(this.option('uploadMode') === 'instantly') {
            this._uploadFiles();
        }
    }

    _shouldFileListBeExtended() {
        return this.option('uploadMode') !== 'useForm' && this.option('extendSelection') && this.option('multiple');
    }

    _changeValue(value) {
        const files = this._shouldFileListBeExtended() ? this.option('value').slice() : [];
        this.option('value', files.concat(value));
    }

    // Copies a FileList-like object into a plain array.
    _getFiles(fileList) {
        const values = [];

        each(fileList, (_, value) => values.push(value));

        return values;
    }

    // Accepts either an index into option('value') or a raw file value and
    // returns the matching internal file record.
    _getFile(fileData) {
        const targetFileValue = isNumeric(fileData) ? this.option('value')[fileData] : fileData;

        return this._files.filter(file => file.value === targetFileValue)[0];
    }

    _initLabel() {
        if(!this._$inputLabel) {
            this._$inputLabel = $('<div>');
        }

        this._updateInputLabelText();
    }

    _updateInputLabelText() {
        const correctedValue = this._isInteractionDisabled() ? '' : this.option('labelText');
        this._$inputLabel.text(correctedValue);
    }

    _focusTarget() {
        return this.$element().find('.' + FILEUPLOADER_BUTTON_CLASS);
    }

    _getSubmitElement() {
        return this._$fileInput;
    }

    _initMarkup() {
        super._initMarkup();

        this.$element().addClass(FILEUPLOADER_CLASS);

        this._renderWrapper();
        this._renderInputWrapper();
        this._renderSelectButton();
        this._renderInputContainer();
        this._renderUploadButton();

        this._preventRecreatingFiles = true;
        this._activeDropZone = null;
    }

    _render() {
        this._preventRecreatingFiles = false;
        this._attachDragEventHandlers(this._$inputWrapper);
        this._attachDragEventHandlers(this.option('dropZone'));

        this._renderFiles();

        super._render();
    }

    _createFileProgressBar(file) {
        file.progressBar = this._createProgressBar(file.value.size);
        file.progressBar.$element().appendTo(file.$file);
        this._initStatusMessage(file);
        this._ensureCancelButtonInitialized(file);
    }

    // Shows the given status text (after a short delay) and removes the
    // progress bar, but only while the file list is visible.
    _setStatusMessage(file, message) {
        setTimeout(() => {
            if(this.option('showFileList')) {
                if(file.$statusMessage) {
                    file.$statusMessage.text(message);
                    file.$statusMessage.css('display', '');
                    file.progressBar.$element().remove();
                }
            }
        }, FILEUPLOADER_AFTER_LOAD_DELAY);
    }

    _getUploadAbortedStatusMessage() {
        return this.option('uploadMode') === 'instantly' ?
            this.option('uploadAbortedMessage') : this.option('readyToUploadMessage');
    }

    // Syncs this._files with option('value'): rebuilds the list when the
    // selection was replaced, otherwise appends only the newly added values.
    _createFiles() {
        const value = this.option('value');

        if(this._files && (value.length === 0 || !this._shouldFileListBeExtended())) {
            this._preventFilesUploading(this._files);
            this._files = null;
        }

        if(!this._files) {
            this._files = [];
        }

        each(value.slice(this._files.length), (_, value) => {
            const file = this._createFile(value);
            this._validateFile(file);
            this._files.push(file);
        });
    }

    _preventFilesUploading(files) {
        files.forEach(file => this._uploadStrategy.abortUpload(file));
    }

    _validateFile(file) {
        file.isValidFileExtension = this._validateFileExtension(file);
        file.isValidMinSize = this._validateMinFileSize(file);
        file.isValidMaxSize = this._validateMaxFileSize(file);
    }

    // A file passes when it matches the 'accept' types (if any are set) and
    // its extension is in 'allowedFileExtensions' (empty list allows all).
    _validateFileExtension(file) {
        const allowedExtensions = this.option('allowedFileExtensions');
        const accept = this.option('accept');
        const allowedTypes = this._getAllowedFileTypes(accept);
        const fileExtension = file.value.name.substring(file.value.name.lastIndexOf('.')).toLowerCase();

        if(accept.length !== 0 && !this._isFileTypeAllowed(file.value, allowedTypes)) {
            return false;
        }

        if(allowedExtensions.length === 0) {
            return true;
        }

        for(let i = 0; i < allowedExtensions.length; i++) {
            if(fileExtension === allowedExtensions[i].toLowerCase()) {
                return true;
            }
        }

        return false;
    }

    // 0 (the default) disables the max-size check.
    _validateMaxFileSize(file) {
        const fileSize = file.value.size;
        const maxFileSize = this.option('maxFileSize');

        return maxFileSize > 0 ? fileSize <= maxFileSize : true;
    }

    // 0 (the default) disables the min-size check.
    _validateMinFileSize(file) {
        const fileSize = file.value.size;
        const minFileSize = this.option('minFileSize');

        return minFileSize > 0 ? fileSize >= minFileSize : true;
    }

    _createBeforeSendAction() {
        this._beforeSendAction = this._createActionByOption('onBeforeSend', { excludeValidators: ['readOnly'] });
    }

    _createUploadStartedAction() {
        this._uploadStartedAction = this._createActionByOption('onUploadStarted', { excludeValidators: ['readOnly'] });
    }

    _createUploadedAction() {
        this._uploadedAction = this._createActionByOption('onUploaded', { excludeValidators: ['readOnly'] });
    }

    _createFilesUploadedAction() {
        this._filesUploadedAction = this._createActionByOption('onFilesUploaded', { excludeValidators: ['readOnly'] });
    }

    _createProgressAction() {
        this._progressAction = this._createActionByOption('onProgress', { excludeValidators: ['readOnly'] });
    }

    _createUploadAbortedAction() {
        this._uploadAbortedAction = this._createActionByOption('onUploadAborted', { excludeValidators: ['readOnly'] });
    }

    _createUploadErrorAction() {
        this._uploadErrorAction = this._createActionByOption('onUploadError', { excludeValidators: ['readOnly'] });
    }

    _createDropZoneEnterAction() {
        this._dropZoneEnterAction = this._createActionByOption('onDropZoneEnter');
    }

    _createDropZoneLeaveAction() {
        this._dropZoneLeaveAction = this._createActionByOption('onDropZoneLeave');
    }

    // Internal per-file state record wrapping the raw selected value.
    _createFile(value) {
        return {
            value: value,
            loadedSize: 0,
            onProgress: Callbacks(),
            onAbort: Callbacks(),
            onLoad: Callbacks(),
            onError: Callbacks(),
            onLoadStart: Callbacks(),
            isValidFileExtension: true,
            isValidMaxSize: true,
            isValidMinSize: true,
            isValid() {
                return this.isValidFileExtension && this.isValidMaxSize && this.isValidMinSize;
            },
            isInitialized: false
        };
    }

    _resetFileState(file) {
        file.isAborted = false;
        file.uploadStarted = false;
        file.isStartLoad = false;
        file.loadedSize = 0;
        file.chunksData = undefined;
        file.request = undefined;
    }

    _renderFiles() {
        const value = this.option('value');

        if(!this._$filesContainer) {
            this._$filesContainer = $('<div>')
                .addClass(FILEUPLOADER_FILES_CONTAINER_CLASS)
                .appendTo(this._$content);
        } else if(!this._shouldFileListBeExtended() || value.length === 0) {
            this._$filesContainer.empty();
        }

        const showFileList = this.option('showFileList');
        if(showFileList) {
            // Only files without rendered markup yet get rendered.
            each(this._files, (_, file) => {
                if(!file.$file) {
                    this._renderFile(file);
                }
            });
        }

        this.$element().toggleClass(FILEUPLOADER_SHOW_FILE_LIST_CLASS, showFileList);

        this._toggleFileUploaderEmptyClassName();
        this._updateFileNameMaxWidth();

        this._validationMessage?.repaint();
    }

    // Builds the per-file row: buttons, name, size, and status message.
    _renderFile(file) {
        const value = file.value;

        const $fileContainer = $('<div>')
            .addClass(FILEUPLOADER_FILE_CONTAINER_CLASS)
            .appendTo(this._$filesContainer);

        this._renderFileButtons(file, $fileContainer);

        file.$file = $('<div>')
            .addClass(FILEUPLOADER_FILE_CLASS)
            .appendTo($fileContainer);

        const $fileInfo = $('<div>')
            .addClass(FILEUPLOADER_FILE_INFO_CLASS)
            .appendTo(file.$file);

        file.$statusMessage = $('<div>')
            .addClass(FILEUPLOADER_FILE_STATUS_MESSAGE_CLASS)
            .appendTo(file.$file);

        $('<div>')
            .addClass(FILEUPLOADER_FILE_NAME_CLASS)
            .text(value.name)
            .appendTo($fileInfo);

        if(isDefined(value.size)) {
            $('<div>')
                .addClass(FILEUPLOADER_FILE_SIZE_CLASS)
                .text(this._getFileSize(value.size))
                .appendTo($fileInfo);
        }

        if(file.isValid()) {
            file.$statusMessage.text(this.option('readyToUploadMessage'));
        } else {
            if(!file.isValidFileExtension) {
                file.$statusMessage.append(this._createValidationElement('invalidFileExtensionMessage'));
            }
            if(!file.isValidMaxSize) {
                file.$statusMessage.append(this._createValidationElement('invalidMaxFileSizeMessage'));
            }
            if(!file.isValidMinSize) {
                file.$statusMessage.append(this._createValidationElement('invalidMinFileSizeMessage'));
            }
            $fileContainer.addClass(FILEUPLOADER_INVALID_CLASS);
        }
    }

    _createValidationElement(key) {
        return $('<span>').text(this.option(key));
    }

    // Caps the file-name width so buttons and the size column always fit.
    _updateFileNameMaxWidth() {
        const cancelButtonsCount = this.option('allowCanceling') && this.option('uploadMode') !== 'useForm' ? 1 : 0;
        const uploadButtonsCount = this.option('uploadMode') === 'useButtons' ?
            1 : 0;
        const filesContainerWidth = getWidth(
            this._$filesContainer.find('.' + FILEUPLOADER_FILE_CONTAINER_CLASS).first()
        ) || getWidth(this._$filesContainer);
        const $buttonContainer = this._$filesContainer.find('.' + FILEUPLOADER_BUTTON_CONTAINER_CLASS).eq(0);
        const buttonsWidth = getWidth($buttonContainer) * (cancelButtonsCount + uploadButtonsCount);
        const $fileSize = this._$filesContainer.find('.' + FILEUPLOADER_FILE_SIZE_CLASS).eq(0);

        // Measure a representative wide size text, then restore the original.
        const prevFileSize = $fileSize.text();
        $fileSize.text('1000 Mb');
        const fileSizeWidth = getWidth($fileSize);
        $fileSize.text(prevFileSize);

        this._$filesContainer.find('.' + FILEUPLOADER_FILE_NAME_CLASS).css('maxWidth', filesContainerWidth - buttonsWidth - fileSizeWidth);
    }

    _renderFileButtons(file, $container) {
        const $cancelButton = this._getCancelButton(file);
        $cancelButton && $container.append($cancelButton);

        const $uploadButton = this._getUploadButton(file);
        $uploadButton && $container.append($uploadButton);
    }

    _getCancelButton(file) {
        if(this.option('uploadMode') === 'useForm') {
            return null;
        }

        file.cancelButton = this._createComponent(
            $('<div>').addClass(FILEUPLOADER_BUTTON_CLASS + ' ' + FILEUPLOADER_CANCEL_BUTTON_CLASS),
            Button, {
                onClick: () => this._removeFile(file),
                icon: 'close',
                visible: this.option('allowCanceling'),
                disabled: this.option('readOnly'),
                integrationOptions: {},
                hoverStateEnabled: this.option('hoverStateEnabled')
            }
        );

        return $('<div>')
            .addClass(FILEUPLOADER_BUTTON_CONTAINER_CLASS)
            .append(file.cancelButton.$element());
    }

    // Per-file upload button; only rendered in 'useButtons' mode for valid files.
    _getUploadButton(file) {
        if(!file.isValid() || this.option('uploadMode') !== 'useButtons') {
            return null;
        }

        file.uploadButton = this._createComponent(
            $('<div>').addClass(FILEUPLOADER_BUTTON_CLASS + ' ' + FILEUPLOADER_UPLOAD_BUTTON_CLASS),
            Button,
            {
                onClick: () => this._uploadFile(file),
                icon: 'upload',
                hoverStateEnabled: this.option('hoverStateEnabled')
            }
        );

        // Hide the button while its upload is in flight; restore on abort.
        file.onLoadStart.add(() => file.uploadButton.option({
            visible: false,
            disabled: true
        }));
        file.onAbort.add(() => file.uploadButton.option({
            visible: true,
            disabled: false
        }));

        return $('<div>')
            .addClass(FILEUPLOADER_BUTTON_CONTAINER_CLASS)
            .append(file.uploadButton.$element());
    }

    _removeFile(file) {
        file.$file?.parent().remove();

        this._files.splice(this._files.indexOf(file), 1);

        const value = this.option('value').slice();
        value.splice(value.indexOf(file.value), 1);

        // Guard so the 'value' option change does not rebuild this._files.
        this._preventRecreatingFiles = true;
        this.option('value', value);
        this._preventRecreatingFiles = false;

        this._toggleFileUploaderEmptyClassName();

        this._resetInputValue(true);
    }

    removeFile(fileData) {
        if(this.option('uploadMode') === 'useForm' || !isDefined(fileData)) {
            return;
        }

        const file = this._getFile(fileData);
        if(file) {
            if(file.uploadStarted) {
                this._preventFilesUploading([file]);
            }
            this._removeFile(file);
        }
    }

    _toggleFileUploaderEmptyClassName() {
        this.$element().toggleClass(FILEUPLOADER_EMPTY_CLASS, !this._files.length || this._hasInvalidFile(this._files));
    }

    _hasInvalidFile(files) {
        for(let i = 0; i < files.length; i++) {
            if(!files[i].isValid()) {
                return true;
            }
        }
        return false;
    }

    // Formats a byte count using 1024-based units with localized labels.
    _getFileSize(size) {
        let i = 0;
        const labels = [
            messageLocalization.format('dxFileUploader-bytes'),
            messageLocalization.format('dxFileUploader-kb'),
            messageLocalization.format('dxFileUploader-Mb'),
            messageLocalization.format('dxFileUploader-Gb')
        ];
        const count = labels.length - 1;

        while(i < count && size >= 1024) {
            size /= 1024;
            i++;
        }

        return Math.round(size) + ' ' + labels[i];
    }

    _renderSelectButton() {
        const $button = $('<div>')
            .addClass(FILEUPLOADER_BUTTON_CLASS)
            .appendTo(this._$inputWrapper);

        this._selectButton = this._createComponent($button, Button, {
            text: this.option('selectButtonText'),
            focusStateEnabled: false,
            integrationOptions: {},
            disabled: this.option('readOnly'),
            hoverStateEnabled: this.option('hoverStateEnabled')
        });

        this._selectFileDialogHandler = this._selectButtonClickHandler.bind(this);

        // NOTE: click triggering on input 'file' works correctly only in native click handler when device is used
        if(devices.real().deviceType === 'desktop') {
            this._selectButton.option('onClick', this._selectFileDialogHandler);
        } else {
            this._attachSelectFileDialogHandler(this._selectButton.$element());
        }
        this._attachSelectFileDialogHandler(this.option('dialogTrigger'));
    }

    _selectButtonClickHandler() {
        if(this.option('useNativeInputClick')) {
            return;
        }

        if(this._isInteractionDisabled()) {
            return false;
        }

        // Marks the synthetic click so _initFileInput's click handler lets it through.
        this._isCustomClickEvent = true;
        eventsEngine.trigger(this._$fileInput, 'click');
        this._isCustomClickEvent = false;
    }

    _attachSelectFileDialogHandler(target) {
        if(!isDefined(target)) {
            return;
        }
        this._detachSelectFileDialogHandler(target);
        eventsEngine.on($(target), 'click', this._selectFileDialogHandler);
    }

    _detachSelectFileDialogHandler(target) {
        if(!isDefined(target)) {
            return;
        }
        eventsEngine.off($(target), 'click', this._selectFileDialogHandler);
    }

    _renderUploadButton() {
        if(this.option('uploadMode') !== 'useButtons') {
            return;
        }

        const $uploadButton = $('<div>')
            .addClass(FILEUPLOADER_BUTTON_CLASS)
            .addClass(FILEUPLOADER_UPLOAD_BUTTON_CLASS)
            .appendTo(this._$content);

        this._uploadButton = this._createComponent($uploadButton, Button, {
            text: this.option('uploadButtonText'),
            onClick: this._uploadButtonClickHandler.bind(this),
            type: this.option('_uploadButtonType'),
            integrationOptions: {},
            hoverStateEnabled: this.option('hoverStateEnabled')
        });
    }

    _uploadButtonClickHandler() {
        this._uploadFiles();
    }

    _shouldDragOverBeRendered() {
        return !this.option('readOnly') && (this.option('uploadMode') !== 'useForm' || this.option('nativeDropSupported'));
    }

    _isInteractionDisabled() {
        return this.option('readOnly') || this.option('disabled');
    }

    _renderInputContainer() {
        this._$inputContainer = $('<div>')
            .addClass(FILEUPLOADER_INPUT_CONTAINER_CLASS)
            .appendTo(this._$inputWrapper);

        this._$fileInput
            .addClass(FILEUPLOADER_INPUT_CLASS);

        this._renderInput();

        const labelId = `dx-fileuploader-input-label-${new Guid()}`;

        this._$inputLabel
            .attr('id', labelId)
            .addClass(FILEUPLOADER_INPUT_LABEL_CLASS)
            .appendTo(this._$inputContainer);

        this.setAria('labelledby', labelId, this._$fileInput);
    }

    _renderInput() {
        if(this.option('useNativeInputClick')) {
            this._selectButton.option('template', this._selectButtonInputTemplate.bind(this));
        } else {
            this._$fileInput.appendTo(this._$inputContainer);
            this._selectButton.option('template', 'content');
        }
        this._applyInputAttributes(this.option('inputAttr'));
    }

    // Button template that embeds the native file input inside the button.
    _selectButtonInputTemplate(data, content) {
        const $content = $(content);
        const $text = $('<span>')
            .addClass('dx-button-text')
            .text(data.text);

        $content
            .append($text)
            .append(this._$fileInput);

        return $content;
    }

    _renderInputWrapper() {
        this._$inputWrapper = $('<div>')
            .addClass(FILEUPLOADER_INPUT_WRAPPER_CLASS)
            .appendTo(this._$content);
    }

    _detachDragEventHandlers(target) {
        if(!isDefined(target)) {
            return;
        }
        // Empty event name with the widget namespace removes all namespaced handlers.
        eventsEngine.off($(target), addNamespace('', this.NAME));
    }

    _attachDragEventHandlers(target) {
        const isCustomTarget = target !== this._$inputWrapper;
        if(!isDefined(target) || !this._shouldDragOverBeRendered()) {
            return;
        }
        this._detachDragEventHandlers(target);
        target = $(target);

        eventsEngine.on(target, addNamespace('dragenter', this.NAME), this._dragEnterHandler.bind(this, isCustomTarget));
        eventsEngine.on(target, addNamespace('dragover', this.NAME), this._dragOverHandler.bind(this, isCustomTarget));
        eventsEngine.on(target, addNamespace('dragleave', this.NAME), this._dragLeaveHandler.bind(this, isCustomTarget));
        eventsEngine.on(target, addNamespace('drop', this.NAME), this._dropHandler.bind(this, isCustomTarget));
    }

    _applyInputAttributes(customAttributes) {
        this._$fileInput.attr(customAttributes);
    }

    _useInputForDrop() {
        return this.option('nativeDropSupported') && this.option('uploadMode') === 'useForm';
    }

    // Resolves the DOM element of the drop zone the event belongs to, or
    // undefined when the event target is not a registered drop zone.
    _getDropZoneElement(isCustomTarget, e) {
        let targetList = isCustomTarget ? Array.from($(this.option('dropZone'))) : [this._$inputWrapper];
        targetList = targetList.map(element => $(element).get(0));
        return targetList[targetList.indexOf(e.currentTarget)];
    }

    _dragEnterHandler(isCustomTarget, e) {
        if(this.option('disabled')) {
            return false;
        }

        if(!this._useInputForDrop()) {
            e.preventDefault();
        }

        const dropZoneElement = this._getDropZoneElement(isCustomTarget, e);
        if(isDefined(dropZoneElement) && this._activeDropZone === null && this.isMouseOverElement(e, dropZoneElement, false)) {
            this._activeDropZone = dropZoneElement;
            this._tryToggleDropZoneActive(true, isCustomTarget, e);
        }
    }

    _dragOverHandler(isCustomTarget, e) {
        if(!this._useInputForDrop()) {
            e.preventDefault();
        }
        e.originalEvent.dataTransfer.dropEffect = 'copy';

        if(!isCustomTarget) { // only default dropzone has pseudoelements
            const dropZoneElement = this._getDropZoneElement(false, e);
            if(this._activeDropZone === null && this.isMouseOverElement(e, dropZoneElement, false)) {
                this._dragEnterHandler(false, e);
            }
            if(this._activeDropZone !== null && this._shouldRaiseDragLeave(e, false)) {
                this._dragLeaveHandler(false, e);
            }
        }
    }

    _dragLeaveHandler(isCustomTarget, e) {
        if(!this._useInputForDrop()) {
            e.preventDefault();
        }

        if(this._activeDropZone === null) {
            return;
        }

        if(this._shouldRaiseDragLeave(e, isCustomTarget)) {
            this._tryToggleDropZoneActive(false, isCustomTarget, e);
            this._activeDropZone = null;
        }
    }

    _shouldRaiseDragLeave(e, isCustomTarget) {
        return !this.isMouseOverElement(e, this._activeDropZone, !isCustomTarget);
    }

    // Fires the enter/leave action and toggles the drag-over CSS class
    // (the class is only applied to the widget's own default drop zone).
    _tryToggleDropZoneActive(active, isCustom, event) {
        const classAction = active ? 'addClass' : 'removeClass';
        const mouseAction = active ? '_dropZoneEnterAction' : '_dropZoneLeaveAction';

        this[mouseAction]({
            event,
            dropZoneElement: this._activeDropZone
        });

        if(!isCustom) {
            this.$element()[classAction](FILEUPLOADER_DRAGOVER_CLASS);
        }
    }

    _dropHandler(isCustomTarget, e) {
        this._activeDropZone = null;

        if(!isCustomTarget) {
            this.$element().removeClass(FILEUPLOADER_DRAGOVER_CLASS);
        }

        if(this._useInputForDrop() || isCustomTarget && this._isInteractionDisabled()) {
            return;
        }

        e.preventDefault();

        const fileList = e.originalEvent.dataTransfer.files;
        const files = this._getFiles(fileList);

        // Dropping several files into a single-file uploader is ignored.
        if(!this.option('multiple') && files.length > 1) {
            return;
        }

        this._changeValue(files);

        if(this.option('uploadMode') === 'instantly') {
            this._uploadFiles();
        }
    }

    _handleAllFilesUploaded() {
        const areAllFilesLoaded = this._files.every(file => !file.isValid() || file._isError || file._isLoaded || file.isAborted);
        if(areAllFilesLoaded) {
            this._filesUploadedAction();
        }
    }

    // Splits the 'accept' option string into trimmed type entries.
    _getAllowedFileTypes(acceptSting) {
        if(!acceptSting.length) {
            return [];
        }

        return acceptSting.split(',').map(item => item.trim());
    }

    // Entries starting with '.' are matched against the file name suffix;
    // other entries (possibly with '*' wildcards) against the MIME type.
    _isFileTypeAllowed(file, allowedTypes) {
        for(let i = 0, n = allowedTypes.length; i < n; i++) {
            let allowedType = allowedTypes[i];

            if(allowedType[0] === '.') {
                allowedType = allowedType.replace('.', '\\.');

                if(file.name.match(new RegExp(allowedType + '$', 'i'))) {
                    return true;
                }
            } else {
                allowedType = allowedType.replace(new RegExp('\\*', 'g'), '');

                if(file.type.match(new RegExp(allowedType, 'i'))) {
                    return true;
                }
            }
        }
        return false;
    }

    _renderWrapper() {
        const $wrapper = $('<div>')
            .addClass(FILEUPLOADER_WRAPPER_CLASS)
            .appendTo(this.$element());

        const $container = $('<div>')
            .addClass(FILEUPLOADER_CONTAINER_CLASS)
            .appendTo($wrapper);

        this._$content = $('<div>')
            .addClass(FILEUPLOADER_CONTENT_CLASS)
            .appendTo($container);
    }

    _clean() {
        this._$fileInput.detach();
        delete this._$filesContainer;
        this._detachSelectFileDialogHandler(this.option('dialogTrigger'));
        this._detachDragEventHandlers(this.option('dropZone'));

        if(this._files) {
            this._files.forEach(file => {
                file.$file = null;
                file.$statusMessage = null;
            });
        }

        super._clean();
    }

    // Public API: aborts one file (by index or value) or all uploads.
    abortUpload(fileData) {
        if(this.option('uploadMode') === 'useForm') {
            return;
        }

        if(isDefined(fileData)) {
            const file = this._getFile(fileData);
            if(file) {
                this._preventFilesUploading([file]);
            }
        } else {
            this._preventFilesUploading(this._files);
        }
    }

    // Public API: uploads one file (by index or value) or all files.
    upload(fileData) {
        if(this.option('uploadMode') === 'useForm') {
            return;
        }

        if(isDefined(fileData)) {
            const file = this._getFile(fileData);
            if(file && isFormDataSupported()) {
                this._uploadFile(file);
            }
        } else {
            this._uploadFiles();
        }
    }

    _uploadFiles() {
        if(isFormDataSupported()) {
            each(this._files, (_, file) => this._uploadFile(file));
        }
    }

    _uploadFile(file) {
        this._uploadStrategy.upload(file);
    }

    // Updates the per-file progress bar and fires the onProgress action.
    _updateProgressBar(file, loadedFileData) {
        file.progressBar && file.progressBar.option({
            value: loadedFileData.loaded,
            showStatus: true
        });

        this._progressAction({
            file: file.value,
            segmentSize: loadedFileData.currentSegmentSize,
            bytesLoaded: loadedFileData.loaded,
            bytesTotal: loadedFileData.total,
            event: loadedFileData.event,
            request: file.request
        });
    }

    _updateTotalProgress(totalFilesSize, totalLoadedFilesSize) {
        const progress = totalFilesSize ? this._getProgressValue(totalLoadedFilesSize / totalFilesSize) : 0;
        this.option('progress', progress);
        this._setLoadedSize(totalLoadedFilesSize);
    }

    _getProgressValue(ratio) {
        return Math.floor(ratio * 100);
    }

    _initStatusMessage(file) {
        file.$statusMessage.css('display', 'none');
    }

    // Lazily wires the cancel button's click and auto-hide behavior once.
    _ensureCancelButtonInitialized(file) {
        if(file.isInitialized) {
            return;
        }

        file.cancelButton.option('onClick', () => {
            this._preventFilesUploading([file]);
            this._removeFile(file);
        });

        const hideCancelButton = () => {
            setTimeout(() => {
                file.cancelButton.option({
                    visible: false
                });
            }, FILEUPLOADER_AFTER_LOAD_DELAY);
        };

        file.onLoad.add(hideCancelButton);
        file.onError.add(hideCancelButton);
    }

    _createProgressBar(fileSize) {
        return this._createComponent($('<div>'), ProgressBar, {
            value: undefined,
            min: 0,
            max: fileSize,
            statusFormat: ratio => this._getProgressValue(ratio) + '%',
            showStatus: false,
            statusPosition: 'right'
        });
    }

    // Cached sum of all file sizes; reset via _recalculateProgress.
    _getTotalFilesSize() {
        if(!this._totalFilesSize) {
            this._totalFilesSize = 0;
            each(this._files, (_, file) => {
                this._totalFilesSize += file.value.size;
            });
        }
        return this._totalFilesSize;
    }

    // Cached sum of all loaded sizes; reset via _recalculateProgress.
    _getTotalLoadedFilesSize() {
        if(!this._totalLoadedFilesSize) {
            this._totalLoadedFilesSize = 0;
            each(this._files, (_, file) => {
                this._totalLoadedFilesSize += file.loadedSize;
            });
        }
        return this._totalLoadedFilesSize;
    }

    _setLoadedSize(value) {
        this._totalLoadedFilesSize = value;
    }

    _recalculateProgress() {
        this._totalFilesSize = 0;
        this._totalLoadedFilesSize = 0;
        this._updateTotalProgress(this._getTotalFilesSize(), this._getTotalLoadedFilesSize());
    }

    // Hit-test of a mouse/touch event against an element's box; optionally
    // excludes the element's :before/:after pseudo-element heights.
    isMouseOverElement(mouseEvent, element, correctPseudoElements) {
        if(!element) return false;

        const beforeHeight = correctPseudoElements ? parseFloat(window.getComputedStyle(element, ':before').height) : 0;
        const afterHeight = correctPseudoElements ? parseFloat(window.getComputedStyle(element, ':after').height) : 0;
        const x = getOffset(element).left;
        const y = getOffset(element).top + beforeHeight;
        const w = element.offsetWidth;
        const h = element.offsetHeight - beforeHeight - afterHeight;
        const eventX = this._getEventX(mouseEvent);
        const eventY = this._getEventY(mouseEvent);

        return eventX >= x && eventX < (x + w) && eventY >= y && eventY < (y + h);
    }

    _getEventX(e) {
        return isTouchEvent(e) ? this._getTouchEventX(e) : e.clientX + this._getDocumentScrollLeft();
    }

    _getEventY(e) {
        return isTouchEvent(e) ? this._getTouchEventY(e) : e.clientY + this._getDocumentScrollTop();
    }

    _getTouchEventX(e) {
        let touchPoint = null;
        if(e.changedTouches.length > 0) {
            touchPoint = e.changedTouches;
        } else if(e.targetTouches.length > 0) {
            touchPoint = e.targetTouches;
        }
        return touchPoint ? touchPoint[0].pageX : 0;
    }

    _getTouchEventY(e) {
        let touchPoint = null;
        if(e.changedTouches.length > 0) {
            touchPoint = e.changedTouches;
        } else if(e.targetTouches.length > 0) {
            touchPoint = e.targetTouches;
        }
        return touchPoint ?
            touchPoint[0].pageY : 0;
    }

    _getDocumentScrollTop() {
        const document = domAdapter.getDocument();
        return document.documentElement.scrollTop || document.body.scrollTop;
    }

    _getDocumentScrollLeft() {
        const document = domAdapter.getDocument();
        return document.documentElement.scrollLeft || document.body.scrollLeft;
    }

    _updateReadOnlyState() {
        const readOnly = this.option('readOnly');
        this._selectButton.option('disabled', readOnly);
        this._files.forEach(file => file.cancelButton?.option('disabled', readOnly));
        this._updateInputLabelText();
        this._attachDragEventHandlers(this._$inputWrapper);
    }

    _updateHoverState() {
        const value = this.option('hoverStateEnabled');
        this._selectButton?.option('hoverStateEnabled', value);
        this._uploadButton?.option('hoverStateEnabled', value);
        this._files.forEach(file => {
            file.uploadButton?.option('hoverStateEnabled', value);
            file.cancelButton?.option('hoverStateEnabled', value);
        });
    }

    _optionChanged(args) {
        const { name, value, previousValue } = args;

        switch(name) {
            case 'height':
            case 'width':
                this._updateFileNameMaxWidth();
                super._optionChanged(args);
                break;
            case 'value':
                !value.length && this._$fileInput.val('');

                if(!this._preventRecreatingFiles) {
                    this._createFiles();
                    this._renderFiles();
                }

                this._recalculateProgress();

                super._optionChanged(args);
                break;
            case 'name':
                this._initFileInput();
                super._optionChanged(args);
                break;
            case 'accept':
                this._initFileInput();
                break;
            case 'multiple':
                this._initFileInput();
                // Switching to single-file mode discards the current selection.
                if(!args.value) {
                    this.reset();
                }
                break;
            case 'readOnly':
                this._updateReadOnlyState();
                super._optionChanged(args);
                break;
            case 'disabled':
                this._updateInputLabelText();
                super._optionChanged(args);
                break;
            case 'selectButtonText':
                this._selectButton.option('text', value);
                break;
            case 'uploadButtonText':
                this._uploadButton && this._uploadButton.option('text', value);
                break;
            case '_uploadButtonType':
                this._uploadButton && this._uploadButton.option('type', value);
                break;
            case 'dialogTrigger':
                this._detachSelectFileDialogHandler(previousValue);
                this._attachSelectFileDialogHandler(value);
                break;
            case 'dropZone':
                this._detachDragEventHandlers(previousValue);
                this._attachDragEventHandlers(value);
                break;
            case 'maxFileSize':
            case 'minFileSize':
            case 'allowedFileExtensions':
            case 'invalidFileExtensionMessage':
            case 'invalidMaxFileSizeMessage':
            case 'invalidMinFileSizeMessage':
            case 'readyToUploadMessage':
            case 'uploadedMessage':
            case 'uploadFailedMessage':
            case 'uploadAbortedMessage':
                this._invalidate();
                break;
            case 'labelText':
                this._updateInputLabelText();
                break;
            case 'showFileList':
                if(!this._preventRecreatingFiles) {
                    this._renderFiles();
                }
                break;
            case 'uploadFile':
            case 'uploadChunk':
            case 'chunkSize':
                this._setUploadStrategy();
                break;
            // These options are read on demand; no re-render needed.
            case 'abortUpload':
            case 'uploadUrl':
            case 'progress':
            case 'uploadMethod':
            case 'uploadHeaders':
            case 'uploadCustomData':
            case 'extendSelection':
                break;
            case 'hoverStateEnabled':
                this._updateHoverState();
                super._optionChanged(args);
                break;
            case 'allowCanceling':
            case 'uploadMode':
                this.reset();
                this._invalidate();
                break;
            case 'onBeforeSend':
                this._createBeforeSendAction();
                break;
            case 'onUploadStarted':
                this._createUploadStartedAction();
                break;
            case 'onUploaded':
                this._createUploadedAction();
                break;
            case 'onFilesUploaded':
                this._createFilesUploadedAction();
                break;
            case 'onProgress':
                this._createProgressAction();
                break;
            case 'onUploadError':
                this._createUploadErrorAction();
                break;
            case 'onUploadAborted':
                this._createUploadAbortedAction();
                break;
            case 'onDropZoneEnter':
                this._createDropZoneEnterAction();
                break;
            case 'onDropZoneLeave':
                this._createDropZoneLeaveAction();
                break;
            case 'useNativeInputClick':
                this._renderInput();
                break;
            case 'useDragOver':
                this._attachDragEventHandlers(this._$inputWrapper);
                break;
            case 'nativeDropSupported':
                this._invalidate();
                break;
            case 'inputAttr':
                this._applyInputAttributes(this.option(name));
                break;
            default:
                super._optionChanged(args);
        }
    }

    // Clears the native input so re-selecting the same file fires 'change';
    // the _doPreventInputChange flag suppresses the resulting change event.
    _resetInputValue(force) {
        if(this.option('uploadMode') === 'useForm' && !force) {
            return;
        }

        this._doPreventInputChange = true;
        this._$fileInput.val('');
        this._doPreventInputChange = false;
    }

    reset() {
        this.option('value', []);
    }
}

///#DEBUG
FileUploader.__internals = {
    changeFileInputRenderer(renderer) {
        renderFileUploaderInput = renderer;
    },
    resetFileInputTag() {
        renderFileUploaderInput = () => $('<input>').attr('type', 'file');
    }
};
///#ENDDEBUG

// Splits a File into sequential Blob chunks of 'chunkSize' bytes.
class FileBlobReader {
    constructor(file, chunkSize) {
        this.file = file;
        this.chunkSize = chunkSize;
        this.index = 0;
    }

    // Returns the next chunk descriptor, or null once the file is consumed.
    read() {
        if(!this.file) {
            return null;
        }

        const result = this.createBlobResult(this.file, this.index, this.chunkSize);
        if(result.isCompleted) {
            this.file = null;
        }
        this.index++;
        return result;
    }

    createBlobResult(file, index, chunkSize) {
        const currentPosition = index * chunkSize;
        return {
            blob: this.sliceFile(file, currentPosition, chunkSize),
            index: index,
            isCompleted: currentPosition + chunkSize >= file.size
        };
    }

    // Prefers the standard Blob.slice; falls back to the legacy webkitSlice.
    sliceFile(file, startPos, length) {
        if(file.slice) {
            return file.slice(startPos, startPos + length);
        }
        if(file.webkitSlice) {
            return file.webkitSlice(startPos, startPos + length);
        }
        return null;
    }
}

class
{ constructor(fileUploader) { this.fileUploader = fileUploader; } upload(file) { if(file.isInitialized && file.isAborted) { this.fileUploader._resetFileState(file); } if(file.isValid() && !file.uploadStarted) { this._prepareFileBeforeUpload(file); this._uploadCore(file); } } abortUpload(file) { if(file._isError || file._isLoaded || file.isAborted || !file.uploadStarted) { return; } file.isAborted = true; file.request && file.request.abort(); if(this._isCustomCallback('abortUpload')) { const abortUpload = this.fileUploader.option('abortUpload'); const arg = this._createUploadArgument(file); let deferred = null; try { const result = abortUpload(file.value, arg); deferred = fromPromise(result); } catch(error) { deferred = new Deferred().reject(error).promise(); } deferred .done(() => file.onAbort.fire()) .fail(error => this._handleFileError(file, error)); } } _beforeSend(xhr, file) { const arg = this._createUploadArgument(file); this.fileUploader._beforeSendAction({ request: xhr, file: file.value, uploadInfo: arg }); file.request = xhr; } _createUploadArgument(file) { } _uploadCore(file) { } _isCustomCallback(name) { const callback = this.fileUploader.option(name); return callback && isFunction(callback); } _handleProgress(file, e) { if(file._isError) { return; } file._isProgressStarted = true; this._handleProgressCore(file, e); } _handleProgressCore(file, e) { } _handleFileError(file, error) { file._isError = true; file.onError.fire(error); } _prepareFileBeforeUpload(file) { if(file.$file) { file.progressBar?.dispose(); this.fileUploader._createFileProgressBar(file); } if(file.isInitialized) { return; } file.onLoadStart.add(this._onUploadStarted.bind(this, file)); file.onLoad.add(this._onLoadedHandler.bind(this, file)); file.onError.add(this._onErrorHandler.bind(this, file)); file.onAbort.add(this._onAbortHandler.bind(this, file)); file.onProgress.add(this._onProgressHandler.bind(this, file)); file.isInitialized = true; } _shouldHandleError(file, e) { return 
(this._isStatusError(e.status) || !file._isProgressStarted) && !file.isAborted; } _isStatusError(status) { return 400 <= status && status < 500 || 500 <= status && status < 600; } _onUploadStarted(file, e) { file.uploadStarted = true; this.fileUploader._uploadStartedAction({ file: file.value, event: e, request: file.request }); } _onAbortHandler(file, e) { const args = { file: file.value, event: e, request: file.request, message: this.fileUploader._getUploadAbortedStatusMessage() }; this.fileUploader._uploadAbortedAction(args); this.fileUploader._setStatusMessage(file, args.message); this.fileUploader._handleAllFilesUploaded(); } _onErrorHandler(file, error) { const args = { file: file.value, event: undefined, request: file.request, error, message: this.fileUploader.option('uploadFailedMessage') }; this.fileUploader._uploadErrorAction(args); this.fileUploader._setStatusMessage(file, args.message); this.fileUploader._handleAllFilesUploaded(); } _onLoadedHandler(file, e) { const args = { file: file.value, event: e, request: file.request, message: this.fileUploader.option('uploadedMessage') }; file._isLoaded = true; this.fileUploader._uploadedAction(args); this.fileUploader._setStatusMessage(file, args.message); this.fileUploader._handleAllFilesUploaded(); } _onProgressHandler(file, e) { if(file) { const totalFilesSize = this.fileUploader._getTotalFilesSize(); const totalLoadedFilesSize = this.fileUploader._getTotalLoadedFilesSize(); const loadedSize = Math.min(e.loaded, file.value.size); const segmentSize = loadedSize - file.loadedSize; file.loadedSize = loadedSize; this.fileUploader._updateTotalProgress(totalFilesSize, totalLoadedFilesSize + segmentSize); this.fileUploader._updateProgressBar(file, this._getLoadedData(loadedSize, e.total, segmentSize, e)); } } _getLoadedData(loaded, total, currentSegmentSize, event) { return { loaded: loaded, total: total, currentSegmentSize: currentSegmentSize }; } _extendFormData(formData) { const formDataEntries = 
this.fileUploader.option('uploadCustomData'); for(const entryName in formDataEntries) { if(Object.prototype.hasOwnProperty.call(formDataEntries, entryName) && isDefined(formDataEntries[entryName])) { formData.append(entryName, formDataEntries[entryName]); } } } } class ChunksFileUploadStrategyBase extends FileUploadStrategyBase { constructor(fileUploader) { super(fileUploader); this.chunkSize = this.fileUploader.option('chunkSize'); } _uploadCore(file) { const realFile = file.value; const chunksData = { name: realFile.name, loadedBytes: 0, type: realFile.type, blobReader: new FileBlobReader(realFile, this.chunkSize), guid: new Guid(), fileSize: realFile.size, count: Math.ceil(realFile.size / this.chunkSize), customData: {} }; file.chunksData = chunksData; this._sendChunk(file, chunksData); } _sendChunk(file, chunksData) { const chunk = chunksData.blobReader.read(); chunksData.currentChunk = chunk; if(chunk) { this._sendChunkCore(file, chunksData, chunk) .done(() => { if(file.isAborted) { return; } chunksData.loadedBytes += chunk.blob.size; file.onProgress.fire({ loaded: chunksData.loadedBytes, total: file.value.size }); if(chunk.isCompleted) { file.onLoad.fire(); } setTimeout(() => this._sendChunk(file, chunksData)); }) .fail(error => { if(this._shouldHandleError(file, error)) { this._handleFileError(file, error); } }); } } _sendChunkCore(file, chunksData, chunk) { } _tryRaiseStartLoad(file) { if(!file.isStartLoad) { file.isStartLoad = true; file.onLoadStart.fire(); } } _getEvent(e) { return null; } _createUploadArgument(file) { return this._createChunksInfo(file.chunksData); } _createChunksInfo(chunksData) { return { bytesUploaded: chunksData.loadedBytes, chunkCount: chunksData.count, customData: chunksData.customData, chunkBlob: chunksData.currentChunk.blob, chunkIndex: chunksData.currentChunk.index }; } } class DefaultChunksFileUploadStrategy extends ChunksFileUploadStrategyBase { _sendChunkCore(file, chunksData, chunk) { return ajax.sendRequest({ url: 
this.fileUploader.option('uploadUrl'), method: this.fileUploader.option('uploadMethod'), headers: this.fileUploader.option('uploadHeaders'), beforeSend: xhr => this._beforeSend(xhr, file), upload: { 'onprogress': e => this._handleProgress(file, e), 'onloadstart': () => this._tryRaiseStartLoad(file), 'onabort': () => file.onAbort.fire() }, data: this._createFormData({ fileName: chunksData.name, blobName: this.fileUploader.option('name'), blob: chunk.blob, index: chunk.index, count: chunksData.count, type: chunksData.type, guid: chunksData.guid, size: chunksData.fileSize }) }); } _createFormData(options) { const formData = new window.FormData(); formData.append(options.blobName, options.blob); formData.append(FILEUPLOADER_CHUNK_META_DATA_NAME, JSON.stringify({ FileName: options.fileName, Index: options.index, TotalCount: options.count, FileSize: options.size, FileType: options.type, FileGuid: options.guid })); this._extendFormData(formData); return formData; } } class CustomChunksFileUploadStrategy extends ChunksFileUploadStrategyBase { _sendChunkCore(file, chunksData) { this._tryRaiseStartLoad(file); const chunksInfo = this._createChunksInfo(chunksData); const uploadChunk = this.fileUploader.option('uploadChunk'); try { const result = uploadChunk(file.value, chunksInfo); return fromPromise(result); } catch(error) { return new Deferred().reject(error).promise(); } } _shouldHandleError(file, error) { return true; } } class WholeFileUploadStrategyBase extends FileUploadStrategyBase { _uploadCore(file) { file.loadedSize = 0; this._uploadFile(file) .done(() => { if(!file.isAborted) { file.onLoad.fire(); } }) .fail(error => { if(this._shouldHandleError(file, error)) { this._handleFileError(file, error); } }); } _uploadFile(file) { } _handleProgressCore(file, e) { file.onProgress.fire(e); } _getLoadedData(loaded, total, segmentSize, event) { const result = super._getLoadedData(loaded, total, segmentSize, event); result.event = event; return result; } } class 
DefaultWholeFileUploadStrategy extends WholeFileUploadStrategyBase { _uploadFile(file) { return ajax.sendRequest({ url: this.fileUploader.option('uploadUrl'), method: this.fileUploader.option('uploadMethod'), headers: this.fileUploader.option('uploadHeaders'), beforeSend: xhr => this._beforeSend(xhr, file), upload: { 'onprogress': e => this._handleProgress(file, e), 'onloadstart': () => file.onLoadStart.fire(), 'onabort': () => file.onAbort.fire() }, data: this._createFormData(this.fileUploader.option('name'), file.value) }); } _createFormData(fieldName, fieldValue) { const formData = new window.FormData(); formData.append(fieldName, fieldValue, fieldValue.name); this._extendFormData(formData); return formData; } } class CustomWholeFileUploadStrategy extends WholeFileUploadStrategyBase { _uploadFile(file) { file.onLoadStart.fire(); const progressCallback = loadedBytes => { const arg = { loaded: loadedBytes, total: file.value.size }; this._handleProgress(file, arg); }; const uploadFile = this.fileUploader.option('uploadFile'); try { const result = uploadFile(file.value, progressCallback); return fromPromise(result); } catch(error) { return new Deferred().reject(error).promise(); } } _shouldHandleError(file, e) { return true; } } registerComponent('dxFileUploader', FileUploader); export default FileUploader;
FileUploadStrategyBase
views.py
from django.http import HttpResponse from django.shortcuts import render # Create your views here. from rest_framework.views import APIView from rest_framework.response import Response from libs.captcha.captcha import captcha from django_redis import get_redis_connection from verifications.serializers import RegisterSmsSerializer import random from celery_tasks.sms.tasks import send_sms_code # 图片验证码 class RegisterImageCodeAPI(APIView): def get(self,request,image_code_id): # 1.接收uuid # 2.生成验证码 text,image = captcha.gen
ef get(self,request,mobile): # 1.接收数据 query_params = request.query_params # 2.校验数据 serializer = RegisterSmsSerializer(data=query_params) # 3.生成短信验证码 sms_code = '%06d' % random.randint(0,999999) print(sms_code) # 4.发送短信 send_sms_code.delay(mobile,sms_code) # 5.保存短信 redis_conn = get_redis_connection('code') redis_conn.setex('sms_'+mobile,5*60,sms_code) return Response({'msg':'ok'})
erate_captcha() print(text) # 3.将验证码保存在redis中 redis_conn = get_redis_connection('code') redis_conn.setex('img'+image_code_id,300,text) # 4.返回验证码 return HttpResponse(image,content_type='image/jpeg') # 手机验证码 class RegisterSmsCodeAPI(APIView): d
mingw.py
# MIT License # # Copyright The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """SCons.Tool.gcc Tool-specific initialization for MinGW (http://www.mingw.org/) There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. 
""" import os import os.path import glob import SCons.Action import SCons.Builder import SCons.Defaults import SCons.Tool import SCons.Util mingw_paths = [ r'c:\MinGW\bin', r'C:\cygwin64\bin', r'C:\msys64', r'C:\msys64\mingw64\bin', r'C:\cygwin\bin', r'C:\msys', r'C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin' ] def shlib_generator(target, source, env, for_signature): cmd = SCons.Util.CLVar(['$SHLINK', '$SHLINKFLAGS']) dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX') if dll: cmd.extend(['-o', dll]) cmd.extend(['$SOURCES', '$_LIBDIRFLAGS', '$_LIBFLAGS']) implib = env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX') if implib: cmd.append('-Wl,--out-implib,' + implib.get_string(for_signature)) def_target = env.FindIxes(target, 'WINDOWSDEFPREFIX', 'WINDOWSDEFSUFFIX') insert_def = env.subst("$WINDOWS_INSERT_DEF") if insert_def not in ['', '0', 0] and def_target: \ cmd.append('-Wl,--output-def,' + def_target.get_string(for_signature)) return [cmd] def shlib_emitter(target, source, env): dll = env.FindIxes(target, 'SHLIBPREFIX', 'SHLIBSUFFIX') no_import_lib = env.get('no_import_lib', 0) if not dll: raise SCons.Errors.UserError( "A shared library should have exactly one target with the suffix: %s Target(s) are:%s" % \ (env.subst("$SHLIBSUFFIX"), ",".join([str(t) for t in target]))) if not no_import_lib and \ not env.FindIxes(target, 'LIBPREFIX', 'LIBSUFFIX'): # Create list of target libraries as strings targetStrings = env.ReplaceIxes(dll, 'SHLIBPREFIX', 'SHLIBSUFFIX', 'LIBPREFIX', 'LIBSUFFIX') # Now add file nodes to target list target.append(env.fs.File(targetStrings)) # Append a def file target if there isn't already a def file target # or a def file source or the user has explicitly asked for the target # to be emitted. 
def_source = env.FindIxes(source, 'WINDOWSDEFPREFIX', 'WINDOWSDEFSUFFIX') def_target = env.FindIxes(target, 'WINDOWSDEFPREFIX', 'WINDOWSDEFSUFFIX') skip_def_insert = env.subst("$WINDOWS_INSERT_DEF") in ['', '0', 0] if not def_source and not def_target and not skip_def_insert: # Create list of target libraries and def files as strings targetStrings = env.ReplaceIxes(dll, 'SHLIBPREFIX', 'SHLIBSUFFIX', 'WINDOWSDEFPREFIX', 'WINDOWSDEFSUFFIX') # Now add file nodes to target list target.append(env.fs.File(targetStrings)) return (target, source) shlib_action = SCons.Action.Action(shlib_generator, '$SHLINKCOMSTR', generator=1) ldmodule_action = SCons.Action.Action(shlib_generator, '$LDMODULECOMSTR', generator=1) res_action = SCons.Action.Action('$RCCOM', '$RCCOMSTR') res_builder = SCons.Builder.Builder(action=res_action, suffix='.o', source_scanner=SCons.Tool.SourceFileScanner) SCons.Tool.SourceFileScanner.add_scanner('.rc', SCons.Defaults.CScan) # This is what we search for to find mingw: # key_program = 'mingw32-gcc' key_program = 'mingw32-make' def find_version_specific_mingw_paths(): r""" One example of default mingw install paths is: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev2\mingw64\bin Use glob'ing to find such and add to mingw_paths """ new_paths = glob.glob(r"C:\mingw-w64\*\mingw64\bin") return new_paths def generate(env): global mingw_paths # Check for reasoanble mingw default paths mingw_paths += find_version_specific_mingw_paths() mingw = SCons.Tool.find_program_path(env, key_program, default_paths=mingw_paths) if mingw: mingw_bin_dir = os.path.dirname(mingw) # Adjust path if we found it in a chocolatey install if mingw_bin_dir == r'C:\ProgramData\chocolatey\bin': mingw_bin_dir = r'C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin' env.AppendENVPath('PATH', mingw_bin_dir) # Most of mingw is the same as gcc and friends... gnu_tools = ['gcc', 'g++', 'gnulink', 'ar', 'gas', 'gfortran', 'm4'] for tool in gnu_tools: SCons.Tool.Tool(tool)(env) # ... 
but a few things differ: env['CC'] = 'gcc' # make sure the msvc tool doesnt break us, it added a /flag if 'CCFLAGS' in env: # make sure its a CLVar to handle list or str cases if type(env['CCFLAGS']) is not SCons.Util.CLVar: env['CCFLAGS'] = SCons.Util.CLVar(env['CCFLAGS']) env['CCFLAGS'] = SCons.Util.CLVar(str(env['CCFLAGS']).replace('/nologo', '')) env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS') env['CXX'] = 'g++' env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS') env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared') env['SHLINKCOM'] = shlib_action env['SHLINKCOMSTR'] = shlib_generator env['LDMODULECOM'] = ldmodule_action env.Append(SHLIBEMITTER=[shlib_emitter]) env.Append(LDMODULEEMITTER=[shlib_emitter]) env['AS'] = 'as' env['WIN32DEFPREFIX'] = '' env['WIN32DEFSUFFIX'] = '.def' env['WINDOWSDEFPREFIX'] = '${WIN32DEFPREFIX}' env['WINDOWSDEFSUFFIX'] = '${WIN32DEFSUFFIX}' env['SHOBJSUFFIX'] = '.o' env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1 env['RC'] = 'windres' env['RCFLAGS'] = SCons.Util.CLVar('') env['RCINCFLAGS'] = '$( ${_concat(RCINCPREFIX, CPPPATH, RCINCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)' env['RCINCPREFIX'] = '--include-dir ' env['RCINCSUFFIX'] = '' env['RCCOM'] = '$RC $_CPPDEFFLAGS $RCINCFLAGS ${RCINCPREFIX} ${SOURCE.dir} $RCFLAGS -i $SOURCE -o $TARGET' env['BUILDERS']['RES'] = res_builder # Some setting from the platform also have to be overridden: env['OBJSUFFIX'] = '.o' env['LIBPREFIX'] = 'lib' env['LIBSUFFIX'] = '.a' env['PROGSUFFIX'] = '.exe' # Handle new versioned shared library logic env['_SHLIBSUFFIX'] = '$SHLIBSUFFIX' env["SHLIBPREFIX"] = "" def
(env): mingw = SCons.Tool.find_program_path(env, key_program, default_paths=mingw_paths) if mingw: mingw_bin_dir = os.path.dirname(mingw) env.AppendENVPath('PATH', mingw_bin_dir) return mingw # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
exists
swagger.py
import copy import inspect from typing import Any, List, Dict, Optional, Union # noqa from chalice.app import Chalice, RouteEntry, Authorizer, CORSConfig # noqa from chalice.app import ChaliceAuthorizer from chalice.deploy.planner import StringFormat from chalice.deploy.models import RestAPI # noqa from chalice.utils import to_cfn_resource_name class SwaggerGenerator(object): _BASE_TEMPLATE = { 'swagger': '2.0', 'info': { 'version': '1.0', 'title': '' }, 'schemes': ['https'], 'paths': {}, 'definitions': { 'Empty': { 'type': 'object', 'title': 'Empty Schema', } } } # type: Dict[str, Any] def __init__(self, region, deployed_resources): # type: (str, Dict[str, Any]) -> None self._region = region self._deployed_resources = deployed_resources def generate_swagger(self, app, rest_api=None): # type: (Chalice, Optional[RestAPI]) -> Dict[str, Any] api = copy.deepcopy(self._BASE_TEMPLATE) api['info']['title'] = app.app_name self._add_binary_types(api, app) self._add_route_paths(api, app) self._add_resource_policy(api, rest_api) return api def _add_resource_policy(self, api, rest_api): # type: (Dict[str, Any], Optional[RestAPI]) -> None if rest_api and rest_api.policy: api['x-amazon-apigateway-policy'] = rest_api.policy.document def _add_binary_types(self, api, app): # type: (Dict[str, Any], Chalice) -> None api['x-amazon-apigateway-binary-media-types'] = app.api.binary_types def _add_route_paths(self, api, app): # type: (Dict[str, Any], Chalice) -> None for path, methods in app.routes.items(): swagger_for_path = {} # type: Dict[str, Any] api['paths'][path] = swagger_for_path cors_config = None methods_with_cors = [] for http_method, view in methods.items(): current = self._generate_route_method(view) if 'security' in current: self._add_to_security_definition( current['security'], api, view) swagger_for_path[http_method.lower()] = current if view.cors is not None: cors_config = view.cors methods_with_cors.append(http_method) # Chalice ensures that routes with multiple views 
have the same # CORS configuration. So if any entry has CORS enabled, use that # entry's CORS configuration for the preflight setup. if cors_config is not None: self._add_preflight_request( cors_config, methods_with_cors, swagger_for_path) def _generate_security_from_auth_obj(self, api_config, authorizer): # type: (Dict[str, Any], Authorizer) -> None if isinstance(authorizer, ChaliceAuthorizer): auth_config = authorizer.config config = { 'in': 'header', 'type': 'apiKey', 'name': 'Authorization', 'x-amazon-apigateway-authtype': 'custom' } api_gateway_authorizer = { 'type': 'token', 'authorizerUri': self._auth_uri(authorizer) } if auth_config.execution_role is not None: api_gateway_authorizer['authorizerCredentials'] = \ auth_config.execution_role if auth_config.ttl_seconds is not None: api_gateway_authorizer['authorizerResultTtlInSeconds'] = \ auth_config.ttl_seconds config['x-amazon-apigateway-authorizer'] = api_gateway_authorizer else: config = authorizer.to_swagger() api_config.setdefault( 'securityDefinitions', {})[authorizer.name] = config def _auth_uri(self, authorizer): # type: (ChaliceAuthorizer) -> str function_name = '%s-%s' % ( self._deployed_resources['api_handler_name'], authorizer.config.name ) return self._uri( self._deployed_resources['lambda_functions'][function_name]['arn']) def _add_to_security_definition(self, security, api_config, view): # type: (Any, Dict[str, Any], RouteEntry) -> None if view.authorizer is not None: self._generate_security_from_auth_obj(api_config, view.authorizer) for auth in security: name = list(auth.keys())[0] if name == 'api_key': # This is just the api_key_required=True config swagger_snippet = { 'type': 'apiKey', 'name': 'x-api-key', 'in': 'header', } # type: Dict[str, Any] api_config.setdefault( 'securityDefinitions', {})[name] = swagger_snippet def _generate_route_method(self, view): # type: (RouteEntry) -> Dict[str, Any] current = { 'consumes': view.content_types, 'produces': ['application/json'], 'responses': 
self._generate_precanned_responses(), 'x-amazon-apigateway-integration': self._generate_apig_integ( view), } # type: Dict[str, Any] docstring = inspect.getdoc(view.view_function) if docstring: doc_lines = docstring.splitlines() current['summary'] = doc_lines[0] if len(doc_lines) > 1: current['description'] = '\n'.join(doc_lines[1:]).strip('\n') if view.api_key_required: # When this happens we also have to add the relevant portions # to the security definitions. We have to someone indicate # this because this neeeds to be added to the global config # file. current.setdefault('security', []).append({'api_key': []}) if view.authorizer: current.setdefault('security', []).append( {view.authorizer.name: view.authorizer.scopes}) if view.view_args: self._add_view_args(current, view.view_args) return current def _generate_precanned_responses(self): # type: () -> Dict[str, Any] responses = { '200': { 'description': '200 response', 'schema': { '$ref': '#/definitions/Empty', } } } return responses def _uri(self, lambda_arn=None): # type: (Optional[str]) -> Any if lambda_arn is None: lambda_arn = self._deployed_resources['api_handler_arn'] return ('arn:aws:apigateway:{region}:lambda:path/2015-03-31' '/functions/{lambda_arn}/invocations').format( region=self._region, lambda_arn=lambda_arn) def _generate_apig_integ(self, view): # type: (RouteEntry) -> Dict[str, Any] apig_integ = { 'responses': { 'default': { 'statusCode': "200", } }, 'uri': self._uri(), 'passthroughBehavior': 'when_no_match', 'httpMethod': 'POST', 'contentHandling': 'CONVERT_TO_TEXT', 'type': 'aws_proxy', } return apig_integ def _add_view_args(self, single_method, view_args): # type: (Dict[str, Any], List[str]) -> None single_method['parameters'] = [ {'name': name, 'in': 'path', 'required': True, 'type': 'string'} for name in view_args ] def _add_preflight_request(self, cors, methods, swagger_for_path): # type: (CORSConfig, List[str], Dict[str, Any]) -> None methods = methods + ['OPTIONS'] allowed_methods = 
','.join(methods) response_params = { 'Access-Control-Allow-Methods': '%s' % allowed_methods } response_params.update(cors.get_access_control_headers()) headers = {k: {'type': 'string'} for k, _ in response_params.items()} response_params = {'method.response.header.%s' % k: "'%s'" % v for k, v in response_params.items()} options_request = { "consumes": ["application/json"], "produces": ["application/json"], "responses": { "200": { "description": "200 response", "schema": {"$ref": "#/definitions/Empty"}, "headers": headers } }, "x-amazon-apigateway-integration": { "responses": { "default": { "statusCode": "200", "responseParameters": response_params, } }, "requestTemplates": { "application/json": "{\"statusCode\": 200}" }, "passthroughBehavior": "when_no_match", "type": "mock", "contentHandling": "CONVERT_TO_TEXT" } } swagger_for_path['options'] = options_request class CFNSwaggerGenerator(SwaggerGenerator): def __init__(self): # type: () -> None pass def _uri(self, lambda_arn=None): # type: (Optional[str]) -> Any return { 'Fn::Sub': ( 'arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31' '/functions/${APIHandler.Arn}/invocations' ) } def _auth_uri(self, authorizer): # type: (ChaliceAuthorizer) -> Any return { 'Fn::Sub': ( 'arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31' '/functions/${%s.Arn}/invocations' % to_cfn_resource_name( authorizer.name) ) } class TemplatedSwaggerGenerator(SwaggerGenerator): def __init__(self): # type: () -> None pass def _uri(self, lambda_arn=None): # type: (Optional[str]) -> Any return StringFormat( 'arn:aws:apigateway:{region_name}:lambda:path/2015-03-31' '/functions/{api_handler_lambda_arn}/invocations', ['region_name', 'api_handler_lambda_arn'], ) def
(self, authorizer): # type: (ChaliceAuthorizer) -> Any varname = '%s_lambda_arn' % authorizer.name return StringFormat( 'arn:aws:apigateway:{region_name}:lambda:path/2015-03-31' '/functions/{%s}/invocations' % varname, ['region_name', varname], ) class TerraformSwaggerGenerator(SwaggerGenerator): def __init__(self): # type: () -> None pass def _uri(self, lambda_arn=None): # type: (Optional[str]) -> Any return '${aws_lambda_function.api_handler.invoke_arn}' def _auth_uri(self, authorizer): # type: (ChaliceAuthorizer) -> Any return '${aws_lambda_function.%s.invoke_arn}' % (authorizer.name)
_auth_uri
base_kernel.py
import numpy as np import pickle class Kernel: def __init__(self): self.train_phi = None self.K_matrix = None self.test_phi = None self.X_train = None pass def build_gram_matrix(self, X): raise NotImplementedError("Method build_gram_matrix not implemented.") def test(self, x): raise NotImplementedError("Method test not implemented.") def save_kernel(self, path): with open(path, "wb") as f: pickle.dump(self, f) @staticmethod def load_kernel(path): with open(path, "rb") as f: kernel_class = pickle.load(f) return kernel_class class KernelIPExplicit(Kernel):
class KernelIPImplicit(Kernel): def __init__(self): super().__init__() def build_gram_matrix(self, X): n = X.shape[0] self.X_train = X output = np.zeros((n, n)) for i in range(n): for j in range(i, n): value1, value2 = X.loc[i, X.columns[1]], X.loc[j, X.columns[1]] output[i, j] = output[j, i] = self.K(value1, value2) self.K_matrix = output def test(self, x): X = self.X_train n = X.shape[0] output = np.zeros(n) for i in range(n): output[i] = self.K(X.loc[i, X.columns[1]], x) def K(self, item1, item2): raise NotImplementedError("Method K not implemented") class SumKernel: def __init__(self): self.train_phi = list() self.K_matrix = None self.test_phi = None self.X_train = None pass def build_gram_matrix(self, X): raise NotImplementedError("Method build_gram_matrix_sum not implemented.") def build_gram_matrix_one(self, X, param): raise NotImplementedError("Method build_gram_matrix not implemented.") def test(self, x): raise NotImplementedError("Method test not implemented.") def save_kernel(self, path): with open(path, "wb") as f: pickle.dump(self, f) @staticmethod def load_kernel(path): with open(path, "rb") as f: kernel_class = pickle.load(f) return kernel_class class SumKernelIPExplicitError(BaseException): pass class SumKernelIPExplicit(SumKernel): def __init__(self, lst_params): super().__init__() if not isinstance(lst_params, list): raise SumKernelIPExplicitError("If you want to use only one param, you should use the individual param " "class method.") self.lst_params = lst_params def build_gram_matrix(self, X): n = X.shape[0] output = np.zeros((n, n)) for params in self.lst_params: intermediate_output, train_phi = self.build_gram_matrix_one(X, params) self.train_phi.append(train_phi) output += intermediate_output self.K_matrix = output def build_gram_matrix_one(self, X, params): n = X.shape[0] output = np.zeros((n, n)) train_phi = list() for i in range(n): item = X.loc[i, X.columns[1]] train_phi.append(self.make_phi(item, True, params)) for i in range(n): for j 
in range(i, n): value = self.inner_product_phi(train_phi[i], train_phi[j]) output[i, j] = output[j, i] = value return output, train_phi def test(self, indice_x): n = len(self.train_phi[0]) output = np.zeros(n) for idx, params in enumerate(self.lst_params): current_output = 0 for i in range(n): current_output += self.inner_product_phi(self.train_phi[idx][i], self.test_phi[idx][indice_x]) return output def make_test_phi(self, X): n = X.shape[0] self.test_phi = [] for params in self.lst_params: current_test_phi = list() for i in range(n): item = X.loc[i, X.columns[1]] current_test_phi.append(self.make_phi(item, train=False, params=params)) self.test_phi.append(current_test_phi) return def make_phi(self, item, train=True, params=None): raise NotImplementedError("Method make_phi not implemented.") def inner_product_phi(self, phi1, phi2): raise NotImplementedError("Method inner_product_phi not implemented.")
def __init__(self): super().__init__() def build_gram_matrix(self, X): n = X.shape[0] output = np.zeros((n, n)) self.train_phi = list() for i in range(n): item = X.loc[i, X.columns[1]] self.train_phi.append(self.make_phi(item)) for i in range(n): for j in range(i, n): value = self.inner_product_phi(self.train_phi[i], self.train_phi[j]) output[i, j] = output[j, i] = value self.K_matrix = output def test(self, indice_x): n = len(self.train_phi) output = np.zeros(n) for i in range(n): output[i] = self.inner_product_phi(self.train_phi[i], self.test_phi[indice_x]) return output def make_test_phi(self, X): n = X.shape[0] self.test_phi = [] for i in range(n): item = X.loc[i, X.columns[1]] self.test_phi.append(self.make_phi(item, train=False)) return def make_phi(self, item, train=True): raise NotImplementedError("Method make_phi not implemented.") def inner_product_phi(self, phi1, phi2): raise NotImplementedError("Method inner_product_phi not implemented.")
grpcbenchmark.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: protobuff/grpcbenchmark.proto package protobuff import ( context "context" fmt "fmt" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" grpc "google.golang.org/grpc" math "math" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type AccountId struct { AccountId []byte `protobuf:"bytes,1,opt,name=accountId,proto3" json:"accountId,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *AccountId) Reset() { *m = AccountId{} } func (m *AccountId) String() string { return proto.CompactTextString(m) } func (*AccountId) ProtoMessage() {} func (*AccountId) Descriptor() ([]byte, []int) { return fileDescriptor_eadefbc03f358a67, []int{0} } func (m *AccountId) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_AccountId.Unmarshal(m, b) } func (m *AccountId) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_AccountId.Marshal(b, m, deterministic) } func (m *AccountId) XXX_Merge(src proto.Message) { xxx_messageInfo_AccountId.Merge(m, src) } func (m *AccountId) XXX_Size() int { return xxx_messageInfo_AccountId.Size(m) } func (m *AccountId) XXX_DiscardUnknown() { xxx_messageInfo_AccountId.DiscardUnknown(m) } var xxx_messageInfo_AccountId proto.InternalMessageInfo func (m *AccountId) GetAccountId() []byte { if m != nil { return m.AccountId } return nil } type Account struct { AccountId *AccountId `protobuf:"bytes,1,opt,name=accountId,proto3" json:"accountId,omitempty"` Name string 
`protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` Balance uint64 `protobuf:"varint,3,opt,name=balance,proto3" json:"balance,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *Account) Reset() { *m = Account{} } func (m *Account) String() string { return proto.CompactTextString(m) } func (*Account) ProtoMessage() {} func (*Account) Descriptor() ([]byte, []int) { return fileDescriptor_eadefbc03f358a67, []int{1} } func (m *Account) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Account.Unmarshal(m, b) } func (m *Account) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Account.Marshal(b, m, deterministic) } func (m *Account) XXX_Merge(src proto.Message) { xxx_messageInfo_Account.Merge(m, src) } func (m *Account) XXX_Size() int { return xxx_messageInfo_Account.Size(m) } func (m *Account) XXX_DiscardUnknown() { xxx_messageInfo_Account.DiscardUnknown(m) } var xxx_messageInfo_Account proto.InternalMessageInfo func (m *Account) GetAccountId() *AccountId { if m != nil { return m.AccountId } return nil } func (m *Account) GetName() string { if m != nil { return m.Name } return "" } func (m *Account) GetBalance() uint64 { if m != nil { return m.Balance } return 0 } func init() { proto.RegisterType((*AccountId)(nil), "protobuff.AccountId") proto.RegisterType((*Account)(nil), "protobuff.Account") } func init() { proto.RegisterFile("protobuff/grpcbenchmark.proto", fileDescriptor_eadefbc03f358a67) } var fileDescriptor_eadefbc03f358a67 = []byte{ // 204 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8e, 0xcd, 0x4e, 0x86, 0x30, 0x10, 0x45, 0x53, 0x25, 0x92, 0x0e, 0xae, 0x26, 0x2e, 0x1a, 0xa2, 0x09, 0x61, 0x85, 0x0b, 0x4b, 0x82, 0xf1, 0x01, 0xfc, 0x5b, 0xb8, 0xe5, 0x0d, 0xda, 0x5a, 0x0a, 0x41, 0x3a, 0xa4, 0x81, 0xf7, 0x37, 0xa9, 0x16, 0x8d, 0x7e, 0xbb, 0x39, 0x67, 0xe6, 0xf6, 0x16, 0x6e, 
0xd6, 0x40, 0x1b, 0xe9, 0x7d, 0x18, 0x5a, 0x17, 0x56, 0xa3, 0xad, 0x37, 0xe3, 0xa2, 0xc2, 0x2c, 0xa3, 0x47, 0x7e, 0xac, 0xcb, 0x3b, 0x37, 0x6d, 0xe3, 0xae, 0xa5, 0xa1, 0xa5, 0x75, 0xe4, 0xa8, 0x4d, 0xab, 0x48, 0x11, 0xe2, 0xf4, 0x95, 0xac, 0x6f, 0x81, 0x3f, 0x1a, 0x43, 0xbb, 0xdf, 0xde, 0xde, 0xf1, 0x1a, 0xb8, 0x4a, 0x20, 0x58, 0xc5, 0x9a, 0xcb, 0xfe, 0x47, 0xd4, 0x33, 0xe4, 0xdf, 0xa7, 0xd8, 0xfd, 0x3d, 0x2c, 0xba, 0x2b, 0x79, 0xfc, 0x41, 0x1e, 0x2f, 0xfe, 0x8a, 0x23, 0x42, 0xe6, 0xd5, 0x62, 0xc5, 0x59, 0xc5, 0x1a, 0xde, 0xc7, 0x19, 0x05, 0xe4, 0x5a, 0x7d, 0x28, 0x6f, 0xac, 0x38, 0xaf, 0x58, 0x93, 0xf5, 0x09, 0xbb, 0x17, 0x28, 0x9e, 0x29, 0xd8, 0x27, 0xe5, 0xe7, 0xc9, 0x3b, 0x7c, 0x80, 0xe2, 0xd5, 0x8c, 0x94, 0xfa, 0xf1, 0x7f, 0x59, 0x79, 0xc2, 0xe9, 0x8b, 0xa8, 0xee, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x6e, 0xc8, 0x85, 0x83, 0x3f, 0x01, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // CoreBankingClient is the client API for CoreBanking service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type CoreBankingClient interface { EchoAccount(ctx context.Context, in *Account, opts ...grpc.CallOption) (*Account, error) } type coreBankingClient struct { cc *grpc.ClientConn } func NewCoreBankingClient(cc *grpc.ClientConn) CoreBankingClient { return &coreBankingClient{cc} } func (c *coreBankingClient) EchoAccount(ctx context.Context, in *Account, opts ...grpc.CallOption) (*Account, error) { out := new(Account)
err := c.cc.Invoke(ctx, "/protobuff.CoreBanking/EchoAccount", in, out, opts...) if err != nil { return nil, err } return out, nil } // CoreBankingServer is the server API for CoreBanking service. type CoreBankingServer interface { EchoAccount(context.Context, *Account) (*Account, error) } func RegisterCoreBankingServer(s *grpc.Server, srv CoreBankingServer) { s.RegisterService(&_CoreBanking_serviceDesc, srv) } func _CoreBanking_EchoAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Account) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CoreBankingServer).EchoAccount(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/protobuff.CoreBanking/EchoAccount", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CoreBankingServer).EchoAccount(ctx, req.(*Account)) } return interceptor(ctx, in, info, handler) } var _CoreBanking_serviceDesc = grpc.ServiceDesc{ ServiceName: "protobuff.CoreBanking", HandlerType: (*CoreBankingServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "EchoAccount", Handler: _CoreBanking_EchoAccount_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "protobuff/grpcbenchmark.proto", }
rollup-stream.ts
import {SeqConfig} from "./seq-stream"; import {OfferStream} from "./offer-stream"; export class RollupStream<T extends object> extends OfferStream<T> {
private free = new Set<AsyncIterator<T>>(); private nextFree: Promise<AsyncIterator<T>>; private unFree = () => { this.nextFree = Promise.race(this.advanceAll()); this.free.clear(); }; constructor(streams: AsyncIterable<T>[], config: SeqConfig) { super(config); for (const stream of streams) this.free.add(stream[Symbol.asyncIterator]()); if (this.free.size === 0) throw new Error("rollup:zero"); this.nextFree = Promise.resolve(this.free.values().next().value); } protected onDemand() { if (this.free.size > 0) this.unFree(); else this.nextFree.then(this.unFree); } private async advance(iterator: AsyncIterator<T>) { try { this.demanded.add(iterator); const result = await iterator.next(); this.demanded.delete(iterator); if (!result.done) { this.free.add(iterator); const value = Object.assign({}, this.prevValue, result.value); this.offer({value, done: false}); } else if (this.demanded.size === 0 && this.free.size === 0) this.offer(result); return iterator; } catch (e) { console.error(e); return iterator; } } private* advanceAll() { for (const iterator of this.free.values()) { yield this.advance(iterator); } } } export function rollup<T1 extends object, T2 extends object>(streams: [ AsyncIterable<T1>, AsyncIterable<T2>], config?: SeqConfig): RollupStream<T1 & T2>; export function rollup<T1 extends object, T2 extends object, T3 extends object>(streams: [ AsyncIterable<T1>, AsyncIterable<T2>, AsyncIterable<T3>], config?: SeqConfig): RollupStream<T1 & T2 & T3>; export function rollup<T1 extends object, T2 extends object, T3 extends object, T4 extends object>(streams: [ AsyncIterable<T1>, AsyncIterable<T2>, AsyncIterable<T3>, AsyncIterable<T4>], config?: SeqConfig): RollupStream<T1 & T2 & T3 & T4>; export function rollup<T1 extends object, T2 extends object, T3 extends object, T4 extends object, T5 extends object>(streams: [ AsyncIterable<T1>, AsyncIterable<T2>, AsyncIterable<T3>, AsyncIterable<T4>, AsyncIterable<T5>], config?: SeqConfig): RollupStream<T1 & T2 & T3 & T4 & T5>; 
export function rollup<T1 extends object, T2 extends object, T3 extends object, T4 extends object, T5 extends object, T6 extends object>(streams: [ AsyncIterable<T1>, AsyncIterable<T2>, AsyncIterable<T3>, AsyncIterable<T4>, AsyncIterable<T5>, AsyncIterable<T6>], config?: SeqConfig): RollupStream<T1 & T2 & T3 & T4 & T5 & T6>; export function rollup<T1 extends object, T2 extends object, T3 extends object, T4 extends object, T5 extends object, T6 extends object, T7 extends object>(streams: [ AsyncIterable<T1>, AsyncIterable<T2>, AsyncIterable<T3>, AsyncIterable<T4>, AsyncIterable<T5>, AsyncIterable<T6>, AsyncIterable<T7>], config?: SeqConfig): RollupStream<T1 & T2 & T3 & T4 & T5 & T6 & T7>; export function rollup<T extends object>(streams: AsyncIterable<T>[], config: SeqConfig = {}) { return new RollupStream<T>(streams, config); }
protected prevValue?: T; private demanded = new Set<AsyncIterator<T>>();
knowledge_renderer.rs
use std::ops::Deref; use ecs::*; use game::*; use coord::Coord; pub trait KnowledgeRenderer { /// Resets any internal buffers fn reset_buffers(&mut self); /// Width of game window in cells fn width(&self) -> usize; /// Height of game window in cells fn height(&self) -> usize; /// Coordinate of top-left corner cell rendered in game window in world-space fn world_offset(&self) -> Coord; /// Given a coordinate in world-space, converts it into a coordinate in screen-space fn world_to_screen(&self, coord: Coord) -> Coord { coord - self.world_offset() } /// Offset required to make given coordinate in world-space appear in the centre of /// the game window fn centre_offset(&self, centre: Coord) -> Coord { centre - Coord::new(self.width() as isize / 2, self.height() as isize / 2) } /// Highest coordinate in world-space that appears in the game window fn world_limit(&self) -> Coord { self.world_offset() + Coord::new(self.width() as isize - 1, self.height() as isize - 1) } /// Returns true iff the given coordinate in world-space corresponds to a cell in /// the game window fn contains_world_coord(&self, coord: Coord) -> bool { coord >= self.world_offset() && coord < self.world_limit() } /// Update the contents of internal buffer of the contents of the game window. /// Does not update the display. fn update_game_window_buffer(&mut self, knowledge: &DrawableKnowledgeLevel, turn_id: u64, position: Coord); /// Returns the number of lines of the log that will be displayed at a time fn log_num_lines(&self) -> usize; /// Updates the contents of internal message log with the last `self.log_num_lines()` /// lines of the message log, translating into the given language. 
fn update_log_buffer(&mut self, messages: &MessageLog, language: &Box<Language>); /// Push the currently drawn content to the physical display fn publish(&mut self); /// Updates the game window with the contents of the internal buffer fn draw_game_window(&mut self); /// Updates the log with the contents of the internal message log fn draw_log(&mut self); /// Updates the hud based on a specified entity fn draw_hud_bottom(&mut self, entity: EntityRef, language: &Box<Language>); fn draw_hud(&mut self, entity: EntityRef, language: &Box<Language>); /// Updates the game window with the contents of the internal buffer /// drawing a specified overlay over the top fn draw_game_window_with_overlay(&mut self, overlay: &RenderOverlay); /// Display a fullscreen view of message log fn fullscreen_log(&mut self, message_log: &MessageLog, offset: usize, language: &Box<Language>); /// Number of lines in fullscreen message log view fn fullscreen_log_num_rows(&self) -> usize; /// Number of characters that fit in a single line of the fullscreen message log fn fullscreen_log_num_cols(&self) -> usize; /// Displays a message in fullscreen fn fullscreen_message(&mut self, message_type: MessageType, language: &Box<Language>) { let mut message = Message::new(); language.translate(message_type, &mut message); self.fullscreen_translated_message(&message, 0); } /// Wraps a message to fit in fullscreen fn fullscreen_wrap(&self, message: &Message, wrapped: &mut Vec<TextMessage>) { wrap_message(&message, self.fullscreen_log_num_cols(), wrapped); } /// Displays a translated message in fullscreen fn fullscreen_translated_message(&mut self, message: &Message, offset: usize) { let mut wrapped = Vec::new(); self.fullscreen_wrap(message, &mut wrapped); self.fullscreen_wrapped_translated_message(&wrapped, offset); } /// Displays a wrapped, translated message in fullscreen fn fullscreen_wrapped_translated_message(&mut self, wrapped: &Vec<TextMessage>, offset: usize); /// Display a fullscreen menu fn 
fullscreen_menu<T>(&mut self, prelude: Option<MessageType>, menu: &SelectMenu<T>, state: &SelectMenuState, language: &Box<Language>); fn publish_game_window(&mut self) { self.draw_game_window(); self.publish(); } fn publish_game_window_with_overlay(&mut self, overlay: &RenderOverlay) { self.draw_game_window_with_overlay(overlay); self.publish(); } fn publish_all_windows(&mut self, entity: EntityRef, language: &Box<Language>) { self.draw_game_window(); self.draw_log(); self.draw_hud(entity, language); self.publish(); } fn publish_all_windows_with_overlay(&mut self, entity: EntityRef, language: &Box<Language>, overlay: &RenderOverlay) { self.draw_game_window_with_overlay(overlay); self.draw_log(); self.draw_hud(entity, language); self.publish(); } fn update_and_publish_all_windows(&mut self, turn_id: u64, knowledge: &DrawableKnowledgeLevel, position: Coord, messages: &MessageLog, entity: EntityRef, language: &Box<Language>) { self.update_log_buffer(messages, language); self.update_game_window_buffer(knowledge, turn_id, position); self.draw_game_window(); self.draw_log(); self.draw_hud(entity, language); self.publish(); } fn update_and_publish_all_windows_with_overlay(&mut self, turn_id: u64, knowledge: &DrawableKnowledgeLevel, position: Coord, messages: &MessageLog, entity: EntityRef, language: &Box<Language>, overlay: &RenderOverlay) { self.update_log_buffer(messages, language); self.update_game_window_buffer(knowledge, turn_id, position); self.draw_game_window_with_overlay(overlay); self.draw_log(); self.draw_hud(entity, language); self.publish(); } fn update_and_publish_all_windows_for_entity(&mut self, turn_id: u64, level_id: LevelId, entity: EntityRef, language: &Box<Language>) { let knowledge = entity.drawable_knowledge_borrow().expect("Expected drawable_knowledge component"); let knowledge_level = knowledge.level(level_id); self.update_and_publish_all_windows(turn_id, knowledge_level, entity.position().expect("Expected position component"), 
entity.message_log_borrow().expect("Expected message_log component").deref(), entity, language); } fn update_and_publish_all_windows_for_entity_with_overlay(&mut self, turn_id: u64, level_id: LevelId, entity: EntityRef, language: &Box<Language>, overlay: &RenderOverlay) { let knowledge = entity.drawable_knowledge_borrow().expect("Expected drawable_knowledge component"); let knowledge_level = knowledge.level(level_id); self.update_and_publish_all_windows_with_overlay(turn_id, knowledge_level, entity.position().expect("Expected position component"), entity.message_log_borrow().expect("Expected message_log component").deref(), entity, language, overlay); } fn publish_fullscreen_menu<T>(&mut self, prelude: Option<MessageType>, menu: &SelectMenu<T>, state: &SelectMenuState, language: &Box<Language>) { self.fullscreen_menu(prelude, menu, state, language); self.publish(); } fn publish_fullscreen_menu_with_hud<T>(&mut self, prelude: Option<MessageType>, menu: &SelectMenu<T>, state: &SelectMenuState, language: &Box<Language>, entity: EntityRef) {
} fn publish_fullscreen_log(&mut self, message_log: &MessageLog, offset: usize, language: &Box<Language>) { self.fullscreen_log(message_log, offset, language); self.publish(); } fn publish_fullscreen_message(&mut self, message_type: MessageType, language: &Box<Language>) { self.fullscreen_message(message_type, language); self.publish(); } fn publish_fullscreen_translated_message(&mut self, message: &Message, offset: usize) { self.fullscreen_translated_message(message, offset); self.publish(); } }
self.fullscreen_menu(prelude, menu, state, language); self.draw_hud_bottom(entity, language); self.publish();
main.rs
#[global_allocator] static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; #[macro_use] extern crate clap; mod config; mod constants; mod utils; use libdoh::*; use crate::config::*; use crate::constants::*; use libdoh::reexports::tokio; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use std::time::Duration; fn
() { let mut runtime_builder = tokio::runtime::Builder::new_multi_thread(); runtime_builder.enable_all(); runtime_builder.thread_name("doh-proxy"); let runtime = runtime_builder.build().unwrap(); let mut globals = Globals { #[cfg(feature = "tls")] tls_cert_path: None, #[cfg(feature = "tls")] tls_cert_key_path: None, listen_address: LISTEN_ADDRESS.parse().unwrap(), local_bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), server_address: SERVER_ADDRESS.parse().unwrap(), path: PATH.to_string(), max_clients: MAX_CLIENTS, timeout: Duration::from_secs(TIMEOUT_SEC), clients_count: Default::default(), max_concurrent_streams: MAX_CONCURRENT_STREAMS, min_ttl: MIN_TTL, max_ttl: MAX_TTL, err_ttl: ERR_TTL, keepalive: true, disable_post: false, runtime_handle: runtime.handle().clone(), }; parse_opts(&mut globals); let doh = DoH { globals: Arc::new(globals), }; runtime.block_on(doh.entrypoint()).unwrap(); }
main
conftest.py
import pytest from ats.users.models import User from ats.users.tests.factories import UserFactory @pytest.fixture(autouse=True) def
(settings, tmpdir): settings.MEDIA_ROOT = tmpdir.strpath @pytest.fixture def user() -> User: return UserFactory()
media_storage
cli.py
import typer import uvicorn from .app import app from .config import settings cli = typer.Typer(name="fastapi_workshop API") @cli.command() def run( port: int = settings.server.port, host: str = settings.server.host, log_level: str = settings.server.log_level, reload: bool = settings.server.reload, ): # pragma: no cover """Run the API server.""" uvicorn.run( "fastapi_workshop.app:app", host=host, port=port, log_level=log_level, reload=reload, ) @cli.command() def
(): # pragma: no cover """Opens an interactive shell with objects auto imported""" _vars = { "app": app, "settings": settings, } typer.echo(f"Auto imports: {list(_vars.keys())}") try: from IPython import start_ipython start_ipython(argv=[], user_ns=_vars) except ImportError: import code code.InteractiveConsole(_vars).interact()
shell
inspect.go
// Copyright 2020 Nokia // Licensed under the BSD 3-Clause License. // SPDX-License-Identifier: BSD-3-Clause package cmd import ( "context" "encoding/json" "fmt" "os" "path/filepath" "sort" "strings" "github.com/olekukonko/tablewriter" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/srl-labs/containerlab/clab" "github.com/srl-labs/containerlab/runtime" "github.com/srl-labs/containerlab/types" ) var format string var details bool var all bool type containerDetails struct { LabName string `json:"lab_name,omitempty"` LabPath string `json:"labPath,omitempty"` Name string `json:"name,omitempty"` ContainerID string `json:"container_id,omitempty"` Image string `json:"image,omitempty"` Kind string `json:"kind,omitempty"` Group string `json:"group,omitempty"` State string `json:"state,omitempty"` IPv4Address string `json:"ipv4_address,omitempty"` IPv6Address string `json:"ipv6_address,omitempty"` } type BridgeDetails struct{} // inspectCmd represents the inspect command var inspectCmd = &cobra.Command{ Use: "inspect", Short: "inspect lab details", Long: "show details about a particular lab or all running labs\nreference: https://containerlab.srlinux.dev/cmd/inspect/", Aliases: []string{"ins", "i"}, PreRunE: sudoCheck, RunE: func(cmd *cobra.Command, args []string) error { if name == "" && topo == "" && !all { fmt.Println("provide either a lab name (--name) or a topology file path (--topo) or the flag --all") return nil } opts := []clab.ClabOption{ clab.WithTimeout(timeout), clab.WithRuntime(rt, &runtime.RuntimeConfig{ Debug: debug, Timeout: timeout, GracefulShutdown: graceful, }, ), } if topo != "" { opts = append(opts, clab.WithTopoFile(topo)) } c, err := clab.NewContainerLab(opts...) 
if err != nil { return fmt.Errorf("could not parse the topology file: %v", err) } if name == "" { name = c.Config.Name } ctx, cancel := context.WithCancel(context.Background()) defer cancel() var glabels []*types.GenericFilter if all { glabels = []*types.GenericFilter{{FilterType: "label", Field: "containerlab", Operator: "exists"}} } else { if name != "" { glabels = []*types.GenericFilter{{FilterType: "label", Match: name, Field: "containerlab", Operator: "="}} } else if topo != "" { glabels = []*types.GenericFilter{{FilterType: "label", Match: c.Config.Name, Field: "containerlab", Operator: "="}} } } containers, err := c.ListContainers(ctx, glabels) if err != nil { return fmt.Errorf("failed to list containers: %s", err) } if len(containers) == 0 { log.Println("no containers found") return nil } if details { b, err := json.MarshalIndent(containers, "", " ") if err != nil { return fmt.Errorf("failed to marshal containers struct: %v", err) } fmt.Println(string(b)) return nil } err = printContainerInspect(c, containers, format) return err }, } func init() { rootCmd.AddCommand(inspectCmd) inspectCmd.Flags().BoolVarP(&details, "details", "", false, "print all details of lab containers") inspectCmd.Flags().StringVarP(&format, "format", "f", "table", "output format. One of [table, json]") inspectCmd.Flags().BoolVarP(&all, "all", "a", false, "show all deployed containerlab labs") } func toTableData(det []containerDetails) [][]string { tabData := make([][]string, 0, len(det)) for i, d := range det { if all { tabData = append(tabData, []string{fmt.Sprintf("%d", i+1), d.LabPath, d.LabName, d.Name, d.ContainerID, d.Image, d.Kind, d.State, d.IPv4Address, d.IPv6Address}) continue } tabData = append(tabData, []string{fmt.Sprintf("%d", i+1), d.Name, d.ContainerID, d.Image, d.Kind, d.State, d.IPv4Address, d.IPv6Address}) } return tabData } func
(c *clab.CLab, containers []types.GenericContainer, format string) error { contDetails := make([]containerDetails, 0, len(containers)) // do not print published ports unless mysocketio kind is found printMysocket := false var mysocketCID string for _, cont := range containers { // get topo file path relative of the cwd cwd, _ := os.Getwd() path, _ := filepath.Rel(cwd, cont.Labels["clab-topo-file"]) cdet := containerDetails{ LabName: cont.Labels["containerlab"], LabPath: path, Image: cont.Image, State: cont.State, IPv4Address: getContainerIPv4(cont), IPv6Address: getContainerIPv6(cont), } cdet.ContainerID = cont.ShortID if len(cont.Names) > 0 { cdet.Name = strings.TrimLeft(cont.Names[0], "/") } if kind, ok := cont.Labels["clab-node-kind"]; ok { cdet.Kind = kind if kind == "mysocketio" { printMysocket = true mysocketCID = cont.ID } } if group, ok := cont.Labels["clab-node-group"]; ok { cdet.Group = group } contDetails = append(contDetails, cdet) } sort.Slice(contDetails, func(i, j int) bool { if contDetails[i].LabName == contDetails[j].LabName { return contDetails[i].Name < contDetails[j].Name } return contDetails[i].LabName < contDetails[j].LabName }) if format == "json" { b, err := json.MarshalIndent(contDetails, "", " ") if err != nil { return fmt.Errorf("failed to marshal container details: %v", err) } fmt.Println(string(b)) return nil } tabData := toTableData(contDetails) table := tablewriter.NewWriter(os.Stdout) header := []string{ "Lab Name", "Name", "Container ID", "Image", "Kind", "State", "IPv4 Address", "IPv6 Address", } if all { table.SetHeader(append([]string{"#", "Topo Path"}, header...)) } else { table.SetHeader(append([]string{"#"}, header[1:]...)) } table.SetAutoFormatHeaders(false) table.SetAutoWrapText(false) // merge cells with lab name and topo file path table.SetAutoMergeCellsByColumnIndex([]int{1, 2}) table.AppendBulk(tabData) table.Render() if !printMysocket { return nil } runtime := c.GlobalRuntime() stdout, stderr, err := 
runtime.Exec(context.Background(), mysocketCID, []string{"mysocketctl", "socket", "ls"}) if err != nil { return fmt.Errorf("failed to execute cmd: %v", err) } if len(stderr) > 0 { log.Infof("errors during listing mysocketio sockets: %s", string(stderr)) } fmt.Println("Published ports:") fmt.Println(string(stdout)) return nil } func getContainerIPv4(ctr types.GenericContainer) string { if ctr.NetworkSettings.IPv4addr == "" { return "N/A" } return fmt.Sprintf("%s/%d", ctr.NetworkSettings.IPv4addr, ctr.NetworkSettings.IPv4pLen) } func getContainerIPv6(ctr types.GenericContainer) string { if ctr.NetworkSettings.IPv6addr == "" { return "N/A" } return fmt.Sprintf("%s/%d", ctr.NetworkSettings.IPv6addr, ctr.NetworkSettings.IPv6pLen) }
printContainerInspect
zz_generated_pagers.go
//go:build go1.16 // +build go1.16 // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. package armmanagedapplications import ( "context" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "net/http" "reflect" ) // ApplicationClientListOperationsPager provides operations for iterating over paged responses. type ApplicationClientListOperationsPager struct { client *ApplicationClient current ApplicationClientListOperationsResponse err error requester func(context.Context) (*policy.Request, error) advancer func(context.Context, ApplicationClientListOperationsResponse) (*policy.Request, error) } // Err returns the last error encountered while paging. func (p *ApplicationClientListOperationsPager) Err() error { return p.err } // NextPage returns true if the pager advanced to the next page. // Returns false if there are no more pages or an error occurred. func (p *ApplicationClientListOperationsPager) NextPage(ctx context.Context) bool { var req *policy.Request var err error if !reflect.ValueOf(p.current).IsZero() { if p.current.OperationListResult.NextLink == nil || len(*p.current.OperationListResult.NextLink) == 0 { return false } req, err = p.advancer(ctx, p.current) } else { req, err = p.requester(ctx) } if err != nil { p.err = err return false } resp, err := p.client.pl.Do(req) if err != nil { p.err = err return false } if !runtime.HasStatusCode(resp, http.StatusOK) { p.err = runtime.NewResponseError(resp) return false } result, err := p.client.listOperationsHandleResponse(resp) if err != nil { p.err = err return false } p.current = result return true } // PageResponse returns the current ApplicationClientListOperationsResponse page. 
func (p *ApplicationClientListOperationsPager) PageResponse() ApplicationClientListOperationsResponse { return p.current } // ApplicationDefinitionsClientListByResourceGroupPager provides operations for iterating over paged responses. type ApplicationDefinitionsClientListByResourceGroupPager struct { client *ApplicationDefinitionsClient current ApplicationDefinitionsClientListByResourceGroupResponse err error requester func(context.Context) (*policy.Request, error) advancer func(context.Context, ApplicationDefinitionsClientListByResourceGroupResponse) (*policy.Request, error) } // Err returns the last error encountered while paging. func (p *ApplicationDefinitionsClientListByResourceGroupPager) Err() error { return p.err } // NextPage returns true if the pager advanced to the next page. // Returns false if there are no more pages or an error occurred. func (p *ApplicationDefinitionsClientListByResourceGroupPager) NextPage(ctx context.Context) bool { var req *policy.Request var err error if !reflect.ValueOf(p.current).IsZero() { if p.current.ApplicationDefinitionListResult.NextLink == nil || len(*p.current.ApplicationDefinitionListResult.NextLink) == 0 { return false } req, err = p.advancer(ctx, p.current) } else { req, err = p.requester(ctx) } if err != nil { p.err = err return false } resp, err := p.client.pl.Do(req) if err != nil { p.err = err return false } if !runtime.HasStatusCode(resp, http.StatusOK) { p.err = runtime.NewResponseError(resp) return false } result, err := p.client.listByResourceGroupHandleResponse(resp) if err != nil { p.err = err return false } p.current = result return true } // PageResponse returns the current ApplicationDefinitionsClientListByResourceGroupResponse page. func (p *ApplicationDefinitionsClientListByResourceGroupPager) PageResponse() ApplicationDefinitionsClientListByResourceGroupResponse { return p.current } // ApplicationsClientListByResourceGroupPager provides operations for iterating over paged responses. 
type ApplicationsClientListByResourceGroupPager struct { client *ApplicationsClient current ApplicationsClientListByResourceGroupResponse err error requester func(context.Context) (*policy.Request, error) advancer func(context.Context, ApplicationsClientListByResourceGroupResponse) (*policy.Request, error) } // Err returns the last error encountered while paging. func (p *ApplicationsClientListByResourceGroupPager) Err() error { return p.err } // NextPage returns true if the pager advanced to the next page. // Returns false if there are no more pages or an error occurred. func (p *ApplicationsClientListByResourceGroupPager) NextPage(ctx context.Context) bool { var req *policy.Request var err error if !reflect.ValueOf(p.current).IsZero() { if p.current.ApplicationListResult.NextLink == nil || len(*p.current.ApplicationListResult.NextLink) == 0 { return false } req, err = p.advancer(ctx, p.current) } else { req, err = p.requester(ctx) } if err != nil { p.err = err return false } resp, err := p.client.pl.Do(req) if err != nil { p.err = err return false } if !runtime.HasStatusCode(resp, http.StatusOK) { p.err = runtime.NewResponseError(resp) return false } result, err := p.client.listByResourceGroupHandleResponse(resp) if err != nil
p.current = result return true } // PageResponse returns the current ApplicationsClientListByResourceGroupResponse page. func (p *ApplicationsClientListByResourceGroupPager) PageResponse() ApplicationsClientListByResourceGroupResponse { return p.current } // ApplicationsClientListBySubscriptionPager provides operations for iterating over paged responses. type ApplicationsClientListBySubscriptionPager struct { client *ApplicationsClient current ApplicationsClientListBySubscriptionResponse err error requester func(context.Context) (*policy.Request, error) advancer func(context.Context, ApplicationsClientListBySubscriptionResponse) (*policy.Request, error) } // Err returns the last error encountered while paging. func (p *ApplicationsClientListBySubscriptionPager) Err() error { return p.err } // NextPage returns true if the pager advanced to the next page. // Returns false if there are no more pages or an error occurred. func (p *ApplicationsClientListBySubscriptionPager) NextPage(ctx context.Context) bool { var req *policy.Request var err error if !reflect.ValueOf(p.current).IsZero() { if p.current.ApplicationListResult.NextLink == nil || len(*p.current.ApplicationListResult.NextLink) == 0 { return false } req, err = p.advancer(ctx, p.current) } else { req, err = p.requester(ctx) } if err != nil { p.err = err return false } resp, err := p.client.pl.Do(req) if err != nil { p.err = err return false } if !runtime.HasStatusCode(resp, http.StatusOK) { p.err = runtime.NewResponseError(resp) return false } result, err := p.client.listBySubscriptionHandleResponse(resp) if err != nil { p.err = err return false } p.current = result return true } // PageResponse returns the current ApplicationsClientListBySubscriptionResponse page. func (p *ApplicationsClientListBySubscriptionPager) PageResponse() ApplicationsClientListBySubscriptionResponse { return p.current }
{ p.err = err return false }
domainReplicationTaskHandler.go
// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
package frontend import ( "errors" "github.com/uber-common/bark" "github.com/uber/cadence/.gen/go/replicator" "github.com/uber/cadence/.gen/go/shared" "github.com/uber/cadence/common" "github.com/uber/cadence/common/messaging" "github.com/uber/cadence/common/persistence" ) var ( // ErrInvalidDomainStatus is the error to indicate invalid domain status ErrInvalidDomainStatus = errors.New("invalid domain status attribute") ) // NOTE: the counterpart of domain replication receiving logic is in service/worker package type ( // DomainReplicator is the interface which can replicate the domain DomainReplicator interface { HandleTransmissionTask(domainOperation replicator.DomainOperation, info *persistence.DomainInfo, config *persistence.DomainConfig, replicationConfig *persistence.DomainReplicationConfig, configVersion int64, failoverVersion int64) error } domainReplicatorImpl struct { kafka messaging.Producer logger bark.Logger } ) // NewDomainReplicator create a new instance odf domain replicator func
(kafka messaging.Producer, logger bark.Logger) DomainReplicator { return &domainReplicatorImpl{ kafka: kafka, logger: logger, } } // HandleTransmissionTask handle transmission of the domain replication task func (domainReplicator *domainReplicatorImpl) HandleTransmissionTask(domainOperation replicator.DomainOperation, info *persistence.DomainInfo, config *persistence.DomainConfig, replicationConfig *persistence.DomainReplicationConfig, configVersion int64, failoverVersion int64) error { status, err := domainReplicator.convertDomainStatusToThrift(info.Status) if err != nil { return err } taskType := replicator.ReplicationTaskTypeDomain task := &replicator.DomainTaskAttributes{ DomainOperation: &domainOperation, ID: common.StringPtr(info.ID), Info: &shared.DomainInfo{ Name: common.StringPtr(info.Name), Status: status, Description: common.StringPtr(info.Description), OwnerEmail: common.StringPtr(info.OwnerEmail), }, Config: &shared.DomainConfiguration{ WorkflowExecutionRetentionPeriodInDays: common.Int32Ptr(config.Retention), EmitMetric: common.BoolPtr(config.EmitMetric), }, ReplicationConfig: &shared.DomainReplicationConfiguration{ ActiveClusterName: common.StringPtr(replicationConfig.ActiveClusterName), Clusters: domainReplicator.convertClusterReplicationConfigToThrift(replicationConfig.Clusters), }, ConfigVersion: common.Int64Ptr(configVersion), FailoverVersion: common.Int64Ptr(failoverVersion), } return domainReplicator.kafka.Publish(&replicator.ReplicationTask{ TaskType: &taskType, DomainTaskAttributes: task, }) } func (domainReplicator *domainReplicatorImpl) convertClusterReplicationConfigToThrift( input []*persistence.ClusterReplicationConfig) []*shared.ClusterReplicationConfiguration { output := []*shared.ClusterReplicationConfiguration{} for _, cluster := range input { clusterName := common.StringPtr(cluster.ClusterName) output = append(output, &shared.ClusterReplicationConfiguration{ClusterName: clusterName}) } return output } func (domainReplicator 
*domainReplicatorImpl) convertDomainStatusToThrift(input int) (*shared.DomainStatus, error) { switch input { case persistence.DomainStatusRegistered: output := shared.DomainStatusRegistered return &output, nil case persistence.DomainStatusDeprecated: output := shared.DomainStatusDeprecated return &output, nil default: return nil, ErrInvalidDomainStatus } }
NewDomainReplicator
aufs.go
// +build linux /* aufs driver directory structure . ├── layers // Metadata of layers │ ├── 1 │ ├── 2 │ └── 3 ├── diff // Content of the layer │ ├── 1 // Contains layers that need to be mounted for the id │ ├── 2 │ └── 3 └── mnt // Mount points for the rw layers to be mounted ├── 1 ├── 2 └── 3 */ package aufs import ( "bufio" "fmt" "io/ioutil" "os" "os/exec" "path" "strings" "sync" "syscall" "github.com/Sirupsen/logrus" "github.com/hyperhq/hypercli/daemon/graphdriver" "github.com/hyperhq/hypercli/pkg/archive" "github.com/hyperhq/hypercli/pkg/chrootarchive" "github.com/hyperhq/hypercli/pkg/directory" "github.com/hyperhq/hypercli/pkg/idtools" mountpk "github.com/hyperhq/hypercli/pkg/mount" "github.com/hyperhq/hypercli/pkg/stringid" "github.com/opencontainers/runc/libcontainer/label" ) var ( // ErrAufsNotSupported is returned if aufs is not supported by the host. ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") incompatibleFsMagic = []graphdriver.FsMagic{ graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, } backingFs = "<unknown>" enableDirpermLock sync.Once enableDirperm bool ) func init() { graphdriver.Register("aufs", Init) } type data struct { referenceCount int path string } // Driver contains information about the filesystem mounted. // root of the filesystem // sync.Mutex to protect against concurrent modifications // active maps mount id to the count type Driver struct { root string uidMaps []idtools.IDMap gidMaps []idtools.IDMap sync.Mutex // Protects concurrent modification to active active map[string]*data } // Init returns a new AUFS driver. // An error is returned if AUFS is not supported. 
func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { // Try to load the aufs kernel module if err := supportsAufs(); err != nil { return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(root) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } for _, magic := range incompatibleFsMagic { if fsMagic == magic { return nil, graphdriver.ErrIncompatibleFS } } paths := []string{ "mnt", "diff", "layers", } a := &Driver{ root: root, active: make(map[string]*data), uidMaps: uidMaps, gidMaps: gidMaps, } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return nil, err } // Create the root aufs driver dir and return // if it already exists // If not populate the dir structure if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { if os.IsExist(err) { return a, nil } return nil, err } if err := mountpk.MakePrivate(root); err != nil { return nil, err } // Populate the dir structure for _, p := range paths { if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { return nil, err } } return a, nil } // Return a nil error if the kernel supports aufs // We cannot modprobe because inside dind modprobe fails // to run func supportsAufs() error { // We can try to modprobe aufs first before looking at // proc/filesystems for when aufs is supported exec.Command("modprobe", "aufs").Run() f, err := os.Open("/proc/filesystems") if err != nil { return err } defer f.Close() s := bufio.NewScanner(f) for s.Scan() { if strings.Contains(s.Text(), "aufs") { return nil } } return ErrAufsNotSupported } func (a *Driver) rootPath() string { return a.root } func (*Driver) String() string { return "aufs" } // Status returns current information about the filesystem such as root directory, number of directories mounted, etc. 
func (a *Driver) Status() [][2]string { ids, _ := loadIds(path.Join(a.rootPath(), "layers")) return [][2]string{ {"Root Dir", a.rootPath()}, {"Backing Filesystem", backingFs}, {"Dirs", fmt.Sprintf("%d", len(ids))}, {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, } } // GetMetadata not implemented func (a *Driver) GetMetadata(id string) (map[string]string, error) { return nil, nil } // Exists returns true if the given id is registered with // this driver func (a *Driver) Exists(id string) bool { if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { return false } return true } // Create three folders for each id // mnt, layers, and diff func (a *Driver) Create(id, parent, mountLabel string) error { if err := a.createDirsFor(id); err != nil { return err } // Write the layers metadata f, err := os.Create(path.Join(a.rootPath(), "layers", id)) if err != nil { return err } defer f.Close() if parent != "" { ids, err := getParentIds(a.rootPath(), parent) if err != nil { return err } if _, err := fmt.Fprintln(f, parent); err != nil { return err } for _, i := range ids { if _, err := fmt.Fprintln(f, i); err != nil { return err } } } a.active[id] = &data{} return nil } func (a *Driver) createDirsFor(id string) error { paths := []string{ "mnt", "diff", } rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) if err != nil { return err } for _, p := range paths { if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { return err } } return nil } // Remove will unmount and remove the given id. 
func (a *Driver) Remove(id string) error { // Protect the a.active from concurrent access a.Lock() defer a.Unlock() m := a.active[id] if m != nil { if m.referenceCount > 0 { return nil } // Make sure the dir is umounted first if err := a.unmount(m); err != nil { return err } } tmpDirs := []string{ "mnt", "diff", } // Atomically remove each directory in turn by first moving it out of the // way (so that docker doesn't find it anymore) before doing removal of // the whole tree. for _, p := range tmpDirs { realPath := path.Join(a.rootPath(), p, id) tmpPath := path.Join(a.rootPath(), p, fmt.Sprintf("%s-removing", id)) if err := os.Rename(realPath, tmpPath); err != nil && !os.IsNotExist(err) { return err } defer os.RemoveAll(tmpPath) } // Remove the layers file for the id if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { return err } return nil } // Get returns the rootfs path for the id. // This will mount the dir at it's given path func (a *Driver) Get(id, mountLabel string) (string, error) { ids, err := getParentIds(a.rootPath(), id) if err != nil { if !os.IsNotExist(err) { return "", err } ids = []string{} } // Protect the a.active from concurrent access a.Lock() defer a.Unlock() m := a.active[id] if m == nil { m = &data{} a.active[id] = m } // If a dir does not have a parent ( no layers )do not try to mount // just return the diff path to the data m.path = path.Join(a.rootPath(), "diff", id) if len(ids) > 0 { m.path = path.Join(a.rootPath(), "mnt", id) if m.referenceCount == 0 { if err := a.mount(id, m, mountLabel); err != nil { return "", err } } } m.referenceCount++ return m.path, nil } // Put unmounts and updates list of active mounts. 
func (a *Driver) Put(id string) error { // Protect the a.active from concurrent access a.Lock() defer a.Unlock() m := a.active[id] if m == nil { // but it might be still here if a.Exists(id) { path := path.Join(a.rootPath(), "mnt", id) err := Unmount(path) if err != nil { logrus.Debugf("Failed to unmount %s aufs: %v", id, err) } } return nil } if count := m.referenceCount; count > 1 { m.referenceCount = count - 1 } else { ids, _ := getParentIds(a.rootPath(), id) // We only mounted if there are any parents if ids != nil && len(ids) > 0 { a.unmount(m) } delete(a.active, id) } return nil } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". func (a *Driver) Diff(id, parent string) (archive.Archive, error) { // AUFS doesn't need the parent layer to produce a diff. return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ Compression: archive.Uncompressed, ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, UIDMaps: a.uidMaps, GIDMaps: a.gidMaps, }) } // DiffPath returns path to the directory that contains files for the layer // differences. Used for direct access for tar-split. func (a *Driver) DiffPath(id string) (string, func() error, error) { return path.Join(a.rootPath(), "diff", id), func() error { return nil }, nil } func (a *Driver) applyDiff(id string, diff archive.Reader) error { return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ UIDMaps: a.uidMaps, GIDMaps: a.gidMaps, }) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (a *Driver) DiffSize(id, parent string) (size int64, err error) { // AUFS doesn't need the parent layer to calculate the diff size. 
return directory.Size(path.Join(a.rootPath(), "diff", id)) } // ApplyDiff extracts the changeset from the given diff into the // layer with the specified id and parent, returning the size of the // new layer in bytes. func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { // AUFS doesn't need the parent id to apply the diff. if err = a.applyDiff(id, diff); err != nil { return } return a.DiffSize(id, parent) } // Changes produces a list of changes between the specified layer // and its parent layer. If parent is "", then all changes will be ADD changes. func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { // AUFS doesn't have snapshots, so we need to get changes from all parent // layers. layers, err := a.getParentLayerPaths(id) if err != nil { return nil, err } return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) } func (a *Driver) getParentLayerPaths(id string) ([]string, error) { parentIds, err := getParentIds(a.rootPath(), id) if err != nil { return nil, err } layers := make([]string, len(parentIds)) // Get the diff paths for all the parent ids for i, p := range parentIds { layers[i] = path.Join(a.rootPath(), "diff", p) } return layers, nil } func (a *Driver) mount(id string, m *data, mountLabel string) error { // If the id is mounted or we get an error return if mounted, err := a.mounted(m); err != nil || mounted { return err } var ( target = m.path rw = path.Join(a.rootPath(), "diff", id) ) layers, err := a.getParentLayerPaths(id) if err != nil { return err } if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { return fmt.Errorf("error creating aufs mount to %s: %v", target, err) } return nil } func (a *Driver) unmount(m *data) error { if mounted, err := a.mounted(m); err != nil || !mounted { return err } return Unmount(m.path) } func (a *Driver) mounted(m *data) (bool, error) { return mountpk.Mounted(m.path) } // Cleanup aufs and unmount all mountpoints func (a *Driver) 
Cleanup() error { for id, m := range a.active { if err := a.unmount(m); err != nil { logrus.Errorf("Unmounting %s: %s", stringid.TruncateID(id), err) } } return mountpk.Unmount(a.root) } func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { defer func() { if err != nil { Unmount(target) } }() // Mount options are clipped to page size(4096 bytes
layers then these are remounted individually using append. offset := 54 if useDirperm() { offset += len("dirperm1") } b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) firstMount := true i := 0 for { for ; i < len(ro); i++ { layer := fmt.Sprintf(":%s=ro+wh", ro[i]) if firstMount { if bp+len(layer) > len(b) { break } bp += copy(b[bp:], layer) } else { data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { return } } } if firstMount { opts := "dio,xino=/dev/shm/aufs.xino" if useDirperm() { opts += ",dirperm1" } data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) if err = mount("none", target, "aufs", 0, data); err != nil { return } firstMount = false } if i == len(ro) { break } } return } // useDirperm checks dirperm1 mount option can be used with the current // version of aufs. func useDirperm() bool { enableDirpermLock.Do(func() { base, err := ioutil.TempDir("", "docker-aufs-base") if err != nil { logrus.Errorf("error checking dirperm1: %v", err) return } defer os.RemoveAll(base) union, err := ioutil.TempDir("", "docker-aufs-union") if err != nil { logrus.Errorf("error checking dirperm1: %v", err) return } defer os.RemoveAll(union) opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) if err := mount("none", union, "aufs", 0, opts); err != nil { return } enableDirperm = true if err := Unmount(union); err != nil { logrus.Errorf("error checking dirperm1: failed to unmount %v", err) } }) return enableDirperm }
). If there are more //
db7.ts
import enabledModules from '../src/modules' const program = require('commander') const config = require('config') const common = require('../migrations/.common') const es = require('../src/lib/elastic') const { aggregateElasticSearchSchema } = require('../src/lib/module/index') const aggregatedSchema = aggregateElasticSearchSchema(enabledModules, { config }) program .command('rebuild') .option('-i|--indexName <indexName>', 'name of the Elasticsearch index', config.elasticsearch.indices[0]) .action((cmd) => { // TODO: add parallel processing
if (!cmd.indexName) { console.error('error: indexName must be specified'); process.exit(1); } let waitingCounter = 0 for (var collectionName in aggregatedSchema.schemas) { console.log('** Hello! I am going to rebuild EXISTING ES index to fix the schema') const originalIndex = cmd.indexName + '_' + collectionName; const tempIndex = originalIndex + '_' + Math.round(+new Date() / 1000) console.log(`** Creating temporary index ${tempIndex}`) es.createIndex(common.db, tempIndex, aggregatedSchema.schemas[collectionName], (err) => { if (err) { console.log(err) } console.log(`** We will reindex ${originalIndex} with the current schema`) es.reIndex(common.db, originalIndex, tempIndex, (err) => { if (err) { console.log(err) } console.log('** Removing the original index') es.deleteIndex(common.db, originalIndex, (err) => { if (err) { console.log(err) } console.log('** Creating alias') es.putAlias(common.db, tempIndex, originalIndex, (err) => { waitingCounter++ }) }) }) }) } setInterval(() => { if (waitingCounter === Object.keys(aggregatedSchema.schemas).length) process.exit(0) }, 1000) }) program .command('new') .option('-i|--indexName <indexName>', 'name of the Elasticsearch index', config.elasticsearch.indices[0]) .action((cmd) => { // TODO: add parallel processing if (!cmd.indexName) { console.error('error: indexName must be specified'); process.exit(1); } console.log('** Hello! 
I am going to create NEW ES index') const indexName = cmd.indexName let waitingCounter = 0 for (var collectionName in aggregatedSchema.schemas) { es.createIndex(common.db, indexName + '_' + collectionName, aggregatedSchema.schemas[collectionName], (err) => { if (err) { console.log(err) } waitingCounter++ }) } setInterval(() => { if (waitingCounter === Object.keys(aggregatedSchema.schemas).length) process.exit(0) }, 1000) }) program .on('command:*', () => { console.error('Invalid command: %s\nSee --help for a list of available commands.', program.args.join(' ')); process.exit(1); }); program .parse(process.argv) process.on('unhandledRejection', (reason, p) => { console.error(`Unhandled Rejection at: Promise ${p}, reason: ${reason}`) // application specific logging, throwing an error, or other logic here }) process.on('uncaughtException', (exception) => { console.error(exception) // to see your exception details in the console // if you are on production, maybe you can send the exception details to your // email as well ? })
handler.go
/* Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package chaincode import ( "fmt" "io" "strconv" "strings" "sync" "time" "github.com/VoneChain-CS/fabric-gm/common/channelconfig" "github.com/VoneChain-CS/fabric-gm/common/flogging" commonledger "github.com/VoneChain-CS/fabric-gm/common/ledger" "github.com/VoneChain-CS/fabric-gm/core/aclmgmt/resources" "github.com/VoneChain-CS/fabric-gm/core/common/ccprovider" "github.com/VoneChain-CS/fabric-gm/core/common/privdata" "github.com/VoneChain-CS/fabric-gm/core/common/sysccprovider" "github.com/VoneChain-CS/fabric-gm/core/container/ccintf" "github.com/VoneChain-CS/fabric-gm/core/ledger" "github.com/VoneChain-CS/fabric-gm/core/scc" "github.com/golang/protobuf/proto" pb "github.com/hyperledger/fabric-protos-go/peer" "github.com/pkg/errors" ) var chaincodeLogger = flogging.MustGetLogger("chaincode") // An ACLProvider performs access control checks when invoking // chaincode. type ACLProvider interface { CheckACL(resName string, channelID string, idinfo interface{}) error } // A Registry is responsible for tracking handlers. type Registry interface { Register(*Handler) error Ready(string) Failed(string, error) Deregister(string) error } // An Invoker invokes chaincode. type Invoker interface { Invoke(txParams *ccprovider.TransactionParams, chaincodeName string, spec *pb.ChaincodeInput) (*pb.ChaincodeMessage, error) } // TransactionRegistry tracks active transactions for each channel. type TransactionRegistry interface { Add(channelID, txID string) bool Remove(channelID, txID string) } // A ContextRegistry is responsible for managing transaction contexts. type ContextRegistry interface { Create(txParams *ccprovider.TransactionParams) (*TransactionContext, error) Get(channelID, txID string) *TransactionContext Delete(channelID, txID string) Close() } // QueryResponseBuilder is responsible for building QueryResponse messages for query // transactions initiated by chaincode. 
type QueryResponseBuilder interface { BuildQueryResponse(txContext *TransactionContext, iter commonledger.ResultsIterator, iterID string, isPaginated bool, totalReturnLimit int32) (*pb.QueryResponse, error) } // LedgerGetter is used to get ledgers for chaincode. type LedgerGetter interface { GetLedger(cid string) ledger.PeerLedger } // UUIDGenerator is responsible for creating unique query identifiers. type UUIDGenerator interface { New() string } type UUIDGeneratorFunc func() string func (u UUIDGeneratorFunc) New() string { return u() } // ApplicationConfigRetriever to retrieve the application configuration for a channel type ApplicationConfigRetriever interface { // GetApplicationConfig returns the channelconfig.Application for the channel // and whether the Application config exists GetApplicationConfig(cid string) (channelconfig.Application, bool) } // Handler implements the peer side of the chaincode stream. type Handler struct { // Keepalive specifies the interval at which keep-alive messages are sent. Keepalive time.Duration // TotalQueryLimit specifies the maximum number of results to return for // chaincode queries. TotalQueryLimit int // Invoker is used to invoke chaincode. Invoker Invoker // Registry is used to track active handlers. Registry Registry // ACLProvider is used to check if a chaincode invocation should be allowed. ACLProvider ACLProvider // TXContexts is a collection of TransactionContext instances // that are accessed by channel name and transaction ID. TXContexts ContextRegistry // activeTransactions holds active transaction identifiers. 
ActiveTransactions TransactionRegistry // BuiltinSCCs can be used to determine if a name is associated with a system chaincode BuiltinSCCs scc.BuiltinSCCs // QueryResponseBuilder is used to build query responses QueryResponseBuilder QueryResponseBuilder // LedgerGetter is used to get the ledger associated with a channel LedgerGetter LedgerGetter // DeployedCCInfoProvider is used to initialize the Collection Store DeployedCCInfoProvider ledger.DeployedChaincodeInfoProvider // UUIDGenerator is used to generate UUIDs UUIDGenerator UUIDGenerator // AppConfig is used to retrieve the application config for a channel AppConfig ApplicationConfigRetriever // Metrics holds chaincode handler metrics Metrics *HandlerMetrics // state holds the current handler state. It will be created, established, or // ready. state State // chaincodeID holds the ID of the chaincode that registered with the peer. chaincodeID string // serialLock is used to serialize sends across the grpc chat stream. serialLock sync.Mutex // chatStream is the bidirectional grpc stream used to communicate with the // chaincode instance. chatStream ccintf.ChaincodeStream // errChan is used to communicate errors from the async send to the receive loop errChan chan error // mutex is used to serialze the stream closed chan. mutex sync.Mutex // streamDoneChan is closed when the chaincode stream terminates. streamDoneChan chan struct{} } // handleMessage is called by ProcessStream to dispatch messages. 
// handleMessage routes an inbound ChaincodeMessage according to the handler's
// current state; KEEPALIVE messages are dropped without further processing.
func (h *Handler) handleMessage(msg *pb.ChaincodeMessage) error {
	chaincodeLogger.Debugf("[%s] Fabric side handling ChaincodeMessage of type: %s in state %s", shorttxid(msg.Txid), msg.Type, h.state)

	if msg.Type == pb.ChaincodeMessage_KEEPALIVE {
		return nil
	}
	switch h.state {
	case Created:
		return h.handleMessageCreatedState(msg)
	case Ready:
		return h.handleMessageReadyState(msg)
	default:
		return errors.Errorf("handle message: invalid state %s for transaction %s", h.state, msg.Txid)
	}
}

// handleMessageCreatedState accepts only REGISTER while the handler is in the
// Created state; anything else is an error.
func (h *Handler) handleMessageCreatedState(msg *pb.ChaincodeMessage) error {
	switch msg.Type {
	case pb.ChaincodeMessage_REGISTER:
		h.HandleRegister(msg)
	default:
		return fmt.Errorf("[%s] Fabric side handler cannot handle message (%s) while in created state", msg.Txid, msg.Type)
	}
	return nil
}

// handleMessageReadyState dispatches messages in the Ready state:
// COMPLETED/ERROR notify the waiting transaction context; all state-access
// requests are handled on a new goroutine via HandleTransaction.
func (h *Handler) handleMessageReadyState(msg *pb.ChaincodeMessage) error {
	switch msg.Type {
	case pb.ChaincodeMessage_COMPLETED, pb.ChaincodeMessage_ERROR:
		h.Notify(msg)
	case pb.ChaincodeMessage_PUT_STATE:
		go h.HandleTransaction(msg, h.HandlePutState)
	case pb.ChaincodeMessage_DEL_STATE:
		go h.HandleTransaction(msg, h.HandleDelState)
	case pb.ChaincodeMessage_INVOKE_CHAINCODE:
		go h.HandleTransaction(msg, h.HandleInvokeChaincode)
	case pb.ChaincodeMessage_GET_STATE:
		go h.HandleTransaction(msg, h.HandleGetState)
	case pb.ChaincodeMessage_GET_STATE_BY_RANGE:
		go h.HandleTransaction(msg, h.HandleGetStateByRange)
	case pb.ChaincodeMessage_GET_QUERY_RESULT:
		go h.HandleTransaction(msg, h.HandleGetQueryResult)
	case pb.ChaincodeMessage_GET_HISTORY_FOR_KEY:
		go h.HandleTransaction(msg, h.HandleGetHistoryForKey)
	case pb.ChaincodeMessage_QUERY_STATE_NEXT:
		go h.HandleTransaction(msg, h.HandleQueryStateNext)
	case pb.ChaincodeMessage_QUERY_STATE_CLOSE:
		go h.HandleTransaction(msg, h.HandleQueryStateClose)
	case pb.ChaincodeMessage_GET_PRIVATE_DATA_HASH:
		go h.HandleTransaction(msg, h.HandleGetPrivateDataHash)
	case pb.ChaincodeMessage_GET_STATE_METADATA:
		go h.HandleTransaction(msg, h.HandleGetStateMetadata)
	case pb.ChaincodeMessage_PUT_STATE_METADATA:
		go h.HandleTransaction(msg, h.HandlePutStateMetadata)
	default:
		return fmt.Errorf("[%s] Fabric side handler cannot handle message (%s) while in ready state", msg.Txid, msg.Type)
	}
	return nil
}

// MessageHandler is the interface form of a per-message-type handler.
type MessageHandler interface {
	Handle(*pb.ChaincodeMessage, *TransactionContext) (*pb.ChaincodeMessage, error)
}

// handleFunc is the function form used as the HandleTransaction delegate.
type handleFunc func(*pb.ChaincodeMessage, *TransactionContext) (*pb.ChaincodeMessage, error)

// HandleTransaction is a middleware function that obtains and verifies a transaction
// context prior to forwarding the message to the provided delegate. Response messages
// returned by the delegate are sent to the chat stream. Any errors returned by the
// delegate are packaged as chaincode error messages.
func (h *Handler) HandleTransaction(msg *pb.ChaincodeMessage, delegate handleFunc) {
	chaincodeLogger.Debugf("[%s] handling %s from chaincode", shorttxid(msg.Txid), msg.Type.String())
	// refuse duplicate in-flight requests for the same (channel, txid)
	if !h.registerTxid(msg) {
		return
	}

	startTime := time.Now()
	var txContext *TransactionContext
	var err error
	if msg.Type == pb.ChaincodeMessage_INVOKE_CHAINCODE {
		txContext, err = h.getTxContextForInvoke(msg.ChannelId, msg.Txid, msg.Payload, "")
	} else {
		txContext, err = h.isValidTxSim(msg.ChannelId, msg.Txid, "no ledger context")
	}

	meterLabels := []string{
		"type", msg.Type.String(),
		"channel", msg.ChannelId,
		"chaincode", h.chaincodeID,
	}
	h.Metrics.ShimRequestsReceived.With(meterLabels...).Add(1)

	var resp *pb.ChaincodeMessage
	if err == nil {
		resp, err = delegate(msg, txContext)
	}

	if err != nil {
		// any delegate failure is converted into an ERROR message for the shim
		err = errors.Wrapf(err, "%s failed: transaction ID: %s", msg.Type, msg.Txid)
		chaincodeLogger.Errorf("[%s] Failed to handle %s. error: %+v", shorttxid(msg.Txid), msg.Type, err)
		resp = &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_ERROR, Payload: []byte(err.Error()), Txid: msg.Txid, ChannelId: msg.ChannelId}
	}

	chaincodeLogger.Debugf("[%s] Completed %s. Sending %s", shorttxid(msg.Txid), msg.Type, resp.Type)
	h.ActiveTransactions.Remove(msg.ChannelId, msg.Txid)
	h.serialSendAsync(resp)

	meterLabels = append(meterLabels, "success", strconv.FormatBool(resp.Type != pb.ChaincodeMessage_ERROR))
	h.Metrics.ShimRequestDuration.With(meterLabels...).Observe(time.Since(startTime).Seconds())
	h.Metrics.ShimRequestsCompleted.With(meterLabels...).Add(1)
}

// shorttxid returns at most the first 8 characters of txid, for log brevity.
func shorttxid(txid string) string {
	if len(txid) < 8 {
		return txid
	}
	return txid[0:8]
}

// ParseName parses a chaincode name into a ChaincodeInstance. The name should
// be of the form "chaincode-name:version/channel-name" with optional elements.
func ParseName(ccName string) *sysccprovider.ChaincodeInstance {
	ci := &sysccprovider.ChaincodeInstance{}

	z := strings.SplitN(ccName, "/", 2)
	if len(z) == 2 {
		ci.ChannelID = z[1]
	}
	z = strings.SplitN(z[0], ":", 2)
	if len(z) == 2 {
		ci.ChaincodeVersion = z[1]
	}
	ci.ChaincodeName = z[0]

	return ci
}

// serialSend serializes msgs so gRPC will be happy
func (h *Handler) serialSend(msg *pb.ChaincodeMessage) error {
	h.serialLock.Lock()
	defer h.serialLock.Unlock()

	if err := h.chatStream.Send(msg); err != nil {
		err = errors.WithMessagef(err, "[%s] error sending %s", shorttxid(msg.Txid), msg.Type)
		chaincodeLogger.Errorf("%+v", err)
		return err
	}
	return nil
}

// serialSendAsync serves the same purpose as serialSend (serialize msgs so gRPC will
// be happy). In addition, it is also asynchronous so send-remoterecv--localrecv loop
// can be nonblocking. Only errors need to be handled and these are handled by
// communication on supplied error channel. A typical use will be a non-blocking or
// nil channel
func (h *Handler) serialSendAsync(msg *pb.ChaincodeMessage) {
	go func() {
		if err := h.serialSend(msg); err != nil {
			// provide an error response to the caller
			resp := &pb.ChaincodeMessage{
				Type:      pb.ChaincodeMessage_ERROR,
				Payload:   []byte(err.Error()),
				Txid:      msg.Txid,
				ChannelId: msg.ChannelId,
			}
			h.Notify(resp)

			// surface send error to stream processing
			h.errChan <- err
		}
	}()
}

// checkACL checks if the transactor is allowed to call this chaincode on this
// channel. NOTE(review): the proposal parameter is not used by this body.
func (h *Handler) checkACL(signedProp *pb.SignedProposal, proposal *pb.Proposal, ccIns *sysccprovider.ChaincodeInstance) error {
	// if we are here, all we know is that the invoked chaincode is either
	// - a system chaincode that *is* invokable through a cc2cc
	//   (but we may still have to determine whether the invoker can perform this invocation)
	// - an application chaincode
	//   (and we still need to determine whether the invoker can invoke it)

	if h.BuiltinSCCs.IsSysCC(ccIns.ChaincodeName) {
		// Allow this call
		return nil
	}

	// A Nil signedProp will be rejected for non-system chaincodes
	if signedProp == nil {
		return errors.Errorf("signed proposal must not be nil from caller [%s]", ccIns.String())
	}

	return h.ACLProvider.CheckACL(resources.Peer_ChaincodeToChaincode, ccIns.ChannelID, signedProp)
}

// deregister removes this handler from the registry under its chaincode ID.
func (h *Handler) deregister() {
	h.Registry.Deregister(h.chaincodeID)
}

// streamDone returns the channel that is closed when the stream terminates.
func (h *Handler) streamDone() <-chan struct{} {
	h.mutex.Lock()
	defer h.mutex.Unlock()
	return h.streamDoneChan
}

// ProcessStream runs the receive loop for the bidirectional chaincode stream.
// It returns (and deregisters the handler) on EOF, receive error, nil message,
// message-handling error, or an async-send error surfaced via errChan.
func (h *Handler) ProcessStream(stream ccintf.ChaincodeStream) error {
	defer h.deregister()

	h.mutex.Lock()
	h.streamDoneChan = make(chan struct{})
	h.mutex.Unlock()
	defer close(h.streamDoneChan)

	h.chatStream = stream
	h.errChan = make(chan error, 1)

	var keepaliveCh <-chan time.Time
	if h.Keepalive != 0 {
		ticker := time.NewTicker(h.Keepalive)
		defer ticker.Stop()
		keepaliveCh = ticker.C
	}

	// holds return values from gRPC Recv below
	type recvMsg struct {
		msg *pb.ChaincodeMessage
		err error
	}
	msgAvail := make(chan *recvMsg, 1)

	receiveMessage := func() {
		in, err := h.chatStream.Recv()
		msgAvail <- &recvMsg{in, err}
	}

	go receiveMessage()
	for {
		select {
		case rmsg := <-msgAvail:
			switch {
			// Defer the deregistering of this handler.
			case rmsg.err == io.EOF:
				chaincodeLogger.Debugf("received EOF, ending chaincode support stream: %s", rmsg.err)
				return rmsg.err
			case rmsg.err != nil:
				err := errors.Wrap(rmsg.err, "receive from chaincode support stream failed")
				chaincodeLogger.Debugf("%+v", err)
				return err
			case rmsg.msg == nil:
				err := errors.New("received nil message, ending chaincode support stream")
				chaincodeLogger.Debugf("%+v", err)
				return err
			default:
				err := h.handleMessage(rmsg.msg)
				if err != nil {
					err = errors.WithMessage(err, "error handling message, ending stream")
					chaincodeLogger.Errorf("[%s] %+v", shorttxid(rmsg.msg.Txid), err)
					return err
				}

				// only re-arm the receiver after the message was handled
				go receiveMessage()
			}

		case sendErr := <-h.errChan:
			err := errors.Wrapf(sendErr, "received error while sending message, ending chaincode support stream")
			chaincodeLogger.Errorf("%s", err)
			return err
		case <-keepaliveCh:
			// if no error message from serialSend, KEEPALIVE happy, and don't care about error
			// (maybe it'll work later)
			h.serialSendAsync(&pb.ChaincodeMessage{Type: pb.ChaincodeMessage_KEEPALIVE})
			continue
		}
	}
}

// sendReady sends READY to chaincode serially (just like REGISTER)
func (h *Handler) sendReady() error {
	chaincodeLogger.Debugf("sending READY for chaincode %s", h.chaincodeID)
	ccMsg := &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_READY}

	// if error in sending tear down the h
	if err := h.serialSend(ccMsg); err != nil {
		chaincodeLogger.Errorf("error sending READY (%s) for chaincode %s", err, h.chaincodeID)
		return err
	}

	h.state = Ready

	chaincodeLogger.Debugf("Changed to state ready for chaincode %s", h.chaincodeID)

	return nil
}

// notifyRegistry will send ready on registration success and
// update the launch state of the chaincode in the handler registry.
func (h *Handler) notifyRegistry(err error) {
	// a nil err means registration succeeded; READY is sent before marking ready
	if err == nil {
		err = h.sendReady()
	}

	if err != nil {
		h.Registry.Failed(h.chaincodeID, err)
		chaincodeLogger.Errorf("failed to start %s -- %s", h.chaincodeID, err)
		return
	}

	h.Registry.Ready(h.chaincodeID)
}

// HandleRegister is invoked when chaincode tries to register.
func (h *Handler) HandleRegister(msg *pb.ChaincodeMessage) {
	chaincodeLogger.Debugf("Received %s in state %s", msg.Type, h.state)
	chaincodeID := &pb.ChaincodeID{}
	err := proto.Unmarshal(msg.Payload, chaincodeID)
	if err != nil {
		chaincodeLogger.Errorf("Error in received %s, could NOT unmarshal registration info: %s", pb.ChaincodeMessage_REGISTER, err)
		return
	}

	// Now register with the chaincodeSupport
	// Note: chaincodeID.Name is actually of the form name:version for older chaincodes, and
	// of the form label:hash for newer chaincodes. Either way, it is the handle by which
	// we track the chaincode's registration.
	if chaincodeID.Name == "" {
		h.notifyRegistry(errors.New("error in handling register chaincode, chaincodeID name is empty"))
		return
	}
	h.chaincodeID = chaincodeID.Name
	err = h.Registry.Register(h)
	if err != nil {
		h.notifyRegistry(err)
		return
	}

	chaincodeLogger.Debugf("Got %s for chaincodeID = %s, sending back %s", pb.ChaincodeMessage_REGISTER, h.chaincodeID, pb.ChaincodeMessage_REGISTERED)
	if err := h.serialSend(&pb.ChaincodeMessage{Type: pb.ChaincodeMessage_REGISTERED}); err != nil {
		chaincodeLogger.Errorf("error sending %s: %s", pb.ChaincodeMessage_REGISTERED, err)
		h.notifyRegistry(err)
		return
	}

	h.state = Established
	chaincodeLogger.Debugf("Changed state to established for %s", h.chaincodeID)

	// for dev mode this will also move to ready automatically
	h.notifyRegistry(nil)
}

// Notify forwards a COMPLETED/ERROR message to the transaction context that is
// waiting on it and closes the context's open query iterators.
func (h *Handler) Notify(msg *pb.ChaincodeMessage) {
	tctx := h.TXContexts.Get(msg.ChannelId, msg.Txid)
	if tctx == nil {
		chaincodeLogger.Debugf("notifier Txid:%s, channelID:%s does not exist for handling message %s", msg.Txid, msg.ChannelId, msg.Type)
		return
	}

	chaincodeLogger.Debugf("[%s] notifying Txid:%s, channelID:%s", shorttxid(msg.Txid), msg.Txid, msg.ChannelId)
	tctx.ResponseNotifier <- msg
	tctx.CloseQueryIterators()
}

// is this a txid for which there is a valid txsim
func (h *Handler) isValidTxSim(channelID string, txid string, fmtStr string, args ...interface{}) (*TransactionContext, error) {
	txContext := h.TXContexts.Get(channelID, txid)
	if txContext == nil || txContext.TXSimulator == nil {
		err := errors.Errorf(fmtStr, args...)
		chaincodeLogger.Errorf("no ledger context: %s %s\n\n %+v", channelID, txid, err)
		return nil, err
	}
	return txContext, nil
}

// register Txid to prevent overlapping handle messages from chaincode
func (h *Handler) registerTxid(msg *pb.ChaincodeMessage) bool {
	// Check if this is the unique state request from this chaincode txid
	if h.ActiveTransactions.Add(msg.ChannelId, msg.Txid) {
		return true
	}

	// Log the issue and drop the request
	chaincodeLogger.Errorf("[%s] Another request pending for this CC: %s, Txid: %s, ChannelID: %s. Cannot process.", shorttxid(msg.Txid), h.chaincodeID, msg.Txid, msg.ChannelId)
	return false
}

// checkMetadataCap verifies that the channel's application capabilities enable
// key-level endorsement (required for the state-metadata APIs).
func (h *Handler) checkMetadataCap(msg *pb.ChaincodeMessage) error {
	ac, exists := h.AppConfig.GetApplicationConfig(msg.ChannelId)
	if !exists {
		return errors.Errorf("application config does not exist for %s", msg.ChannelId)
	}

	if !ac.Capabilities().KeyLevelEndorsement() {
		return errors.New("key level endorsement is not enabled, channel application capability of V1_3 or later is required")
	}
	return nil
}

// errorIfCreatorHasNoReadPermission returns an error unless the tx creator has
// read permission on the given private-data collection.
func errorIfCreatorHasNoReadPermission(chaincodeName, collection string, txContext *TransactionContext) error {
	rwPermission, err := getReadWritePermission(chaincodeName, collection, txContext)
	if err != nil {
		return err
	}
	if !rwPermission.read {
		return errors.Errorf("tx creator does not have read access permission on privatedata in chaincodeName:%s collectionName: %s", chaincodeName, collection)
	}
	return nil
}

// errorIfCreatorHasNoWritePermission returns an error unless the tx creator has
// write permission on the given private-data collection.
func errorIfCreatorHasNoWritePermission(chaincodeName, collection string, txContext *TransactionContext) error {
	rwPermission, err := getReadWritePermission(chaincodeName, collection, txContext)
	if err != nil {
		return err
	}
	if !rwPermission.write {
		return errors.Errorf("tx creator does not have write access permission on privatedata in chaincodeName:%s collectionName: %s", chaincodeName, collection)
	}
	return nil
}

// getReadWritePermission resolves (and caches per-simulation) the creator's
// read/write permission for a collection via the collection store.
func getReadWritePermission(chaincodeName, collection string, txContext *TransactionContext) (*readWritePermission, error) {
	// check to see if read access has already been checked in the scope of this chaincode simulation
	if rwPermission := txContext.CollectionACLCache.get(collection); rwPermission != nil {
		return rwPermission, nil
	}

	cc := privdata.CollectionCriteria{
		Channel:    txContext.ChannelID,
		Namespace:  chaincodeName,
		Collection: collection,
	}
	readP, writeP, err := txContext.CollectionStore.RetrieveReadWritePermission(cc, txContext.SignedProp, txContext.TXSimulator)
	if err != nil {
		return nil, err
	}
	rwPermission := &readWritePermission{read: readP, write: writeP}
	txContext.CollectionACLCache.put(collection, rwPermission)
	return rwPermission, nil
}

// Handles query to ledger to get state
func (h *Handler) HandleGetState(msg *pb.ChaincodeMessage, txContext *TransactionContext) (*pb.ChaincodeMessage, error) {
	getState := &pb.GetState{}
	err := proto.Unmarshal(msg.Payload, getState)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshal failed")
	}

	var res []byte
	namespaceID := txContext.NamespaceID
	collection := getState.Collection
	chaincodeLogger.Debugf("[%s] getting state for chaincode %s, key %s, channel %s", shorttxid(msg.Txid), namespaceID, getState.Key, txContext.ChannelID)

	if isCollectionSet(collection) {
		if txContext.IsInitTransaction {
			return nil, errors.New("private data APIs are not allowed in chaincode Init()")
		}
		if err := errorIfCreatorHasNoReadPermission(namespaceID, collection, txContext); err != nil {
			return nil, err
		}
		res, err = txContext.TXSimulator.GetPrivateData(namespaceID, collection, getState.Key)
	} else {
		res, err = txContext.TXSimulator.GetState(namespaceID, getState.Key)
	}
	if err != nil {
		return nil, errors.WithStack(err)
	}

	if res == nil {
		chaincodeLogger.Debugf("[%s] No state associated with key: %s. Sending %s with an empty payload", shorttxid(msg.Txid), getState.Key, pb.ChaincodeMessage_RESPONSE)
	}

	// Send response msg back to chaincode. GetState will not trigger event
	return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: res, Txid: msg.Txid, ChannelId: msg.ChannelId}, nil
}

// HandleGetPrivateDataHash returns the hash of a private-data value; unlike the
// private-data reads above, no collection ACL check is performed here.
func (h *Handler) HandleGetPrivateDataHash(msg *pb.ChaincodeMessage, txContext *TransactionContext) (*pb.ChaincodeMessage, error) {
	getState := &pb.GetState{}
	err := proto.Unmarshal(msg.Payload, getState)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshal failed")
	}

	var res []byte
	namespaceID := txContext.NamespaceID
	collection := getState.Collection
	chaincodeLogger.Debugf("[%s] getting private data hash for chaincode %s, key %s, channel %s", shorttxid(msg.Txid), namespaceID, getState.Key, txContext.ChannelID)
	if txContext.IsInitTransaction {
		return nil, errors.New("private data APIs are not allowed in chaincode Init()")
	}
	res, err = txContext.TXSimulator.GetPrivateDataHash(namespaceID, collection, getState.Key)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	if res == nil {
		chaincodeLogger.Debugf("[%s] No state associated with key: %s. Sending %s with an empty payload", shorttxid(msg.Txid), getState.Key, pb.ChaincodeMessage_RESPONSE)
	}

	// Send response msg back to chaincode. GetState will not trigger event
	return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: res, Txid: msg.Txid, ChannelId: msg.ChannelId}, nil
}

// Handles query to ledger to get state metadata
func (h *Handler) HandleGetStateMetadata(msg *pb.ChaincodeMessage, txContext *TransactionContext) (*pb.ChaincodeMessage, error) {
	err := h.checkMetadataCap(msg)
	if err != nil {
		return nil, err
	}

	getStateMetadata := &pb.GetStateMetadata{}
	err = proto.Unmarshal(msg.Payload, getStateMetadata)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshal failed")
	}

	namespaceID := txContext.NamespaceID
	collection := getStateMetadata.Collection
	chaincodeLogger.Debugf("[%s] getting state metadata for chaincode %s, key %s, channel %s", shorttxid(msg.Txid), namespaceID, getStateMetadata.Key, txContext.ChannelID)

	var metadata map[string][]byte
	if isCollectionSet(collection) {
		if txContext.IsInitTransaction {
			return nil, errors.New("private data APIs are not allowed in chaincode Init()")
		}
		if err := errorIfCreatorHasNoReadPermission(namespaceID, collection, txContext); err != nil {
			return nil, err
		}
		metadata, err = txContext.TXSimulator.GetPrivateDataMetadata(namespaceID, collection, getStateMetadata.Key)
	} else {
		metadata, err = txContext.TXSimulator.GetStateMetadata(namespaceID, getStateMetadata.Key)
	}
	if err != nil {
		return nil, errors.WithStack(err)
	}

	// NOTE(review): map iteration order is unspecified, so Entries ordering
	// varies between calls — confirm callers do not rely on a stable order.
	var metadataResult pb.StateMetadataResult
	for metakey := range metadata {
		md := &pb.StateMetadata{Metakey: metakey, Value: metadata[metakey]}
		metadataResult.Entries = append(metadataResult.Entries, md)
	}
	res, err := proto.Marshal(&metadataResult)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	// Send response msg back to chaincode. GetState will not trigger event
	return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: res, Txid: msg.Txid, ChannelId: msg.ChannelId}, nil
}

// Handles query to ledger to range query state
func (h *Handler) HandleGetStateByRange(msg *pb.ChaincodeMessage, txContext *TransactionContext) (*pb.ChaincodeMessage, error) {
	getStateByRange := &pb.GetStateByRange{}
	err := proto.Unmarshal(msg.Payload, getStateByRange)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshal failed")
	}

	metadata, err := getQueryMetadataFromBytes(getStateByRange.Metadata)
	if err != nil {
		return nil, err
	}

	totalReturnLimit := h.calculateTotalReturnLimit(metadata)

	iterID := h.UUIDGenerator.New()

	var rangeIter commonledger.ResultsIterator
	isPaginated := false

	namespaceID := txContext.NamespaceID
	collection := getStateByRange.Collection
	if isCollectionSet(collection) {
		if txContext.IsInitTransaction {
			return nil, errors.New("private data APIs are not allowed in chaincode Init()")
		}
		if err := errorIfCreatorHasNoReadPermission(namespaceID, collection, txContext); err != nil {
			return nil, err
		}
		rangeIter, err = txContext.TXSimulator.GetPrivateDataRangeScanIterator(namespaceID, collection, getStateByRange.StartKey, getStateByRange.EndKey)
	} else if isMetadataSetForPagination(metadata) {
		isPaginated = true
		startKey := getStateByRange.StartKey
		// NOTE(review): this inner isMetadataSetForPagination check is redundant —
		// we are already inside the `else if isMetadataSetForPagination(metadata)` branch.
		if isMetadataSetForPagination(metadata) {
			if metadata.Bookmark != "" {
				// resume the scan from the client-supplied bookmark
				startKey = metadata.Bookmark
			}
		}
		rangeIter, err = txContext.TXSimulator.GetStateRangeScanIteratorWithPagination(namespaceID, startKey, getStateByRange.EndKey, metadata.PageSize)
	} else {
		rangeIter, err = txContext.TXSimulator.GetStateRangeScanIterator(namespaceID, getStateByRange.StartKey, getStateByRange.EndKey)
	}
	if err != nil {
		return nil, errors.WithStack(err)
	}

	txContext.InitializeQueryContext(iterID, rangeIter)
	payload, err := h.QueryResponseBuilder.BuildQueryResponse(txContext, rangeIter, iterID, isPaginated, totalReturnLimit)
	if err != nil {
		txContext.CleanupQueryContext(iterID)
		return nil, errors.WithStack(err)
	}

	payloadBytes, err := proto.Marshal(payload)
	if err != nil {
		txContext.CleanupQueryContext(iterID)
		return nil, errors.Wrap(err, "marshal failed")
	}

	chaincodeLogger.Debugf("Got keys and values. Sending %s", pb.ChaincodeMessage_RESPONSE)
	return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: payloadBytes, Txid: msg.Txid, ChannelId: msg.ChannelId}, nil
}

// Handles query to ledger for query state next
func (h *Handler) HandleQueryStateNext(msg *pb.ChaincodeMessage, txContext *TransactionContext) (*pb.ChaincodeMessage, error) {
	queryStateNext := &pb.QueryStateNext{}
	err := proto.Unmarshal(msg.Payload, queryStateNext)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshal failed")
	}

	queryIter := txContext.GetQueryIterator(queryStateNext.Id)
	if queryIter == nil {
		return nil, errors.New("query iterator not found")
	}

	totalReturnLimit := h.calculateTotalReturnLimit(nil)

	payload, err := h.QueryResponseBuilder.BuildQueryResponse(txContext, queryIter, queryStateNext.Id, false, totalReturnLimit)
	if err != nil {
		txContext.CleanupQueryContext(queryStateNext.Id)
		return nil, errors.WithStack(err)
	}

	payloadBytes, err := proto.Marshal(payload)
	if err != nil {
		txContext.CleanupQueryContext(queryStateNext.Id)
		return nil, errors.Wrap(err, "marshal failed")
	}
	return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: payloadBytes, Txid: msg.Txid, ChannelId: msg.ChannelId}, nil
}

// Handles the closing of a state iterator
func (h *Handler) HandleQueryStateClose(msg *pb.ChaincodeMessage, txContext *TransactionContext) (*pb.ChaincodeMessage, error) {
	queryStateClose := &pb.QueryStateClose{}
	err := proto.Unmarshal(msg.Payload, queryStateClose)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshal failed")
	}

	// closing an unknown iterator is not an error; respond with HasMore=false
	iter := txContext.GetQueryIterator(queryStateClose.Id)
	if iter != nil {
		txContext.CleanupQueryContext(queryStateClose.Id)
	}

	payload := &pb.QueryResponse{HasMore: false, Id: queryStateClose.Id}
	payloadBytes, err := proto.Marshal(payload)
	if err != nil {
		return nil, errors.Wrap(err, "marshal failed")
	}
	return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: payloadBytes, Txid: msg.Txid, ChannelId: msg.ChannelId}, nil
}

// Handles query to ledger to execute query state
func (h *Handler) HandleGetQueryResult(msg *pb.ChaincodeMessage, txContext *TransactionContext) (*pb.ChaincodeMessage, error) {
	iterID := h.UUIDGenerator.New()

	getQueryResult := &pb.GetQueryResult{}
	err := proto.Unmarshal(msg.Payload, getQueryResult)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshal failed")
	}

	metadata, err := getQueryMetadataFromBytes(getQueryResult.Metadata)
	if err != nil {
		return nil, err
	}

	totalReturnLimit := h.calculateTotalReturnLimit(metadata)
	isPaginated := false

	var executeIter commonledger.ResultsIterator
	namespaceID := txContext.NamespaceID
	collection := getQueryResult.Collection
	if isCollectionSet(collection) {
		if txContext.IsInitTransaction {
			return nil, errors.New("private data APIs are not allowed in chaincode Init()")
		}
		if err := errorIfCreatorHasNoReadPermission(namespaceID, collection, txContext); err != nil {
			return nil, err
		}
		executeIter, err = txContext.TXSimulator.ExecuteQueryOnPrivateData(namespaceID, collection, getQueryResult.Query)
	} else if isMetadataSetForPagination(metadata) {
		isPaginated = true
		executeIter, err = txContext.TXSimulator.ExecuteQueryWithPagination(namespaceID, getQueryResult.Query, metadata.Bookmark, metadata.PageSize)
	} else {
		executeIter, err = txContext.TXSimulator.ExecuteQuery(namespaceID, getQueryResult.Query)
	}
	if err != nil {
		return nil, errors.WithStack(err)
	}

	txContext.InitializeQueryContext(iterID, executeIter)
	payload, err := h.QueryResponseBuilder.BuildQueryResponse(txContext, executeIter, iterID, isPaginated, totalReturnLimit)
	if err != nil {
		txContext.CleanupQueryContext(iterID)
		return nil, errors.WithStack(err)
	}

	payloadBytes, err := proto.Marshal(payload)
	if err != nil {
		txContext.CleanupQueryContext(iterID)
		return nil, errors.Wrap(err, "marshal failed")
	}

	chaincodeLogger.Debugf("Got keys and values. Sending %s", pb.ChaincodeMessage_RESPONSE)
	return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: payloadBytes, Txid: msg.Txid, ChannelId: msg.ChannelId}, nil
}

// Handles query to ledger history db
func (h *Handler) HandleGetHistoryForKey(msg *pb.ChaincodeMessage, txContext *TransactionContext) (*pb.ChaincodeMessage, error) {
	if txContext.HistoryQueryExecutor == nil {
		return nil, errors.New("history database is not enabled")
	}
	iterID := h.UUIDGenerator.New()
	namespaceID := txContext.NamespaceID

	getHistoryForKey := &pb.GetHistoryForKey{}
	err := proto.Unmarshal(msg.Payload, getHistoryForKey)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshal failed")
	}

	historyIter, err := txContext.HistoryQueryExecutor.GetHistoryForKey(namespaceID, getHistoryForKey.Key)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	totalReturnLimit := h.calculateTotalReturnLimit(nil)

	txContext.InitializeQueryContext(iterID, historyIter)
	payload, err := h.QueryResponseBuilder.BuildQueryResponse(txContext, historyIter, iterID, false, totalReturnLimit)
	if err != nil {
		txContext.CleanupQueryContext(iterID)
		return nil, errors.WithStack(err)
	}

	payloadBytes, err := proto.Marshal(payload)
	if err != nil {
		txContext.CleanupQueryContext(iterID)
		return nil, errors.Wrap(err, "marshal failed")
	}

	chaincodeLogger.Debugf("Got keys and values. Sending %s", pb.ChaincodeMessage_RESPONSE)
	return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: payloadBytes, Txid: msg.Txid, ChannelId: msg.ChannelId}, nil
}

// isCollectionSet reports whether a private-data collection name was supplied.
func isCollectionSet(collection string) bool {
	return collection != ""
}

func isMetadataSetForPagination(metadata *pb.QueryMetadata) bool
func getQueryMetadataFromBytes(metadataBytes []byte) (*pb.QueryMetadata, error) { if metadataBytes != nil { metadata := &pb.QueryMetadata{} err := proto.Unmarshal(metadataBytes, metadata) if err != nil { return nil, errors.Wrap(err, "unmarshal failed") } return metadata, nil } return nil, nil } func (h *Handler) calculateTotalReturnLimit(metadata *pb.QueryMetadata) int32 { totalReturnLimit := int32(h.TotalQueryLimit) if metadata != nil { pageSize := int32(metadata.PageSize) if pageSize > 0 && pageSize < totalReturnLimit { totalReturnLimit = pageSize } } return totalReturnLimit } func (h *Handler) getTxContextForInvoke(channelID string, txid string, payload []byte, format string, args ...interface{}) (*TransactionContext, error) { // if we have a channelID, just get the txsim from isValidTxSim if channelID != "" { return h.isValidTxSim(channelID, txid, "could not get valid transaction") } chaincodeSpec := &pb.ChaincodeSpec{} err := proto.Unmarshal(payload, chaincodeSpec) if err != nil { return nil, errors.Wrap(err, "unmarshal failed") } // Get the chaincodeID to invoke. The chaincodeID to be called may // contain composite info like "chaincode-name:version/channel-name" // We are not using version now but default to the latest targetInstance := ParseName(chaincodeSpec.ChaincodeId.Name) // If targetInstance is not an SCC, isValidTxSim should be called which will return an err. // We do not want to propagate calls to user CCs when the original call was to a SCC // without a channel context (ie, no ledger context). 
if !h.BuiltinSCCs.IsSysCC(targetInstance.ChaincodeName) { // normal path - UCC invocation with an empty ("") channel: isValidTxSim will return an error return h.isValidTxSim("", txid, "could not get valid transaction") } // Calling SCC without a ChannelID, then the assumption this is an external SCC called by the client (special case) and no UCC involved, // so no Transaction Simulator validation needed as there are no commits to the ledger, get the txContext directly if it is not nil txContext := h.TXContexts.Get(channelID, txid) if txContext == nil { return nil, errors.New("failed to get transaction context") } return txContext, nil } func (h *Handler) HandlePutState(msg *pb.ChaincodeMessage, txContext *TransactionContext) (*pb.ChaincodeMessage, error) { putState := &pb.PutState{} err := proto.Unmarshal(msg.Payload, putState) if err != nil { return nil, errors.Wrap(err, "unmarshal failed") } namespaceID := txContext.NamespaceID collection := putState.Collection if isCollectionSet(collection) { if txContext.IsInitTransaction { return nil, errors.New("private data APIs are not allowed in chaincode Init()") } if err := errorIfCreatorHasNoWritePermission(namespaceID, collection, txContext); err != nil { return nil, err } err = txContext.TXSimulator.SetPrivateData(namespaceID, collection, putState.Key, putState.Value) } else { err = txContext.TXSimulator.SetState(namespaceID, putState.Key, putState.Value) } if err != nil { return nil, errors.WithStack(err) } return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Txid: msg.Txid, ChannelId: msg.ChannelId}, nil } func (h *Handler) HandlePutStateMetadata(msg *pb.ChaincodeMessage, txContext *TransactionContext) (*pb.ChaincodeMessage, error) { err := h.checkMetadataCap(msg) if err != nil { return nil, err } putStateMetadata := &pb.PutStateMetadata{} err = proto.Unmarshal(msg.Payload, putStateMetadata) if err != nil { return nil, errors.Wrap(err, "unmarshal failed") } metadata := make(map[string][]byte) 
metadata[putStateMetadata.Metadata.Metakey] = putStateMetadata.Metadata.Value namespaceID := txContext.NamespaceID collection := putStateMetadata.Collection if isCollectionSet(collection) { if txContext.IsInitTransaction { return nil, errors.New("private data APIs are not allowed in chaincode Init()") } if err := errorIfCreatorHasNoWritePermission(namespaceID, collection, txContext); err != nil { return nil, err } err = txContext.TXSimulator.SetPrivateDataMetadata(namespaceID, collection, putStateMetadata.Key, metadata) } else { err = txContext.TXSimulator.SetStateMetadata(namespaceID, putStateMetadata.Key, metadata) } if err != nil { return nil, errors.WithStack(err) } return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Txid: msg.Txid, ChannelId: msg.ChannelId}, nil } func (h *Handler) HandleDelState(msg *pb.ChaincodeMessage, txContext *TransactionContext) (*pb.ChaincodeMessage, error) { delState := &pb.DelState{} err := proto.Unmarshal(msg.Payload, delState) if err != nil { return nil, errors.Wrap(err, "unmarshal failed") } namespaceID := txContext.NamespaceID collection := delState.Collection if isCollectionSet(collection) { if txContext.IsInitTransaction { return nil, errors.New("private data APIs are not allowed in chaincode Init()") } if err := errorIfCreatorHasNoWritePermission(namespaceID, collection, txContext); err != nil { return nil, err } err = txContext.TXSimulator.DeletePrivateData(namespaceID, collection, delState.Key) } else { err = txContext.TXSimulator.DeleteState(namespaceID, delState.Key) } if err != nil { return nil, errors.WithStack(err) } // Send response msg back to chaincode. 
return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Txid: msg.Txid, ChannelId: msg.ChannelId}, nil } // Handles requests that modify ledger state func (h *Handler) HandleInvokeChaincode(msg *pb.ChaincodeMessage, txContext *TransactionContext) (*pb.ChaincodeMessage, error) { chaincodeLogger.Debugf("[%s] C-call-C", shorttxid(msg.Txid)) chaincodeSpec := &pb.ChaincodeSpec{} err := proto.Unmarshal(msg.Payload, chaincodeSpec) if err != nil { return nil, errors.Wrap(err, "unmarshal failed") } // Get the chaincodeID to invoke. The chaincodeID to be called may // contain composite info like "chaincode-name:version/channel-name". // We are not using version now but default to the latest. targetInstance := ParseName(chaincodeSpec.ChaincodeId.Name) chaincodeSpec.ChaincodeId.Name = targetInstance.ChaincodeName if targetInstance.ChannelID == "" { // use caller's channel as the called chaincode is in the same channel targetInstance.ChannelID = txContext.ChannelID } chaincodeLogger.Debugf("[%s] C-call-C %s on channel %s", shorttxid(msg.Txid), targetInstance.ChaincodeName, targetInstance.ChannelID) err = h.checkACL(txContext.SignedProp, txContext.Proposal, targetInstance) if err != nil { chaincodeLogger.Errorf( "[%s] C-call-C %s on channel %s failed check ACL [%v]: [%s]", shorttxid(msg.Txid), targetInstance.ChaincodeName, targetInstance.ChannelID, txContext.SignedProp, err, ) return nil, errors.WithStack(err) } // Set up a new context for the called chaincode if on a different channel // We grab the called channel's ledger simulator to hold the new state txParams := &ccprovider.TransactionParams{ TxID: msg.Txid, ChannelID: targetInstance.ChannelID, SignedProp: txContext.SignedProp, Proposal: txContext.Proposal, TXSimulator: txContext.TXSimulator, HistoryQueryExecutor: txContext.HistoryQueryExecutor, } if targetInstance.ChannelID != txContext.ChannelID { lgr := h.LedgerGetter.GetLedger(targetInstance.ChannelID) if lgr == nil { return nil, errors.Errorf("failed to find 
ledger for channel: %s", targetInstance.ChannelID) } sim, err := lgr.NewTxSimulator(msg.Txid) if err != nil { return nil, errors.WithStack(err) } defer sim.Done() hqe, err := lgr.NewHistoryQueryExecutor() if err != nil { return nil, errors.WithStack(err) } txParams.TXSimulator = sim txParams.HistoryQueryExecutor = hqe } // Execute the chaincode... this CANNOT be an init at least for now responseMessage, err := h.Invoker.Invoke(txParams, targetInstance.ChaincodeName, chaincodeSpec.Input) if err != nil { return nil, errors.Wrap(err, "execute failed") } // payload is marshalled and sent to the calling chaincode's shim which unmarshals and // sends it to chaincode res, err := proto.Marshal(responseMessage) if err != nil { return nil, errors.Wrap(err, "marshal failed") } return &pb.ChaincodeMessage{Type: pb.ChaincodeMessage_RESPONSE, Payload: res, Txid: msg.Txid, ChannelId: msg.ChannelId}, nil } func (h *Handler) Execute(txParams *ccprovider.TransactionParams, namespace string, msg *pb.ChaincodeMessage, timeout time.Duration) (*pb.ChaincodeMessage, error) { chaincodeLogger.Debugf("Entry") defer chaincodeLogger.Debugf("Exit") txParams.CollectionStore = h.getCollectionStore(msg.ChannelId) txParams.IsInitTransaction = (msg.Type == pb.ChaincodeMessage_INIT) txParams.NamespaceID = namespace txctx, err := h.TXContexts.Create(txParams) if err != nil { return nil, err } defer h.TXContexts.Delete(msg.ChannelId, msg.Txid) if err := h.setChaincodeProposal(txParams.SignedProp, txParams.Proposal, msg); err != nil { return nil, err } h.serialSendAsync(msg) var ccresp *pb.ChaincodeMessage select { case ccresp = <-txctx.ResponseNotifier: // response is sent to user or calling chaincode. 
ChaincodeMessage_ERROR // are typically treated as error case <-time.After(timeout): err = errors.New("timeout expired while executing transaction") h.Metrics.ExecuteTimeouts.With("chaincode", h.chaincodeID).Add(1) case <-h.streamDone(): err = errors.New("chaincode stream terminated") } return ccresp, err } func (h *Handler) setChaincodeProposal(signedProp *pb.SignedProposal, prop *pb.Proposal, msg *pb.ChaincodeMessage) error { if prop != nil && signedProp == nil { return errors.New("failed getting proposal context. Signed proposal is nil") } // TODO: This doesn't make a lot of sense. Feels like both are required or // neither should be set. Check with a knowledgeable expert. if prop != nil { msg.Proposal = signedProp } return nil } func (h *Handler) getCollectionStore(channelID string) privdata.CollectionStore { return privdata.NewSimpleCollectionStore( h.LedgerGetter.GetLedger(channelID), h.DeployedCCInfoProvider, ) } func (h *Handler) State() State { return h.state } func (h *Handler) Close() { h.TXContexts.Close() } type State int const ( Created State = iota Established Ready ) func (s State) String() string { switch s { case Created: return "created" case Established: return "established" case Ready: return "ready" default: return "UNKNOWN" } }
{ if metadata == nil { return false } if metadata.PageSize == 0 && metadata.Bookmark == "" { return false } return true }
index.ts
export { default as PaymentAddStackScreen } from "./PaymentAdd";
export { default as PaymentSelectStackScreen } from "./PaymentSelect";
test_transaction.rs
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0. use futures::executor::block_on; use grpcio::{ChannelBuilder, Environment}; use kvproto::kvrpcpb::{ self as pb, ApiVersion, AssertionLevel, Context, Op, PessimisticLockRequest, PrewriteRequest, }; use kvproto::tikvpb::TikvClient; use raftstore::store::util::new_peer; use std::sync::Arc; use std::{sync::mpsc::channel, thread, time::Duration}; use storage::mvcc::tests::must_get; use storage::mvcc::{self, tests::must_locked}; use storage::txn::{self, commands}; use test_raftstore::new_server_cluster; use tikv::storage::kv::SnapshotExt; use tikv::storage::txn::tests::{ must_acquire_pessimistic_lock, must_commit, must_pessimistic_prewrite_put, must_pessimistic_prewrite_put_err, must_prewrite_put, must_prewrite_put_err, }; use tikv::storage::{ self, lock_manager::DummyLockManager, Snapshot, TestEngineBuilder, TestStorageBuilder, }; use tikv_util::HandyRwLock; use txn_types::{Key, Mutation, PessimisticLock, TimeStamp}; #[test] fn test_txn_failpoints() { let engine = TestEngineBuilder::new().build().unwrap(); let (k, v) = (b"k", b"v"); fail::cfg("prewrite", "return(WriteConflict)").unwrap(); must_prewrite_put_err(&engine, k, v, k, 10); fail::remove("prewrite"); must_prewrite_put(&engine, k, v, k, 10); fail::cfg("commit", "delay(100)").unwrap(); must_commit(&engine, k, 10, 20); fail::remove("commit"); let v1 = b"v1"; let (k2, v2) = (b"k2", b"v2"); must_acquire_pessimistic_lock(&engine, k, k, 30, 30); fail::cfg("pessimistic_prewrite", "return()").unwrap(); must_pessimistic_prewrite_put_err(&engine, k, v1, k, 30, 30, true); must_prewrite_put(&engine, k2, v2, k2, 31); fail::remove("pessimistic_prewrite"); must_pessimistic_prewrite_put(&engine, k, v1, k, 30, 30, true); must_commit(&engine, k, 30, 40); must_commit(&engine, k2, 31, 41); must_get(&engine, k, 50, v1); must_get(&engine, k2, 50, v2); } #[test] fn test_atomic_getting_max_ts_and_storing_memory_lock() { let engine = 
TestEngineBuilder::new().build().unwrap(); let storage = TestStorageBuilder::<_, DummyLockManager>::from_engine_and_lock_mgr( engine,
.unwrap(); let (prewrite_tx, prewrite_rx) = channel(); // sleep a while between getting max ts and store the lock in memory fail::cfg("before-set-lock-in-memory", "sleep(500)").unwrap(); storage .sched_txn_command( commands::Prewrite::new( vec![Mutation::make_put(Key::from_raw(b"k"), b"v".to_vec())], b"k".to_vec(), 40.into(), 20000, false, 1, TimeStamp::default(), TimeStamp::default(), Some(vec![]), false, AssertionLevel::Off, Context::default(), ), Box::new(move |res| { prewrite_tx.send(res).unwrap(); }), ) .unwrap(); // sleep a while so prewrite gets max ts before get is triggered thread::sleep(Duration::from_millis(200)); match block_on(storage.get(Context::default(), Key::from_raw(b"k"), 100.into())) { // In this case, min_commit_ts is smaller than the start ts, but the lock is visible // to the get. Err(storage::Error(box storage::ErrorInner::Txn(txn::Error( box txn::ErrorInner::Mvcc(mvcc::Error(box mvcc::ErrorInner::KeyIsLocked(lock))), )))) => { assert_eq!(lock.get_min_commit_ts(), 41); } res => panic!("unexpected result: {:?}", res), } let res = prewrite_rx.recv().unwrap().unwrap(); assert_eq!(res.min_commit_ts, 41.into()); } #[test] fn test_snapshot_must_be_later_than_updating_max_ts() { let engine = TestEngineBuilder::new().build().unwrap(); let storage = TestStorageBuilder::<_, DummyLockManager>::from_engine_and_lock_mgr( engine, DummyLockManager {}, ApiVersion::V1, ) .build() .unwrap(); // Suppose snapshot was before updating max_ts, after sleeping for 500ms the following prewrite should complete. 
fail::cfg("after-snapshot", "sleep(500)").unwrap(); let read_ts = 20.into(); let get_fut = storage.get(Context::default(), Key::from_raw(b"j"), read_ts); thread::sleep(Duration::from_millis(100)); fail::remove("after-snapshot"); let (prewrite_tx, prewrite_rx) = channel(); storage .sched_txn_command( commands::Prewrite::new( vec![Mutation::make_put(Key::from_raw(b"j"), b"v".to_vec())], b"j".to_vec(), 10.into(), 20000, false, 1, TimeStamp::default(), TimeStamp::default(), Some(vec![]), false, AssertionLevel::Off, Context::default(), ), Box::new(move |res| { prewrite_tx.send(res).unwrap(); }), ) .unwrap(); let has_lock = block_on(get_fut).is_err(); let res = prewrite_rx.recv().unwrap().unwrap(); // We must make sure either the lock is visible to the reader or min_commit_ts > read_ts. assert!(res.min_commit_ts > read_ts || has_lock); } #[test] fn test_update_max_ts_before_scan_memory_locks() { let engine = TestEngineBuilder::new().build().unwrap(); let storage = TestStorageBuilder::<_, DummyLockManager>::from_engine_and_lock_mgr( engine, DummyLockManager {}, ApiVersion::V1, ) .build() .unwrap(); fail::cfg("before-storage-check-memory-locks", "sleep(500)").unwrap(); let get_fut = storage.get(Context::default(), Key::from_raw(b"k"), 100.into()); thread::sleep(Duration::from_millis(200)); let (prewrite_tx, prewrite_rx) = channel(); storage .sched_txn_command( commands::Prewrite::new( vec![Mutation::make_put(Key::from_raw(b"k"), b"v".to_vec())], b"k".to_vec(), 10.into(), 20000, false, 1, TimeStamp::default(), TimeStamp::default(), Some(vec![]), false, AssertionLevel::Off, Context::default(), ), Box::new(move |res| { prewrite_tx.send(res).unwrap(); }), ) .unwrap(); // The prewritten lock is not seen by the reader assert_eq!(block_on(get_fut).unwrap().0, None); // But we make sure in this case min_commit_ts is greater than start_ts. 
let res = prewrite_rx.recv().unwrap().unwrap(); assert_eq!(res.min_commit_ts, 101.into()); } /// Generates a test that checks the correct behavior of holding and dropping locks, /// during the process of a single prewrite command. macro_rules! lock_release_test { ($test_name:ident, $lock_exists:ident, $before_actions:expr, $middle_actions:expr, $after_actions:expr, $should_succeed:expr) => { #[test] fn $test_name() { let engine = TestEngineBuilder::new().build().unwrap(); let storage = TestStorageBuilder::<_, DummyLockManager>::from_engine_and_lock_mgr( engine, DummyLockManager {}, ApiVersion::V1, ) .build() .unwrap(); let key = Key::from_raw(b"k"); let cm = storage.get_concurrency_manager(); let $lock_exists = || cm.read_key_check(&key, |_| Err(())).is_err(); $before_actions; let (prewrite_tx, prewrite_rx) = channel(); storage .sched_txn_command( commands::Prewrite::new( vec![Mutation::make_put(key.clone(), b"v".to_vec())], b"k".to_vec(), 10.into(), 20000, false, 1, TimeStamp::default(), TimeStamp::default(), Some(vec![]), false, AssertionLevel::Off, Context::default(), ), Box::new(move |res| { prewrite_tx.send(res).unwrap(); }), ) .unwrap(); $middle_actions; let res = prewrite_rx.recv(); assert_eq!(res.unwrap().is_ok(), $should_succeed); $after_actions; } }; } // Must release lock after prewrite fails. lock_release_test!( test_lock_lifetime_on_prewrite_failure, lock_exists, { fail::cfg( "rockskv_async_write", "return(Err(KvError::from(KvErrorInner::EmptyRequest)))", ) .unwrap(); assert!(!lock_exists()); }, {}, assert!(!lock_exists()), false ); // Must hold lock until prewrite ends. Must release lock after prewrite succeeds. 
lock_release_test!( test_lock_lifetime_on_prewrite_success, lock_exists, { fail::cfg("rockskv_async_write", "sleep(500)").unwrap(); assert!(!lock_exists()); }, { thread::sleep(Duration::from_millis(200)); assert!(lock_exists()); }, assert!(!lock_exists()), true ); #[test] fn test_max_commit_ts_error() { let engine = TestEngineBuilder::new().build().unwrap(); let storage = TestStorageBuilder::<_, DummyLockManager>::from_engine_and_lock_mgr( engine, DummyLockManager {}, ApiVersion::V1, ) .build() .unwrap(); let cm = storage.get_concurrency_manager(); fail::cfg("after_prewrite_one_key", "sleep(500)").unwrap(); let (prewrite_tx, prewrite_rx) = channel(); storage .sched_txn_command( commands::Prewrite::new( vec![ Mutation::make_put(Key::from_raw(b"k1"), b"v".to_vec()), Mutation::make_put(Key::from_raw(b"k2"), b"v".to_vec()), ], b"k1".to_vec(), 10.into(), 20000, false, 2, TimeStamp::default(), 100.into(), Some(vec![b"k2".to_vec()]), false, AssertionLevel::Off, Context::default(), ), Box::new(move |res| { prewrite_tx.send(res).unwrap(); }), ) .unwrap(); thread::sleep(Duration::from_millis(200)); assert!( cm.read_key_check(&Key::from_raw(b"k1"), |_| Err(())) .is_err() ); cm.update_max_ts(200.into()); let res = prewrite_rx.recv().unwrap().unwrap(); assert!(res.min_commit_ts.is_zero()); assert!(res.one_pc_commit_ts.is_zero()); // There should not be any memory lock left. assert!(cm.read_range_check(None, None, |_, _| Err(())).is_ok()); // Two locks should be written, the second one does not async commit. 
let l1 = must_locked(&storage.get_engine(), b"k1", 10); let l2 = must_locked(&storage.get_engine(), b"k2", 10); assert!(l1.use_async_commit); assert!(!l2.use_async_commit); } #[test] fn test_exceed_max_commit_ts_in_the_middle_of_prewrite() { let engine = TestEngineBuilder::new().build().unwrap(); let storage = TestStorageBuilder::<_, DummyLockManager>::from_engine_and_lock_mgr( engine, DummyLockManager {}, ApiVersion::V1, ) .build() .unwrap(); let cm = storage.get_concurrency_manager(); let (prewrite_tx, prewrite_rx) = channel(); // Pause between getting max ts and store the lock in memory fail::cfg("before-set-lock-in-memory", "pause").unwrap(); cm.update_max_ts(40.into()); let mutations = vec![ Mutation::make_put(Key::from_raw(b"k1"), b"v".to_vec()), Mutation::make_put(Key::from_raw(b"k2"), b"v".to_vec()), ]; storage .sched_txn_command( commands::Prewrite::new( mutations.clone(), b"k1".to_vec(), 10.into(), 20000, false, 2, 11.into(), 50.into(), Some(vec![]), false, AssertionLevel::Off, Context::default(), ), Box::new(move |res| { prewrite_tx.send(res).unwrap(); }), ) .unwrap(); // sleep a while so the first key gets max ts. thread::sleep(Duration::from_millis(200)); cm.update_max_ts(51.into()); fail::remove("before-set-lock-in-memory"); let res = prewrite_rx.recv().unwrap().unwrap(); assert!(res.min_commit_ts.is_zero()); assert!(res.one_pc_commit_ts.is_zero()); let locks = block_on(storage.scan_lock( Context::default(), 20.into(), Some(Key::from_raw(b"k1")), None, 2, )) .unwrap(); assert_eq!(locks.len(), 2); assert_eq!(locks[0].get_key(), b"k1"); assert!(locks[0].get_use_async_commit()); assert_eq!(locks[0].get_min_commit_ts(), 41); assert_eq!(locks[1].get_key(), b"k2"); assert!(!locks[1].get_use_async_commit()); // Send a duplicated request to test the idempotency of prewrite when falling back to 2PC. 
let (prewrite_tx, prewrite_rx) = channel(); storage .sched_txn_command( commands::Prewrite::new( mutations, b"k1".to_vec(), 10.into(), 20000, false, 2, 11.into(), 50.into(), Some(vec![]), false, AssertionLevel::Off, Context::default(), ), Box::new(move |res| { prewrite_tx.send(res).unwrap(); }), ) .unwrap(); let res = prewrite_rx.recv().unwrap().unwrap(); assert!(res.min_commit_ts.is_zero()); assert!(res.one_pc_commit_ts.is_zero()); } #[test] fn test_pessimistic_lock_check_epoch() { let mut cluster = new_server_cluster(0, 2); cluster.cfg.pessimistic_txn.pipelined = true; cluster.cfg.pessimistic_txn.in_memory = true; cluster.run(); cluster.must_transfer_leader(1, new_peer(1, 1)); let region = cluster.get_region(b""); let leader = region.get_peers()[0].clone(); let epoch = cluster.get_region_epoch(region.id); let mut ctx = Context::default(); ctx.set_region_id(region.id); ctx.set_peer(leader.clone()); ctx.set_region_epoch(epoch); fail::cfg("acquire_pessimistic_lock", "pause").unwrap(); let env = Arc::new(Environment::new(1)); let channel = ChannelBuilder::new(env).connect(&cluster.sim.rl().get_addr(leader.get_store_id())); let client = TikvClient::new(channel); let mut ctx = Context::default(); ctx.set_region_id(region.get_id()); ctx.set_region_epoch(region.get_region_epoch().clone()); ctx.set_peer(leader); let mut mutation = pb::Mutation::default(); mutation.set_op(Op::PessimisticLock); mutation.key = b"key".to_vec(); let mut req = PessimisticLockRequest::default(); req.set_context(ctx.clone()); req.set_mutations(vec![mutation].into()); req.set_start_version(10); req.set_for_update_ts(10); req.set_primary_lock(b"key".to_vec()); let lock_resp = thread::spawn(move || client.kv_pessimistic_lock(&req).unwrap()); thread::sleep(Duration::from_millis(300)); // Transfer leader out and back, so the term should have changed. 
cluster.must_transfer_leader(1, new_peer(2, 2)); cluster.must_transfer_leader(1, new_peer(1, 1)); fail::remove("acquire_pessimistic_lock"); let resp = lock_resp.join().unwrap(); // Region leader changes, so we should get a StaleCommand error. assert!(resp.get_region_error().has_stale_command()); } #[test] fn test_pessimistic_lock_check_valid() { let mut cluster = new_server_cluster(0, 1); cluster.cfg.pessimistic_txn.pipelined = true; cluster.cfg.pessimistic_txn.in_memory = true; cluster.run(); cluster.must_transfer_leader(1, new_peer(1, 1)); let txn_ext = cluster .must_get_snapshot_of_region(1) .ext() .get_txn_ext() .unwrap() .clone(); let region = cluster.get_region(b""); let leader = region.get_peers()[0].clone(); fail::cfg("acquire_pessimistic_lock", "pause").unwrap(); let env = Arc::new(Environment::new(1)); let channel = ChannelBuilder::new(env).connect(&cluster.sim.rl().get_addr(leader.get_store_id())); let client = TikvClient::new(channel); let mut ctx = Context::default(); ctx.set_region_id(region.get_id()); ctx.set_region_epoch(region.get_region_epoch().clone()); ctx.set_peer(leader); let mut mutation = pb::Mutation::default(); mutation.set_op(Op::PessimisticLock); mutation.key = b"key".to_vec(); let mut req = PessimisticLockRequest::default(); req.set_context(ctx.clone()); req.set_mutations(vec![mutation].into()); req.set_start_version(10); req.set_for_update_ts(10); req.set_primary_lock(b"key".to_vec()); let lock_resp = thread::spawn(move || client.kv_pessimistic_lock(&req).unwrap()); thread::sleep(Duration::from_millis(300)); // Set `is_valid` to false, but the region remains available to serve. txn_ext.pessimistic_locks.write().is_valid = false; fail::remove("acquire_pessimistic_lock"); let resp = lock_resp.join().unwrap(); // There should be no region error. assert!(!resp.has_region_error()); // The lock should not be written to the in-memory pessimistic lock table. 
assert!(txn_ext.pessimistic_locks.read().is_empty()); } #[test] fn test_concurrent_write_after_transfer_leader_invalidates_locks() { let mut cluster = new_server_cluster(0, 1); cluster.cfg.pessimistic_txn.pipelined = true; cluster.cfg.pessimistic_txn.in_memory = true; cluster.run(); cluster.must_transfer_leader(1, new_peer(1, 1)); let txn_ext = cluster .must_get_snapshot_of_region(1) .ext() .get_txn_ext() .unwrap() .clone(); let lock = PessimisticLock { primary: b"key".to_vec().into_boxed_slice(), start_ts: 10.into(), ttl: 3000, for_update_ts: 20.into(), min_commit_ts: 30.into(), }; assert!( txn_ext .pessimistic_locks .write() .insert(vec![(Key::from_raw(b"key"), lock.clone())]) .is_ok() ); let region = cluster.get_region(b""); let leader = region.get_peers()[0].clone(); fail::cfg("invalidate_locks_before_transfer_leader", "pause").unwrap(); let env = Arc::new(Environment::new(1)); let channel = ChannelBuilder::new(env).connect(&cluster.sim.rl().get_addr(leader.get_store_id())); let client = TikvClient::new(channel); let mut ctx = Context::default(); ctx.set_region_id(region.get_id()); ctx.set_region_epoch(region.get_region_epoch().clone()); ctx.set_peer(leader); let mut mutation = pb::Mutation::default(); mutation.set_op(Op::Put); mutation.key = b"key".to_vec(); let mut req = PrewriteRequest::default(); req.set_context(ctx); req.set_mutations(vec![mutation].into()); // Set a different start_ts. It should fail because the memory lock is still visible. req.set_start_version(20); req.set_primary_lock(b"key".to_vec()); // Prewrite should not be blocked because we have downgrade the write lock // to a read lock, and it should return a locked error because it encounters // the memory lock. let resp = client.kv_prewrite(&req).unwrap(); assert_eq!( resp.get_errors()[0].get_locked(), &lock.into_lock().into_lock_info(b"key".to_vec()) ); }
DummyLockManager {}, ApiVersion::V1, ) .build()
node_free.py
from client_database_connection import mycursor import os sql = "INSERT INTO free_node (node_id) VALUES (%s)"
mycursor.execute(sql, val) command = 'python get_code_when_free.py' os.system(command)
val = (node_id)
sqlite.go
package infra import ( "database/sql" "path" _ "github.com/mattn/go-sqlite3" "github.com/yuki-eto/5ch-slack-bot/config" ) var dbMap map[string]*sql.DB func init()
func NewSqliteDB(dbName string) (*sql.DB, error) { if _, e := dbMap[dbName]; e { return dbMap[dbName], nil } cfg := config.GetEnvConfig() dbFilePath := path.Join(cfg.DatabasePath, dbName + ".db") db, err := sql.Open("sqlite3", dbFilePath) if err != nil { return nil, err } dbMap[dbName] = db return db, nil }
{ dbMap = map[string]*sql.DB{} }
SavedCode.js
// This is the code that will be loaded into the editor! // You can create a standardised template for answers here // Question 1: Say hi! function
() { // Your code here! } helloWorld(); dfbdbf
helloWorld
Factorial - FCTRL.py
def
(): from sys import stdin, stdout rl = stdin.readline pl = stdout.write int1 = int str1 = str xr = range sum1 = sum arr = [5] for k in xr(1, 13): arr[k] = arr[k - 1] * 5 for _ in xr(int1(rl())): n = int1(rl()) c = sum1(n / i for i in arr) pl(str1(c) + "\n") main()
main
home.component.ts
import { Component, OnInit, Sanitizer } from '@angular/core'; import { ApiService } from '../shared/api.service'; @Component({ selector: 'my-home', templateUrl: './home.component.html', styleUrls: ['./home.component.scss'] }) export class
implements OnInit { basicRowHeight = 80; fitListHeight = '400px'; ratioGutter = 1; persons: any; prefix: string ='data:image/png;base64,'; recognizedName: string = ''; constructor(private apiService: ApiService, private sanitizer: Sanitizer) { this.apiService.getPictures().then(data => { for (let i = 0; i < data.persons.length; i++ ) { data.persons[i].images = data.persons[i].images.slice(1); } this.persons = data.persons; }); } getPrefixed(data: string) { return this.prefix + data; } onImageClick(filePath: string) { this.apiService.makeRecognition(filePath).then(data => this.recognizedName = data.person); } ngOnInit() { console.log('Hello Home'); } }
HomeComponent
main.ts
import axios from "axios" import { connect, ErrorCallback, HttpClient, MqttClient, unpublishRecursively } from "@artcom/mqtt-topping" import { createLogger, Winston } from "@artcom/logger" import { BootstrapData, InitData, Options, QueryConfig, QueryParams } from "./types" export = async function init( url: string, serviceId: string, { timeout = 2000, retryDelay = 10000, debugBootstrapData = null, onParseError = null } : Options = {} ): Promise<InitData> { const logger = createLogger() const data = await retrieveBootstrapData(url, timeout, retryDelay, logger, debugBootstrapData) return { logger, data, mqttClient: connectMqttClient(serviceId, data, onParseError, logger), httpClient: new HttpClient(data.httpBrokerUri), queryConfig: createQueryConfig(data.configServerUri), unpublishRecursively } }
async function retrieveBootstrapData( url: string, timeout: number, retryDelay: number, logger: Winston.Logger, debugBootstrapData: BootstrapData ) : Promise<BootstrapData> { if (debugBootstrapData) { logger.info("Using debug bootstrap data", { ...debugBootstrapData }) return debugBootstrapData } else { logger.info("Querying bootstrap data", { url }) while (true) { // eslint-disable-line no-constant-condition try { const { data } = await axios.get(url, { timeout }) logger.info("Bootstrap data received", { ...data }) return data } catch (error) { logger.error(`Query failed. Retrying in ${retryDelay}ms...`, { error: error.message }) await delay(retryDelay) } } } } function delay(time: number) { return new Promise(resolve => setTimeout(resolve, time)) } function connectMqttClient( serviceId: string, { device, tcpBrokerUri }: BootstrapData, onParseError: ErrorCallback, logger: Winston.Logger ): MqttClient { const clientId = createClientId(serviceId, device) logger.info("Connecting to Broker", { tcpBrokerUri, clientId }) const mqttClient = connect(tcpBrokerUri, { clientId, onParseError }) mqttClient.on("connect", () => { logger.info("Connected to Broker") }) mqttClient.on("close", () => { logger.error("Disconnected from Broker") }) mqttClient.on("error", () => { logger.error("Error Connecting to Broker") }) return mqttClient } function createClientId(serviceId: string, device: string) { const uuid = Math.random().toString(16).substr(2, 8) return `${serviceId}-${device}-${uuid}` } function createQueryConfig(configServerUri: string) : QueryConfig { return async (configPath: string, params: QueryParams = {}) => { const { version = "master", listFiles = false, includeCommitHash = false, parseJSON = true } = params const query: any = { url: `${configServerUri}/${version}/${configPath}?listFiles=${listFiles}`, transformResponse: parseJSON ? undefined : [] } return axios(query) .then(response => includeCommitHash ? 
{ data: response.data, commitHash: response.headers["git-commit-hash"] } : response.data) } }
listen.rs
fn main() -> ssam::Result<()> { let mut device = ssam::connect()?; // We assume that battery events (category 0x02) are already enabled... // If not, they should be enabled via device.event_enable(...) here. device.notifier_register(0x02, 0)?;
} Ok(()) }
for event in device.events()? { println!("{:?}", event?);
api_op_ResetDBParameterGroup.go
// Code generated by smithy-go-codegen DO NOT EDIT. package neptune import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/neptune/types" "github.com/awslabs/smithy-go/middleware" smithyhttp "github.com/awslabs/smithy-go/transport/http" ) // Modifies the parameters of a DB parameter group to the engine/system default // value. To reset specific parameters, provide a list of the following: // ParameterName and ApplyMethod. To reset the entire DB parameter group, specify // the DBParameterGroup name and ResetAllParameters parameters. When resetting the // entire group, dynamic parameters are updated immediately and static parameters // are set to pending-reboot to take effect on the next DB instance restart or // RebootDBInstance request. func (c *Client) ResetDBParameterGroup(ctx context.Context, params *ResetDBParameterGroupInput, optFns ...func(*Options)) (*ResetDBParameterGroupOutput, error) { if params == nil { params = &ResetDBParameterGroupInput{} } result, metadata, err := c.invokeOperation(ctx, "ResetDBParameterGroup", params, optFns, addOperationResetDBParameterGroupMiddlewares) if err != nil { return nil, err } out := result.(*ResetDBParameterGroupOutput) out.ResultMetadata = metadata return out, nil } type ResetDBParameterGroupInput struct { // The name of the DB parameter group. Constraints: // // * Must match the name of an // existing DBParameterGroup. // // This member is required. DBParameterGroupName *string // To reset the entire DB parameter group, specify the DBParameterGroup name and // ResetAllParameters parameters. To reset specific parameters, provide a list of // the following: ParameterName and ApplyMethod. A maximum of 20 parameters can be // modified in a single request. 
Valid Values (for Apply method): pending-reboot Parameters []types.Parameter // Specifies whether (true) or not (false) to reset all parameters in the DB // parameter group to default values. Default: true ResetAllParameters bool } type ResetDBParameterGroupOutput struct { // Provides the name of the DB parameter group. DBParameterGroupName *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata } func addOperationResetDBParameterGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsAwsquery_serializeOpResetDBParameterGroup{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsAwsquery_deserializeOpResetDBParameterGroup{}, middleware.After) if err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddAttemptClockSkewMiddleware(stack); err != nil {
if err = addClientUserAgent(stack); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addOpResetDBParameterGroupValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opResetDBParameterGroup(options.Region), middleware.Before); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, options); err != nil { return err } return nil } func newServiceMetadataMiddleware_opResetDBParameterGroup(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "rds", OperationName: "ResetDBParameterGroup", } }
return err }
mongo.rs
use crate::cli::export::{ExportParams, ExportStrategy}; use crate::cli::import::ImportStrategy; use crate::sampler::{Sampler, SamplerOutput}; use anyhow::Result; use chrono::{DateTime, Utc}; use mongodb::bson::Bson; use mongodb::options::FindOptions; use mongodb::{bson::Document, options::ClientOptions, sync::Client}; use serde_json::Value as JsonValue; use std::collections::BTreeMap; use std::convert::TryFrom; use std::str::FromStr; use synth_core::graph::prelude::content::number_content::U64; use synth_core::graph::prelude::number_content::I64; use synth_core::graph::prelude::{ChronoValue, Number, NumberContent, ObjectContent, RangeStep}; use synth_core::schema::number_content::F64; use synth_core::schema::{ ArrayContent, BoolContent, Categorical, ChronoValueType, DateTimeContent, RegexContent, StringContent, }; use synth_core::{Content, Name, Namespace, Value}; #[derive(Clone, Debug)] pub struct MongoExportStrategy { pub uri: String, } #[derive(Clone, Debug)] pub struct MongoImportStrategy { pub uri: String, } impl ImportStrategy for MongoImportStrategy { fn import(&self) -> Result<Namespace> { let client_options = ClientOptions::parse(&self.uri)?; info!("Connecting to database at {} ...", &self.uri); let client = Client::with_options(client_options)?; let db_name = parse_db_name(&self.uri)?; // 0: Initialise empty Namespace let mut namespace = Namespace::default(); let database = client.database(db_name); // 1: First pass - create master schema for collection_name in database.list_collection_names(None)? { let collection = database.collection(&collection_name); // This may be useful later // let count = collection.estimated_document_count(None)?; if let Ok(Some(some_obj)) = collection.find_one(None, None) { let as_array = Content::Array(ArrayContent::from_content_default_length( doc_to_content(&some_obj), )); namespace.put_collection(&Name::from_str(&collection_name)?, as_array)?; } else { info!("Collection {} is empty. 
Skipping...", collection_name); continue; } } // 2: Run an ingest step with 10 documents for collection_name in database.list_collection_names(None)? { let collection = database.collection(&collection_name); // This may be useful later // let count = collection.estimated_document_count(None)?; let mut find_options = FindOptions::default(); find_options.limit = Some(10); let mut random_sample: Vec<Document> = collection .find(None, find_options)? .collect::<Result<Vec<Document>, _>>()?; random_sample.iter_mut().for_each(|doc| { doc.remove("_id"); }); namespace.default_try_update( &Name::from_str(&collection_name)?, &serde_json::to_value(random_sample)?, )?; } Ok(namespace) } fn import_collection(&self, name: &Name) -> Result<Content> { self.import()? .collections .remove(name) .ok_or_else(|| anyhow!("Could not find table '{}' in MongoDb database.", name)) } fn as_value(&self) -> Result<JsonValue> { unreachable!() } } fn doc_to_content(doc: &Document) -> Content { let mut root = BTreeMap::new(); // Notice this `filter` here is a hack as we don't support id's out of the box. for (name, bson) in doc.iter().filter(|(name, _)| name.as_str() != "_id") { let content = bson_to_content(bson); root.insert(name.clone(), content); } Content::Object(ObjectContent { fields: root, ..Default::default() }) } fn
(bson: &Bson) -> Content { match bson { Bson::Double(d) => Content::Number(NumberContent::F64(F64::Range(RangeStep::new( *d, *d + 1., 0.1, )))), Bson::String(_) => Content::String(StringContent::default()), Bson::Array(array) => { let length = Content::Number(NumberContent::U64(U64::Constant(array.len() as u64))); let content_iter = array.iter().map(bson_to_content); Content::Array(ArrayContent { length: Box::new(length), content: Box::new(Content::OneOf(content_iter.collect())), }) } Bson::Document(doc) => doc_to_content(doc), Bson::Boolean(_) => Content::Bool(BoolContent::Categorical(Categorical::default())), Bson::Null => Content::null(), Bson::RegularExpression(regex) => Content::String(StringContent::Pattern( RegexContent::pattern(regex.pattern.clone()).unwrap_or_default(), )), Bson::JavaScriptCode(_) => { Content::String(StringContent::Categorical(Categorical::default())) } Bson::JavaScriptCodeWithScope(_) => { Content::String(StringContent::Categorical(Categorical::default())) } Bson::Int32(i) => Content::Number(NumberContent::I64(I64::Range(RangeStep::new( *i as i64, *i as i64 + 1, 1, )))), Bson::Int64(i) => Content::Number(NumberContent::I64(I64::Range(RangeStep::new( *i, *i + 1, 1, )))), Bson::DateTime(_) => Content::DateTime(DateTimeContent { format: "".to_string(), type_: ChronoValueType::DateTime, begin: None, end: None, }), // There should be a more explicit enumeration here, but we don't support // all the required types here. 
_ => Content::String(StringContent::default()), } } impl ExportStrategy for MongoExportStrategy { fn export(&self, params: ExportParams) -> Result<()> { let mut client = Client::with_uri_str(&self.uri)?; let sampler = Sampler::try_from(&params.namespace)?; let output = sampler.sample_seeded(params.collection_name.clone(), params.target, params.seed)?; match output { SamplerOutput::Collection(values) => self.insert_data( params.collection_name.unwrap().to_string(), &values, &mut client, ), SamplerOutput::Namespace(namespace) => { for (name, values) in namespace { self.insert_data(name, &values, &mut client)?; } Ok(()) } } } } impl MongoExportStrategy { fn insert_data( &self, collection_name: String, collection: &[Value], client: &mut Client, ) -> Result<()> { let db_name = parse_db_name(&self.uri)?; let mut docs = Vec::new(); for value in collection { docs.push(match value_to_bson(value.clone()) { Bson::Document(doc) => doc, _ => bail!("invalid bson document"), }); } let n_values = docs.len(); client .database(db_name) .collection(&collection_name) .insert_many(docs, None)?; info!( "Inserted {} rows into collection {} ...", n_values, collection_name ); Ok(()) } } fn value_to_bson(value: Value) -> Bson { match value { Value::Null(_) => Bson::Null, Value::Bool(b) => Bson::Boolean(b), Value::Number(n) => number_to_bson(n), Value::String(s) => Bson::String(s), Value::DateTime(dt) => date_time_to_bson(dt.value), //TODO: format instead? 
Value::Object(obj) => object_to_bson(obj), Value::Array(arr) => array_to_bson(arr), } } fn array_to_bson(array: Vec<Value>) -> Bson { Bson::Array(array.into_iter().map(value_to_bson).collect()) } fn object_to_bson(obj: BTreeMap<String, Value>) -> Bson { let obj = obj .into_iter() .map(|(name, value)| (name, value_to_bson(value))) .collect(); Bson::Document(obj) } fn date_time_to_bson(datetime: ChronoValue) -> Bson { Bson::DateTime(mongodb::bson::DateTime::from(match datetime { // those are not optimal as BSON doesn't have a way to specify dates or times, just both at once ChronoValue::NaiveDate(nd) => DateTime::<Utc>::from_utc(nd.and_hms(0, 0, 0), Utc), ChronoValue::NaiveTime(nt) => { DateTime::<Utc>::from_utc(chrono::naive::MIN_DATE.and_time(nt), Utc) } ChronoValue::NaiveDateTime(ndt) => DateTime::<Utc>::from_utc(ndt, Utc), ChronoValue::DateTime(dt) => dt.into(), })) } fn number_to_bson(number: Number) -> Bson { match number { Number::I8(i8) => Bson::Int32(i8 as i32), Number::I16(i16) => Bson::Int32(i16 as i32), Number::I32(i32) => Bson::Int32(i32), Number::I64(i64) => Bson::Int64(i64), Number::I128(i128) => Bson::Int64(i128 as i64), Number::U8(u8) => Bson::Int32(u8 as i32), Number::U16(u16) => Bson::Int32(u16 as i32), Number::U32(u32) => Bson::Int64(u32 as i64), Number::U64(u64) => Bson::Int64(u64 as i64), Number::U128(u128) => Bson::Int64(u128 as i64), Number::F32(f32) => Bson::Double(*f32 as f64), Number::F64(f64) => Bson::Double(*f64), } } fn parse_db_name(uri: &str) -> Result<&str> { // this may require a parser instead of `split` uri.split('/') .last() .ok_or_else(|| anyhow!("Cannot export data. No database name specified in the uri")) }
bson_to_content
pidfile_test.go
// SPDX-License-Identifier: Apache-2.0 // Copyright 2018 Authors of Cilium //go:build !privileged_tests // +build !privileged_tests package pidfile import ( "fmt" "os" "os/exec" "testing" "github.com/cilium/cilium/pkg/checker" . "gopkg.in/check.v1" ) const ( path = "/tmp/cilium-test-pidfile" ) // Hook up gocheck into the "go test" runner. func
(t *testing.T) { TestingT(t) } type PidfileTestSuite struct{} var _ = Suite(&PidfileTestSuite{}) func (s *PidfileTestSuite) TestWrite(c *C) { err := Write(path) c.Assert(err, IsNil) defer Remove(path) content, err := os.ReadFile(path) c.Assert(err, IsNil) c.Assert(content, checker.DeepEquals, []byte(fmt.Sprintf("%d\n", os.Getpid()))) } func (s *PidfileTestSuite) TestKill(c *C) { cmd := exec.Command("sleep", "inf") err := cmd.Start() c.Assert(err, IsNil) err = write(path, cmd.Process.Pid) c.Assert(err, IsNil) defer Remove(path) pid, err := Kill(path) c.Assert(err, IsNil) c.Assert(pid, Not(Equals), 0) err = cmd.Wait() c.Assert(err, ErrorMatches, "signal: killed") } func (s *PidfileTestSuite) TestKillAlreadyFinished(c *C) { cmd := exec.Command("sleep", "0") err := cmd.Start() c.Assert(err, IsNil) err = write(path, cmd.Process.Pid) c.Assert(err, IsNil) defer Remove(path) err = cmd.Wait() c.Assert(err, IsNil) pid, err := Kill(path) c.Assert(err, IsNil) c.Assert(pid, Equals, 0) } func (s *PidfileTestSuite) TestKillPidfileNotExist(c *C) { _, err := Kill("/tmp/cilium-foo-bar-some-not-existing-file") c.Assert(err, IsNil) } func (s *PidfileTestSuite) TestKillPidfilePermissionDenied(c *C) { err := os.WriteFile(path, []byte("foobar\n"), 0000) c.Assert(err, IsNil) defer Remove(path) _, err = Kill(path) c.Assert(err, ErrorMatches, ".* permission denied") } func (s *PidfileTestSuite) TestKillFailedParsePid(c *C) { err := os.WriteFile(path, []byte("foobar\n"), 0644) c.Assert(err, IsNil) defer Remove(path) _, err = Kill(path) c.Assert(err, ErrorMatches, "failed to parse pid .*") }
Test
nl.js
/***********************************************************************************************************************
nl.js – Dutch Localization by: Sjoerd Hekking. Copyright © 2021 Thomas Michael Edwards <[email protected]>. All rights reserved. Use of this source code is governed by a BSD 2-clause "Simplified" License, which may be found in the LICENSE file. For more information about the guidelines used to create this localization, see: http://www.motoslave.net/sugarcube/2/docs/#guide-localization ***********************************************************************************************************************/ /* global l10nStrings */ /* eslint-disable strict */ (function () { /* General. */ l10nStrings.identity = 'Spel'; l10nStrings.aborting = 'Afbreken'; l10nStrings.cancel = 'Annuleren'; l10nStrings.close = 'Sluiten'; l10nStrings.ok = 'Oke'; /* Errors. */ l10nStrings.errorTitle = 'Fout'; l10nStrings.errorToggle = 'Schakel de foutweergave in'; l10nStrings.errorNonexistentPassage = 'De passage "{passage}" bestaat niet'; // NOTE: `passage` is supplied locally l10nStrings.errorSaveDiskLoadFailed = 'gefaald om de opslag te laden vanuit de schijf'; l10nStrings.errorSaveMissingData = 'De opslag ontbreekt de vereiste gegevens. Ofwel het geladen bestand is geen save of de save is beschadigd geraakt'; l10nStrings.errorSaveIdMismatch = 'De opslag is van de foute {identity}'; /* Warnings. */ l10nStrings._warningIntroLacking = 'Uw browser ontbreekt of heeft uitgeschakeld'; l10nStrings._warningOutroDegraded = ', dus deze {identity} draait in een gedegradeerde modus. Mogelijk kunt u doorgaan, maar sommige onderdelen werken mogelijk niet goed.'; l10nStrings.warningNoWebStorage = '{_warningIntroLacking} de webopslag-API{_warningOutroDegraded}'; l10nStrings.warningDegraded = '{_warningIntroLacking} enkele van de mogelijkheden die vereist zijn voor deze {identity}{_warningOutroDegraded}'; /* Debug bar. 
*/ l10nStrings.debugBarToggle = 'Schakel de foutopsporingsbalk in'; l10nStrings.debugBarNoWatches = '\u2014 geen kijkers ingesteld \u2014'; l10nStrings.debugBarAddWatch = 'Voeg kijker toe'; l10nStrings.debugBarDeleteWatch = 'Verwijder kijker'; l10nStrings.debugBarWatchAll = 'Bekijk alles'; l10nStrings.debugBarWatchNone = 'Verwijder alles'; l10nStrings.debugBarLabelAdd = 'Toevoegen'; l10nStrings.debugBarLabelWatch = 'Kijker'; l10nStrings.debugBarLabelTurn = 'Ronde'; // (noun) chance to act (in a game), moment, period l10nStrings.debugBarLabelViews = 'Weergave'; l10nStrings.debugBarViewsToggle = 'Schakel de foutopsporingsweergaven in'; l10nStrings.debugBarWatchToggle = 'Schakel het kijkpaneel in'; /* UI bar. */ l10nStrings.uiBarToggle = 'Schakel de UI-balk in'; l10nStrings.uiBarBackward = 'Ga terug in de {identity} geschiedenis'; l10nStrings.uiBarForward = 'Ga vooruit binnen de {identity} geschiedenis'; l10nStrings.uiBarJumpto = 'Spring naar een specifiek punt in de {identity} geschiedenis'; /* Jump To. */ l10nStrings.jumptoTitle = 'Spring naar'; l10nStrings.jumptoTurn = 'Ronde'; // (noun) chance to act (in a game), moment, period l10nStrings.jumptoUnavailable = 'Momenteel geen historiepunten beschikbaar\u2026'; /* Saves. 
*/ l10nStrings.savesTitle = 'Opslaan'; l10nStrings.savesDisallowed = 'Op deze passage is opslaan niet toegestaan.'; l10nStrings.savesIncapable = '{_warningIntroLacking} de mogelijkheden die nodig zijn om saves te ondersteunen, dus saves zijn uitgeschakeld voor deze sessie.'; l10nStrings.savesLabelAuto = 'Auto-opslag'; l10nStrings.savesLabelDelete = 'Verwijder'; l10nStrings.savesLabelExport = 'Sla op naar opslag\u2026'; l10nStrings.savesLabelImport = 'Laad vanuit opslag\u2026'; l10nStrings.savesLabelLoad = 'Laad'; l10nStrings.savesLabelClear = 'Verwijder alles'; l10nStrings.savesLabelSave = 'Opslaan'; l10nStrings.savesLabelSlot = 'Slot'; l10nStrings.savesUnavailable = 'Geen opslag sloten gevonden\u2026'; l10nStrings.savesUnknownDate = 'onbekend'; /* Settings. */ l10nStrings.settingsTitle = 'Instellingen'; l10nStrings.settingsOff = 'Uit'; l10nStrings.settingsOn = 'Aan'; l10nStrings.settingsReset = 'Herstel naar begin waarde'; /* Restart. */ l10nStrings.restartTitle = 'Herstart'; l10nStrings.restartPrompt = 'Weet u zeker dat u opnieuw wilt opstarten? Niet-opgeslagen voortgang gaat verloren.'; /* Share. */ l10nStrings.shareTitle = 'Delen'; /* Alert. */ l10nStrings.alertTitle = 'Waarschuwing'; /* Autoload. */ l10nStrings.autoloadTitle = 'Automatisch laden'; l10nStrings.autoloadCancel = 'Ga naar start'; l10nStrings.autoloadOk = 'Laad automatische opslag'; l10nStrings.autoloadPrompt = 'Er bestaat een autosave. Nu laden of naar start gaan?'; /* Macros. */ l10nStrings.macroBackText = 'Terugspoelen'; // (verb) rewind, revert l10nStrings.macroReturnText = 'Teruggaan'; // (verb) go/send back })();
views.py
from flask import Flask,render_template, request, jsonify from . import main @main.route('/') def
(): return render_template('index.html')
index
lib.rs
pub mod lion; pub mod format;
hub.go
package staticbackend import ( "fmt" "strings" "github.com/staticbackendhq/core/cache" "github.com/staticbackendhq/core/internal" "github.com/gbrlsnchs/jwt/v3" "github.com/gorilla/websocket" ) // Hub maintains the set of active clients and broadcasts messages to the // clients. type Hub struct { // Registered clients. sockets map[*Socket]string // Reverse ID => socket ids map[string]*Socket // Socket's subscribed channels channels map[*Socket][]chan bool // Inbound messages from the clients. broadcast chan internal.Command // Register requests from the clients. register chan *Socket // Unregister requests from clients. unregister chan *Socket // Cache used for keys and pub/sub (Redis) volatile *cache.Cache } func
(c *cache.Cache) *Hub { return &Hub{ broadcast: make(chan internal.Command), register: make(chan *Socket), unregister: make(chan *Socket), sockets: make(map[*Socket]string), ids: make(map[string]*Socket), channels: make(map[*Socket][]chan bool), volatile: c, } } func (h *Hub) run() { for { select { case sck := <-h.register: h.sockets[sck] = sck.id h.ids[sck.id] = sck cmd := internal.Command{ Type: "init", Data: sck.id, } sck.send <- cmd case sck := <-h.unregister: if _, ok := h.sockets[sck]; ok { h.unsub(sck) delete(h.sockets, sck) delete(h.ids, sck.id) delete(h.channels, sck) //time.AfterFunc(500*time.Millisecond, func() { close(sck.send) //}) } case msg := <-h.broadcast: sockets, p := h.getTargets(msg) for _, sck := range sockets { select { case sck.send <- p: default: h.unsub(sck) close(sck.send) delete(h.ids, msg.SID) delete(h.sockets, sck) delete(h.channels, sck) } } } } } func (h *Hub) getTargets(msg internal.Command) (sockets []*Socket, payload internal.Command) { sender, ok := h.ids[msg.SID] if !ok { return } switch msg.Type { case internal.MsgTypeEcho: sockets = append(sockets, sender) payload = msg payload.Data = "echo: " + msg.Data case internal.MsgTypeAuth: sockets = append(sockets, sender) var pl internal.JWTPayload if _, err := jwt.Verify([]byte(msg.Data), internal.HashSecret, &pl); err != nil { payload = internal.Command{Type: internal.MsgTypeError, Data: "invalid token"} return } var a internal.Auth if err := volatile.GetTyped(pl.Token, &a); err != nil { payload = internal.Command{Type: internal.MsgTypeError, Data: "invalid token"} } else { payload = internal.Command{Type: internal.MsgTypeToken, Data: pl.Token} } case internal.MsgTypeJoin: subs, ok := h.channels[sender] if !ok { subs = make([]chan bool, 0) } closeSubChan := make(chan bool) subs = append(subs, closeSubChan) go h.volatile.Subscribe(sender.send, msg.Token, msg.Data, closeSubChan) sockets = append(sockets, sender) payload = internal.Command{Type: internal.MsgTypeJoined, Data: msg.Data} 
case internal.MsgTypeChanIn: sockets = append(sockets, sender) if len(msg.Channel) == 0 { payload = internal.Command{Type: internal.MsgTypeError, Data: "no channel was specified"} return } else if strings.HasPrefix(strings.ToLower(msg.Channel), "db-") { payload = internal.Command{ Type: internal.MsgTypeError, Data: "you cannot write to database channel", } return } if err := h.volatile.Publish(msg); err != nil { payload = internal.Command{Type: internal.MsgTypeError, Data: "unable to send your message"} return } payload = internal.Command{Type: internal.MsgTypeOk} default: sockets = append(sockets, sender) payload.Type = internal.MsgTypeError payload.Data = fmt.Sprintf(`%s command not found`, msg.Type) } return } func (h *Hub) join(scksck *websocket.Conn, channel string) { } func (h *Hub) unsub(sck *Socket) { subs, ok := h.channels[sck] if !ok { return } for _, sub := range subs { sub <- true close(sub) } }
newHub
testutils.py
import os, sys, platform from os.path import join, dirname, abspath, basename import unittest def add_to_path(): """ Prepends the build directory to the path so that newly built pypyodbc libraries are used, allowing it to be tested without installing it. """ # Put the build directory into the Python path so we pick up the version we just built. # # To make this cross platform, we'll search the directories until we find the .pyd file. import imp library_exts = [ t[0] for t in imp.get_suffixes() if t[-1] == imp.C_EXTENSION ] library_names = [ 'pypyodbc%s' % ext for ext in library_exts ] # Only go into directories that match our version number. dir_suffix = '-%s.%s' % (sys.version_info[0], sys.version_info[1]) build = join(dirname(dirname(abspath(__file__))), 'build') for root, dirs, files in os.walk(build): for d in dirs[:]: if not d.endswith(dir_suffix): dirs.remove(d) for name in library_names: if name in files: sys.path.insert(0, root) return print >>sys.stderr, 'Did not find the pypyodbc library in the build directory. Will use an installed version.' def print_library_info(cnxn): import pypyodbc print 'python: %s' % sys.version print 'pypyodbc: %s %s' % (pypyodbc.version, os.path.abspath(pypyodbc.__file__)) print 'odbc: %s' % cnxn.getinfo(pypyodbc.SQL_ODBC_VER) print 'driver: %s %s' % (cnxn.getinfo(pypyodbc.SQL_DRIVER_NAME), cnxn.getinfo(pypyodbc.SQL_DRIVER_VER)) print ' supports ODBC version %s' % cnxn.getinfo(pypyodbc.SQL_DRIVER_ODBC_VER) print 'os: %s' % platform.system() print 'unicode: Py_Unicode=%s SQLWCHAR=%s' % (pypyodbc.UNICODE_SIZE, pypyodbc.SQLWCHAR_SIZE) if platform.system() == 'Windows': print ' %s' % ' '.join([s for s in platform.win32_ver() if s]) def load_tests(testclass, name, *args): """ Returns a TestSuite for tests in `testclass`. name Optional test name if you only want to run 1 test. If not provided all tests in `testclass` will be loaded. args Arguments for the test class constructor. These will be passed after the test method name. 
""" if name: if not name.startswith('test_'): name = 'test_%s' % name names = [ name ] else: names = [ method for method in dir(testclass) if method.startswith('test_') ] return unittest.TestSuite([ testclass(name, *args) for name in names ]) def load_setup_connection_string(section):
file exists but cannot be parsed, an exception is raised. """ from os.path import exists, join, dirname, splitext, basename from ConfigParser import SafeConfigParser FILENAME = 'setup.cfg' KEY = 'connection-string' path = join(dirname(dirname(abspath(__file__))), 'tmp', FILENAME) if exists(path): try: p = SafeConfigParser() p.read(path) except: raise SystemExit('Unable to parse %s: %s' % (path, sys.exc_info()[1])) if p.has_option(section, KEY): return p.get(section, KEY) return None
""" Attempts to read the default connection string from the setup.cfg file. If the file does not exist or if it exists but does not contain the connection string, None is returned. If the
Spinner.Basic.Example.tsx
// @codepen import * as React from 'react'; import { Spinner, SpinnerSize } from 'office-ui-fabric-react/lib/Spinner'; import { Label } from 'office-ui-fabric-react/lib/Label'; import './Spinner.Basic.Example.scss'; export class
extends React.Component<any, any> { public render(): JSX.Element { return ( <div className="ms-BasicSpinnersExample"> <Label>Extra Small Spinner</Label> <Spinner size={SpinnerSize.xSmall} /> <Label>Small Spinner</Label> <Spinner size={SpinnerSize.small} /> <Label>Medium Spinner</Label> <Spinner size={SpinnerSize.medium} /> <Label>Large Spinner</Label> <Spinner size={SpinnerSize.large} /> <Label>Spinner with Label</Label> <Spinner label="I am definitely loading..." /> <Label>Large Spinner with Label positioned at bottom (default)</Label> <Spinner size={SpinnerSize.large} label="Seriously, still loading..." ariaLive="assertive" /> <Label>Large Spinner with Label positioned above</Label> <Spinner size={SpinnerSize.large} label="Sorry, still loading..." ariaLive="assertive" labelPosition="top" /> <Label>Large Spinner with Label positioned on the right side</Label> <Spinner size={SpinnerSize.large} label="Wait, wait..." ariaLive="assertive" labelPosition="right" /> <Label>Large Spinner with Label positioned on the left side</Label> <Spinner size={SpinnerSize.large} label="Nope, still loading..." ariaLive="assertive" labelPosition="left" /> </div> ); } }
SpinnerBasicExample
turn_schedule.rs
use ecs::*; use util::{Schedule, ScheduleTicket}; pub type TurnSchedule = Schedule<EntityId>; pub trait TurnScheduleQueue { fn schedule_turn(&mut self, entity: EntityId, time: u64) -> ScheduleTicket; } impl TurnScheduleQueue for TurnSchedule { fn schedule_turn(&mut self, entity: EntityId, time: u64) -> ScheduleTicket
}
{ self.insert(entity, time) }