file_name | prefix | suffix | middle
---|---|---|---|
day06.rs | const INPUT: &str = include_str!("./inputs/06.txt");
use std::collections::HashMap;
use std::io::{self, Write};
pub fn | () -> crate::util::Result<()> {
let mut memory_banks: Vec<usize> = INPUT
.split_whitespace()
.filter_map(|s| s.parse::<usize>().ok())
.collect();
let (mut configs, mut cycles, len) = (HashMap::new(), 0, memory_banks.len());
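// Redistribute blocks until a configuration repeats: p1 is the cycle count at the
// first repeat, p2 is the length of the loop that was entered.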
let (p1, p2) = loop {
if let Some(last_seen) = configs.insert(memory_banks.clone(), cycles) {
break (cycles, cycles - last_seen);
}
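// Pick the fullest bank; `usize::MAX - index` breaks ties in favor of the lowest index.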
let (index, &blocks) = memory_banks
.iter()
.enumerate()
.max_by_key(|&(index, item)| (item, usize::MAX - index))
.expect("no max");
*memory_banks.get_mut(index).expect("index invalid") = 0;
for b in 1..=blocks {
*memory_banks.get_mut((index + b) % len).expect("OOB") += 1;
}
cycles += 1;
};
writeln!(io::stdout(), "Day 06 Part 1: {}\nDay 06 Part 2: {}", p1, p2)?;
Ok(())
}
| solve |
properties.rs | use crate::descriptions::PixelFormat;
use crate::enums::BitmapOptions;
use math2d::Matrix3x2f;
use math2d::Point2f;
use winapi::um::d2d1::{
D2D1_BITMAP_PROPERTIES, D2D1_BRUSH_PROPERTIES, D2D1_LINEAR_GRADIENT_BRUSH_PROPERTIES,
D2D1_RADIAL_GRADIENT_BRUSH_PROPERTIES,
};
use winapi::um::d2d1_1::D2D1_BITMAP_PROPERTIES1;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct BitmapProperties {
pub pixel_format: PixelFormat,
pub dpi_x: f32,
pub dpi_y: f32,
}
impl From<BitmapProperties> for D2D1_BITMAP_PROPERTIES {
#[inline]
fn from(bp: BitmapProperties) -> Self {
D2D1_BITMAP_PROPERTIES {
pixelFormat: bp.pixel_format.into(),
dpiX: bp.dpi_x,
dpiY: bp.dpi_y,
}
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct BitmapProperties1 {
pub pixel_format: PixelFormat,
pub dpi_x: f32,
pub dpi_y: f32,
pub options: BitmapOptions,
}
impl From<BitmapProperties1> for D2D1_BITMAP_PROPERTIES1 {
#[inline]
fn from(bp: BitmapProperties1) -> Self {
D2D1_BITMAP_PROPERTIES1 {
pixelFormat: bp.pixel_format.into(),
dpiX: bp.dpi_x,
dpiY: bp.dpi_y,
bitmapOptions: bp.options.0,
colorContext: std::ptr::null_mut(), // TODO: ColorContext
}
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct BrushProperties {
pub opacity: f32,
pub transform: Matrix3x2f,
}
impl BrushProperties {
#[inline]
pub fn new(opacity: f32, transform: &Matrix3x2f) -> BrushProperties {
BrushProperties {
opacity,
transform: *transform,
}
}
}
impl From<BrushProperties> for D2D1_BRUSH_PROPERTIES {
#[inline]
fn from(bp: BrushProperties) -> D2D1_BRUSH_PROPERTIES {
D2D1_BRUSH_PROPERTIES {
opacity: bp.opacity,
transform: bp.transform.into(),
}
}
} | impl Default for BrushProperties {
#[inline]
fn default() -> BrushProperties {
BrushProperties {
opacity: 1.0,
transform: Matrix3x2f::IDENTITY,
}
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
pub struct LinearGradientBrushProperties {
pub start: Point2f,
pub end: Point2f,
}
impl From<LinearGradientBrushProperties> for D2D1_LINEAR_GRADIENT_BRUSH_PROPERTIES {
#[inline]
fn from(bp: LinearGradientBrushProperties) -> D2D1_LINEAR_GRADIENT_BRUSH_PROPERTIES {
D2D1_LINEAR_GRADIENT_BRUSH_PROPERTIES {
startPoint: bp.start.into(),
endPoint: bp.end.into(),
}
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Default)]
pub struct RadialGradientBrushProperties {
pub center: Point2f,
pub origin_offset: Point2f,
pub radius_x: f32,
pub radius_y: f32,
}
impl From<RadialGradientBrushProperties> for D2D1_RADIAL_GRADIENT_BRUSH_PROPERTIES {
#[inline]
fn from(bp: RadialGradientBrushProperties) -> D2D1_RADIAL_GRADIENT_BRUSH_PROPERTIES {
D2D1_RADIAL_GRADIENT_BRUSH_PROPERTIES {
center: bp.center.into(),
gradientOriginOffset: bp.origin_offset.into(),
radiusX: bp.radius_x,
radiusY: bp.radius_y,
}
}
} | |
dataset.go | package cmd
import (
"errors"
"fmt"
"github.com/hirosassa/bqiam/metadata"
"github.com/spf13/cobra"
)
// datasetCmd represents the dataset command
var datasetCmd = &cobra.Command{
Use: "dataset [user email (required)]",
Short: "List datasets that the input user or service account has permissions",
Long: `
This subcommand returns a list of datasets
that the input user or service account is able to access.
`,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) != 1 {
return errors.New("user email is required")
}
return nil
},
RunE: runCmdDataset,
}
func | (cmd *cobra.Command, args []string) error {
var ms metadata.Metas
if err := ms.Load(config.CacheFile); err != nil {
return err
}
entity := args[0]
for _, m := range ms.Metas {
if m.Entity == entity {
fmt.Println(m.Project, m.Dataset, m.Role)
}
}
return nil
}
func init() {
rootCmd.AddCommand(datasetCmd)
}
| runCmdDataset |
animation.js | /*!
* tanguage script compiled code
*
* Datetime: Fri, 10 Aug 2018 04:01:28 GMT
*/
;
// tang.config({});
tang.init().block([
'$_/math/',
'$_/draw/Charts/'
], function (pandora, root, imports, undefined) {
var module = this.module;
var draw = pandora.ns('draw', {});
var math = imports['$_/math/'];
var Charts = imports['$_/draw/charts/'];
var _ = pandora;
var doc = root.document;
var console = root.console;
var helpers = draw.Charts.helpers;
var requestAnimFrame = helpers.requestAnimFrame, cancelAnimFrame = helpers.cancelAnimFrame; | }
for (var i = startIndex + 1;i < arrayToSearch.length;i++) {
var currentItem = arrayToSearch[i];
if (filterCallback(currentItem)) {
return currentItem;
}
};
}
var Animation = pandora.declareClass({
curFrame: null,
fps: 36,
easing: "",
render: null,
onAnimationProgress: null,
onAnimationComplete: null,
digestWrapper: function () {
this.startDigest();
},
startDigest: function () {
var startTime = Date.now();
var framesToDrop = 0;
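// if earlier digests overran the frame budget, `dropFrames` has accumulated; skip whole frames to catch up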
if (this.dropFrames > 1) {
framesToDrop = Math.floor(this.dropFrames);
this.dropFrames -= framesToDrop;
}
for (var i = 0;i < animations.length;i++) {
if (animations[i].animationObject.curFrame === null) {
animations[i].animationObject.curFrame = 0;
}
animations[i].animationObject.curFrame += 1 + framesToDrop;
if (animations[i].animationObject.curFrame > animations[i].animationObject.frames) {
animations[i].animationObject.curFrame = animations[i].animationObject.frames;
}
animations[i].animationObject.render(animations[i].chartInstance, animations[i].animationObject);
if (animations[i].animationObject.curFrame == animations[i].animationObject.frames) {
animations[i].animationObject.onAnimationComplete.call(animations[i].chartInstance);
animations.splice(i, 1);
i--;
}
}
var endTime = Date.now();
var delay = endTime - startTime - this.frameDuration;
var frameDelay = delay /this.frameDuration;
if (frameDelay > 1) {
this.dropFrames += frameDelay;
}
var that = this;
if (animations.length > 0) {
requestAnimFrame(function () {
that.digestWrapper();
}, this.fps);
};
}
});
pandora.extend(pandora.draw.Charts.prototype, true, {
dropFrames: 0,
addAnimation: function (duration) {
var animationObject = new Animation();
animationObject.animationDuration = duration || this.options.animationDuration || 1000;
animationObject.frames = Math.ceil(animationObject.animationDuration * animationObject.fps /1000);
animationObject.frameDuration = 1000 /animationObject.fps;
animationObject.easing = this.options.animationEasing;
animationObject.render = function (instance, animation) {
var easingFunction = math.easing.all[animation.easing];
var stepDecimal = animation.curFrame /animation.frames;
var easeDecimal = easingFunction(stepDecimal, 0, 1, 1);
instance.draw(easeDecimal, stepDecimal, animation.curFrame);
}
animationObject.onAnimationProgress = this.options.onAnimationProgress;
animationObject.onAnimationComplete = this.options.onAnimationComplete;
for (var index = 0;index < animations.length;++index) {
if (animations[index].chartInstance === this) {
animations[index].animationObject = animationObject;
return;
}
}
animations.push({
chartInstance: this,
animationObject: animationObject
});
if (animations.length == 1) {
requestAnimFrame(function () {
animationObject.digestWrapper();
}, animationObject.fps);
};
},
cancelAnimation: function () {
var index = findNextWhere(animations, function (animationWrapper) {
return animationWrapper.chartInstance === this;
});
if (index) {
animations.splice(index, 1);
};
}
});
});
//# sourceMappingURL=animation.js.map | var animations = [];
function findNextWhere (arrayToSearch, filterCallback, startIndex) {
if (!startIndex) {
startIndex = -1; |
IGangCommandEvent.d.ts | import { GangCommandWrapper } from './GangCommandWrapper';
import { GangEventTypes } from './GangEventTypes';
export interface IGangCommandEvent { | type: GangEventTypes.Command;
wrapper: GangCommandWrapper<unknown>;
} |
|
main.rs | use actix_web::{get, middleware, post, web, App, HttpResponse, HttpServer};
use futures::executor;
use std::{sync::mpsc, thread};
#[get("/hello")]
async fn hello() -> &'static str {
"Hello world!"
}
#[post("/stop")]
async fn stop(stopper: web::Data<mpsc::Sender<()>>) -> HttpResponse {
// make request that sends message through the Sender
stopper.send(()).unwrap();
HttpResponse::NoContent().finish()
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
std::env::set_var("RUST_LOG", "actix_server=debug,actix_web=debug");
env_logger::init();
// create a channel
let (tx, rx) = mpsc::channel::<()>();
let bind = ("127.0.0.1", 8080);
// start server as normal but don't .await after .run() yet
let server = HttpServer::new(move || {
// give the server a Sender in .data
App::new()
.app_data(web::Data::new(tx.clone()))
.wrap(middleware::Logger::default())
.service(hello)
.service(stop)
})
.bind(&bind)?
.run();
// clone the Server handle
let srv = server.handle();
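// block on the shutdown signal in a plain OS thread so the async runtime is never blocked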
thread::spawn(move || { | rx.recv().unwrap();
// stop server gracefully
executor::block_on(srv.stop(true))
});
// run server
server.await
} | // wait for shutdown signal |
cinder.go | // Copyright (c) 2017 Huawei Technologies Co., Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
This module implements the Cinder driver for OpenSDS. The Cinder driver passes
volume operation requests on to gophercloud, which is an OpenStack Go SDK.
*/
package cinder
import (
"time"
log "github.com/golang/glog"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack"
"github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats"
"github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions"
"github.com/gophercloud/gophercloud/openstack/blockstorage/noauth"
snapshotsv2 "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots"
volumesv2 "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
. "github.com/opensds/opensds/contrib/drivers/utils/config"
pb "github.com/opensds/opensds/pkg/dock/proto"
"github.com/opensds/opensds/pkg/model"
"github.com/opensds/opensds/pkg/utils/config"
"github.com/satori/go.uuid"
)
const (
defaultConfPath = "/etc/opensds/driver/cinder.yaml"
)
var conf = CinderConfig{}
// Driver is a struct for the Cinder backend, which can be called to manage the block
// storage service defined in gophercloud. | // Current block storage version
blockStoragev2 *gophercloud.ServiceClient
blockStoragev3 *gophercloud.ServiceClient
conf *CinderConfig
}
// AuthOptions
type AuthOptions struct {
NoAuth bool `yaml:"noAuth,omitempty"`
CinderEndpoint string `yaml:"cinderEndpoint,omitempty"`
IdentityEndpoint string `yaml:"endpoint,omitempty"`
DomainID string `yaml:"domainId,omitempty"`
DomainName string `yaml:"domainName,omitempty"`
Username string `yaml:"username,omitempty"`
Password string `yaml:"password,omitempty"`
TenantID string `yaml:"tenantId,omitempty"`
TenantName string `yaml:"tenantName,omitempty"`
}
// CinderConfig
type CinderConfig struct {
AuthOptions `yaml:"authOptions"`
Pool map[string]PoolProperties `yaml:"pool,flow"`
}
// Setup
func (d *Driver) Setup() error {
// Read cinder config file
d.conf = &CinderConfig{}
p := config.CONF.OsdsDock.Backends.Cinder.ConfigPath
if "" == p {
p = defaultConfPath
}
Parse(d.conf, p)
opts := gophercloud.AuthOptions{
IdentityEndpoint: d.conf.IdentityEndpoint,
DomainID: d.conf.DomainID,
DomainName: d.conf.DomainName,
Username: d.conf.Username,
Password: d.conf.Password,
TenantID: d.conf.TenantID,
TenantName: d.conf.TenantName,
}
if d.conf.NoAuth {
provider, err := noauth.NewClient(opts)
if err != nil {
log.Error("When get no authentication options:", err)
return err
}
d.blockStoragev2, err = noauth.NewBlockStorageV2(provider, noauth.EndpointOpts{
CinderEndpoint: d.conf.CinderEndpoint,
})
if err != nil {
log.Error("When get no authentication block storage session:", err)
return err
}
} else {
provider, err := openstack.AuthenticatedClient(opts)
if err != nil {
log.Error("When get auth options:", err)
return err
}
d.blockStoragev2, err = openstack.NewBlockStorageV2(provider, gophercloud.EndpointOpts{})
if err != nil {
log.Error("When get block storage session:", err)
return err
}
}
return nil
}
// Unset
func (d *Driver) Unset() error { return nil }
// CreateVolume
func (d *Driver) CreateVolume(req *pb.CreateVolumeOpts) (*model.VolumeSpec, error) {
//Configure create request body.
opts := &volumesv2.CreateOpts{
Name: req.GetName(),
Description: req.GetDescription(),
Size: int(req.GetSize()),
AvailabilityZone: req.GetAvailabilityZone(),
}
vol, err := volumesv2.Create(d.blockStoragev2, opts).Extract()
if err != nil {
log.Error("Cannot create volume:", err)
return nil, err
}
// Currently the dock framework doesn't support syncing data from the storage system,
// so it's necessary to wait for the result of the resource's creation.
// Timeout after 10s.
timeout := time.After(10 * time.Second)
ticker := time.NewTicker(300 * time.Millisecond)
done := make(chan bool, 1)
go func() {
for {
select {
case <-ticker.C:
tmpVol, err := d.PullVolume(vol.ID)
if err != nil {
continue
}
if tmpVol.Status != "creating" {
vol.Status = tmpVol.Status
close(done)
return
}
case <-timeout:
close(done)
return
}
}
}()
<-done
return &model.VolumeSpec{
BaseModel: &model.BaseModel{
Id: req.GetId(),
},
Name: vol.Name,
Description: vol.Description,
Size: int64(vol.Size),
AvailabilityZone: vol.AvailabilityZone,
Status: vol.Status,
}, nil
}
// PullVolume
func (d *Driver) PullVolume(volID string) (*model.VolumeSpec, error) {
vol, err := volumesv2.Get(d.blockStoragev2, volID).Extract()
if err != nil {
log.Error("Cannot get volume:", err)
return nil, err
}
return &model.VolumeSpec{
BaseModel: &model.BaseModel{
Id: vol.ID,
},
Name: vol.Name,
Description: vol.Description,
Size: int64(vol.Size),
AvailabilityZone: vol.AvailabilityZone,
Status: vol.Status,
}, nil
}
// DeleteVolume
func (d *Driver) DeleteVolume(opt *pb.DeleteVolumeOpts) error {
if err := volumesv2.Delete(d.blockStoragev2, opt.GetId()).ExtractErr(); err != nil {
log.Error("Cannot delete volume:", err)
return err
}
return nil
}
// ExtendVolume ...
func (d *Driver) ExtendVolume(req *pb.ExtendVolumeOpts) (*model.VolumeSpec, error) {
//Configure create request body.
opts := &volumeactions.ExtendSizeOpts{
NewSize: int(req.GetSize()),
}
err := volumeactions.ExtendSize(d.blockStoragev2, req.GetId(), opts).ExtractErr()
if err != nil {
log.Error("Cannot extend volume:", err)
return nil, err
}
return &model.VolumeSpec{
BaseModel: &model.BaseModel{
Id: req.GetId(),
},
Name: req.GetName(),
Description: req.GetDescription(),
Size: int64(req.GetSize()),
AvailabilityZone: req.GetAvailabilityZone(),
}, nil
}
// InitializeConnection
func (d *Driver) InitializeConnection(req *pb.CreateAttachmentOpts) (*model.ConnectionInfo, error) {
opts := &volumeactions.InitializeConnectionOpts{
IP: req.HostInfo.GetIp(),
Host: req.HostInfo.GetHost(),
Initiator: req.HostInfo.GetInitiator(),
Platform: req.HostInfo.GetPlatform(),
OSType: req.HostInfo.GetOsType(),
Multipath: &req.MultiPath,
}
conn, err := volumeactions.InitializeConnection(d.blockStoragev2, req.GetVolumeId(), opts).Extract()
if err != nil {
log.Error("Cannot initialize volume connection:", err)
return nil, err
}
return &model.ConnectionInfo{
DriverVolumeType: "iscsi",
ConnectionData: conn,
}, nil
}
// TerminateConnection
func (d *Driver) TerminateConnection(opt *pb.DeleteAttachmentOpts) error { return nil }
// CreateSnapshot
func (d *Driver) CreateSnapshot(req *pb.CreateVolumeSnapshotOpts) (*model.VolumeSnapshotSpec, error) {
opts := &snapshotsv2.CreateOpts{
VolumeID: req.GetVolumeId(),
Name: req.GetName(),
Description: req.GetDescription(),
}
snp, err := snapshotsv2.Create(d.blockStoragev2, opts).Extract()
if err != nil {
log.Error("Cannot create snapshot:", err)
return nil, err
}
// Currently the dock framework doesn't support syncing data from the storage system,
// so it's necessary to wait for the result of the resource's creation.
// Timeout after 10s.
timeout := time.After(10 * time.Second)
ticker := time.NewTicker(300 * time.Millisecond)
done := make(chan bool, 1)
go func() {
for {
select {
case <-ticker.C:
tmpSnp, err := d.PullSnapshot(snp.ID)
if err != nil {
continue
}
if tmpSnp.Status != "creating" {
snp.Status = tmpSnp.Status
close(done)
return
}
case <-timeout:
close(done)
return
}
}
}()
<-done
return &model.VolumeSnapshotSpec{
BaseModel: &model.BaseModel{
Id: snp.ID,
},
Name: snp.Name,
Description: snp.Description,
Size: int64(snp.Size),
Status: snp.Status,
VolumeId: req.GetVolumeId(),
}, nil
}
// PullSnapshot
func (d *Driver) PullSnapshot(snapID string) (*model.VolumeSnapshotSpec, error) {
snp, err := snapshotsv2.Get(d.blockStoragev2, snapID).Extract()
if err != nil {
log.Error("Cannot get snapshot:", err)
return nil, err
}
return &model.VolumeSnapshotSpec{
BaseModel: &model.BaseModel{
Id: snp.ID,
},
Name: snp.Name,
Description: snp.Description,
Size: int64(snp.Size),
Status: snp.Status,
VolumeId: snp.VolumeID,
}, nil
}
// DeleteSnapshot
func (d *Driver) DeleteSnapshot(req *pb.DeleteVolumeSnapshotOpts) error {
if err := snapshotsv2.Delete(d.blockStoragev2, req.GetId()).ExtractErr(); err != nil {
log.Error("Cannot delete snapshot:", err)
return err
}
return nil
}
// ListPools
func (d *Driver) ListPools() ([]*model.StoragePoolSpec, error) {
log.Info("Starting list pools in cinder drivers.")
opts := &schedulerstats.ListOpts{Detail: true}
pages, err := schedulerstats.List(d.blockStoragev2, opts).AllPages()
if err != nil {
log.Error("Cannot list storage pools:", err)
return nil, err
}
polpages, err := schedulerstats.ExtractStoragePools(pages)
if err != nil {
log.Error("Cannot extract storage pools:", err)
return nil, err
}
var pols []*model.StoragePoolSpec
for _, page := range polpages {
if _, ok := d.conf.Pool[page.Name]; !ok {
continue
}
pol := &model.StoragePoolSpec{
BaseModel: &model.BaseModel{
Id: uuid.NewV5(uuid.NamespaceOID, page.Name).String(),
},
Name: page.Name,
TotalCapacity: int64(page.Capabilities.TotalCapacityGB),
FreeCapacity: int64(page.Capabilities.FreeCapacityGB),
StorageType: d.conf.Pool[page.Name].StorageType,
AvailabilityZone: d.conf.Pool[page.Name].AvailabilityZone,
Extras: d.conf.Pool[page.Name].Extras,
}
pols = append(pols, pol)
}
return pols, nil
}
func (d *Driver) CreateVolumeGroup(opt *pb.CreateVolumeGroupOpts, vg *model.VolumeGroupSpec) (*model.VolumeGroupSpec, error) {
return nil, &model.NotImplementError{"Method CreateVolumeGroup has not been implemented."}
}
func (d *Driver) UpdateVolumeGroup(opt *pb.UpdateVolumeGroupOpts, vg *model.VolumeGroupSpec, addVolumesRef []*model.VolumeSpec, removeVolumesRef []*model.VolumeSpec) (*model.VolumeGroupSpec, []*model.VolumeSpec, []*model.VolumeSpec, error) {
return nil, nil, nil, &model.NotImplementError{"Method UpdateVolumeGroup has not been implemented."}
}
func (d *Driver) DeleteVolumeGroup(opt *pb.DeleteVolumeGroupOpts, vg *model.VolumeGroupSpec, volumes []*model.VolumeSpec) (*model.VolumeGroupSpec, []*model.VolumeSpec, error) {
return nil, nil, &model.NotImplementError{"Method DeleteVolumeGroup has not been implemented."}
} | type Driver struct { |
help.py | from nonebot import on_command, CommandSession
@on_command('help', aliases=('h', '帮助'), only_to_me=False)
async def manual(session: CommandSession):
await session.send(f'[CQ:image,file=/admin/manual.png]')
@manual.args_parser
async def _(session: CommandSession):
# do nothing
retu | rn |
|
features.py | import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from pcl_helper import *
print('run features.py')
def rgb_to_hsv(rgb_list):
|
def compute_color_histograms(cloud, using_hsv=False):
# Compute histograms for the clusters
point_colors_list = []
# Step through each point in the point cloud
for point in pc2.read_points(cloud, skip_nans=True):
rgb_list = float_to_rgb(point[3])
if using_hsv:
point_colors_list.append(rgb_to_hsv(rgb_list) * 255)
else:
point_colors_list.append(rgb_list)
# Populate lists with color values
channel_1_vals = []
channel_2_vals = []
channel_3_vals = []
for color in point_colors_list:
channel_1_vals.append(color[0])
channel_2_vals.append(color[1])
channel_3_vals.append(color[2])
# TODO: Compute histograms
nbins = 32
bins_range = (0, 256)
# TODO: Concatenate and normalize the histograms
channel_1_hist = np.histogram(channel_1_vals, bins=nbins, range=bins_range)
channel_2_hist = np.histogram(channel_2_vals, bins=nbins, range=bins_range)
channel_3_hist = np.histogram(channel_3_vals, bins=nbins, range=bins_range)
hist_features = np.concatenate((channel_1_hist[0], channel_2_hist[0], channel_3_hist[0])).astype(np.float64)
normed_features = hist_features / np.sum(hist_features)
# Generate random features for demo mode.
# Replace normed_features with your feature vector
# normed_features = np.random.random(96)
# print('run normed_features finished')
return normed_features
def compute_normal_histograms(normal_cloud):
norm_x_vals = []
norm_y_vals = []
norm_z_vals = []
nbins = 32
bins_range = (-1, 1)
for norm_component in pc2.read_points(normal_cloud,
field_names=('normal_x', 'normal_y', 'normal_z'),
skip_nans=True):
norm_x_vals.append(norm_component[0])
norm_y_vals.append(norm_component[1])
norm_z_vals.append(norm_component[2])
# TODO: Compute histograms of normal values (just like with color)
norm_x_hist = np.histogram(norm_x_vals, bins=nbins, range=bins_range)
norm_y_hist = np.histogram(norm_y_vals, bins=nbins, range=bins_range)
norm_z_hist = np.histogram(norm_z_vals, bins=nbins, range=bins_range)
# TODO: Concatenate and normalize the histograms
norm_hist_features = np.concatenate((norm_x_hist[0], norm_y_hist[0], norm_z_hist[0])).astype(np.float64)
normed_features = norm_hist_features / np.sum(norm_hist_features)
# Generate random features for demo mode.
# Replace normed_features with your feature vector
# normed_feature = np.random.random(96)
# print('run compute_normal_histograms function finished')
return normed_features
| rgb_normalized = [1.0 * rgb_list[0] / 255, 1.0 * rgb_list[1] / 255, 1.0 * rgb_list[2] / 255]
hsv_normalized = matplotlib.colors.rgb_to_hsv([[rgb_normalized]])[0][0]
return hsv_normalized |
handlers.go | // Copyright The Linux Foundation and each contributor to CommunityBridge.
// SPDX-License-Identifier: MIT
package whitelist
import (
"github.com/communitybridge/easycla/cla-backend-go/events"
"github.com/communitybridge/easycla/cla-backend-go/gen/models"
"github.com/communitybridge/easycla/cla-backend-go/gen/restapi/operations"
"github.com/communitybridge/easycla/cla-backend-go/gen/restapi/operations/company"
"github.com/communitybridge/easycla/cla-backend-go/signatures"
"github.com/communitybridge/easycla/cla-backend-go/user"
"github.com/go-openapi/runtime/middleware"
"github.com/savaki/dynastore"
)
// Configure sets up handlers on the api with the given service
func | (api *operations.ClaAPI, service service, sessionStore *dynastore.Store, signatureService signatures.SignatureService, eventsService events.Service) {
api.CompanyAddCclaWhitelistRequestHandler = company.AddCclaWhitelistRequestHandlerFunc(
func(params company.AddCclaWhitelistRequestParams) middleware.Responder {
requestID, err := service.AddCclaWhitelistRequest(params.CompanyID, params.ProjectID, params.Body)
if err != nil {
return company.NewAddCclaWhitelistRequestBadRequest().WithPayload(errorResponse(err))
}
eventsService.LogEvent(&events.LogEventArgs{
EventType: events.CCLAWhitelistRequestCreated,
ProjectID: params.ProjectID,
CompanyID: params.CompanyID,
UserID: params.Body.UserID,
EventData: &events.CCLAWhitelistRequestCreatedEventData{RequestID: requestID},
})
return company.NewAddCclaWhitelistRequestOK()
})
api.CompanyDeleteCclaWhitelistRequestHandler = company.DeleteCclaWhitelistRequestHandlerFunc(
func(params company.DeleteCclaWhitelistRequestParams, claUser *user.CLAUser) middleware.Responder {
err := service.DeleteCclaWhitelistRequest(params.RequestID)
if err != nil {
return company.NewDeleteCclaWhitelistRequestBadRequest().WithPayload(errorResponse(err))
}
eventsService.LogEvent(&events.LogEventArgs{
EventType: events.CCLAWhitelistRequestDeleted,
ProjectID: params.ProjectID,
CompanyID: params.CompanyID,
UserID: claUser.UserID,
EventData: &events.CCLAWhitelistRequestDeletedEventData{RequestID: params.RequestID},
})
return company.NewDeleteCclaWhitelistRequestOK()
})
api.CompanyListCclaWhitelistRequestsHandler = company.ListCclaWhitelistRequestsHandlerFunc(
func(params company.ListCclaWhitelistRequestsParams, claUser *user.CLAUser) middleware.Responder {
result, err := service.ListCclaWhitelistRequest(params.CompanyID, params.ProjectID)
if err != nil {
return company.NewListCclaWhitelistRequestsBadRequest().WithPayload(errorResponse(err))
}
return company.NewListCclaWhitelistRequestsOK().WithPayload(result)
})
}
type codedResponse interface {
Code() string
}
func errorResponse(err error) *models.ErrorResponse {
code := ""
if e, ok := err.(codedResponse); ok {
code = e.Code()
}
e := models.ErrorResponse{
Code: code,
Message: err.Error(),
}
return &e
}
| Configure |
mod.rs | // Copyright (c) 2015 Daniel Grunwald
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this
// software and associated documentation files (the "Software"), to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
// to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
pub use self::object::PyObject;
pub use self::typeobject::PyType;
pub use self::module::PyModule;
pub use self::string::{PyBytes, PyString, PyStringData};
#[cfg(feature="python27-sys")]
pub use self::string::PyUnicode;
#[cfg(feature="python3-sys")]
pub use self::string::PyString as PyUnicode;
pub use self::iterator::PyIterator;
pub use self::boolobject::PyBool;
pub use self::tuple::{PyTuple, NoArgs};
pub use self::dict::PyDict;
pub use self::list::PyList;
#[cfg(feature="python27-sys")]
pub use self::num::PyInt;
#[cfg(feature="python3-sys")]
pub use self::num::PyLong as PyInt;
pub use self::num::{PyLong, PyFloat};
pub use self::sequence::PySequence;
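/// Implements the core Python object traits for a newtype wrapper around `PyObject`.
/// The two-argument form additionally derives checked downcasting via `ffi::$checkfunction`,
/// and the three-argument form also wires up `type_object()` via `ffi::$typeobject`.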
#[macro_export]
macro_rules! pyobject_newtype(
($name: ident) => (
py_impl_to_py_object_for_python_object!($name);
py_impl_from_py_object_for_python_object!($name);
impl $crate::PythonObject for $name {
#[inline]
fn as_object(&self) -> &$crate::PyObject {
&self.0
}
#[inline]
fn into_object(self) -> $crate::PyObject {
self.0
}
/// Unchecked downcast from PyObject to Self.
/// Undefined behavior if the input object does not have the expected type.
#[inline]
unsafe fn unchecked_downcast_from(obj: $crate::PyObject) -> Self {
$name(obj)
}
/// Unchecked downcast from PyObject to Self.
/// Undefined behavior if the input object does not have the expected type.
#[inline]
unsafe fn unchecked_downcast_borrow_from<'a>(obj: &'a $crate::PyObject) -> &'a Self {
::std::mem::transmute(obj)
}
}
);
($name: ident, $checkfunction: ident) => (
pyobject_newtype!($name);
impl ::python::PythonObjectWithCheckedDowncast for $name {
#[inline]
fn downcast_from<'p>(py: ::python::Python<'p>, obj: ::objects::object::PyObject) -> Result<$name, ::python::PythonObjectDowncastError<'p>> {
unsafe {
if ::ffi::$checkfunction(obj.as_ptr()) != 0 {
Ok($name(obj))
} else {
Err(::python::PythonObjectDowncastError(py))
}
}
}
#[inline]
fn downcast_borrow_from<'a, 'p>(py: ::python::Python<'p>, obj: &'a ::objects::object::PyObject) -> Result<&'a $name, ::python::PythonObjectDowncastError<'p>> {
unsafe {
if ::ffi::$checkfunction(obj.as_ptr()) != 0 {
Ok(::std::mem::transmute(obj))
} else {
Err(::python::PythonObjectDowncastError(py))
}
}
}
}
);
($name: ident, $checkfunction: ident, $typeobject: ident) => (
pyobject_newtype!($name, $checkfunction);
impl ::python::PythonObjectWithTypeObject for $name {
#[inline]
fn type_object(py: ::python::Python) -> ::objects::typeobject::PyType {
unsafe { ::objects::typeobject::PyType::from_type_ptr(py, &mut ::ffi::$typeobject) }
}
}
);
);
macro_rules! extract(
($obj:ident to $t:ty; $py:ident => $body: block) => {
impl <'source> ::conversion::FromPyObject<'source>
for $t
{
fn extract($py: Python, $obj: &'source PyObject) -> PyResult<Self> {
$body
}
}
}
);
mod object; | mod typeobject;
mod module;
mod string;
mod dict;
mod iterator;
mod boolobject;
mod tuple;
mod list;
mod num;
mod sequence;
pub mod exc;
#[cfg(feature="python27-sys")]
pub mod oldstyle;
mod tests; | |
create-piggy-bank.component.ts | import { ChangeDetectionStrategy, Component, OnInit, Output, EventEmitter } from '@angular/core';
import { FormBuilder, FormGroup, Validators } from '@angular/forms';
import { PiggyBank } from '../piggy-bank.model';
@Component({
selector: 'create-piggy-bank',
templateUrl: './create-piggy-bank.component.html',
changeDetection: ChangeDetectionStrategy.OnPush
})
export class | implements OnInit {
@Output() newPiggyBank = new EventEmitter<PiggyBank>();
createPiggyBankForm: FormGroup;
constructor(private fb: FormBuilder) {}
ngOnInit() {
this.createForm();
}
onSubmit() {
const formValue = this.createPiggyBankForm.value;
const piggyBank: PiggyBank = {
name: formValue.name,
amount: formValue.amount,
goal: formValue.goal
};
this.newPiggyBank.emit(piggyBank);
this.createPiggyBankForm.reset();
}
private createForm() {
this.createPiggyBankForm = this.fb.group({
name: ['', Validators.required],
amount: ['', Validators.required],
goal: ''
});
}
}
| CreatePiggyBankComponent |
app.js | 'use strict';
angular.module('AdCatal', [
'ngRoute',
'ngCookies',
'SessionManager',
'ui.bootstrap',
'AuthInterceptor',
'ngSanitize',
'pascalprecht.translate',
'NavBar',
'ngResource'
])
.constant('APP_CONFIG',{
'appName':'Product Catalogue',
'appVersion':'1.0.0-SNAPSHOT'
})
.config(['$routeProvider', '$httpProvider','$translateProvider','$translatePartialLoaderProvider',
function($routeProvider,$httpProvider,$translateProvider,$translatePartialLoaderProvider) {
$routeProvider
.when('/',{templateUrl:'views/CatalArticle/CatalArticles.html',controller:'catalArticlesCtlr'})
.when('/CatalArticles/new',{templateUrl:'views/CatalArticle/CatalArticleCreate.html',controller:'catalArticleCreateCtlr'})
.when('/CatalArticles',{templateUrl:'views/CatalArticle/CatalArticles.html',controller:'catalArticlesCtlr'})
.when('/CatalArticles/edit/:pic',{templateUrl:'views/CatalArticle/CatalArticleEdit.html',controller:'catalArticleEditCtlr'})
.when('/CatalArticles/show/:pic',{templateUrl:'views/CatalArticle/CatalArticleShow.html',controller:'catalArticleShowCtlr'})
// .when('/CatalArtDetailConfigs',{templateUrl:'views/CatalArtDetailConfig/search.html',controller:'SearchCatalArtDetailConfigController'})
// .when('/CatalArtDetailConfigs/new',{templateUrl:'views/CatalArtDetailConfig/detail.html',controller:'NewCatalArtDetailConfigController'})
// .when('/CatalArtDetailConfigs/edit/:CatalArtDetailConfigId',{templateUrl:'views/CatalArtDetailConfig/detail.html',controller:'EditCatalArtDetailConfigController'})
// .when('/CatalArtEquivalences',{templateUrl:'views/CatalArtEquivalence/search.html',controller:'SearchCatalArtEquivalenceController'})
// .when('/CatalArtEquivalences/new',{templateUrl:'views/CatalArtEquivalence/detail.html',controller:'NewCatalArtEquivalenceController'})
// .when('/CatalArtEquivalences/edit/:CatalArtEquivalenceId',{templateUrl:'views/CatalArtEquivalence/detail.html',controller:'EditCatalArtEquivalenceController'})
// .when('/CatalArtFeatMappings',{templateUrl:'views/CatalArtFeatMapping/search.html',controller:'SearchCatalArtFeatMappingController'})
// .when('/CatalArtFeatMappings/new',{templateUrl:'views/CatalArtFeatMapping/detail.html',controller:'NewCatalArtFeatMappingController'})
// .when('/CatalArtFeatMappings/edit/:CatalArtFeatMappingId',{templateUrl:'views/CatalArtFeatMapping/detail.html',controller:'EditCatalArtFeatMappingController'})
// .when('/CatalArtManufSupps',{templateUrl:'views/CatalArtManufSupp/search.html',controller:'SearchCatalArtManufSuppController'})
// .when('/CatalArtManufSupps/new',{templateUrl:'views/CatalArtManufSupp/detail.html',controller:'NewCatalArtManufSuppController'})
// .when('/CatalArtManufSupps/edit/:CatalArtManufSuppId',{templateUrl:'views/CatalArtManufSupp/detail.html',controller:'EditCatalArtManufSuppController'})
// .when('/CatalFamilyFeatMapings',{templateUrl:'views/CatalFamilyFeatMaping/search.html',controller:'SearchCatalFamilyFeatMapingController'})
// .when('/CatalFamilyFeatMapings/new',{templateUrl:'views/CatalFamilyFeatMaping/detail.html',controller:'NewCatalFamilyFeatMapingController'})
// .when('/CatalFamilyFeatMapings/edit/:CatalFamilyFeatMapingId',{templateUrl:'views/CatalFamilyFeatMaping/detail.html',controller:'EditCatalFamilyFeatMapingController'})
// .when('/CatalManufSuppls',{templateUrl:'views/CatalManufSuppl/search.html',controller:'SearchCatalManufSupplController'})
// .when('/CatalManufSuppls/new',{templateUrl:'views/CatalManufSuppl/detail.html',controller:'NewCatalManufSupplController'})
// .when('/CatalManufSuppls/edit/:CatalManufSupplId',{templateUrl:'views/CatalManufSuppl/detail.html',controller:'EditCatalManufSupplController'})
// .when('/CatalPicMappings',{templateUrl:'views/CatalPicMapping/search.html',controller:'SearchCatalPicMappingController'})
// .when('/CatalPicMappings/new',{templateUrl:'views/CatalPicMapping/detail.html',controller:'NewCatalPicMappingController'})
// .when('/CatalPicMappings/edit/:CatalPicMappingId',{templateUrl:'views/CatalPicMapping/detail.html',controller:'EditCatalPicMappingController'})
.when('/CatalPkgModes',{templateUrl:'views/CatalPkgMode/search.html',controller:'catalPkgModeCtrl'})
.when('/CatalPkgModes/new',{templateUrl:'views/CatalPkgMode/create.html',controller:'catalPkgModeCreateCtrl'})
.when('/CatalPkgModes/edit/:identif',{templateUrl:'views/CatalPkgMode/edit.html',controller:'catalPkgModeEditCtrl'})
.when('/CatalPkgModes/show/:identif',{templateUrl:'views/CatalPkgMode/show.html',controller:'catalPkgModeShowCtrl'})
.when('/CatalProductFamilies',{templateUrl:'views/CatalProductFamily/search.html',controller:'SearchCatalProductFamilyController'})
.when('/CatalProductFamilies/new',{templateUrl:'views/CatalProductFamily/detail.html',controller:'NewCatalProductFamilyController'})
.when('/CatalProductFamilies/edit/:CatalProductFamilyId',{templateUrl:'views/CatalProductFamily/detail.html',controller:'EditCatalProductFamilyController'})
.otherwise({ redirectTo: '/' });
$httpProvider.defaults.withCredentials = true;
$httpProvider.interceptors.push('authInterceptor');
$translateProvider.useLoader('$translatePartialLoader', {
urlTemplate: '{part}/locale-{lang}.json'
});
| // $scope.matchesRoute = function(route) {
// var path = $location.path();
// return (path === ("/" + route) || path.indexOf("/" + route + "/") == 0);
// };
//})
//
.run(['$rootScope', '$location','sessionManager','$translate','APP_CONFIG','$translatePartialLoader',
function ($rootScope, $location, sessionManager,$translate,APP_CONFIG,$translatePartialLoader) {
$rootScope.appName = APP_CONFIG.appName ;
$rootScope.appVersion = APP_CONFIG.appVersion ;
$translatePartialLoader.addPart('/adcatal.client/i18n/main');
sessionManager.appMenuUrl("/adcatal.client/menu.html");
$rootScope.sessionManager = sessionManager;
$rootScope.$on('$locationChangeStart', function (event, next, current) {
var noSess = !sessionManager.hasValues(sessionManager.terminalSession(), sessionManager.userSession());
if(noSess){
var sessParam = $location.search();
if(sessParam && sessionManager.hasValues(sessParam.trm,sessParam.usr)){
sessionManager.wsin(sessParam.trm,sessParam.usr,
function(data, status, headers, config){
sessionManager.language(headers('X-USER-LANG'),false);
$location.path('/');
}
);
}
}
});
}]); |
}])
//
//.controller('NavController', function NavController($scope, $location) { |
input.rs | use crate::entity::act;
use crate::entity::control;
use crate::game::position::Position;
use crate::game::{self, env, ObjectStore, State};
use crate::ui::hud;
#[derive(Clone, Debug)]
pub enum PlayerInput {
Meta(UiAction),
Game(PlayerAction),
Undefined,
}
#[derive(Clone, Debug)]
pub enum UiAction {
ExitGameLoop,
CharacterScreen,
ChoosePrimary,
ChooseSecondary,
ChooseQuick1,
ChooseQuick2,
GenomeEditor,
Help,
SetFont(usize),
}
#[derive(Clone, Debug)]
pub enum PlayerAction {
Primary(act::Target), // using the arrow keys
Secondary(act::Target), // using 'W','A','S','D' keys
Quick1, // using 'Q', un-targeted quick action
Quick2, // using 'E', un-targeted second quick action
PassTurn,
UseInventoryItem(usize),
DropItem(usize),
}
/// Translate between bracket's keys and our own key codes.
fn key_to_action(
ctx: &mut rltk::BTerm,
key: rltk::VirtualKeyCode,
ctrl: bool,
shift: bool,
) -> PlayerInput {
use self::act::Target::*;
use rltk::VirtualKeyCode as Vkc;
match (key, ctrl, shift) {
// letters
(Vkc::A, false, false) => PlayerInput::Game(PlayerAction::Secondary(West)),
(Vkc::C, false, false) => PlayerInput::Meta(UiAction::CharacterScreen),
(Vkc::D, false, false) => PlayerInput::Game(PlayerAction::Secondary(East)),
(Vkc::E, false, false) => PlayerInput::Game(PlayerAction::Quick2),
(Vkc::E, false, true) => PlayerInput::Meta(UiAction::ChooseQuick2),
(Vkc::G, false, false) => {
if env().is_debug_mode {
PlayerInput::Meta(UiAction::GenomeEditor)
} else {
PlayerInput::Undefined
}
}
(Vkc::P, false, true) => PlayerInput::Meta(UiAction::ChoosePrimary),
(Vkc::Q, false, false) => PlayerInput::Game(PlayerAction::Quick1),
(Vkc::Q, false, true) => PlayerInput::Meta(UiAction::ChooseQuick1),
(Vkc::S, false, false) => PlayerInput::Game(PlayerAction::Secondary(South)),
(Vkc::S, false, true) => PlayerInput::Meta(UiAction::ChooseSecondary),
(Vkc::S, true, true) => {
take_screenshot(ctx);
PlayerInput::Undefined
}
(Vkc::W, false, false) => PlayerInput::Game(PlayerAction::Secondary(North)),
(Vkc::Up, false, false) => PlayerInput::Game(PlayerAction::Primary(North)),
(Vkc::Down, false, false) => PlayerInput::Game(PlayerAction::Primary(South)),
(Vkc::Left, false, false) => PlayerInput::Game(PlayerAction::Primary(West)),
(Vkc::Right, false, false) => PlayerInput::Game(PlayerAction::Primary(East)),
(Vkc::Space, false, false) => PlayerInput::Game(PlayerAction::PassTurn),
(Vkc::Escape, false, false) => PlayerInput::Meta(UiAction::ExitGameLoop),
(Vkc::F1, false, false) => PlayerInput::Meta(UiAction::Help),
(Vkc::Key1, false, false) => PlayerInput::Meta(UiAction::SetFont(0)),
(Vkc::Key2, false, false) => PlayerInput::Meta(UiAction::SetFont(1)),
(Vkc::Key3, false, false) => PlayerInput::Meta(UiAction::SetFont(2)),
(Vkc::Key4, false, false) => PlayerInput::Meta(UiAction::SetFont(3)),
(Vkc::Key5, false, false) => PlayerInput::Meta(UiAction::SetFont(4)),
_ => PlayerInput::Undefined,
}
}
// Create a detailed info panel as a tooltip.
// - list stats and (compare with player) to give hints about strength, receptors and such
// - get player sensor quality, quantity and adjust how much info is shown
// - either take the player out of the objects and compare to everything else
// or just gather all info and adjust visibility later when rendering tooltips in UI
// useful info:
// - receptor matching or not
// - virus RNA or DNA
fn get_names_under_mouse(
state: &State,
objects: &mut ObjectStore,
mouse: Position,
) -> Vec<hud::ToolTip> {
let mut tooltips: Vec<hud::ToolTip> = vec![];
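// Temporarily take the player out of the store so it can be compared against
// every other visible object without aliasing the store.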
if let Some(player) = objects.extract_by_index(state.player_idx) {
if player.pos.eq(&mouse) {
// tooltips.push(ToolTip::header_only("You".to_string()));
tooltips.push(player.generate_tooltip(&player));
}
tooltips.append(
&mut objects
.get_vector()
.iter()
.flatten()
.filter(|o| o.pos.eq(&mouse) && o.physics.is_visible)
// vvvvv---- replace function with `key-value`-list generating function.
.map(|o| o.generate_tooltip(&player))
.collect::<Vec<_>>(),
);
objects.replace(state.player_idx, player);
}
tooltips
}
/// Check whether the user has given inputs either via mouse or keyboard. Also update any input-
/// dependent UI elements, like hover-tooltips etc.
pub fn | (
state: &mut State,
objects: &mut ObjectStore,
hud: &mut hud::Hud,
ctx: &mut rltk::BTerm,
) -> PlayerInput {
let mut input = rltk::INPUT.lock();
#[allow(clippy::single_match)]
input.for_each_message(|event| match event {
rltk::BEvent::CloseRequested => ctx.quitting = true,
_ => (),
});
// 1) check whether key has been pressed
use rltk::VirtualKeyCode as Vkc;
let ctrl = input.key_pressed_set().contains(&Vkc::LControl)
|| input.key_pressed_set().contains(&Vkc::RControl);
let shift = input.key_pressed_set().contains(&Vkc::LShift)
|| input.key_pressed_set().contains(&Vkc::RShift);
if let Some(key) = ctx.key {
return key_to_action(ctx, key, ctrl, shift);
}
let mouse = Position::from(ctx.mouse_point());
let is_clicked: bool = ctx.left_click;
// 2) update hovered objects
hud.update_tooltips(mouse.into(), get_names_under_mouse(state, objects, mouse));
// 3) if mouse is hovering over world
if mouse.x() < game::consts::WORLD_WIDTH {
// 3b) check whether a mouse button has been pressed for player action
if is_clicked {
// get clicked cell, check if it is adjacent to player, perform primary action
if let Some(player) = &objects[state.player_idx] {
if let Some(control::Controller::Player(ctrl)) = &player.control {
if let act::TargetCategory::Any = ctrl.primary_action.get_target_category() {
return PlayerInput::Game(PlayerAction::Primary(act::Target::from_pos(
&player.pos,
&mouse,
)));
} else if player.pos.is_adjacent(&mouse) {
return PlayerInput::Game(PlayerAction::Primary(act::Target::from_pos(
&player.pos,
&mouse,
)));
}
}
}
}
PlayerInput::Undefined
} else {
// 4) is mouse is hovering over sidebar
// 4a) update hovered button
if let Some(item) = hud
.items
.iter()
.find(|i| i.layout.point_in_rect(mouse.into()))
{
return if is_clicked {
match item.item_enum {
hud::HudItem::PrimaryAction => PlayerInput::Meta(UiAction::ChoosePrimary),
hud::HudItem::SecondaryAction => PlayerInput::Meta(UiAction::ChooseSecondary),
hud::HudItem::Quick1Action => PlayerInput::Meta(UiAction::ChooseQuick1),
hud::HudItem::Quick2Action => PlayerInput::Meta(UiAction::ChooseQuick2),
hud::HudItem::DnaItem => PlayerInput::Undefined, // no action when clicked
hud::HudItem::BarItem => PlayerInput::Undefined, // no action when clicked
hud::HudItem::UseInventory { idx } => {
PlayerInput::Game(PlayerAction::UseInventoryItem(idx))
}
hud::HudItem::DropInventory { idx } => {
PlayerInput::Game(PlayerAction::DropItem(idx))
}
}
} else {
PlayerInput::Undefined
};
};
// 4b) check for button press to activate ui buttons
PlayerInput::Undefined
}
}
#[cfg(not(target_arch = "wasm32"))]
fn take_screenshot(ctx: &mut rltk::BTerm) {
ctx.screenshot("innit_screenshot.png");
}
#[cfg(target_arch = "wasm32")]
fn take_screenshot(_ctx: &mut rltk::BTerm) {
info!("screenshots no supported in wasm")
}
| read |
query.go | package query
import (
"github.com/labstack/echo"
"github.com/yalunga/onewod-api/pkg/utl/model"
)
// List prepares data for list queries
func List(u *gorsk.AuthUser) (*gorsk.ListQuery, error) | {
switch true {
case u.Role <= gorsk.AdminRole: // user is SuperAdmin or Admin
return nil, nil
case u.Role == gorsk.CompanyAdminRole:
return &gorsk.ListQuery{Query: "company_id = ?", ID: u.CompanyID}, nil
case u.Role == gorsk.LocationAdminRole:
return &gorsk.ListQuery{Query: "location_id = ?", ID: u.LocationID}, nil
default:
return nil, echo.ErrForbidden
}
} |
|
utils.py | import scipy.signal as signal
import torch
import torch.nn as nn
import numpy as np
import models
import gym
import wandb
def create_feedforward(sizes, activation=nn.ReLU):
layers = []
for i in range(len(sizes) - 1):
layers.append(nn.Linear(sizes[i], sizes[i+1]))
if i < len(sizes) - 2:
layers.append(activation())
return nn.Sequential(*layers)
def get_shape(shape):
if shape is None:
return ()
return shape
def discounted_cumsum(rewards, reward_decay):
"""Taken from https://stackoverflow.com/questions/47970683/vectorize-a-numpy-discount-calculation"""
return signal.lfilter([1], [1, -reward_decay], x=rewards[::-1])[::-1]
class TrajectoryBuffer:
def __init__(self, observation_shape, action_shape, size, reward_decay=0.99):
self.max_size = size
self.trajectory_start = 0
self.pos = 0
self.reward_decay = reward_decay
self.observations = np.empty((size, *observation_shape), dtype=np.float32)
self.actions = np.empty((size, *get_shape(action_shape)), dtype=np.float32)
self.rewards = np.empty((size,), dtype=np.float32)
self.returns = np.empty((size,), dtype=np.float32)
self.dones = np.empty((size,), dtype=np.float32)
def store(self, observation, action, reward, done):
assert self.pos < self.max_size, "Buffer Overflow"
self.observations[self.pos] = observation
self.actions[self.pos] = action
self.rewards[self.pos] = reward
self.dones[self.pos] = done
self.pos += 1
def end_trajectory(self, value=0):
# Compute return
sl = slice(self.trajectory_start, self.pos)
rewards = self.rewards[sl]
rewards = np.append(rewards, value)
self.returns[sl] = discounted_cumsum(rewards, self.reward_decay)[:-1]
self.trajectory_start = self.pos
def get_data(self):
sl = slice(0, self.pos)
data = dict(
observations=self.observations[sl],
actions=self.actions[sl],
rewards=self.rewards[sl],
returns=self.returns[sl],
dones=self.dones[sl]
)
return {key : torch.from_numpy(value) for key, value in data.items()}
def clear(self):
self.pos = 0
self.trajectory_start = 0
class VecTrajectoryBuffer:
def __init__(self, observation_shape, action_shape, num_envs, size, reward_decay=0.99):
self.max_size = size
self.pos = 0
self.reward_decay = reward_decay
self.traj_starts = np.zeros((num_envs,), dtype=int)
self.observations = np.empty((size, num_envs, *observation_shape), dtype=np.float32)
self.actions = np.empty((size, num_envs, *get_shape(action_shape)), dtype=np.float32)
self.rewards = np.empty((size, num_envs), dtype=np.float32)
self.returns = np.empty((size, num_envs), dtype=np.float32)
self.dones = np.empty((size, num_envs), dtype=np.float32)
def store(self, observations, actions, rewards, dones):
assert self.pos < self.max_size, "Buffer Overflow"
self.observations[self.pos] = observations
self.actions[self.pos] = actions
self.rewards[self.pos] = rewards
self.dones[self.pos] = dones
self.pos += 1
# Compute returns
for env_index, done in enumerate(dones):
if done:
self._end_trajectory(env_index)
def end_trajectory(self, values):
for env_index, value in enumerate(values):
self._end_trajectory(env_index, value)
def _end_trajectory(self, env_index, value=0):
# Compute return
sl = slice(self.traj_starts[env_index], self.pos)
rewards = self.rewards[sl, env_index]
rewards = np.append(rewards, value)
self.returns[sl, env_index] = discounted_cumsum(rewards, self.reward_decay)[:-1]
# Update trajectory start
self.traj_starts[env_index] = self.pos
def get_data(self, device=torch.device('cpu')):
sl = slice(0, self.pos)
data = dict(
observations=self._remove_env_axis(self.observations[sl]),
actions=self._remove_env_axis(self.actions[sl]),
rewards=self._remove_env_axis(self.rewards[sl]),
returns=self._remove_env_axis(self.returns[sl]),
dones=self._remove_env_axis(self.dones[sl])
)
return {key : torch.from_numpy(value).to(device) for key, value in data.items()}
def clear(self):
self.pos = 0
self.traj_starts.fill(0)
def _remove_env_axis(self, array):
# array.shape = (size, num_envs, ???)
shape = array.shape
# Swap size with num_envs to ensure reshaping won't mix trajectories
array = array.swapaxes(0, 1)
# Flatten | return array
def play(model: models.Policy, env: gym.Env, repeats=10, device=torch.device('cpu')):
for _ in range(repeats):
state = env.reset()
done = False
while not done:
inp = torch.FloatTensor([state]).to(device)
action = model.get_actions(inp)[0]
state, reward, done, _ = env.step(action)
env.render()
env.close()
def capture_video(model: models.Policy, env: gym.Env, fps=30, device=torch.device('cpu')):
frames = []
reward_sum = 0
step_count = 0
state = env.reset()
done = False
while not done:
inp = torch.FloatTensor([state]).to(device)
action = model.get_actions(inp)[0]
state, reward, done, _ = env.step(action)
frames.append(np.array(env.render("rgb_array")))
reward_sum += reward
step_count += 1
frames = np.array(frames) # (Time, Width, Height, Channels)
frames = np.moveaxis(frames, 3, 1) # (Time, Channels, Width, Height)
return wandb.Video(frames, caption=f"RewardSum={reward_sum}; EpisodeLength={step_count}", fps=fps) | new_shape = (shape[0] * shape[1], *shape[2:])
array = array.reshape(new_shape) |
coercion.rs | //! # Type Coercion
//!
//! Under certain circumstances we will coerce from one type to another,
//! for example by auto-borrowing. This occurs in situations where the
//! compiler has a firm 'expected type' that was supplied from the user,
//! and where the actual type is similar to that expected type in purpose
//! but not in representation (so actual subtyping is inappropriate).
//!
//! ## Reborrowing
//!
//! Note that if we are expecting a reference, we will *reborrow*
//! even if the argument provided was already a reference. This is
//! useful for freezing mut/const things (that is, when the expected is &T
//! but you have &const T or &mut T) and also for avoiding the linearity
//! of mut things (when the expected is &mut T and you have &mut T). See
//! the various `src/test/ui/coerce-reborrow-*.rs` tests for
//! examples of where this is useful.
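//! An illustrative sketch (names are made up, not from the compiler sources):
//!
//!     fn print_it(_x: &i32) {}
//!     let mut v = 0;
//!     let m = &mut v;
//!     print_it(m); // coerced as `print_it(&*m)`: the `&mut` is reborrowed as `&`
//!     *m += 1;     // `m` itself was not moved, so it is still usable here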
//!
//! ## Subtle note
//!
//! When deciding what type coercions to consider, we do not attempt to
//! resolve any type variables we may encounter. This is because `b`
//! represents the expected type "as the user wrote it", meaning that if
//! the user defined a generic function like
//!
//! fn foo<A>(a: A, b: A) { ... }
//!
//! and then we wrote `foo(&1, @2)`, we will not auto-borrow
//! either argument. In older code we went to some lengths to
//! resolve the `b` variable, which could mean that we'd
//! auto-borrow later arguments but not earlier ones, which
//! seems very confusing.
//!
//! ## Subtler note
//!
//! However, right now, if the user manually specifies the
//! values for the type variables, as so:
//!
//! foo::<&int>(@1, @2)
//!
//! then we *will* auto-borrow, because we can't distinguish this from a
//! function that declared `&int`. This is inconsistent but it's easiest
//! at the moment. The right thing to do, I think, is to consider the
//! *unsubstituted* type when deciding whether to auto-borrow, but the
//! *substituted* type when considering the bounds and so forth. But most
//! of our methods don't give access to the unsubstituted type, and
//! rightly so because they'd be error-prone. So maybe the thing to do is
//! to actually determine the kind of coercions that should occur
//! separately and pass them in. Or maybe it's ok as is. Anyway, it's
//! sort of a minor point so I've opted to leave it for later -- after all,
//! we may want to adjust precisely when coercions occur.
use crate::check::{FnCtxt, Needs};
use errors::DiagnosticBuilder;
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::hir::ptr::P;
use rustc::infer::{Coercion, InferResult, InferOk};
use rustc::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc::traits::{self, ObligationCause, ObligationCauseCode};
use rustc::ty::adjustment::{
Adjustment, Adjust, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, PointerCast
};
use rustc::ty::{self, TypeAndMut, Ty};
use rustc::ty::fold::TypeFoldable;
use rustc::ty::error::TypeError;
use rustc::ty::relate::RelateResult;
use rustc::ty::subst::SubstsRef;
use smallvec::{smallvec, SmallVec};
use std::ops::Deref;
use syntax::feature_gate;
use syntax::symbol::sym;
use syntax_pos;
use rustc_target::spec::abi::Abi;
struct Coerce<'a, 'tcx> {
fcx: &'a FnCtxt<'a, 'tcx>,
cause: ObligationCause<'tcx>,
use_lub: bool,
/// Determines whether or not allow_two_phase_borrow is set on any
/// autoref adjustments we create while coercing. We don't want to
/// allow deref coercions to create two-phase borrows, at least initially,
/// but we do need two-phase borrows for function argument reborrows.
/// See #47489 and #48598
/// See docs on the "AllowTwoPhase" type for a more detailed discussion
allow_two_phase: AllowTwoPhase,
}
impl<'a, 'tcx> Deref for Coerce<'a, 'tcx> {
type Target = FnCtxt<'a, 'tcx>;
fn deref(&self) -> &Self::Target |
}
type CoerceResult<'tcx> = InferResult<'tcx, (Vec<Adjustment<'tcx>>, Ty<'tcx>)>;
fn coerce_mutbls<'tcx>(from_mutbl: hir::Mutability,
to_mutbl: hir::Mutability)
-> RelateResult<'tcx, ()> {
match (from_mutbl, to_mutbl) {
(hir::MutMutable, hir::MutMutable) |
(hir::MutImmutable, hir::MutImmutable) |
(hir::MutMutable, hir::MutImmutable) => Ok(()),
(hir::MutImmutable, hir::MutMutable) => Err(TypeError::Mutability),
}
}
fn identity(_: Ty<'_>) -> Vec<Adjustment<'_>> { vec![] }
fn simple<'tcx>(kind: Adjust<'tcx>) -> impl FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>> {
move |target| vec![Adjustment { kind, target }]
}
fn success<'tcx>(adj: Vec<Adjustment<'tcx>>,
target: Ty<'tcx>,
obligations: traits::PredicateObligations<'tcx>)
-> CoerceResult<'tcx> {
Ok(InferOk {
value: (adj, target),
obligations
})
}
impl<'f, 'tcx> Coerce<'f, 'tcx> {
fn new(
fcx: &'f FnCtxt<'f, 'tcx>,
cause: ObligationCause<'tcx>,
allow_two_phase: AllowTwoPhase,
) -> Self {
Coerce {
fcx,
cause,
allow_two_phase,
use_lub: false,
}
}
fn unify(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> {
self.commit_if_ok(|_| {
if self.use_lub {
self.at(&self.cause, self.fcx.param_env).lub(b, a)
} else {
self.at(&self.cause, self.fcx.param_env)
.sup(b, a)
.map(|InferOk { value: (), obligations }| InferOk { value: a, obligations })
}
})
}
/// Unify two types (using sub or lub) and produce a specific coercion.
fn unify_and<F>(&self, a: Ty<'tcx>, b: Ty<'tcx>, f: F)
-> CoerceResult<'tcx>
where F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>
{
self.unify(&a, &b).and_then(|InferOk { value: ty, obligations }| {
success(f(ty), ty, obligations)
})
}
fn coerce(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
let a = self.shallow_resolve(a);
debug!("Coerce.tys({:?} => {:?})", a, b);
// Just ignore error types.
if a.references_error() || b.references_error() {
return success(vec![], self.fcx.tcx.types.err, vec![]);
}
if a.is_never() {
// Subtle: If we are coercing from `!` to `?T`, where `?T` is an unbound
// type variable, we want `?T` to fallback to `!` if not
// otherwise constrained. An example where this arises:
//
// let _: Option<?T> = Some({ return; });
//
// here, we would coerce from `!` to `?T`.
let b = self.shallow_resolve(b);
return if self.shallow_resolve(b).is_ty_var() {
// Micro-optimization: no need for this if `b` is
// already resolved in some way.
let diverging_ty = self.next_diverging_ty_var(
TypeVariableOrigin {
kind: TypeVariableOriginKind::AdjustmentType,
span: self.cause.span,
},
);
self.unify_and(&b, &diverging_ty, simple(Adjust::NeverToAny))
} else {
success(simple(Adjust::NeverToAny)(b), b, vec![])
};
}
// Consider coercing the subtype to a DST
//
// NOTE: this is wrapped in a `commit_if_ok` because it creates
// a "spurious" type variable, and we don't want to have that
// type variable in memory if the coercion fails.
let unsize = self.commit_if_ok(|_| self.coerce_unsized(a, b));
match unsize {
Ok(_) => {
debug!("coerce: unsize successful");
return unsize;
}
Err(TypeError::ObjectUnsafeCoercion(did)) => {
debug!("coerce: unsize not object safe");
return Err(TypeError::ObjectUnsafeCoercion(did));
}
Err(_) => {}
}
debug!("coerce: unsize failed");
// Examine the supertype and consider auto-borrowing.
//
// Note: does not attempt to resolve type variables we encounter.
// See above for details.
match b.kind {
ty::RawPtr(mt_b) => {
return self.coerce_unsafe_ptr(a, b, mt_b.mutbl);
}
ty::Ref(r_b, ty, mutbl) => {
let mt_b = ty::TypeAndMut { ty, mutbl };
return self.coerce_borrowed_pointer(a, b, r_b, mt_b);
}
_ => {}
}
match a.kind {
ty::FnDef(..) => {
// Function items are coercible to any closure
// type; function pointers are not (that would
// require double indirection).
// Additionally, we permit coercion of function
// items to drop the unsafe qualifier.
self.coerce_from_fn_item(a, b)
}
ty::FnPtr(a_f) => {
// We permit coercion of fn pointers to drop the
// unsafe qualifier.
self.coerce_from_fn_pointer(a, a_f, b)
}
ty::Closure(def_id_a, substs_a) => {
// Non-capturing closures are coercible to
// function pointers or unsafe function pointers.
// It cannot convert closures that require unsafe.
self.coerce_closure_to_fn(a, def_id_a, substs_a, b)
}
_ => {
// Otherwise, just use unification rules.
self.unify_and(a, b, identity)
}
}
}
/// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`.
/// To match `A` with `B`, autoderef will be performed,
/// calling `deref`/`deref_mut` where necessary.
fn coerce_borrowed_pointer(&self,
a: Ty<'tcx>,
b: Ty<'tcx>,
r_b: ty::Region<'tcx>,
mt_b: TypeAndMut<'tcx>)
-> CoerceResult<'tcx>
{
debug!("coerce_borrowed_pointer(a={:?}, b={:?})", a, b);
// If we have a parameter of type `&M T_a` and the value
// provided is `expr`, we will be adding an implicit borrow,
// meaning that we convert `f(expr)` to `f(&M *expr)`. Therefore,
// to type check, we will construct the type that `&M*expr` would
// yield.
let (r_a, mt_a) = match a.kind {
ty::Ref(r_a, ty, mutbl) => {
let mt_a = ty::TypeAndMut { ty, mutbl };
coerce_mutbls(mt_a.mutbl, mt_b.mutbl)?;
(r_a, mt_a)
}
_ => return self.unify_and(a, b, identity),
};
let span = self.cause.span;
let mut first_error = None;
let mut r_borrow_var = None;
let mut autoderef = self.autoderef(span, a);
let mut found = None;
for (referent_ty, autoderefs) in autoderef.by_ref() {
if autoderefs == 0 {
// Don't let this pass, otherwise it would cause
// &T to autoref to &&T.
continue;
}
// At this point, we have deref'd `a` to `referent_ty`. So
// imagine we are coercing from `&'a mut Vec<T>` to `&'b mut [T]`.
// In the autoderef loop for `&'a mut Vec<T>`, we would get
// three callbacks:
//
// - `&'a mut Vec<T>` -- 0 derefs, just ignore it
// - `Vec<T>` -- 1 deref
// - `[T]` -- 2 deref
//
// At each point after the first callback, we want to
            // check to see whether this would match our target type
// (`&'b mut [T]`) if we autoref'd it. We can't just
// compare the referent types, though, because we still
// have to consider the mutability. E.g., in the case
// we've been considering, we have an `&mut` reference, so
// the `T` in `[T]` needs to be unified with equality.
//
// Therefore, we construct reference types reflecting what
// the types will be after we do the final auto-ref and
// compare those. Note that this means we use the target
// mutability [1], since it may be that we are coercing
// from `&mut T` to `&U`.
//
// One fine point concerns the region that we use. We
// choose the region such that the region of the final
// type that results from `unify` will be the region we
// want for the autoref:
//
// - if in sub mode, that means we want to use `'b` (the
// region from the target reference) for both
// pointers [2]. This is because sub mode (somewhat
// arbitrarily) returns the subtype region. In the case
// where we are coercing to a target type, we know we
// want to use that target type region (`'b`) because --
// for the program to type-check -- it must be the
// smaller of the two.
// - One fine point. It may be surprising that we can
// use `'b` without relating `'a` and `'b`. The reason
// that this is ok is that what we produce is
// effectively a `&'b *x` expression (if you could
// annotate the region of a borrow), and regionck has
// code that adds edges from the region of a borrow
// (`'b`, here) into the regions in the borrowed
// expression (`*x`, here). (Search for "link".)
// - if in lub mode, things can get fairly complicated. The
// easiest thing is just to make a fresh
// region variable [4], which effectively means we defer
// the decision to region inference (and regionck, which will add
// some more edges to this variable). However, this can wind up
// creating a crippling number of variables in some cases --
// e.g., #32278 -- so we optimize one particular case [3].
// Let me try to explain with some examples:
// - The "running example" above represents the simple case,
// where we have one `&` reference at the outer level and
// ownership all the rest of the way down. In this case,
// we want `LUB('a, 'b)` as the resulting region.
// - However, if there are nested borrows, that region is
// too strong. Consider a coercion from `&'a &'x Rc<T>` to
// `&'b T`. In this case, `'a` is actually irrelevant.
            //     The pointer we want is `LUB('x, 'b)`. If we choose `LUB('a,'b)`
// we get spurious errors (`ui/regions-lub-ref-ref-rc.rs`).
// (The errors actually show up in borrowck, typically, because
// this extra edge causes the region `'a` to be inferred to something
// too big, which then results in borrowck errors.)
// - We could track the innermost shared reference, but there is already
// code in regionck that has the job of creating links between
// the region of a borrow and the regions in the thing being
// borrowed (here, `'a` and `'x`), and it knows how to handle
// all the various cases. So instead we just make a region variable
// and let regionck figure it out.
let r = if !self.use_lub {
r_b // [2] above
} else if autoderefs == 1 {
r_a // [3] above
} else {
if r_borrow_var.is_none() {
                    // create var lazily, at most once
let coercion = Coercion(span);
let r = self.next_region_var(coercion);
r_borrow_var = Some(r); // [4] above
}
r_borrow_var.unwrap()
};
let derefd_ty_a = self.tcx.mk_ref(r,
TypeAndMut {
ty: referent_ty,
mutbl: mt_b.mutbl, // [1] above
});
match self.unify(derefd_ty_a, b) {
Ok(ok) => {
found = Some(ok);
break;
}
Err(err) => {
if first_error.is_none() {
first_error = Some(err);
}
}
}
}
// Extract type or return an error. We return the first error
// we got, which should be from relating the "base" type
// (e.g., in example above, the failure from relating `Vec<T>`
// to the target type), since that should be the least
// confusing.
let InferOk { value: ty, mut obligations } = match found {
Some(d) => d,
None => {
let err = first_error.expect("coerce_borrowed_pointer had no error");
debug!("coerce_borrowed_pointer: failed with err = {:?}", err);
return Err(err);
}
};
if ty == a && mt_a.mutbl == hir::MutImmutable && autoderef.step_count() == 1 {
// As a special case, if we would produce `&'a *x`, that's
// a total no-op. We end up with the type `&'a T` just as
// we started with. In that case, just skip it
// altogether. This is just an optimization.
//
// Note that for `&mut`, we DO want to reborrow --
// otherwise, this would be a move, which might be an
// error. For example `foo(self.x)` where `self` and
            // `self.x` both have `&mut` type would be a move of
// `self.x`, but we auto-coerce it to `foo(&mut *self.x)`,
// which is a borrow.
assert_eq!(mt_b.mutbl, hir::MutImmutable); // can only coerce &T -> &U
return success(vec![], ty, obligations);
}
let needs = Needs::maybe_mut_place(mt_b.mutbl);
let InferOk { value: mut adjustments, obligations: o }
= autoderef.adjust_steps_as_infer_ok(self, needs);
obligations.extend(o);
obligations.extend(autoderef.into_obligations());
// Now apply the autoref. We have to extract the region out of
// the final ref type we got.
let r_borrow = match ty.kind {
ty::Ref(r_borrow, _, _) => r_borrow,
_ => span_bug!(span, "expected a ref type, got {:?}", ty),
};
let mutbl = match mt_b.mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
allow_two_phase_borrow: self.allow_two_phase,
}
};
adjustments.push(Adjustment {
kind: Adjust::Borrow(AutoBorrow::Ref(r_borrow, mutbl)),
target: ty
});
debug!("coerce_borrowed_pointer: succeeded ty={:?} adjustments={:?}",
ty,
adjustments);
success(adjustments, ty, obligations)
}
// &[T; n] or &mut [T; n] -> &[T]
// or &mut [T; n] -> &mut [T]
// or &Concrete -> &Trait, etc.
fn coerce_unsized(&self, source: Ty<'tcx>, target: Ty<'tcx>) -> CoerceResult<'tcx> {
debug!("coerce_unsized(source={:?}, target={:?})", source, target);
let traits = (self.tcx.lang_items().unsize_trait(),
self.tcx.lang_items().coerce_unsized_trait());
let (unsize_did, coerce_unsized_did) = if let (Some(u), Some(cu)) = traits {
(u, cu)
} else {
debug!("missing Unsize or CoerceUnsized traits");
return Err(TypeError::Mismatch);
};
// Note, we want to avoid unnecessary unsizing. We don't want to coerce to
// a DST unless we have to. This currently comes out in the wash since
// we can't unify [T] with U. But to properly support DST, we need to allow
// that, at which point we will need extra checks on the target here.
// Handle reborrows before selecting `Source: CoerceUnsized<Target>`.
let reborrow = match (&source.kind, &target.kind) {
(&ty::Ref(_, ty_a, mutbl_a), &ty::Ref(_, _, mutbl_b)) => {
coerce_mutbls(mutbl_a, mutbl_b)?;
let coercion = Coercion(self.cause.span);
let r_borrow = self.next_region_var(coercion);
let mutbl = match mutbl_b {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
// We don't allow two-phase borrows here, at least for initial
// implementation. If it happens that this coercion is a function argument,
// the reborrow in coerce_borrowed_ptr will pick it up.
allow_two_phase_borrow: AllowTwoPhase::No,
}
};
Some((Adjustment {
kind: Adjust::Deref(None),
target: ty_a
}, Adjustment {
kind: Adjust::Borrow(AutoBorrow::Ref(r_borrow, mutbl)),
target: self.tcx.mk_ref(r_borrow, ty::TypeAndMut {
mutbl: mutbl_b,
ty: ty_a
})
}))
}
(&ty::Ref(_, ty_a, mt_a), &ty::RawPtr(ty::TypeAndMut { mutbl: mt_b, .. })) => {
coerce_mutbls(mt_a, mt_b)?;
Some((Adjustment {
kind: Adjust::Deref(None),
target: ty_a
}, Adjustment {
kind: Adjust::Borrow(AutoBorrow::RawPtr(mt_b)),
target: self.tcx.mk_ptr(ty::TypeAndMut {
mutbl: mt_b,
ty: ty_a
})
}))
}
_ => None,
};
let coerce_source = reborrow.as_ref().map_or(source, |&(_, ref r)| r.target);
// Setup either a subtyping or a LUB relationship between
// the `CoerceUnsized` target type and the expected type.
// We only have the latter, so we use an inference variable
// for the former and let type inference do the rest.
let origin = TypeVariableOrigin {
kind: TypeVariableOriginKind::MiscVariable,
span: self.cause.span,
};
let coerce_target = self.next_ty_var(origin);
let mut coercion = self.unify_and(coerce_target, target, |target| {
let unsize = Adjustment {
kind: Adjust::Pointer(PointerCast::Unsize),
target
};
match reborrow {
None => vec![unsize],
Some((ref deref, ref autoref)) => {
vec![deref.clone(), autoref.clone(), unsize]
}
}
})?;
let mut selcx = traits::SelectionContext::new(self);
// Create an obligation for `Source: CoerceUnsized<Target>`.
let cause = ObligationCause::new(
self.cause.span,
self.body_id,
ObligationCauseCode::Coercion { source, target },
);
// Use a FIFO queue for this custom fulfillment procedure.
//
// A Vec (or SmallVec) is not a natural choice for a queue. However,
// this code path is hot, and this queue usually has a max length of 1
// and almost never more than 3. By using a SmallVec we avoid an
// allocation, at the (very small) cost of (occasionally) having to
// shift subsequent elements down when removing the front element.
let mut queue: SmallVec<[_; 4]> =
smallvec![self.tcx.predicate_for_trait_def(self.fcx.param_env,
cause,
coerce_unsized_did,
0,
coerce_source,
&[coerce_target.into()])];
let mut has_unsized_tuple_coercion = false;
// Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid
// emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where
// inference might unify those two inner type variables later.
let traits = [coerce_unsized_did, unsize_did];
while !queue.is_empty() {
let obligation = queue.remove(0);
debug!("coerce_unsized resolve step: {:?}", obligation);
let trait_ref = match obligation.predicate {
ty::Predicate::Trait(ref tr) if traits.contains(&tr.def_id()) => {
if unsize_did == tr.def_id() {
let sty = &tr.skip_binder().input_types().nth(1).unwrap().kind;
if let ty::Tuple(..) = sty {
debug!("coerce_unsized: found unsized tuple coercion");
has_unsized_tuple_coercion = true;
}
}
tr.clone()
}
_ => {
coercion.obligations.push(obligation);
continue;
}
};
match selcx.select(&obligation.with(trait_ref)) {
// Uncertain or unimplemented.
Ok(None) => {
if trait_ref.def_id() == unsize_did {
let trait_ref = self.resolve_vars_if_possible(&trait_ref);
let self_ty = trait_ref.skip_binder().self_ty();
let unsize_ty = trait_ref.skip_binder().input_types().nth(1).unwrap();
debug!("coerce_unsized: ambiguous unsize case for {:?}", trait_ref);
match (&self_ty.kind, &unsize_ty.kind) {
(ty::Infer(ty::TyVar(v)),
ty::Dynamic(..)) if self.type_var_is_sized(*v) => {
debug!("coerce_unsized: have sized infer {:?}", v);
coercion.obligations.push(obligation);
// `$0: Unsize<dyn Trait>` where we know that `$0: Sized`, try going
// for unsizing.
}
_ => {
// Some other case for `$0: Unsize<Something>`. Note that we
// hit this case even if `Something` is a sized type, so just
// don't do the coercion.
debug!("coerce_unsized: ambiguous unsize");
return Err(TypeError::Mismatch);
}
}
} else {
debug!("coerce_unsized: early return - ambiguous");
return Err(TypeError::Mismatch);
}
}
Err(traits::Unimplemented) => {
debug!("coerce_unsized: early return - can't prove obligation");
return Err(TypeError::Mismatch);
}
// Object safety violations or miscellaneous.
Err(err) => {
self.report_selection_error(&obligation, &err, false, false);
// Treat this like an obligation and follow through
// with the unsizing - the lack of a coercion should
// be silent, as it causes a type mismatch later.
}
Ok(Some(vtable)) => {
queue.extend(vtable.nested_obligations())
}
}
}
if has_unsized_tuple_coercion && !self.tcx.features().unsized_tuple_coercion {
feature_gate::emit_feature_err(&self.tcx.sess.parse_sess,
sym::unsized_tuple_coercion,
self.cause.span,
feature_gate::GateIssue::Language,
feature_gate::EXPLAIN_UNSIZED_TUPLE_COERCION);
}
Ok(coercion)
}
fn coerce_from_safe_fn<F, G>(&self,
a: Ty<'tcx>,
fn_ty_a: ty::PolyFnSig<'tcx>,
b: Ty<'tcx>,
to_unsafe: F,
normal: G)
-> CoerceResult<'tcx>
where F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
G: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>
{
if let ty::FnPtr(fn_ty_b) = b.kind {
if let (hir::Unsafety::Normal, hir::Unsafety::Unsafe)
= (fn_ty_a.unsafety(), fn_ty_b.unsafety())
{
let unsafe_a = self.tcx.safe_to_unsafe_fn_ty(fn_ty_a);
return self.unify_and(unsafe_a, b, to_unsafe);
}
}
self.unify_and(a, b, normal)
}
fn coerce_from_fn_pointer(&self,
a: Ty<'tcx>,
fn_ty_a: ty::PolyFnSig<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
        //! Attempts to coerce from a Rust function pointer type into another
        //! function pointer type, in particular permitting a safe `fn` to
        //! coerce to an `unsafe fn`.
//!
let b = self.shallow_resolve(b);
debug!("coerce_from_fn_pointer(a={:?}, b={:?})", a, b);
self.coerce_from_safe_fn(a, fn_ty_a, b,
simple(Adjust::Pointer(PointerCast::UnsafeFnPointer)), identity)
}
fn coerce_from_fn_item(&self,
a: Ty<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
        //! Attempts to coerce from the type of a Rust function item
        //! into a function pointer type.
let b = self.shallow_resolve(b);
debug!("coerce_from_fn_item(a={:?}, b={:?})", a, b);
match b.kind {
ty::FnPtr(_) => {
let a_sig = a.fn_sig(self.tcx);
// Intrinsics are not coercible to function pointers
if a_sig.abi() == Abi::RustIntrinsic ||
a_sig.abi() == Abi::PlatformIntrinsic {
return Err(TypeError::IntrinsicCast);
}
let InferOk { value: a_sig, mut obligations } =
self.normalize_associated_types_in_as_infer_ok(self.cause.span, &a_sig);
let a_fn_pointer = self.tcx.mk_fn_ptr(a_sig);
let InferOk { value, obligations: o2 } = self.coerce_from_safe_fn(
a_fn_pointer,
a_sig,
b,
|unsafe_ty| {
vec![
Adjustment {
kind: Adjust::Pointer(PointerCast::ReifyFnPointer),
target: a_fn_pointer
},
Adjustment {
kind: Adjust::Pointer(PointerCast::UnsafeFnPointer),
target: unsafe_ty
},
]
},
simple(Adjust::Pointer(PointerCast::ReifyFnPointer))
)?;
obligations.extend(o2);
Ok(InferOk { value, obligations })
}
_ => self.unify_and(a, b, identity),
}
}
fn coerce_closure_to_fn(&self,
a: Ty<'tcx>,
def_id_a: DefId,
substs_a: SubstsRef<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
//! Attempts to coerce from the type of a non-capturing closure
//! into a function pointer.
//!
let b = self.shallow_resolve(b);
match b.kind {
ty::FnPtr(fn_ty) if self.tcx.upvars(def_id_a).map_or(true, |v| v.is_empty()) => {
// We coerce the closure, which has fn type
// `extern "rust-call" fn((arg0,arg1,...)) -> _`
// to
// `fn(arg0,arg1,...) -> _`
// or
// `unsafe fn(arg0,arg1,...) -> _`
let sig = self.closure_sig(def_id_a, substs_a);
let unsafety = fn_ty.unsafety();
let pointer_ty = self.tcx.coerce_closure_fn_ty(sig, unsafety);
debug!("coerce_closure_to_fn(a={:?}, b={:?}, pty={:?})",
a, b, pointer_ty);
self.unify_and(pointer_ty, b, simple(
Adjust::Pointer(PointerCast::ClosureFnPointer(unsafety))
))
}
_ => self.unify_and(a, b, identity),
}
}
fn coerce_unsafe_ptr(&self,
a: Ty<'tcx>,
b: Ty<'tcx>,
mutbl_b: hir::Mutability)
-> CoerceResult<'tcx> {
debug!("coerce_unsafe_ptr(a={:?}, b={:?})", a, b);
let (is_ref, mt_a) = match a.kind {
ty::Ref(_, ty, mutbl) => (true, ty::TypeAndMut { ty, mutbl }),
ty::RawPtr(mt) => (false, mt),
_ => return self.unify_and(a, b, identity)
};
// Check that the types which they point at are compatible.
let a_unsafe = self.tcx.mk_ptr(ty::TypeAndMut {
mutbl: mutbl_b,
ty: mt_a.ty,
});
coerce_mutbls(mt_a.mutbl, mutbl_b)?;
// Although references and unsafe ptrs have the same
// representation, we still register an Adjust::DerefRef so that
// regionck knows that the region for `a` must be valid here.
if is_ref {
self.unify_and(a_unsafe, b, |target| {
vec![Adjustment {
kind: Adjust::Deref(None),
target: mt_a.ty
}, Adjustment {
kind: Adjust::Borrow(AutoBorrow::RawPtr(mutbl_b)),
target
}]
})
} else if mt_a.mutbl != mutbl_b {
self.unify_and(
a_unsafe, b, simple(Adjust::Pointer(PointerCast::MutToConstPointer))
)
} else {
self.unify_and(a_unsafe, b, identity)
}
}
}
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// Attempt to coerce an expression to a type, and return the
/// adjusted type of the expression, if successful.
/// Adjustments are only recorded if the coercion succeeded.
/// The expressions *must not* have any pre-existing adjustments.
pub fn try_coerce(
&self,
expr: &hir::Expr,
expr_ty: Ty<'tcx>,
target: Ty<'tcx>,
allow_two_phase: AllowTwoPhase,
) -> RelateResult<'tcx, Ty<'tcx>> {
let source = self.resolve_vars_with_obligations(expr_ty);
debug!("coercion::try({:?}: {:?} -> {:?})", expr, source, target);
let cause = self.cause(expr.span, ObligationCauseCode::ExprAssignable);
let coerce = Coerce::new(self, cause, allow_two_phase);
let ok = self.commit_if_ok(|_| coerce.coerce(source, target))?;
let (adjustments, _) = self.register_infer_ok_obligations(ok);
self.apply_adjustments(expr, adjustments);
Ok(if expr_ty.references_error() {
self.tcx.types.err
} else {
target
})
}
/// Same as `try_coerce()`, but without side-effects.
pub fn can_coerce(&self, expr_ty: Ty<'tcx>, target: Ty<'tcx>) -> bool {
let source = self.resolve_vars_with_obligations(expr_ty);
debug!("coercion::can({:?} -> {:?})", source, target);
let cause = self.cause(syntax_pos::DUMMY_SP, ObligationCauseCode::ExprAssignable);
// We don't ever need two-phase here since we throw out the result of the coercion
let coerce = Coerce::new(self, cause, AllowTwoPhase::No);
self.probe(|_| coerce.coerce(source, target)).is_ok()
}
/// Given some expressions, their known unified type and another expression,
/// tries to unify the types, potentially inserting coercions on any of the
/// provided expressions and returns their LUB (aka "common supertype").
///
/// This is really an internal helper. From outside the coercion
/// module, you should instantiate a `CoerceMany` instance.
fn try_find_coercion_lub<E>(&self,
cause: &ObligationCause<'tcx>,
exprs: &[E],
prev_ty: Ty<'tcx>,
new: &hir::Expr,
new_ty: Ty<'tcx>)
-> RelateResult<'tcx, Ty<'tcx>>
where E: AsCoercionSite
{
let prev_ty = self.resolve_vars_with_obligations(prev_ty);
let new_ty = self.resolve_vars_with_obligations(new_ty);
debug!("coercion::try_find_coercion_lub({:?}, {:?})", prev_ty, new_ty);
// Special-case that coercion alone cannot handle:
// Two function item types of differing IDs or InternalSubsts.
if let (&ty::FnDef(..), &ty::FnDef(..)) = (&prev_ty.kind, &new_ty.kind) {
// Don't reify if the function types have a LUB, i.e., they
// are the same function and their parameters have a LUB.
let lub_ty = self.commit_if_ok(|_| {
self.at(cause, self.param_env)
.lub(prev_ty, new_ty)
}).map(|ok| self.register_infer_ok_obligations(ok));
if lub_ty.is_ok() {
// We have a LUB of prev_ty and new_ty, just return it.
return lub_ty;
}
// The signature must match.
let a_sig = prev_ty.fn_sig(self.tcx);
let a_sig = self.normalize_associated_types_in(new.span, &a_sig);
let b_sig = new_ty.fn_sig(self.tcx);
let b_sig = self.normalize_associated_types_in(new.span, &b_sig);
let sig = self.at(cause, self.param_env)
.trace(prev_ty, new_ty)
.lub(&a_sig, &b_sig)
.map(|ok| self.register_infer_ok_obligations(ok))?;
// Reify both sides and return the reified fn pointer type.
let fn_ptr = self.tcx.mk_fn_ptr(sig);
for expr in exprs.iter().map(|e| e.as_coercion_site()).chain(Some(new)) {
// The only adjustment that can produce an fn item is
// `NeverToAny`, so this should always be valid.
self.apply_adjustments(expr, vec![Adjustment {
kind: Adjust::Pointer(PointerCast::ReifyFnPointer),
target: fn_ptr
}]);
}
return Ok(fn_ptr);
}
// Configure a Coerce instance to compute the LUB.
// We don't allow two-phase borrows on any autorefs this creates since we
// probably aren't processing function arguments here and even if we were,
// they're going to get autorefed again anyway and we can apply 2-phase borrows
// at that time.
let mut coerce = Coerce::new(self, cause.clone(), AllowTwoPhase::No);
coerce.use_lub = true;
// First try to coerce the new expression to the type of the previous ones,
// but only if the new expression has no coercion already applied to it.
let mut first_error = None;
if !self.tables.borrow().adjustments().contains_key(new.hir_id) {
let result = self.commit_if_ok(|_| coerce.coerce(new_ty, prev_ty));
match result {
Ok(ok) => {
let (adjustments, target) = self.register_infer_ok_obligations(ok);
self.apply_adjustments(new, adjustments);
return Ok(target);
}
Err(e) => first_error = Some(e),
}
}
// Then try to coerce the previous expressions to the type of the new one.
// This requires ensuring there are no coercions applied to *any* of the
// previous expressions, other than noop reborrows (ignoring lifetimes).
for expr in exprs {
let expr = expr.as_coercion_site();
let noop = match self.tables.borrow().expr_adjustments(expr) {
&[
Adjustment { kind: Adjust::Deref(_), .. },
Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(_, mutbl_adj)), .. }
] => {
match self.node_ty(expr.hir_id).kind {
ty::Ref(_, _, mt_orig) => {
let mutbl_adj: hir::Mutability = mutbl_adj.into();
// Reborrow that we can safely ignore, because
// the next adjustment can only be a Deref
// which will be merged into it.
mutbl_adj == mt_orig
}
_ => false,
}
}
&[Adjustment { kind: Adjust::NeverToAny, .. }] | &[] => true,
_ => false,
};
if !noop {
return self.commit_if_ok(|_|
self.at(cause, self.param_env)
.lub(prev_ty, new_ty)
).map(|ok| self.register_infer_ok_obligations(ok));
}
}
match self.commit_if_ok(|_| coerce.coerce(prev_ty, new_ty)) {
Err(_) => {
// Avoid giving strange errors on failed attempts.
if let Some(e) = first_error {
Err(e)
} else {
self.commit_if_ok(|_|
self.at(cause, self.param_env)
.lub(prev_ty, new_ty)
).map(|ok| self.register_infer_ok_obligations(ok))
}
}
Ok(ok) => {
let (adjustments, target) = self.register_infer_ok_obligations(ok);
for expr in exprs {
let expr = expr.as_coercion_site();
self.apply_adjustments(expr, adjustments.clone());
}
Ok(target)
}
}
}
}
/// CoerceMany encapsulates the pattern you should use when you have
/// many expressions that are all getting coerced to a common
/// type. This arises, for example, when you have a match (the result
/// of each arm is coerced to a common type). It also arises in less
/// obvious places, such as when you have many `break foo` expressions
/// that target the same loop, or the various `return` expressions in
/// a function.
///
/// The basic protocol is as follows:
///
/// - Instantiate the `CoerceMany` with an initial `expected_ty`.
/// This will also serve as the "starting LUB". The expectation is
/// that this type is something which all of the expressions *must*
/// be coercible to. Use a fresh type variable if needed.
/// - For each expression whose result is to be coerced, invoke `coerce()`
///   with that expression and its type.
/// - In some cases we wish to coerce "non-expressions" whose types are implicitly
/// unit. This happens for example if you have a `break` with no expression,
/// or an `if` with no `else`. In that case, invoke `coerce_forced_unit()`.
/// - `coerce()` and `coerce_forced_unit()` may report errors. They hide this
/// from you so that you don't have to worry your pretty head about it.
/// But if an error is reported, the final type will be `err`.
/// - Invoking `coerce()` may cause us to go and adjust the "adjustments" on
/// previously coerced expressions.
/// - When all done, invoke `complete()`. This will return the LUB of
/// all your expressions.
/// - WARNING: I don't believe this final type is guaranteed to be
/// related to your initial `expected_ty` in any particular way,
/// although it will typically be a subtype, so you should check it.
/// - Invoking `complete()` may cause us to go and adjust the "adjustments" on
/// previously coerced expressions.
///
/// Example:
///
/// ```
/// let mut coerce = CoerceMany::new(expected_ty);
/// for expr in exprs {
/// let expr_ty = fcx.check_expr_with_expectation(expr, expected);
/// coerce.coerce(fcx, &cause, expr, expr_ty);
/// }
/// let final_ty = coerce.complete(fcx);
/// ```
pub struct CoerceMany<'tcx, 'exprs, E: AsCoercionSite> {
expected_ty: Ty<'tcx>,
final_ty: Option<Ty<'tcx>>,
expressions: Expressions<'tcx, 'exprs, E>,
pushed: usize,
}
/// The type of a `CoerceMany` that is storing up the expressions into
/// a buffer. We use this in `check/mod.rs` for things like `break`.
pub type DynamicCoerceMany<'tcx> = CoerceMany<'tcx, 'tcx, P<hir::Expr>>;
enum Expressions<'tcx, 'exprs, E: AsCoercionSite> {
Dynamic(Vec<&'tcx hir::Expr>),
UpFront(&'exprs [E]),
}
impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
/// The usual case; collect the set of expressions dynamically.
    /// If the full set of coercion sites is known beforehand,
/// consider `with_coercion_sites()` instead to avoid allocation.
pub fn new(expected_ty: Ty<'tcx>) -> Self {
Self::make(expected_ty, Expressions::Dynamic(vec![]))
}
/// As an optimization, you can create a `CoerceMany` with a
/// pre-existing slice of expressions. In this case, you are
/// expected to pass each element in the slice to `coerce(...)` in
/// order. This is used with arrays in particular to avoid
/// needlessly cloning the slice.
pub fn with_coercion_sites(expected_ty: Ty<'tcx>,
coercion_sites: &'exprs [E])
-> Self {
Self::make(expected_ty, Expressions::UpFront(coercion_sites))
}
fn make(expected_ty: Ty<'tcx>, expressions: Expressions<'tcx, 'exprs, E>) -> Self {
CoerceMany {
expected_ty,
final_ty: None,
expressions,
pushed: 0,
}
}
/// Returns the "expected type" with which this coercion was
/// constructed. This represents the "downward propagated" type
/// that was given to us at the start of typing whatever construct
/// we are typing (e.g., the match expression).
///
/// Typically, this is used as the expected type when
/// type-checking each of the alternative expressions whose types
/// we are trying to merge.
pub fn expected_ty(&self) -> Ty<'tcx> {
self.expected_ty
}
/// Returns the current "merged type", representing our best-guess
/// at the LUB of the expressions we've seen so far (if any). This
    /// isn't *final* until you call `self.complete()`, which will return
/// the merged type.
pub fn merged_ty(&self) -> Ty<'tcx> {
self.final_ty.unwrap_or(self.expected_ty)
}
/// Indicates that the value generated by `expression`, which is
/// of type `expression_ty`, is one of the possibilities that we
/// could coerce from. This will record `expression`, and later
/// calls to `coerce` may come back and add adjustments and things
/// if necessary.
pub fn coerce<'a>(
&mut self,
fcx: &FnCtxt<'a, 'tcx>,
cause: &ObligationCause<'tcx>,
expression: &'tcx hir::Expr,
expression_ty: Ty<'tcx>,
) {
self.coerce_inner(fcx,
cause,
Some(expression),
expression_ty,
None, false)
}
/// Indicates that one of the inputs is a "forced unit". This
/// occurs in a case like `if foo { ... };`, where the missing else
/// generates a "forced unit". Another example is a `loop { break;
/// }`, where the `break` has no argument expression. We treat
/// these cases slightly differently for error-reporting
/// purposes. Note that these tend to correspond to cases where
/// the `()` expression is implicit in the source, and hence we do
/// not take an expression argument.
///
/// The `augment_error` gives you a chance to extend the error
    /// message, in case any error results (e.g., we use this to suggest
/// removing a `;`).
pub fn coerce_forced_unit<'a>(
&mut self,
fcx: &FnCtxt<'a, 'tcx>,
cause: &ObligationCause<'tcx>,
augment_error: &mut dyn FnMut(&mut DiagnosticBuilder<'_>),
label_unit_as_expected: bool,
) {
self.coerce_inner(fcx,
cause,
None,
fcx.tcx.mk_unit(),
Some(augment_error),
label_unit_as_expected)
}
/// The inner coercion "engine". If `expression` is `None`, this
/// is a forced-unit case, and hence `expression_ty` must be
    /// the unit type `()`.
fn coerce_inner<'a>(
&mut self,
fcx: &FnCtxt<'a, 'tcx>,
cause: &ObligationCause<'tcx>,
expression: Option<&'tcx hir::Expr>,
mut expression_ty: Ty<'tcx>,
augment_error: Option<&mut dyn FnMut(&mut DiagnosticBuilder<'_>)>,
label_expression_as_expected: bool,
) {
// Incorporate whatever type inference information we have
// until now; in principle we might also want to process
// pending obligations, but doing so should only improve
// compatibility (hopefully that is true) by helping us
// uncover never types better.
if expression_ty.is_ty_var() {
expression_ty = fcx.infcx.shallow_resolve(expression_ty);
}
// If we see any error types, just propagate that error
// upwards.
if expression_ty.references_error() || self.merged_ty().references_error() {
self.final_ty = Some(fcx.tcx.types.err);
return;
}
// Handle the actual type unification etc.
let result = if let Some(expression) = expression {
if self.pushed == 0 {
// Special-case the first expression we are coercing.
// To be honest, I'm not entirely sure why we do this.
// We don't allow two-phase borrows, see comment in try_find_coercion_lub for why
fcx.try_coerce(expression, expression_ty, self.expected_ty, AllowTwoPhase::No)
} else {
match self.expressions {
Expressions::Dynamic(ref exprs) => fcx.try_find_coercion_lub(
cause,
exprs,
self.merged_ty(),
expression,
expression_ty,
),
Expressions::UpFront(ref coercion_sites) => fcx.try_find_coercion_lub(
cause,
&coercion_sites[0..self.pushed],
self.merged_ty(),
expression,
expression_ty,
),
}
}
} else {
// this is a hack for cases where we default to `()` because
// the expression etc has been omitted from the source. An
// example is an `if let` without an else:
//
// if let Some(x) = ... { }
//
// we wind up with a second match arm that is like `_ =>
// ()`. That is the case we are considering here. We take
// a different path to get the right "expected, found"
// message and so forth (and because we know that
// `expression_ty` will be unit).
//
// Another example is `break` with no argument expression.
assert!(expression_ty.is_unit(), "if let hack without unit type");
fcx.at(cause, fcx.param_env)
.eq_exp(label_expression_as_expected, expression_ty, self.merged_ty())
.map(|infer_ok| {
fcx.register_infer_ok_obligations(infer_ok);
expression_ty
})
};
match result {
Ok(v) => {
self.final_ty = Some(v);
if let Some(e) = expression {
match self.expressions {
Expressions::Dynamic(ref mut buffer) => buffer.push(e),
Expressions::UpFront(coercion_sites) => {
// if the user gave us an array to validate, check that we got
// the next expression in the list, as expected
assert_eq!(coercion_sites[self.pushed].as_coercion_site().hir_id,
e.hir_id);
}
}
self.pushed += 1;
}
}
Err(coercion_error) => {
let (expected, found) = if label_expression_as_expected {
// In the case where this is a "forced unit", like
// `break`, we want to call the `()` "expected"
// since it is implied by the syntax.
                    // (Note: not all force-units work this way.)
(expression_ty, self.final_ty.unwrap_or(self.expected_ty))
} else {
// Otherwise, the "expected" type for error
// reporting is the current unification type,
// which is basically the LUB of the expressions
// we've seen so far (combined with the expected
// type)
(self.final_ty.unwrap_or(self.expected_ty), expression_ty)
};
let mut err;
match cause.code {
ObligationCauseCode::ReturnNoExpression => {
err = struct_span_err!(
fcx.tcx.sess, cause.span, E0069,
"`return;` in a function whose return type is not `()`");
err.span_label(cause.span, "return type is not `()`");
}
ObligationCauseCode::BlockTailExpression(blk_id) => {
let parent_id = fcx.tcx.hir().get_parent_node(blk_id);
err = self.report_return_mismatched_types(
cause,
expected,
found,
coercion_error,
fcx,
parent_id,
expression.map(|expr| (expr, blk_id)),
);
}
ObligationCauseCode::ReturnValue(id) => {
err = self.report_return_mismatched_types(
cause, expected, found, coercion_error, fcx, id, None);
}
_ => {
err = fcx.report_mismatched_types(cause, expected, found, coercion_error);
}
}
if let Some(augment_error) = augment_error {
augment_error(&mut err);
}
// Error possibly reported in `check_assign` so avoid emitting error again.
err.emit_unless(expression.filter(|e| fcx.is_assign_to_bool(e, expected))
.is_some());
self.final_ty = Some(fcx.tcx.types.err);
}
}
}
fn report_return_mismatched_types<'a>(
&self,
cause: &ObligationCause<'tcx>,
expected: Ty<'tcx>,
found: Ty<'tcx>,
ty_err: TypeError<'tcx>,
fcx: &FnCtxt<'a, 'tcx>,
id: hir::HirId,
expression: Option<(&'tcx hir::Expr, hir::HirId)>,
) -> DiagnosticBuilder<'a> {
let mut err = fcx.report_mismatched_types(cause, expected, found, ty_err);
let mut pointing_at_return_type = false;
let mut return_sp = None;
// Verify that this is a tail expression of a function, otherwise the
// label pointing out the cause for the type coercion will be wrong
// as prior return coercions would not be relevant (#57664).
let parent_id = fcx.tcx.hir().get_parent_node(id);
let fn_decl = if let Some((expr, blk_id)) = expression {
pointing_at_return_type = fcx.suggest_mismatched_types_on_tail(
&mut err,
expr,
expected,
found,
cause.span,
blk_id,
);
let parent = fcx.tcx.hir().get(parent_id);
if let (Some(match_expr), true, false) = (
fcx.tcx.hir().get_match_if_cause(expr.hir_id),
expected.is_unit(),
pointing_at_return_type,
) {
if match_expr.span.desugaring_kind().is_none() {
err.span_label(match_expr.span, "expected this to be `()`");
fcx.suggest_semicolon_at_end(match_expr.span, &mut err);
}
}
fcx.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main))
} else {
fcx.get_fn_decl(parent_id)
};
if let (Some((fn_decl, can_suggest)), _) = (fn_decl, pointing_at_return_type) {
if expression.is_none() {
pointing_at_return_type |= fcx.suggest_missing_return_type(
&mut err, &fn_decl, expected, found, can_suggest);
}
if !pointing_at_return_type {
return_sp = Some(fn_decl.output.span()); // `impl Trait` return type
}
}
if let (Some(sp), Some(return_sp)) = (fcx.ret_coercion_span.borrow().as_ref(), return_sp) {
err.span_label(return_sp, "expected because this return type...");
            err.span_label(*sp, format!(
"...is found to be `{}` here",
fcx.resolve_vars_with_obligations(expected),
));
}
err
}
pub fn complete<'a>(self, fcx: &FnCtxt<'a, 'tcx>) -> Ty<'tcx> {
if let Some(final_ty) = self.final_ty {
final_ty
} else {
// If we only had inputs that were of type `!` (or no
// inputs at all), then the final type is `!`.
assert_eq!(self.pushed, 0);
fcx.tcx.types.never
}
}
}
/// Something that can be converted into an expression to which we can
/// apply a coercion.
pub trait AsCoercionSite {
fn as_coercion_site(&self) -> &hir::Expr;
}
impl AsCoercionSite for hir::Expr {
fn as_coercion_site(&self) -> &hir::Expr {
self
}
}
impl AsCoercionSite for P<hir::Expr> {
fn as_coercion_site(&self) -> &hir::Expr {
self
}
}
impl<'a, T> AsCoercionSite for &'a T
where T: AsCoercionSite
{
fn as_coercion_site(&self) -> &hir::Expr {
(**self).as_coercion_site()
}
}
impl AsCoercionSite for ! {
fn as_coercion_site(&self) -> &hir::Expr {
unreachable!()
}
}
impl AsCoercionSite for hir::Arm {
fn as_coercion_site(&self) -> &hir::Expr {
&self.body
}
}
| {
&self.fcx
} |
qiskit_conversions.py | import hashlib
from typing import Dict, Iterable, List, NamedTuple, Sequence, Tuple, Union
import numpy as np
import qiskit
import sympy
from .. import _builtin_gates, _circuit, _gates
from ..symbolic.qiskit_expressions import QISKIT_DIALECT, expression_from_qiskit
from ..symbolic.sympy_expressions import SYMPY_DIALECT, expression_from_sympy
from ..symbolic.translations import translate_expression
QiskitOperation = Tuple[
qiskit.circuit.Instruction, List[qiskit.circuit.Qubit], List[qiskit.circuit.Clbit]
]
def qiskit_qubit(index: int, num_qubits_in_circuit: int) -> qiskit.circuit.Qubit:
return qiskit.circuit.Qubit(
qiskit.circuit.QuantumRegister(num_qubits_in_circuit, "q"), index
)
def _import_qiskit_qubit(qubit: qiskit.circuit.Qubit) -> int:
return qubit.index
def _qiskit_expr_from_zquantum(expr):
intermediate = expression_from_sympy(expr)
return translate_expression(intermediate, QISKIT_DIALECT)
def _zquantum_expr_from_qiskit(expr):
intermediate = expression_from_qiskit(expr)
return translate_expression(intermediate, SYMPY_DIALECT)
ZQUANTUM_QISKIT_GATE_MAP = {
_builtin_gates.X: qiskit.circuit.library.XGate,
_builtin_gates.Y: qiskit.circuit.library.YGate,
_builtin_gates.Z: qiskit.circuit.library.ZGate,
_builtin_gates.S: qiskit.circuit.library.SGate,
_builtin_gates.T: qiskit.circuit.library.TGate,
_builtin_gates.H: qiskit.circuit.library.HGate,
_builtin_gates.I: qiskit.circuit.library.IGate,
_builtin_gates.CNOT: qiskit.circuit.library.CXGate,
_builtin_gates.CZ: qiskit.circuit.library.CZGate,
_builtin_gates.SWAP: qiskit.circuit.library.SwapGate,
_builtin_gates.ISWAP: qiskit.circuit.library.iSwapGate,
_builtin_gates.RX: qiskit.circuit.library.RXGate,
_builtin_gates.RY: qiskit.circuit.library.RYGate,
_builtin_gates.RZ: qiskit.circuit.library.RZGate,
_builtin_gates.PHASE: qiskit.circuit.library.PhaseGate,
_builtin_gates.CPHASE: qiskit.circuit.library.CPhaseGate,
_builtin_gates.XX: qiskit.circuit.library.RXXGate,
_builtin_gates.YY: qiskit.circuit.library.RYYGate,
_builtin_gates.ZZ: qiskit.circuit.library.RZZGate,
_builtin_gates.U3: qiskit.circuit.library.U3Gate,
}
def _make_gate_instance(gate_ref, gate_params) -> _gates.Gate:
"""Returns a gate instance that's applicable to qubits.
For non-parametric gate refs like X, returns just the `X`
For parametric gate factories like `RX`, returns the produced gate, like `RX(0.2)`
"""
if _gates.gate_is_parametric(gate_ref, gate_params):
return gate_ref(*gate_params)
else:
return gate_ref
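# Illustrative examples of the helper above (mirroring its docstring):
#   _make_gate_instance(_builtin_gates.RX, (0.2,)) -> RX(0.2)
#   _make_gate_instance(_builtin_gates.X, ())      -> X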
def _make_controlled_gate_prototype(wrapped_gate_ref, num_control_qubits=1):
def _factory(*gate_params):
return _gates.ControlledGate(
_make_gate_instance(wrapped_gate_ref, gate_params), num_control_qubits
)
return _factory
QISKIT_ZQUANTUM_GATE_MAP = {
**{q_cls: z_ref for z_ref, q_cls in ZQUANTUM_QISKIT_GATE_MAP.items()},
qiskit.circuit.library.CSwapGate: _builtin_gates.SWAP.controlled(1),
qiskit.circuit.library.CRXGate: _make_controlled_gate_prototype(_builtin_gates.RX),
qiskit.circuit.library.CRYGate: _make_controlled_gate_prototype(_builtin_gates.RY),
qiskit.circuit.library.CRZGate: _make_controlled_gate_prototype(_builtin_gates.RZ),
}
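# The reverse map reuses the pairs above and adds Qiskit controlled gates that have
# no dedicated ZQuantum builtin; those are imported as ControlledGate wrappers.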
def export_to_qiskit(circuit: _circuit.Circuit) -> qiskit.QuantumCircuit:
q_circuit = qiskit.QuantumCircuit(circuit.n_qubits)
custom_names = {
gate_def.gate_name for gate_def in circuit.collect_custom_gate_definitions()
}
q_triplets = [
_export_gate_to_qiskit(
gate_op.gate,
applied_qubit_indices=gate_op.qubit_indices,
n_qubits_in_circuit=circuit.n_qubits,
custom_names=custom_names,
)
for gate_op in circuit.operations
]
for q_gate, q_qubits, q_clbits in q_triplets:
q_circuit.append(q_gate, q_qubits, q_clbits)
return q_circuit
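# Minimal round-trip sketch (illustrative; assumes `circuit` is a ZQuantum Circuit
# built elsewhere):
#   q_circuit = export_to_qiskit(circuit)      # ZQuantum -> qiskit.QuantumCircuit
#   roundtrip = import_from_qiskit(q_circuit)  # qiskit.QuantumCircuit -> ZQuantum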
def _export_gate_to_qiskit(
gate, applied_qubit_indices, n_qubits_in_circuit, custom_names
):
try:
return _export_gate_via_mapping(
gate, applied_qubit_indices, n_qubits_in_circuit, custom_names
)
except ValueError:
pass
try:
return _export_controlled_gate(
gate, applied_qubit_indices, n_qubits_in_circuit, custom_names
)
except ValueError:
pass
try:
return _export_custom_gate(
gate, applied_qubit_indices, n_qubits_in_circuit, custom_names
)
except ValueError:
pass
raise NotImplementedError(f"Exporting gate {gate} to Qiskit is unsupported")
def _export_gate_via_mapping(
gate, applied_qubit_indices, n_qubits_in_circuit, custom_names
):
try:
qiskit_cls = ZQUANTUM_QISKIT_GATE_MAP[
_builtin_gates.builtin_gate_by_name(gate.name)
]
except KeyError:
raise ValueError(f"Can't export gate {gate} to Qiskit via mapping")
qiskit_params = [_qiskit_expr_from_zquantum(param) for param in gate.params]
qiskit_qubits = [
qiskit_qubit(qubit_i, n_qubits_in_circuit) for qubit_i in applied_qubit_indices
]
return qiskit_cls(*qiskit_params), qiskit_qubits, []
def _export_controlled_gate(
gate: _gates.ControlledGate,
applied_qubit_indices,
n_qubits_in_circuit,
custom_names,
):
|
def _export_custom_gate(
gate: _gates.MatrixFactoryGate,
applied_qubit_indices,
n_qubits_in_circuit,
custom_names,
):
if gate.name not in custom_names:
raise ValueError(
f"Can't export gate {gate} as a custom gate, the circuit is missing its "
"definition"
)
if gate.params:
raise ValueError(
f"Can't export parametrized gate {gate}, Qiskit doesn't support "
"parametrized custom gates"
)
    # At the time of writing, Qiskit doesn't support parametrized gates defined with
# a symbolic matrix.
# See https://github.com/Qiskit/qiskit-terra/issues/4751 for more info.
qiskit_qubits = [
qiskit_qubit(qubit_i, n_qubits_in_circuit) for qubit_i in applied_qubit_indices
]
qiskit_matrix = np.array(gate.matrix)
return (
qiskit.extensions.UnitaryGate(qiskit_matrix, label=gate.name),
qiskit_qubits,
[],
)
class AnonGateOperation(NamedTuple):
gate_name: str
matrix: sympy.Matrix
qubit_indices: Tuple[int, ...]
ImportedOperation = Union[_gates.GateOperation, AnonGateOperation]
def _apply_custom_gate(
anon_op: AnonGateOperation, custom_defs_map: Dict[str, _gates.CustomGateDefinition]
) -> _gates.GateOperation:
gate_def = custom_defs_map[anon_op.gate_name]
# Qiskit doesn't support custom gates with parametrized matrices
# so we can assume empty params list.
gate_params: Tuple[sympy.Symbol, ...] = tuple()
gate = gate_def(*gate_params)
return gate(*anon_op.qubit_indices)
def import_from_qiskit(circuit: qiskit.QuantumCircuit) -> _circuit.Circuit:
q_ops = [_import_qiskit_triplet(triplet) for triplet in circuit.data]
anon_ops = [op for op in q_ops if isinstance(op, AnonGateOperation)]
# Qiskit doesn't support custom gates with parametrized matrices
# so we can assume empty params list.
params_ordering: Tuple[sympy.Symbol, ...] = tuple()
custom_defs = {
anon_op.gate_name: _gates.CustomGateDefinition(
gate_name=anon_op.gate_name,
matrix=anon_op.matrix,
params_ordering=params_ordering,
)
for anon_op in anon_ops
}
imported_ops = [
_apply_custom_gate(op, custom_defs) if isinstance(op, AnonGateOperation) else op
for op in q_ops
]
return _circuit.Circuit(
operations=imported_ops,
n_qubits=circuit.num_qubits,
)
def _import_qiskit_triplet(qiskit_triplet: QiskitOperation) -> ImportedOperation:
qiskit_op, qiskit_qubits, _ = qiskit_triplet
return _import_qiskit_op(qiskit_op, qiskit_qubits)
def _import_qiskit_op(qiskit_op, qiskit_qubits) -> ImportedOperation:
    # We always want to try importing via mapping to handle complex gate structures
# represented by a single class, like CNOT (Control + X) or CSwap (Control + Swap).
try:
return _import_qiskit_op_via_mapping(qiskit_op, qiskit_qubits)
except ValueError:
pass
try:
return _import_controlled_qiskit_op(qiskit_op, qiskit_qubits)
except ValueError:
pass
return _import_custom_qiskit_gate(qiskit_op, qiskit_qubits)
def _import_qiskit_op_via_mapping(
qiskit_gate: qiskit.circuit.Instruction,
qiskit_qubits: Iterable[qiskit.circuit.Qubit],
) -> _gates.GateOperation:
try:
gate_ref = QISKIT_ZQUANTUM_GATE_MAP[type(qiskit_gate)]
except KeyError:
raise ValueError(f"Conversion of {qiskit_gate} from Qiskit is unsupported.")
# values to consider:
# - gate matrix parameters (only parametric gates)
# - gate application indices (all gates)
zquantum_params = [
_zquantum_expr_from_qiskit(param) for param in qiskit_gate.params
]
qubit_indices = [_import_qiskit_qubit(qubit) for qubit in qiskit_qubits]
gate = _make_gate_instance(gate_ref, zquantum_params)
return _gates.GateOperation(gate=gate, qubit_indices=tuple(qubit_indices))
def _import_controlled_qiskit_op(
qiskit_gate: qiskit.circuit.ControlledGate,
qiskit_qubits: Sequence[qiskit.circuit.Qubit],
) -> _gates.GateOperation:
if not isinstance(qiskit_gate, qiskit.circuit.ControlledGate):
# Raising an exception here is redundant to the type hint, but it allows us
        # to handle importing all gates in the same way, regardless of type
raise ValueError(f"Can't import gate {qiskit_gate} as a controlled gate")
wrapped_qubits = qiskit_qubits[qiskit_gate.num_ctrl_qubits :]
wrapped_op = _import_qiskit_op(qiskit_gate.base_gate, wrapped_qubits)
qubit_indices = map(_import_qiskit_qubit, qiskit_qubits)
if isinstance(wrapped_op, _gates.GateOperation):
return wrapped_op.gate.controlled(qiskit_gate.num_ctrl_qubits)(*qubit_indices)
else:
raise NotImplementedError(
"Importing of controlled anonymous gates not yet supported."
)
def _hash_hex(bytes_):
return hashlib.sha256(bytes_).hexdigest()
def _custom_qiskit_gate_name(gate_label: str, gate_name: str, matrix: np.ndarray):
matrix_hash = _hash_hex(matrix.tobytes())
target_name = gate_label or gate_name
return f"{target_name}.{matrix_hash}"
def _import_custom_qiskit_gate(
qiskit_op: qiskit.circuit.Gate, qiskit_qubits
) -> AnonGateOperation:
value_matrix = qiskit_op.to_matrix()
return AnonGateOperation(
gate_name=_custom_qiskit_gate_name(
qiskit_op.label, qiskit_op.name, value_matrix
),
matrix=sympy.Matrix(value_matrix),
qubit_indices=tuple(_import_qiskit_qubit(qubit) for qubit in qiskit_qubits),
)
| if not isinstance(gate, _gates.ControlledGate):
# Raising an exception here is redundant to the type hint, but it allows us
# to handle exporting all gates in the same way, regardless of type
raise ValueError(f"Can't export gate {gate} as a controlled gate")
target_indices = applied_qubit_indices[gate.num_control_qubits :]
target_gate, _, _ = _export_gate_to_qiskit(
gate.wrapped_gate,
applied_qubit_indices=target_indices,
n_qubits_in_circuit=n_qubits_in_circuit,
custom_names=custom_names,
)
controlled_gate = target_gate.control(gate.num_control_qubits)
qiskit_qubits = [
qiskit_qubit(qubit_i, n_qubits_in_circuit) for qubit_i in applied_qubit_indices
]
return controlled_gate, qiskit_qubits, [] |
erfr-executor.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# ============================================================================
# Erfr - One-time pad encryption tool
# Executor script
# Copyright (C) 2018 by Ralf Kilian
# Distributed under the MIT License (https://opensource.org/licenses/MIT)
#
# Website: http://www.urbanware.org
# GitHub: https://github.com/urbanware-org/erfr
# ============================================================================
import os
import sys
def | ():
from core import clap
from core import common
from core import main as core
from datetime import datetime as dt
try:
p = clap.Parser()
except Exception as e:
print "%s: error: %s" % (os.path.basename(sys.argv[0]), e)
sys.exit(1)
p.set_description("Process a parameter file generated by the main Erfr " \
"script containing encryption related information.")
p.set_epilog("Further information and usage examples can be found " \
"inside the documentation file for this script.")
# Define required arguments
p.add_predef("-a", "--action", "action to perform", "action",
["encrypt", "decrypt"], True)
p.add_avalue("-f", "--file", "parameter file to process", "file", None,
True)
# Define optional arguments (general)
p.add_switch("-h", "--help", "print this help message and exit", None,
True, False)
p.add_avalue("-s", "--suffix", "add additional suffix to the decrypted " \
"file", "suffix", None, False)
p.add_switch(None, "--version", "print the version number and exit", None,
True, False)
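    # Typical invocations (illustrative file name):
    #   erfr-executor.py --action encrypt --file erfr.params
    #   erfr-executor.py --action decrypt --file erfr.params --suffix <suffix>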
if len(sys.argv) == 1:
p.error("At least one required argument is missing.")
elif ("-h" in sys.argv) or ("--help" in sys.argv):
p.print_help()
sys.exit(0)
elif "--version" in sys.argv:
print core.get_version()
sys.exit(0)
args = p.parse_args()
    if args.action is None:
p.error("The action argument is missing.")
elif args.action.lower() == "encrypt":
encrypt = True
elif args.action.lower() == "decrypt":
encrypt = False
else:
p.error("An unsupported action was given.")
try:
params = common.process_params(args.file)
buffer_size = params.get("buffer_size")
dev_random = params.get("dev_random")
fortuna = params.get("fortuna")
input_file = params.get("input_file")
key_file = params.get("key_file")
obfuscate_enc = params.get("obfuscate_enc")
obfuscate_key = params.get("obfuscate_key")
output_file = params.get("output_file")
overwrite = params.get("overwrite")
rotate_max = params.get("rotate_max")
rotate_min = params.get("rotate_min")
rotate_mod = params.get("rotate_mod")
rotate_step = params.get("rotate_step")
reverse_bytes = params.get("reverse_bytes")
sbox = params.get("sbox")
task_id = params.get("task_id")
use_existing_key = params.get("use_existing_key")
timestamp = dt.now()
erfr = core.ErfrCrypt()
if task_id == 0:
task_id = None
if encrypt:
            if args.suffix is not None:
p.error("The argument for an additional suffix can only be " \
"used when decrypting a file.")
common.status(task_id, "encryption", "start")
erfr.encrypt_file(task_id, input_file, key_file, output_file,
buffer_size, use_existing_key, overwrite,
obfuscate_enc, obfuscate_key, fortuna,
dev_random, rotate_min, rotate_max, rotate_step,
rotate_mod, reverse_bytes, sbox)
common.status(task_id, "encryption", "finish")
else:
            if args.suffix is not None:
input_file += ".%s" % args.suffix
common.status(task_id, "decryption", "start")
erfr.decrypt_file(task_id, output_file, key_file, input_file,
buffer_size, overwrite, obfuscate_enc,
obfuscate_key, rotate_min, rotate_max,
rotate_step, rotate_mod, reverse_bytes, sbox)
common.status(task_id, "decryption", "finish")
print "Elapsed time: %s" % (dt.now() - timestamp)
except Exception as e:
if encrypt:
common.status(task_id, "encryption", "cancel")
else:
common.status(task_id, "decryption", "cancel")
p.error(e)
finally:
try:
common.delete_temp_files(task_id)
except:
pass
if __name__ == "__main__":
main()
# EOF
| main |
OptimizeBuiltinCalls.py | # Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Optimize calls to built-in references to specific built-in calls.
For built-in name references, we check if it's one of the supported built-in
types, and then specialize for the ones where it makes sense.
"""
from nuitka.__past__ import xrange # pylint: disable=I0021,redefined-builtin
from nuitka.Errors import NuitkaAssumptionError
from nuitka.nodes.AssignNodes import (
StatementAssignmentVariable,
StatementDelVariable,
)
from nuitka.nodes.AttributeNodes import (
ExpressionAttributeLookup,
ExpressionBuiltinGetattr,
ExpressionBuiltinHasattr,
ExpressionBuiltinSetattr,
)
from nuitka.nodes.BuiltinAllNodes import ExpressionBuiltinAll
from nuitka.nodes.BuiltinAnyNodes import ExpressionBuiltinAny
from nuitka.nodes.BuiltinComplexNodes import (
ExpressionBuiltinComplex1,
ExpressionBuiltinComplex2,
)
from nuitka.nodes.BuiltinDecodingNodes import (
ExpressionBuiltinChr,
ExpressionBuiltinOrd,
)
from nuitka.nodes.BuiltinDecoratorNodes import (
ExpressionBuiltinClassmethod,
ExpressionBuiltinStaticmethod,
)
from nuitka.nodes.BuiltinDictNodes import ExpressionBuiltinDict
from nuitka.nodes.BuiltinFormatNodes import (
ExpressionBuiltinAscii,
ExpressionBuiltinBin,
ExpressionBuiltinFormat,
ExpressionBuiltinHex,
ExpressionBuiltinId,
ExpressionBuiltinOct,
)
from nuitka.nodes.BuiltinHashNodes import ExpressionBuiltinHash
from nuitka.nodes.BuiltinIntegerNodes import (
ExpressionBuiltinInt1,
ExpressionBuiltinInt2,
)
from nuitka.nodes.BuiltinIteratorNodes import (
ExpressionBuiltinIter1,
ExpressionBuiltinIter2,
)
from nuitka.nodes.BuiltinLenNodes import ExpressionBuiltinLen
from nuitka.nodes.BuiltinNextNodes import (
ExpressionBuiltinNext1,
ExpressionBuiltinNext2,
)
from nuitka.nodes.BuiltinOpenNodes import ExpressionBuiltinOpen
from nuitka.nodes.BuiltinRangeNodes import (
ExpressionBuiltinRange1,
ExpressionBuiltinRange2,
ExpressionBuiltinRange3,
ExpressionBuiltinXrange1,
ExpressionBuiltinXrange2,
ExpressionBuiltinXrange3,
)
from nuitka.nodes.BuiltinRefNodes import (
ExpressionBuiltinAnonymousRef,
makeExpressionBuiltinTypeRef,
)
from nuitka.nodes.BuiltinSumNodes import (
ExpressionBuiltinSum1,
ExpressionBuiltinSum2,
)
from nuitka.nodes.BuiltinTypeNodes import (
ExpressionBuiltinBool,
ExpressionBuiltinBytearray1,
ExpressionBuiltinBytearray3,
ExpressionBuiltinFloat,
ExpressionBuiltinFrozenset,
ExpressionBuiltinList,
ExpressionBuiltinSet,
ExpressionBuiltinStrP2,
ExpressionBuiltinStrP3,
ExpressionBuiltinTuple,
ExpressionBuiltinUnicodeP2,
)
from nuitka.nodes.BuiltinVarsNodes import ExpressionBuiltinVars
from nuitka.nodes.CallNodes import makeExpressionCall
from nuitka.nodes.ClassNodes import ExpressionBuiltinType3
from nuitka.nodes.ComparisonNodes import ExpressionComparisonIs
from nuitka.nodes.ConditionalNodes import (
ExpressionConditional,
makeStatementConditional,
)
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import makeExpressionMakeTupleOrConstant
from nuitka.nodes.ExecEvalNodes import (
ExpressionBuiltinCompile,
ExpressionBuiltinEval,
)
from nuitka.nodes.GlobalsLocalsNodes import (
ExpressionBuiltinDir1,
ExpressionBuiltinGlobals,
)
from nuitka.nodes.ImportNodes import ExpressionBuiltinImport
from nuitka.nodes.NodeMakingHelpers import (
makeConstantReplacementNode,
makeExpressionBuiltinLocals,
makeRaiseExceptionReplacementExpression,
makeRaiseExceptionReplacementExpressionFromInstance,
wrapExpressionWithSideEffects,
)
from nuitka.nodes.OperatorNodes import ExpressionOperationBinaryDivmod
from nuitka.nodes.OperatorNodesUnary import (
ExpressionOperationNot,
ExpressionOperationUnaryAbs,
ExpressionOperationUnaryRepr,
)
from nuitka.nodes.OutlineNodes import ExpressionOutlineBody
from nuitka.nodes.ReturnNodes import makeStatementReturn
from nuitka.nodes.SliceNodes import makeExpressionBuiltinSlice
from nuitka.nodes.TypeNodes import (
ExpressionBuiltinIsinstance,
ExpressionBuiltinIssubclass,
ExpressionBuiltinSuper0,
ExpressionBuiltinSuper2,
ExpressionBuiltinType1,
)
from nuitka.nodes.VariableRefNodes import (
ExpressionTempVariableRef,
ExpressionVariableRef,
)
from nuitka.PythonVersions import python_version
from nuitka.specs import BuiltinParameterSpecs
from nuitka.Tracing import optimization_logger
from nuitka.tree.ReformulationExecStatements import wrapEvalGlobalsAndLocals
from nuitka.tree.ReformulationTryFinallyStatements import (
makeTryFinallyStatement,
)
from nuitka.tree.TreeHelpers import (
makeCallNode,
makeStatementsSequence,
makeStatementsSequenceFromStatement,
)
def dir_extractor(node):
locals_scope = node.subnode_called.getLocalsScope()
def buildDirEmptyCase(source_ref):
source = makeExpressionBuiltinLocals(
locals_scope=locals_scope, source_ref=source_ref
)
result = makeCallNode(
ExpressionAttributeLookup(
expression=source, attribute_name="keys", source_ref=source_ref
),
source_ref,
)
        # For Python3, "keys" does not return a list but only a view object,
        # while we want an actual list here.
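        # For example, on Python3 {}.keys() is a "dict_keys" view rather than
        # a list, hence the explicit "list" wrapper applied below.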
if python_version >= 0x300:
result = ExpressionBuiltinList(value=result, source_ref=source_ref)
return result
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
# TODO: Needs locals_scope attached.
builtin_class=ExpressionBuiltinDir1,
builtin_spec=BuiltinParameterSpecs.builtin_dir_spec,
empty_special_class=buildDirEmptyCase,
)
def vars_extractor(node):
locals_scope = node.subnode_called.getLocalsScope()
def selectVarsEmptyClass(source_ref):
return makeExpressionBuiltinLocals(
locals_scope=locals_scope, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
        # TODO: Needs locals_scope attached
builtin_class=ExpressionBuiltinVars,
builtin_spec=BuiltinParameterSpecs.builtin_vars_spec,
empty_special_class=selectVarsEmptyClass,
)
def import_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinImport,
builtin_spec=BuiltinParameterSpecs.builtin_import_spec,
)
def type_extractor(node):
args = node.subnode_args
if args is None:
iter_length = 0
else:
iter_length = args.getIterationLength()
if iter_length == 1:
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinType1,
builtin_spec=BuiltinParameterSpecs.builtin_type1_spec,
)
elif iter_length == 3:
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinType3,
builtin_spec=BuiltinParameterSpecs.builtin_type3_spec,
)
else:
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node, exception=TypeError("type() takes 1 or 3 arguments")
)
def iter_extractor(node):
def wrapIterCreation(callable_arg, sentinel, source_ref):
if sentinel is None:
return ExpressionBuiltinIter1(value=callable_arg, source_ref=source_ref)
else:
return ExpressionBuiltinIter2(
callable_arg=callable_arg, sentinel=sentinel, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapIterCreation,
builtin_spec=BuiltinParameterSpecs.builtin_iter_spec,
)
def next_extractor(node):
    # Split up "next" with and without a default value, since the two forms
    # do not behave very similarly.
def selectNextBuiltinClass(iterator, default, source_ref):
if default is None:
return ExpressionBuiltinNext1(value=iterator, source_ref=source_ref)
else:
return ExpressionBuiltinNext2(
iterator=iterator, default=default, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectNextBuiltinClass,
builtin_spec=BuiltinParameterSpecs.builtin_next_spec,
)
def sum_extractor(node):
    # Split up "sum" with and without a start value, the form without one is
    # much easier.
def selectSumBuiltinClass(sequence, start, source_ref):
if start is None:
return ExpressionBuiltinSum1(sequence=sequence, source_ref=source_ref)
else:
return ExpressionBuiltinSum2(
sequence=sequence, start=start, source_ref=source_ref
)
def makeSum0(source_ref):
# pylint: disable=unused-argument
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node,
exception=TypeError(
"sum expected at least 1 arguments, got 0"
if python_version < 0x380
else "sum() takes at least 1 positional argument (0 given)"
),
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectSumBuiltinClass,
builtin_spec=BuiltinParameterSpecs.builtin_sum_spec,
empty_special_class=makeSum0,
)
def dict_extractor(node):
    # The "dict" built-in is a bit strange in that it may or may not take a
    # positional parameter, and that parameter has no default value.
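    # For example, dict(), dict({"a": 1}) and dict(a=1) are all legal, while
    # dict({"a": 1}, {"b": 2}) raises a TypeError for the extra positional
    # argument, which is what the wrapper below reproduces.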
def wrapExpressionBuiltinDictCreation(positional_args, dict_star_arg, source_ref):
if len(positional_args) > 1:
result = makeRaiseExceptionReplacementExpressionFromInstance(
expression=node,
exception=TypeError(
"dict expected at most 1 arguments, got %d" % (len(positional_args))
),
)
result = wrapExpressionWithSideEffects(
side_effects=positional_args, old_node=node, new_node=result
)
if dict_star_arg:
result = wrapExpressionWithSideEffects(
side_effects=dict_star_arg, old_node=node, new_node=result
)
return result
return ExpressionBuiltinDict(
pos_arg=positional_args[0] if positional_args else None,
pairs=dict_star_arg,
source_ref=source_ref,
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapExpressionBuiltinDictCreation,
builtin_spec=BuiltinParameterSpecs.builtin_dict_spec,
)
def chr_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinChr,
builtin_spec=BuiltinParameterSpecs.builtin_chr_spec,
)
def ord_extractor(node):
def makeOrd0(source_ref):
# pylint: disable=unused-argument
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node,
exception=TypeError("ord() takes exactly one argument (0 given)"),
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinOrd,
builtin_spec=BuiltinParameterSpecs.builtin_ord_spec,
empty_special_class=makeOrd0,
)
def bin_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinBin,
builtin_spec=BuiltinParameterSpecs.builtin_bin_spec,
)
def oct_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinOct,
builtin_spec=BuiltinParameterSpecs.builtin_oct_spec,
)
def hex_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinHex,
builtin_spec=BuiltinParameterSpecs.builtin_hex_spec,
)
def id_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinId,
builtin_spec=BuiltinParameterSpecs.builtin_id_spec,
)
def repr_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionOperationUnaryRepr,
builtin_spec=BuiltinParameterSpecs.builtin_repr_spec,
)
if python_version >= 0x300:
def ascii_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinAscii,
builtin_spec=BuiltinParameterSpecs.builtin_repr_spec,
)
def range_extractor(node):
def selectRangeBuiltin(low, high, step, source_ref):
if high is None:
return ExpressionBuiltinRange1(low=low, source_ref=source_ref)
elif step is None:
return ExpressionBuiltinRange2(low=low, high=high, source_ref=source_ref)
else:
return ExpressionBuiltinRange3(
low=low, high=high, step=step, source_ref=source_ref
)
def makeRange0(source_ref):
# pylint: disable=unused-argument
try:
range()
        except Exception as e:  # We want to be broad here, pylint: disable=broad-except
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node, exception=e
)
else:
raise NuitkaAssumptionError("range without argument is expected to raise")
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectRangeBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_range_spec,
empty_special_class=makeRange0,
)
def xrange_extractor(node):
def selectXrangeBuiltin(low, high, step, source_ref):
if high is None:
return ExpressionBuiltinXrange1(low=low, source_ref=source_ref)
elif step is None:
return ExpressionBuiltinXrange2(low=low, high=high, source_ref=source_ref)
else:
return ExpressionBuiltinXrange3(
low=low, high=high, step=step, source_ref=source_ref
)
def makeXrange0(source_ref):
# pylint: disable=unused-argument
try:
xrange()
        except Exception as e:  # We want to be broad here, pylint: disable=broad-except
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node, exception=e
)
else:
            raise NuitkaAssumptionError("xrange without argument is expected to raise")
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectXrangeBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_xrange_spec,
empty_special_class=makeXrange0,
)
def len_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinLen,
builtin_spec=BuiltinParameterSpecs.builtin_len_spec,
)
def all_extractor(node):
# pylint: disable=unused-argument
def makeAll0(source_ref):
exception_message = "all() takes exactly one argument (0 given)"
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node, exception=TypeError(exception_message)
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinAll,
builtin_spec=BuiltinParameterSpecs.builtin_all_spec,
empty_special_class=makeAll0,
)
def abs_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionOperationUnaryAbs,
builtin_spec=BuiltinParameterSpecs.builtin_abs_spec,
)
def any_extractor(node):
# pylint: disable=unused-argument
def makeAny0(source_ref):
exception_message = "any() takes exactly one argument (0 given)"
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node, exception=TypeError(exception_message)
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinAny,
builtin_spec=BuiltinParameterSpecs.builtin_any_spec,
empty_special_class=makeAny0,
)
def tuple_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinTuple,
builtin_spec=BuiltinParameterSpecs.builtin_tuple_spec,
)
def list_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinList,
builtin_spec=BuiltinParameterSpecs.builtin_list_spec,
)
def set_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinSet,
builtin_spec=BuiltinParameterSpecs.builtin_set_spec,
)
def frozenset_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinFrozenset,
builtin_spec=BuiltinParameterSpecs.builtin_frozenset_spec,
)
def float_extractor(node):
def makeFloat0(source_ref):
# pylint: disable=unused-argument
return makeConstantReplacementNode(
constant=float(), node=node, user_provided=False
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinFloat,
builtin_spec=BuiltinParameterSpecs.builtin_float_spec,
empty_special_class=makeFloat0,
)
def complex_extractor(node):
def makeComplex0(source_ref):
# pylint: disable=unused-argument
return makeConstantReplacementNode(
constant=complex(), node=node, user_provided=False
)
def selectComplexBuiltin(real, imag, source_ref):
if imag is None:
return ExpressionBuiltinComplex1(value=real, source_ref=source_ref)
else:
return ExpressionBuiltinComplex2(
real=real, imag=imag, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectComplexBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_complex_spec,
empty_special_class=makeComplex0,
)
def str_extractor(node):
builtin_class = ExpressionBuiltinStrP2 if str is bytes else ExpressionBuiltinStrP3
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=builtin_class,
builtin_spec=builtin_class.builtin_spec,
)
if python_version < 0x300:
def unicode_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinUnicodeP2,
builtin_spec=ExpressionBuiltinUnicodeP2.builtin_spec,
)
else:
from nuitka.nodes.BuiltinTypeNodes import (
ExpressionBuiltinBytes1,
ExpressionBuiltinBytes3,
)
def bytes_extractor(node):
def makeBytes0(source_ref):
# pylint: disable=unused-argument
return makeConstantReplacementNode(
constant=bytes(), node=node, user_provided=False
)
def selectBytesBuiltin(string, encoding, errors, source_ref):
if encoding is None and errors is None:
return ExpressionBuiltinBytes1(value=string, source_ref=source_ref)
else:
return ExpressionBuiltinBytes3(
value=string,
encoding=encoding,
errors=errors,
source_ref=source_ref,
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectBytesBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_bytes_p3_spec,
empty_special_class=makeBytes0,
)
def bool_extractor(node):
def makeBool0(source_ref):
# pylint: disable=unused-argument
return makeConstantReplacementNode(
constant=bool(), node=node, user_provided=False
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinBool,
builtin_spec=BuiltinParameterSpecs.builtin_bool_spec,
empty_special_class=makeBool0,
)
def int_extractor(node):
def makeInt0(source_ref):
# pylint: disable=unused-argument
return makeConstantReplacementNode(
constant=int(), node=node, user_provided=False
)
def selectIntBuiltin(value, base, source_ref):
if base is None:
return ExpressionBuiltinInt1(value=value, source_ref=source_ref)
else:
return ExpressionBuiltinInt2(value=value, base=base, source_ref=source_ref)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectIntBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_int_spec,
empty_special_class=makeInt0,
)
if python_version < 0x300:
from nuitka.nodes.BuiltinIntegerNodes import (
ExpressionBuiltinLong1,
ExpressionBuiltinLong2,
)
def long_extractor(node):
def makeLong0(source_ref):
# pylint: disable=unused-argument
return makeConstantReplacementNode(
constant=int(), node=node, user_provided=False
)
def selectIntBuiltin(value, base, source_ref):
if base is None:
return ExpressionBuiltinLong1(value=value, source_ref=source_ref)
else:
return ExpressionBuiltinLong2(
value=value, base=base, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=selectIntBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_int_spec,
empty_special_class=makeLong0,
)
def globals_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinGlobals,
builtin_spec=BuiltinParameterSpecs.builtin_globals_spec,
)
def locals_extractor(node):
locals_scope = node.subnode_called.getLocalsScope()
def makeLocalsNode(source_ref):
return makeExpressionBuiltinLocals(
locals_scope=locals_scope, source_ref=source_ref
)
# Note: Locals on the module level is really globals.
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=makeLocalsNode,
builtin_spec=BuiltinParameterSpecs.builtin_locals_spec,
)
if python_version < 0x300:
from nuitka.nodes.ExecEvalNodes import ExpressionBuiltinExecfile
def execfile_extractor(node):
def wrapExpressionBuiltinExecfileCreation(
filename, globals_arg, locals_arg, source_ref
):
outline_body = ExpressionOutlineBody(
provider=node.getParentVariableProvider(),
name="execfile_call",
source_ref=source_ref,
)
globals_ref, locals_ref, tried, final = wrapEvalGlobalsAndLocals(
provider=node.getParentVariableProvider(),
globals_node=globals_arg,
locals_node=locals_arg,
temp_scope=outline_body.getOutlineTempScope(),
source_ref=source_ref,
)
tried = makeStatementsSequence(
statements=(
tried,
makeStatementReturn(
expression=ExpressionBuiltinExecfile(
source_code=makeCallNode(
ExpressionAttributeLookup(
expression=ExpressionBuiltinOpen(
filename=filename,
mode=makeConstantRefNode(
constant="rU", source_ref=source_ref
),
buffering=None,
source_ref=source_ref,
),
attribute_name="read",
source_ref=source_ref,
),
source_ref,
),
globals_arg=globals_ref,
locals_arg=locals_ref,
source_ref=source_ref,
),
source_ref=source_ref,
),
),
allow_none=False,
source_ref=source_ref,
)
outline_body.setChild(
"body",
makeStatementsSequenceFromStatement(
statement=makeTryFinallyStatement(
provider=outline_body,
tried=tried,
final=final,
source_ref=source_ref,
)
),
)
return outline_body
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node, |
def eval_extractor(node):
def wrapEvalBuiltin(source, globals_arg, locals_arg, source_ref):
provider = node.getParentVariableProvider()
outline_body = ExpressionOutlineBody(
provider=node.getParentVariableProvider(),
name="eval_call",
source_ref=source_ref,
)
globals_ref, locals_ref, tried, final = wrapEvalGlobalsAndLocals(
provider=provider,
globals_node=globals_arg,
locals_node=locals_arg,
temp_scope=outline_body.getOutlineTempScope(),
source_ref=source_ref,
)
# The wrapping should not relocate to the "source_ref".
assert (
globals_arg is None
or globals_ref.getSourceReference() == globals_arg.getSourceReference()
)
assert (
locals_arg is None
or locals_ref.getSourceReference() == locals_arg.getSourceReference()
)
source_variable = outline_body.allocateTempVariable(
temp_scope=None, name="source"
)
final.setChild(
"statements",
final.subnode_statements
+ (
StatementDelVariable(
variable=source_variable, tolerant=True, source_ref=source_ref
),
),
)
strip_choice = makeConstantRefNode(constant=(" \t",), source_ref=source_ref)
if python_version >= 0x300:
strip_choice = ExpressionConditional(
condition=ExpressionComparisonIs(
left=ExpressionBuiltinType1(
value=ExpressionTempVariableRef(
variable=source_variable, source_ref=source_ref
),
source_ref=source_ref,
),
right=makeExpressionBuiltinTypeRef(
builtin_name="bytes", source_ref=source_ref
),
source_ref=source_ref,
),
expression_yes=makeConstantRefNode(
constant=(b" \t",), source_ref=source_ref
),
expression_no=strip_choice,
source_ref=source_ref,
)
        # The source needs some special treatment for eval: if it is a
        # string, it must be stripped first.
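        # For example, CPython accepts eval(" 1+1 ") because leading and
        # trailing blanks/tabs of a str source are ignored; the "strip" call
        # below (using b" \t" for bytes on Python3) reproduces that behavior.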
string_fixup = StatementAssignmentVariable(
variable=source_variable,
source=makeExpressionCall(
called=ExpressionAttributeLookup(
expression=ExpressionTempVariableRef(
variable=source_variable, source_ref=source_ref
),
attribute_name="strip",
source_ref=source_ref,
),
args=strip_choice, # This is a tuple
kw=None,
source_ref=source_ref,
),
source_ref=source_ref,
)
acceptable_builtin_types = [
ExpressionBuiltinAnonymousRef(builtin_name="code", source_ref=source_ref)
]
if python_version >= 0x270:
acceptable_builtin_types.append(
makeExpressionBuiltinTypeRef(
builtin_name="memoryview", source_ref=source_ref
)
)
statements = (
StatementAssignmentVariable(
variable=source_variable, source=source, source_ref=source_ref
),
makeStatementConditional(
condition=ExpressionOperationNot(
operand=ExpressionBuiltinIsinstance(
instance=ExpressionTempVariableRef(
variable=source_variable, source_ref=source_ref
),
classes=makeExpressionMakeTupleOrConstant(
elements=acceptable_builtin_types,
user_provided=True,
source_ref=source_ref,
),
source_ref=source_ref,
),
source_ref=source_ref,
),
yes_branch=string_fixup,
no_branch=None,
source_ref=source_ref,
),
makeStatementReturn(
expression=ExpressionBuiltinEval(
source_code=ExpressionTempVariableRef(
variable=source_variable, source_ref=source_ref
),
globals_arg=globals_ref,
locals_arg=locals_ref,
source_ref=source_ref,
),
source_ref=source_ref,
),
)
tried = makeStatementsSequence(
statements=(tried,) + statements, allow_none=False, source_ref=source_ref
)
outline_body.setChild(
"body",
makeStatementsSequenceFromStatement(
statement=makeTryFinallyStatement(
provider=outline_body,
tried=tried,
final=final,
source_ref=source_ref,
)
),
)
return outline_body
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapEvalBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_eval_spec,
)
if python_version >= 0x300:
from nuitka.nodes.ExecEvalNodes import ExpressionBuiltinExec
def exec_extractor(node):
def wrapExpressionBuiltinExecCreation(
source, globals_arg, locals_arg, source_ref
):
provider = node.getParentVariableProvider()
outline_body = ExpressionOutlineBody(
provider=provider, name="exec_call", source_ref=source_ref
)
globals_ref, locals_ref, tried, final = wrapEvalGlobalsAndLocals(
provider=provider,
globals_node=globals_arg,
locals_node=locals_arg,
temp_scope=outline_body.getOutlineTempScope(),
source_ref=source_ref,
)
tried = makeStatementsSequence(
statements=(
tried,
makeStatementReturn(
expression=ExpressionBuiltinExec(
source_code=source,
globals_arg=globals_ref,
locals_arg=locals_ref,
source_ref=source_ref,
),
source_ref=source_ref,
),
),
allow_none=False,
source_ref=source_ref,
)
# Hack: Allow some APIs to work already
tried.parent = outline_body
outline_body.setChild(
"body",
makeStatementsSequenceFromStatement(
statement=makeTryFinallyStatement(
provider=provider,
tried=tried,
final=final,
source_ref=source_ref,
)
),
)
return outline_body
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapExpressionBuiltinExecCreation,
builtin_spec=BuiltinParameterSpecs.builtin_eval_spec,
)
def compile_extractor(node):
def wrapExpressionBuiltinCompileCreation(
source_code, filename, mode, flags, dont_inherit, optimize=None, source_ref=None
):
return ExpressionBuiltinCompile(
source_code=source_code,
filename=filename,
mode=mode,
flags=flags,
dont_inherit=dont_inherit,
optimize=optimize,
source_ref=source_ref,
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapExpressionBuiltinCompileCreation,
builtin_spec=BuiltinParameterSpecs.builtin_compile_spec,
)
def open_extractor(node):
def makeOpen0(source_ref):
# pylint: disable=unused-argument
try:
open()
        except Exception as e:  # We want to be broad here, pylint: disable=broad-except
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node, exception=e
)
else:
raise NuitkaAssumptionError("open without argument is expected to raise")
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinOpen,
builtin_spec=BuiltinParameterSpecs.builtin_open_spec,
empty_special_class=makeOpen0,
)
def super_extractor(node):
def wrapSuperBuiltin(type_arg, object_arg, source_ref):
if type_arg is None and python_version >= 0x300:
if provider.isCompiledPythonModule():
return makeRaiseExceptionReplacementExpression(
expression=node,
exception_type="RuntimeError",
exception_value="super(): no arguments",
)
class_variable = provider.getVariableForReference(variable_name="__class__")
provider.trace_collection.getVariableCurrentTrace(class_variable).addUsage()
type_arg = ExpressionVariableRef(
# Ought to be already closure taken due to "super" flag in
# tree building.
variable=class_variable,
source_ref=source_ref,
)
# If we already have this as a local variable, then use that
# instead.
type_arg_owner = class_variable.getOwner()
if type_arg_owner is provider or not (
type_arg_owner.isExpressionFunctionBody()
or type_arg_owner.isExpressionClassBody()
):
return makeRaiseExceptionReplacementExpression(
expression=node,
exception_type="SystemError"
if python_version < 0x331
else "RuntimeError",
exception_value="super(): __class__ cell not found",
)
if object_arg is None:
if (
provider.isExpressionGeneratorObjectBody()
or provider.isExpressionCoroutineObjectBody()
or provider.isExpressionAsyncgenObjectBody()
):
parameter_provider = provider.getParentVariableProvider()
else:
parameter_provider = provider
if parameter_provider.getParameters().getArgumentCount() == 0:
return makeRaiseExceptionReplacementExpression(
expression=node,
exception_type="RuntimeError",
exception_value="super(): no arguments",
)
else:
par1_name = parameter_provider.getParameters().getArgumentNames()[0]
object_variable = provider.getVariableForReference(
variable_name=par1_name
)
provider.trace_collection.getVariableCurrentTrace(
object_variable
).addUsage()
object_arg = ExpressionVariableRef(
variable=object_variable, source_ref=source_ref
)
if not object_arg.getVariable().isParameterVariable():
return makeRaiseExceptionReplacementExpression(
expression=node,
exception_type="SystemError"
if python_version < 0x300
else "RuntimeError",
exception_value="super(): __class__ cell not found",
)
return ExpressionBuiltinSuper0(
type_arg=type_arg, object_arg=object_arg, source_ref=source_ref
)
return ExpressionBuiltinSuper2(
type_arg=type_arg, object_arg=object_arg, source_ref=source_ref
)
provider = node.getParentVariableProvider().getEntryPoint()
if not provider.isCompiledPythonModule():
provider.discardFlag("has_super")
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapSuperBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_super_spec,
)
def hasattr_extractor(node):
    # We need to use the built-in argument names here, pylint: disable=redefined-builtin
def makeExpressionBuiltinHasattr(object, name, source_ref):
return ExpressionBuiltinHasattr(
expression=object, name=name, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=makeExpressionBuiltinHasattr,
builtin_spec=BuiltinParameterSpecs.builtin_hasattr_spec,
)
def getattr_extractor(node):
    # We need to use the built-in argument names here, pylint: disable=redefined-builtin
def makeExpressionBuiltinGetattr(object, name, default, source_ref):
return ExpressionBuiltinGetattr(
expression=object, name=name, default=default, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=makeExpressionBuiltinGetattr,
builtin_spec=BuiltinParameterSpecs.builtin_getattr_spec,
)
def setattr_extractor(node):
    # We need to use the built-in argument names here, pylint: disable=redefined-builtin
def makeExpressionBuiltinSetattr(object, name, value, source_ref):
return ExpressionBuiltinSetattr(
expression=object, name=name, value=value, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=makeExpressionBuiltinSetattr,
builtin_spec=BuiltinParameterSpecs.builtin_setattr_spec,
)
def isinstance_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinIsinstance,
builtin_spec=BuiltinParameterSpecs.builtin_isinstance_spec,
)
def issubclass_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinIssubclass,
builtin_spec=BuiltinParameterSpecs.builtin_isinstance_spec,
)
def bytearray_extractor(node):
def makeBytearray0(source_ref):
return makeConstantRefNode(constant=bytearray(), source_ref=source_ref)
    def selectBytearrayBuiltin(string, encoding, errors, source_ref):
if encoding is None:
return ExpressionBuiltinBytearray1(value=string, source_ref=source_ref)
else:
return ExpressionBuiltinBytearray3(
string=string, encoding=encoding, errors=errors, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
        builtin_class=selectBytearrayBuiltin,
builtin_spec=BuiltinParameterSpecs.builtin_bytearray_spec,
empty_special_class=makeBytearray0,
)
def slice_extractor(node):
def wrapSlice(start, stop, step, source_ref):
if start is not None and stop is None:
# Default rules are strange. If one argument is given, it's the
# second one then.
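            # For example, slice(5) is equivalent to slice(None, 5, None),
            # matching how CPython interprets a single argument.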
stop = start
start = None
return makeExpressionBuiltinSlice(
start=start, stop=stop, step=step, source_ref=source_ref
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=wrapSlice,
builtin_spec=BuiltinParameterSpecs.builtin_slice_spec,
)
def hash_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinHash,
builtin_spec=BuiltinParameterSpecs.builtin_hash_spec,
)
def format_extractor(node):
def makeFormat0(source_ref):
# pylint: disable=unused-argument
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node,
exception=TypeError("format() takes at least 1 argument (0 given)"),
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinFormat,
builtin_spec=BuiltinParameterSpecs.builtin_format_spec,
empty_special_class=makeFormat0,
)
def staticmethod_extractor(node):
def makeStaticmethod0(source_ref):
# pylint: disable=unused-argument
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node,
exception=TypeError("staticmethod expected 1 arguments, got 0"),
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinStaticmethod,
builtin_spec=BuiltinParameterSpecs.builtin_staticmethod_spec,
empty_special_class=makeStaticmethod0,
)
def classmethod_extractor(node):
    def makeClassmethod0(source_ref):
# pylint: disable=unused-argument
return makeRaiseExceptionReplacementExpressionFromInstance(
expression=node,
exception=TypeError("classmethod expected 1 arguments, got 0"),
)
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionBuiltinClassmethod,
builtin_spec=BuiltinParameterSpecs.builtin_classmethod_spec,
        empty_special_class=makeClassmethod0,
)
def divmod_extractor(node):
return BuiltinParameterSpecs.extractBuiltinArgs(
node=node,
builtin_class=ExpressionOperationBinaryDivmod,
builtin_spec=BuiltinParameterSpecs.builtin_divmod_spec,
)
_dispatch_dict = {
"compile": compile_extractor,
"globals": globals_extractor,
"locals": locals_extractor,
"eval": eval_extractor,
"dir": dir_extractor,
"vars": vars_extractor,
"__import__": import_extractor,
"chr": chr_extractor,
"ord": ord_extractor,
"bin": bin_extractor,
"oct": oct_extractor,
"hex": hex_extractor,
"id": id_extractor,
"type": type_extractor,
"iter": iter_extractor,
"next": next_extractor,
"sum": sum_extractor,
"tuple": tuple_extractor,
"list": list_extractor,
"dict": dict_extractor,
"set": set_extractor,
"frozenset": frozenset_extractor,
"float": float_extractor,
"complex": complex_extractor,
"str": str_extractor,
"bool": bool_extractor,
"int": int_extractor,
"repr": repr_extractor,
"len": len_extractor,
"any": any_extractor,
"abs": abs_extractor,
"all": all_extractor,
"super": super_extractor,
"hasattr": hasattr_extractor,
"getattr": getattr_extractor,
"setattr": setattr_extractor,
"isinstance": isinstance_extractor,
"issubclass": issubclass_extractor,
"bytearray": bytearray_extractor,
"slice": slice_extractor,
"hash": hash_extractor,
"format": format_extractor,
"open": open_extractor,
"staticmethod": staticmethod_extractor,
"classmethod": classmethod_extractor,
"divmod": divmod_extractor,
}
if python_version < 0x300:
# These are not in Python3
_dispatch_dict["long"] = long_extractor
_dispatch_dict["unicode"] = unicode_extractor
_dispatch_dict["execfile"] = execfile_extractor
_dispatch_dict["xrange"] = xrange_extractor
_dispatch_dict["range"] = range_extractor
else:
# This one is not in Python2:
_dispatch_dict["bytes"] = bytes_extractor
_dispatch_dict["ascii"] = ascii_extractor
_dispatch_dict["exec"] = exec_extractor
# The Python3 range is really an xrange, use that.
_dispatch_dict["range"] = xrange_extractor
def check():
from nuitka.Builtins import builtin_names
for builtin_name in _dispatch_dict:
assert builtin_name in builtin_names, builtin_name
check()
_builtin_ignore_list = (
# Not supporting 'print', because it could be replaced, and is not
# worth the effort yet.
"print",
# TODO: This could, and should be supported, as we could e.g. lower
# types easily for it.
"sorted",
# TODO: This would be very worthwhile, as it could easily optimize
# its iteration away.
"zip",
# TODO: This would be most precious due to the type hint it gives
"enumerate",
# TODO: Also worthwhile for known values.
"reversed",
# TODO: Not sure what this really is about.
"memoryview",
)
def _describeNewNode(builtin_name, inspect_node):
"""Describe the change for better understanding."""
# Don't mention side effects, that's not what we care about.
if inspect_node.isExpressionSideEffects():
inspect_node = inspect_node.subnode_expression
if inspect_node.isExpressionBuiltinImport():
tags = "new_import"
message = """\
Replaced dynamic "__import__" call with static built-in call."""
elif inspect_node.isExpressionBuiltin() or inspect_node.isStatementExec():
tags = "new_builtin"
message = "Replaced call to built-in '%s' with built-in call '%s'." % (
builtin_name,
inspect_node.kind,
)
elif inspect_node.isExpressionRaiseException():
tags = "new_raise"
message = """\
Replaced call to built-in '%s' with exception raise.""" % (
builtin_name,
)
elif inspect_node.isExpressionOperationBinary():
tags = "new_expression"
message = """\
Replaced call to built-in '%s' with binary operation '%s'.""" % (
builtin_name,
inspect_node.getOperator(),
)
elif inspect_node.isExpressionOperationUnary():
tags = "new_expression"
message = """\
Replaced call to built-in '%s' with unary operation '%s'.""" % (
builtin_name,
inspect_node.getOperator(),
)
elif inspect_node.isExpressionCall():
tags = "new_expression"
message = """\
Replaced call to built-in '%s' with call.""" % (
builtin_name,
)
elif inspect_node.isExpressionOutlineBody():
tags = "new_expression"
message = (
"""\
Replaced call to built-in '%s' with outlined call."""
% builtin_name
)
elif inspect_node.isExpressionConstantRef():
tags = "new_expression"
message = (
"""\
Replaced call to built-in '%s' with constant value."""
% builtin_name
)
else:
assert False, (builtin_name, "->", inspect_node)
return tags, message
def computeBuiltinCall(builtin_name, call_node):
# There is some dispatching for how to output various types of changes,
# with lots of cases.
if builtin_name in _dispatch_dict:
new_node = _dispatch_dict[builtin_name](call_node)
assert new_node is not call_node, builtin_name
assert new_node is not None, builtin_name
        # For traces, we are going to ignore side effects, and describe the
        # change based only on the wrapped expression itself.
tags, message = _describeNewNode(builtin_name, new_node)
return new_node, tags, message
else:
if False and builtin_name not in _builtin_ignore_list:
optimization_logger.warning(
"Not handling built-in %r, consider support." % builtin_name
)
return call_node, None, None | builtin_class=wrapExpressionBuiltinExecfileCreation,
builtin_spec=BuiltinParameterSpecs.builtin_execfile_spec,
)
|
config.py | from os.path import abspath, dirname, join
from os import environ, path
_cwd = dirname(abspath(__file__))
basedir = path.abspath(path.dirname(__file__))
class BaseConfiguration(object):
| DEBUG = True
SECRET_KEY = 'Test'
CORS = ["http://localhost:4200", "http://127.0.0.1:5000"] |
|
service.rs | use super::{
retries::{FixedRetryPolicy, RetryLogic},
Batch, BatchServiceSink,
};
use crate::buffers::Acker;
use futures::Poll;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use std::time::Duration;
use tower::{
layer::{util::Stack, Layer},
limit::{concurrency::ConcurrencyLimit, rate::RateLimit},
retry::Retry,
timeout::Timeout,
util::BoxService,
Service, ServiceBuilder,
};
pub type TowerBatchedSink<T, L, B, S> =
BatchServiceSink<T, ConcurrencyLimit<RateLimit<Retry<FixedRetryPolicy<L>, Timeout<S>>>>, B>;
pub trait ServiceBuilderExt<L> {
fn map<R1, R2, F>(self, f: F) -> ServiceBuilder<Stack<MapLayer<R1, R2>, L>>
where
F: Fn(R1) -> R2 + Send + Sync + 'static;
fn settings<RL, Request>(
self,
settings: TowerRequestSettings,
retry_logic: RL,
) -> ServiceBuilder<Stack<TowerRequestLayer<RL, Request>, L>>;
}
impl<L> ServiceBuilderExt<L> for ServiceBuilder<L> {
fn map<R1, R2, F>(self, f: F) -> ServiceBuilder<Stack<MapLayer<R1, R2>, L>>
where
F: Fn(R1) -> R2 + Send + Sync + 'static,
{
self.layer(MapLayer { f: Arc::new(f) })
}
fn settings<RL, Request>(
self,
settings: TowerRequestSettings,
retry_logic: RL,
) -> ServiceBuilder<Stack<TowerRequestLayer<RL, Request>, L>> {
self.layer(TowerRequestLayer {
settings,
retry_logic,
_pd: std::marker::PhantomData,
})
}
}
/// Tower Request based configuration
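///
/// A minimal usage sketch, relying only on this module's own API: merging a
/// user-supplied config with defaults via `unwrap_with` yields the concrete
/// settings, with the fallback values noted in the field comments below.
///
/// ```ignore
/// let settings = TowerRequestConfig::default()
///     .unwrap_with(&TowerRequestConfig::default());
/// assert_eq!(settings.in_flight_limit, 5);
/// ```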
#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize)]
pub struct TowerRequestConfig {
pub in_flight_limit: Option<usize>, // 5
pub timeout_secs: Option<u64>, // 60
pub rate_limit_duration_secs: Option<u64>, // 1
pub rate_limit_num: Option<u64>, // 5
pub retry_attempts: Option<usize>, // max_value()
    pub retry_max_duration_secs: Option<u64>,  // 3600
pub retry_initial_backoff_secs: Option<u64>, // 1
}
impl TowerRequestConfig {
pub fn unwrap_with(&self, defaults: &TowerRequestConfig) -> TowerRequestSettings {
TowerRequestSettings {
in_flight_limit: self
.in_flight_limit
.or(defaults.in_flight_limit)
.unwrap_or(5),
timeout: Duration::from_secs(self.timeout_secs.or(defaults.timeout_secs).unwrap_or(60)),
rate_limit_duration: Duration::from_secs(
self.rate_limit_duration_secs
.or(defaults.rate_limit_duration_secs)
.unwrap_or(1),
),
rate_limit_num: self.rate_limit_num.or(defaults.rate_limit_num).unwrap_or(5),
retry_attempts: self
.retry_attempts
.or(defaults.retry_attempts)
.unwrap_or(usize::max_value()),
retry_max_duration_secs: Duration::from_secs(
self.retry_max_duration_secs
.or(defaults.retry_max_duration_secs)
.unwrap_or(3600),
),
retry_initial_backoff_secs: Duration::from_secs(
self.retry_initial_backoff_secs
.or(defaults.retry_initial_backoff_secs)
.unwrap_or(1),
),
}
}
}
#[derive(Debug, Clone)]
pub struct TowerRequestSettings {
pub in_flight_limit: usize,
pub timeout: Duration,
pub rate_limit_duration: Duration,
pub rate_limit_num: u64,
pub retry_attempts: usize,
pub retry_max_duration_secs: Duration,
pub retry_initial_backoff_secs: Duration,
}
impl TowerRequestSettings {
pub fn retry_policy<L: RetryLogic>(&self, logic: L) -> FixedRetryPolicy<L> {
FixedRetryPolicy::new(
self.retry_attempts,
self.retry_initial_backoff_secs,
self.retry_max_duration_secs,
logic,
)
}
pub fn batch_sink<B, L, S, T>(
&self,
retry_logic: L,
service: S,
acker: Acker,
) -> TowerBatchedSink<T, L, B, S>
// Would like to return `impl Sink + SinkExt<T>` here, but that
// doesn't work with later calls to `batched_with_min` etc (via
// `trait SinkExt` above), as it is missing a bound on the
// associated types that cannot be expressed in stable Rust.
where
L: RetryLogic<Error = S::Error, Response = S::Response>,
S: Clone + Service<T>,
S::Error: 'static + std::error::Error + Send + Sync,
S::Response: std::fmt::Debug,
T: Clone,
B: Batch<Output = T>,
{
let policy = self.retry_policy(retry_logic);
let service = ServiceBuilder::new()
.concurrency_limit(self.in_flight_limit)
.rate_limit(self.rate_limit_num, self.rate_limit_duration)
.retry(policy)
.timeout(self.timeout)
.service(service);
BatchServiceSink::new(service, acker)
}
}
#[derive(Debug, Clone)]
pub struct TowerRequestLayer<L, Request> {
settings: TowerRequestSettings,
retry_logic: L,
_pd: std::marker::PhantomData<Request>,
}
impl<S, L, Request> tower::layer::Layer<S> for TowerRequestLayer<L, Request>
where
S: Service<Request> + Send + Clone + 'static,
S::Response: Send + 'static,
S::Error: std::error::Error + Send + Sync + 'static,
S::Future: Send + 'static,
L: RetryLogic<Response = S::Response, Error = S::Error> + Send + 'static,
Request: Clone + Send + 'static,
{
type Service = BoxService<Request, S::Response, crate::Error>;
fn layer(&self, inner: S) -> Self::Service {
let policy = self.settings.retry_policy(self.retry_logic.clone());
let l = ServiceBuilder::new()
.concurrency_limit(self.settings.in_flight_limit)
.rate_limit(
self.settings.rate_limit_num,
self.settings.rate_limit_duration,
)
.retry(policy)
.timeout(self.settings.timeout)
.service(inner);
BoxService::new(l)
}
}
pub struct MapLayer<R1, R2> {
f: Arc<dyn Fn(R1) -> R2 + Send + Sync + 'static>,
}
impl<S, R1, R2> Layer<S> for MapLayer<R1, R2>
where
S: Service<R2>,
{
type Service = Map<S, R1, R2>;
fn | (&self, inner: S) -> Self::Service {
Map {
f: self.f.clone(),
inner,
}
}
}
pub struct Map<S, R1, R2> {
f: Arc<dyn Fn(R1) -> R2 + Send + Sync + 'static>,
inner: S,
}
impl<S, R1, R2> Service<R1> for Map<S, R1, R2>
where
S: Service<R2>,
crate::Error: From<S::Error>,
{
type Response = S::Response;
type Error = crate::Error;
type Future = futures::future::MapErr<S::Future, fn(S::Error) -> crate::Error>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.inner.poll_ready().map_err(Into::into)
}
fn call(&mut self, req: R1) -> Self::Future {
let req = (self.f)(req);
use futures::Future;
self.inner.call(req).map_err(|e| e.into())
}
}
impl<S: Clone, R1, R2> Clone for Map<S, R1, R2> {
fn clone(&self) -> Self {
Self {
f: self.f.clone(),
inner: self.inner.clone(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::Future;
use std::sync::Arc;
use tokio01_test::{assert_ready, task::MockTask};
use tower::layer::Layer;
use tower_test::{assert_request_eq, mock};
#[test]
fn map() {
let mut task = MockTask::new();
let (mock, mut handle) = mock::pair();
let f = |r| r;
let map_layer = MapLayer { f: Arc::new(f) };
let mut svc = map_layer.layer(mock);
task.enter(|| assert_ready!(svc.poll_ready()));
let res = svc.call("hello world");
assert_request_eq!(handle, "hello world").send_response("world bye");
res.wait().unwrap();
}
}
| layer |
parser.go | package dbc
import (
"bytes"
"fmt"
"math"
"strconv"
"strings"
"text/scanner"
"unicode/utf8"
)
const defaultScannerMode = scanner.ScanIdents | scanner.ScanFloats
const (
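	// Clearing a rune's bit in the scanner's Whitespace mask stops that rune
	// from being skipped; e.g. significantNewline makes the scanner return
	// '\n' as a token, which line-oriented definitions rely on.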
defaultWhitespace = scanner.GoWhitespace
significantNewline = defaultWhitespace & ^uint64(1<<'\n')
significantTab = defaultWhitespace & ^uint64(1<<'\t')
)
type token struct {
typ rune
pos scanner.Position
txt string
}
type Parser struct {
sc scanner.Scanner
curr token
lookahead token
hasLookahead bool
data []byte
defs []Def
}
func NewParser(filename string, data []byte) *Parser {
p := &Parser{data: data}
p.sc.Init(bytes.NewReader(data))
p.sc.Mode = defaultScannerMode
p.sc.Whitespace = defaultWhitespace
p.sc.Filename = filename
p.sc.Error = func(sc *scanner.Scanner, msg string) {
p.failf(sc.Pos(), msg)
}
return p
}
func (p *Parser) Defs() []Def {
return p.defs
}
func (p *Parser) File() *File {
return &File{
Name: p.sc.Filename,
Data: p.data,
Defs: p.defs,
}
}
func (p *Parser) Parse() (err Error) {
defer func() {
if r := recover(); r != nil {
// recover from parse errors only
if errParse, ok := r.(*parseError); ok {
err = errParse
} else {
panic(r)
}
}
}()
for p.peekToken().typ != scanner.EOF {
var def Def
switch p.peekKeyword() {
case KeywordVersion:
def = &VersionDef{}
case KeywordBitTiming:
def = &BitTimingDef{}
case KeywordNewSymbols:
def = &NewSymbolsDef{}
case KeywordNodes:
def = &NodesDef{}
case KeywordMessage:
def = &MessageDef{}
case KeywordSignal:
def = &SignalDef{}
case KeywordSignalMultiplexValue:
def = &SignalMultiplexValueDef{}
case KeywordEnvironmentVariable:
def = &EnvironmentVariableDef{}
case KeywordComment:
def = &CommentDef{}
case KeywordAttribute:
def = &AttributeDef{}
case KeywordAttributeDefault:
def = &AttributeDefaultValueDef{}
case KeywordAttributeValue:
def = &AttributeValueForObjectDef{}
case KeywordValueDescriptions:
def = &ValueDescriptionsDef{}
case KeywordValueTable:
def = &ValueTableDef{}
case KeywordSignalValueType:
def = &SignalValueTypeDef{}
case KeywordMessageTransmitters:
def = &MessageTransmittersDef{}
case KeywordEnvironmentVariableData:
def = &EnvironmentVariableDataDef{}
default:
def = &UnknownDef{}
}
def.parseFrom(p)
p.defs = append(p.defs, def)
}
return nil
}
func (p *Parser) failf(pos scanner.Position, format string, a ...interface{}) {
panic(&parseError{pos: pos, reason: fmt.Sprintf(format, a...)})
}
//
// Whitespace
//
func (p *Parser) useWhitespace(whitespace uint64) {
p.sc.Whitespace = whitespace
}
//
// Characters
//
func (p *Parser) nextRune() rune {
if p.hasLookahead {
if utf8.RuneCountInString(p.lookahead.txt) > 1 {
p.failf(p.lookahead.pos, "cannot get next rune when lookahead contains a token")
}
p.hasLookahead = false
r, _ := utf8.DecodeRuneInString(p.lookahead.txt)
return r
}
return p.sc.Next()
}
func (p *Parser) peekRune() rune {
if p.hasLookahead {
if utf8.RuneCountInString(p.lookahead.txt) > 1 {
p.failf(p.lookahead.pos, "cannot peek next rune when lookahead contains a token")
}
r, _ := utf8.DecodeRuneInString(p.lookahead.txt)
return r
}
return p.sc.Peek()
}
func (p *Parser) discardLine() {
p.useWhitespace(significantNewline)
defer p.useWhitespace(defaultWhitespace)
	// Fetch each token exactly once so the newline and EOF checks look at the same token.
	for tok := p.nextToken(); tok.typ != '\n' && tok.typ != scanner.EOF; tok = p.nextToken() {
		// skip all non-newline tokens
	}
}
//
// Tokens
//
func (p *Parser) nextToken() token {
if p.hasLookahead {
p.hasLookahead = false
p.curr = p.lookahead
return p.lookahead
}
p.curr = token{typ: p.sc.Scan(), pos: p.sc.Position, txt: p.sc.TokenText()}
return p.curr
}
func (p *Parser) peekToken() token {
if p.hasLookahead {
return p.lookahead
}
p.hasLookahead = true
p.lookahead = token{typ: p.sc.Scan(), pos: p.sc.Position, txt: p.sc.TokenText()}
return p.lookahead
}
//
// Data types
//
// string parses a string that may contain newlines.
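// Newlines inside the string are folded into single spaces and escaped
// double quotes are preserved with their backslash, as done below.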
func (p *Parser) string() string {
tok := p.nextToken()
if tok.typ != '"' {
p.failf(tok.pos, `expected token "`)
}
var b strings.Builder
ReadLoop:
for {
switch r := p.nextRune(); r {
case scanner.EOF:
p.failf(tok.pos, "unterminated string")
case '"':
break ReadLoop
case '\n':
if _, err := b.WriteRune(' '); err != nil {
p.failf(tok.pos, err.Error())
}
case '\\':
if p.peekRune() == '"' {
_ = p.nextRune() // include escaped quotes in string
if _, err := b.WriteString(`\"`); err != nil {
p.failf(tok.pos, err.Error())
}
continue
}
fallthrough
default:
if _, err := b.WriteRune(r); err != nil {
p.failf(tok.pos, err.Error())
}
}
}
return b.String()
}
func (p *Parser) identifier() Identifier {
tok := p.nextToken()
if tok.typ != scanner.Ident {
p.failf(tok.pos, "expected ident")
}
id := Identifier(tok.txt)
if err := id.Validate(); err != nil {
p.failf(tok.pos, err.Error())
}
return id
}
func (p *Parser) stringIdentifier() Identifier {
tok := p.peekToken()
id := Identifier(p.string())
if err := id.Validate(); err != nil {
p.failf(tok.pos, err.Error())
}
return id
}
func (p *Parser) keyword(kw Keyword) token {
if p.peekKeyword() != kw {
p.failf(p.peekToken().pos, "expected keyword: %v", kw)
}
return p.nextToken()
}
func (p *Parser) peekKeyword() Keyword {
tok := p.peekToken()
if tok.typ != scanner.Ident {
p.failf(p.peekToken().pos, "expected ident")
}
return Keyword(tok.txt)
}
func (p *Parser) token(typ rune) {
if tok := p.nextToken(); tok.typ != typ {
p.failf(
p.peekToken().pos,
"expected token: %v, found: %v (%v)",
scanner.TokenString(typ),
scanner.TokenString(tok.typ),
tok.txt,
)
}
}
func (p *Parser) optionalToken(typ rune) {
if p.peekToken().typ == typ {
p.token(typ)
}
}
func (p *Parser) enumValue(values []string) string {
tok := p.peekToken()
if tok.typ == scanner.Int {
// SPECIAL-CASE: Enum values by index encountered in the wild
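		// e.g. a bare 2 here selects values[2] rather than being read as the
		// literal string "2".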
i := p.uint()
if i >= uint64(len(values)) {
p.failf(tok.pos, "enum index out of bounds")
}
return values[i]
}
return p.string()
}
func (p *Parser) float() float64 {
var isNegative bool
if p.peekToken().typ == '-' {
p.token('-')
isNegative = true
}
tok := p.nextToken()
if tok.typ != scanner.Int && tok.typ != scanner.Float {
p.failf(p.peekToken().pos, "expected int or float")
}
f, err := strconv.ParseFloat(tok.txt, 64)
if err != nil {
p.failf(tok.pos, "invalid float")
}
if isNegative {
f *= -1
}
return f
}
func (p *Parser) int() int64 {
var isNegative bool
if p.peekToken().typ == '-' {
p.token('-')
isNegative = true
}
tok := p.nextToken()
if tok.typ != scanner.Int && tok.typ != scanner.Float {
p.failf(tok.pos, "expected int or float")
}
f, err := strconv.ParseFloat(tok.txt, 64)
if err != nil {
p.failf(tok.pos, "invalid int")
}
i := int64(f)
if f > math.MaxInt64 {
i = math.MaxInt64
} else if f < math.MinInt64 {
i = math.MinInt64
}
if isNegative {
i *= -1
}
return i
}
func (p *Parser) uint() uint64 {
tok := p.nextToken()
if tok.typ != scanner.Int {
p.failf(tok.pos, "expected int")
}
i, err := strconv.ParseUint(tok.txt, 10, 64)
if err != nil {
p.failf(tok.pos, "invalid uint")
}
return i
}
func (p *Parser) intInRange(min, max int) int {
var isNegative bool
if p.peekToken().typ == '-' {
p.token('-')
isNegative = true
} | if err != nil {
p.failf(tok.pos, "invalid int")
}
if isNegative {
i *= -1
}
if i < min || i > max {
p.failf(tok.pos, "invalid value")
}
return i
}
func (p *Parser) optionalUint() uint64 {
if p.peekToken().typ != scanner.Int {
return 0
}
tok := p.nextToken()
i, err := strconv.ParseUint(tok.txt, 10, 64)
if err != nil {
p.failf(tok.pos, "invalid uint")
}
return i
}
func (p *Parser) anyOf(tokenTypes ...rune) rune {
tok := p.nextToken()
for _, tokenType := range tokenTypes {
if tok.typ == tokenType {
return tok.typ
}
}
p.failf(tok.pos, "unexpected token")
return 0
}
func (p *Parser) optionalObjectType() ObjectType {
tok := p.peekToken()
if tok.typ != scanner.Ident {
return ObjectTypeUnspecified
}
objectType := ObjectType(p.identifier())
if err := objectType.Validate(); err != nil {
p.failf(tok.pos, err.Error())
}
return objectType
}
func (p *Parser) messageID() MessageID {
tok := p.peekToken()
messageID := MessageID(p.uint())
if err := messageID.Validate(); err != nil {
p.failf(tok.pos, err.Error())
}
return messageID
}
func (p *Parser) signalValueType() SignalValueType {
tok := p.peekToken()
signalValueType := SignalValueType(p.uint())
if err := signalValueType.Validate(); err != nil {
p.failf(tok.pos, err.Error())
}
return signalValueType
}
func (p *Parser) environmentVariableType() EnvironmentVariableType {
tok := p.peekToken()
environmentVariableType := EnvironmentVariableType(p.uint())
if err := environmentVariableType.Validate(); err != nil {
p.failf(tok.pos, err.Error())
}
return environmentVariableType
}
func (p *Parser) attributeValueType() AttributeValueType {
tok := p.peekToken()
attributeValueType := AttributeValueType(p.identifier())
if err := attributeValueType.Validate(); err != nil {
p.failf(tok.pos, err.Error())
}
return attributeValueType
}
func (p *Parser) accessType() AccessType {
tok := p.peekToken()
accessType := AccessType(p.identifier())
if err := accessType.Validate(); err != nil {
p.failf(tok.pos, "invalid access type")
}
return accessType
} | tok := p.nextToken()
i, err := strconv.Atoi(tok.txt) |
pstats.py | """Class for printing reports on profiled python code."""
# Class for printing reports on profiled python code. rev 1.0 4/1/94
#
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
#
# see profile.doc and profile.py for more info.
# Copyright 1994, by InfoSeek Corporation, all rights reserved.
# Written by James Roskind
#
# Permission to use, copy, modify, and distribute this Python software
# and its associated documentation for any purpose (subject to the
# restriction in the following sentence) without fee is hereby granted,
# provided that the above copyright notice appears in all copies, and
# that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of InfoSeek not be used in
# advertising or publicity pertaining to distribution of the software
# without specific, written prior permission. This permission is
# explicitly restricted to the copying and modification of the software
# to remain in Python, compiled Python, or other languages (such as C)
# wherein the modified or derived code is exclusively imported into a
# Python module.
#
# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import os
import time
import marshal
import re
__all__ = ["Stats"]
class Stats:
"""This class is used for creating reports from data generated by the
Profile class. It is a "friend" of that class, and imports data either
by direct access to members of Profile class, or by reading in a dictionary
that was emitted (via marshal) from the Profile class.
The big change from the previous Profiler (in terms of raw functionality)
is that an "add()" method has been provided to combine Stats from
several distinct profile runs. Both the constructor and the add()
method now take arbitrarily many file names as arguments.
All the print methods now take an argument that indicates how many lines
to print. If the arg is a floating point number between 0 and 1.0, then
it is taken as a decimal percentage of the available lines to be printed
(e.g., .1 means print 10% of all available lines). If it is an integer,
it is taken to mean the number of lines of data that you wish to have
printed.
The sort_stats() method now processes some additional options (i.e., in
addition to the old -1, 0, 1, or 2). It takes an arbitrary number of quoted
strings to select the sort order. For example sort_stats('time', 'name')
sorts on the major key of "internal function time", and on the minor
key of 'the name of the function'. Look at the two tables in sort_stats()
and get_sort_arg_defs(self) for more examples.
All methods now return "self", so you can string together commands like:
Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
print_stats(5).print_callers(5)
"""
def __init__(self, *args):
if not len(args):
arg = None
else:
arg = args[0]
args = args[1:]
self.init(arg)
apply(self.add, args)
def init(self, arg):
self.all_callees = None # calc only if needed
self.files = []
self.fcn_list = None
self.total_tt = 0
self.total_calls = 0
self.prim_calls = 0
self.max_name_len = 0
self.top_level = {}
self.stats = {}
self.sort_arg_dict = {}
self.load_stats(arg)
trouble = 1
try:
self.get_top_level_stats()
trouble = 0
finally:
if trouble:
print "Invalid timing data",
if self.files: print self.files[-1],
print
def load_stats(self, arg):
if not arg: self.stats = {}
elif type(arg) == type(""):
f = open(arg, 'rb')
self.stats = marshal.load(f)
f.close()
try:
file_stats = os.stat(arg)
arg = time.ctime(file_stats[8]) + " " + arg
except: # in case this is not unix
pass
self.files = [ arg ]
elif hasattr(arg, 'create_stats'):
arg.create_stats()
self.stats = arg.stats
arg.stats = {}
if not self.stats:
raise TypeError, "Cannot create or construct a " \
+ `self.__class__` \
+ " object from '" + `arg` + "'"
return
def get_top_level_stats(self):
for func, (cc, nc, tt, ct, callers) in self.stats.items():
self.total_calls += nc
self.prim_calls += cc
self.total_tt += tt
if callers.has_key(("jprofile", 0, "profiler")):
self.top_level[func] = None
if len(func_std_string(func)) > self.max_name_len:
self.max_name_len = len(func_std_string(func))
def add(self, *arg_list):
if not arg_list: return self
if len(arg_list) > 1: apply(self.add, arg_list[1:])
other = arg_list[0]
if type(self) != type(other) or self.__class__ != other.__class__:
other = Stats(other)
self.files += other.files
self.total_calls += other.total_calls
self.prim_calls += other.prim_calls
self.total_tt += other.total_tt
for func in other.top_level.keys():
self.top_level[func] = None
if self.max_name_len < other.max_name_len:
self.max_name_len = other.max_name_len
self.fcn_list = None
for func in other.stats.keys():
if self.stats.has_key(func):
old_func_stat = self.stats[func]
else:
old_func_stat = (0, 0, 0, 0, {},)
self.stats[func] = add_func_stats(old_func_stat, other.stats[func])
return self
# list the tuple indices and directions for sorting,
# along with some printable description
sort_arg_dict_default = {
"calls" : (((1,-1), ), "call count"),
"cumulative": (((3,-1), ), "cumulative time"),
"file" : (((4, 1), ), "file name"),
"line" : (((5, 1), ), "line number"),
"module" : (((4, 1), ), "file name"),
"name" : (((6, 1), ), "function name"),
"nfl" : (((6, 1),(4, 1),(5, 1),), "name/file/line"),
"pcalls" : (((0,-1), ), "call count"),
"stdname" : (((7, 1), ), "standard name"),
"time" : (((2,-1), ), "internal time"),
}
def get_sort_arg_defs(self):
"""Expand all abbreviations that are unique."""
if not self.sort_arg_dict:
self.sort_arg_dict = dict = {}
bad_list = {}
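            # For example, "cum" uniquely abbreviates "cumulative" and ends up
            # in the table, while "c" is ambiguous between "calls" and
            # "cumulative", lands in bad_list, and is removed again below.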
for word in self.sort_arg_dict_default.keys():
fragment = word
while fragment:
if not fragment:
break
if dict.has_key(fragment):
bad_list[fragment] = 0
break
dict[fragment] = self.sort_arg_dict_default[word]
fragment = fragment[:-1]
for word in bad_list.keys():
del dict[word]
return self.sort_arg_dict
def sort_stats(self, *field):
if not field:
self.fcn_list = 0
return self
if len(field) == 1 and type(field[0]) == type(1):
# Be compatible with old profiler
field = [ {-1: "stdname",
0:"calls",
1:"time",
2: "cumulative" } [ field[0] ] ]
sort_arg_defs = self.get_sort_arg_defs()
sort_tuple = ()
self.sort_type = ""
connector = ""
for word in field:
sort_tuple = sort_tuple + sort_arg_defs[word][0]
self.sort_type += connector + sort_arg_defs[word][1]
connector = ", "
stats_list = []
for func in self.stats.keys():
cc, nc, tt, ct, callers = self.stats[func]
stats_list.append((cc, nc, tt, ct) + func +
(func_std_string(func), func))
stats_list.sort(TupleComp(sort_tuple).compare)
self.fcn_list = fcn_list = []
for tuple in stats_list:
fcn_list.append(tuple[-1])
return self
def reverse_order(self):
if self.fcn_list:
self.fcn_list.reverse()
return self
def strip_dirs(self):
oldstats = self.stats
self.stats = newstats = {}
max_name_len = 0
for func in oldstats.keys():
cc, nc, tt, ct, callers = oldstats[func]
newfunc = func_strip_path(func)
if len(func_std_string(newfunc)) > max_name_len:
max_name_len = len(func_std_string(newfunc))
newcallers = {}
for func2 in callers.keys():
newcallers[func_strip_path(func2)] = callers[func2]
if newstats.has_key(newfunc):
newstats[newfunc] = add_func_stats(
newstats[newfunc],
(cc, nc, tt, ct, newcallers))
else:
newstats[newfunc] = (cc, nc, tt, ct, newcallers)
old_top = self.top_level
self.top_level = new_top = {}
for func in old_top.keys():
new_top[func_strip_path(func)] = None
self.max_name_len = max_name_len
self.fcn_list = None
self.all_callees = None
return self
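    # Build the inverse mapping from callers to callees: for each function,
    # record which functions it calls and how often.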
def calc_callees(self):
if self.all_callees: return
self.all_callees = all_callees = {}
for func in self.stats.keys():
if not all_callees.has_key(func):
all_callees[func] = {}
cc, nc, tt, ct, callers = self.stats[func]
for func2 in callers.keys():
if not all_callees.has_key(func2):
all_callees[func2] = {}
all_callees[func2][func] = callers[func2]
return
#******************************************************************
# The following functions support actual printing of reports
#******************************************************************
# Optional "amount" is either a line count, or a percentage of lines.
def eval_print_amount(self, sel, list, msg):
new_list = list
if type(sel) == type(""):
new_list = []
for func in list:
if re.search(sel, func_std_string(func)):
new_list.append(func)
else:
count = len(list)
if type(sel) == type(1.0) and 0.0 <= sel < 1.0:
count = int(count * sel + .5)
new_list = list[:count]
elif type(sel) == type(1) and 0 <= sel < count:
count = sel
new_list = list[:count]
if len(list) != len(new_list):
msg = msg + " List reduced from " + `len(list)` \
+ " to " + `len(new_list)` + \
" due to restriction <" + `sel` + ">\n"
return new_list, msg
def get_print_list(self, sel_list):
width = self.max_name_len
if self.fcn_list:
list = self.fcn_list[:]
msg = " Ordered by: " + self.sort_type + '\n'
else:
list = self.stats.keys()
msg = " Random listing order was used\n"
for selection in sel_list:
list, msg = self.eval_print_amount(selection, list, msg)
count = len(list)
if not list:
return 0, list
print msg
if count < len(self.stats):
width = 0
for func in list:
if len(func_std_string(func)) > width:
width = len(func_std_string(func))
return width+2, list
def print_stats(self, *amount):
for filename in self.files:
print filename
if self.files: print
indent = ' ' * 8
for func in self.top_level.keys():
print indent, func_get_function_name(func)
print indent, self.total_calls, "function calls",
if self.total_calls != self.prim_calls:
print "(%d primitive calls)" % self.prim_calls,
print "in %.3f CPU seconds" % self.total_tt
print
width, list = self.get_print_list(amount)
if list:
self.print_title()
for func in list:
self.print_line(func)
print
print
return self
def print_callees(self, *amount):
width, list = self.get_print_list(amount)
if list:
self.calc_callees()
self.print_call_heading(width, "called...")
for func in list:
if self.all_callees.has_key(func):
self.print_call_line(width, func, self.all_callees[func])
else:
self.print_call_line(width, func, {})
print
print
return self
def print_callers(self, *amount):
width, list = self.get_print_list(amount)
if list:
self.print_call_heading(width, "was called by...")
for func in list:
cc, nc, tt, ct, callers = self.stats[func]
self.print_call_line(width, func, callers)
print
print
return self
def print_call_heading(self, name_size, column_title):
print "Function ".ljust(name_size) + column_title
def print_call_line(self, name_size, source, call_dict):
print func_std_string(source).ljust(name_size),
if not call_dict:
print "--"
return
clist = call_dict.keys()
clist.sort()
name_size = name_size + 1
indent = ""
for func in clist:
name = func_std_string(func)
print indent*name_size + name + '(' \
+ `call_dict[func]`+')', \
f8(self.stats[func][3])
indent = " "
def print_title(self):
print ' ncalls tottime percall cumtime percall', \
'filename:lineno(function)'
def print_line(self, func): # hack : should print percentages
cc, nc, tt, ct, callers = self.stats[func]
c = str(nc)
if nc != cc:
c = c + '/' + str(cc)
print c.rjust(9),
print f8(tt),
if nc == 0:
print ' '*8,
else:
print f8(tt/nc),
print f8(ct),
if cc == 0:
print ' '*8,
else:
print f8(ct/cc),
print func_std_string(func)
def ignore(self):
# Deprecated since 1.5.1 -- see the docs.
pass # has no return value, so use at end of line :-)
class TupleComp:
"""This class provides a generic function for comparing any two tuples.
Each instance records a list of tuple-indices (from most significant
    to least significant), and sort direction (ascending or descending) for
    each tuple-index. The compare functions can then be used as the function
    argument to the system sort() function when a list of tuples needs to be
    sorted in the instance's order."""
def __init__(self, comp_select_list):
self.comp_select_list = comp_select_list
def compare (self, left, right):
for index, direction in self.comp_select_list:
l = left[index]
r = right[index]
if l < r:
return -direction
if l > r:
return direction
return 0
#**************************************************************************
# func_name is a triple (file:string, line:int, name:string)
def func_strip_path(func_name):
file, line, name = func_name
return os.path.basename(file), line, name
def func_get_function_name(func):
return func[2]
def func_std_string(func_name): # match what old profile produced
return "%s:%d(%s)" % func_name
#**************************************************************************
# The following functions combine statistics for pairs of functions.
# The bulk of the processing involves correctly handling "call" lists,
# such as callers and callees.
#**************************************************************************
def add_func_stats(target, source):
|
def add_callers(target, source):
"""Combine two caller lists in a single list."""
new_callers = {}
for func in target.keys():
new_callers[func] = target[func]
for func in source.keys():
if new_callers.has_key(func):
new_callers[func] = source[func] + new_callers[func]
else:
new_callers[func] = source[func]
return new_callers
def count_calls(callers):
"""Sum the caller statistics to get total number of calls received."""
nc = 0
for func in callers.keys():
nc += callers[func]
return nc
#**************************************************************************
# The following functions support printing of reports
#**************************************************************************
def f8(x):
return "%8.3f" % x
#**************************************************************************
# Statistics browser added by ESR, April 2001
#**************************************************************************
if __name__ == '__main__':
import cmd
try:
import readline
except ImportError:
pass
class ProfileBrowser(cmd.Cmd):
def __init__(self, profile=None):
cmd.Cmd.__init__(self)
self.prompt = "% "
if profile:
self.stats = Stats(profile)
else:
self.stats = None
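        # Parse integer, fractional, and regex arguments from the command
        # line, then dispatch to the named Stats method with them.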
def generic(self, fn, line):
args = line.split()
processed = []
for term in args:
try:
processed.append(int(term))
continue
except ValueError:
pass
try:
frac = float(term)
if frac > 1 or frac < 0:
print "Fraction argument mus be in [0, 1]"
continue
processed.append(frac)
continue
except ValueError:
pass
processed.append(term)
if self.stats:
apply(getattr(self.stats, fn), processed)
else:
print "No statistics object is loaded."
return 0
def generic_help(self):
print "Arguments may be:"
print "* An integer maximum number of entries to print."
print "* A decimal fractional number between 0 and 1, controlling"
print " what fraction of selected entries to print."
print "* A regular expression; only entries with function names"
print " that match it are printed."
def do_add(self, line):
self.stats.add(line)
return 0
def help_add(self):
print "Add profile info from given file to current statistics object."
def do_callees(self, line):
return self.generic('print_callees', line)
def help_callees(self):
print "Print callees statistics from the current stat object."
self.generic_help()
def do_callers(self, line):
return self.generic('print_callers', line)
def help_callers(self):
print "Print callers statistics from the current stat object."
self.generic_help()
def do_EOF(self, line):
print ""
return 1
def help_EOF(self):
print "Leave the profile brower."
def do_quit(self, line):
return 1
def help_quit(self):
print "Leave the profile brower."
def do_read(self, line):
if line:
try:
self.stats = Stats(line)
except IOError, args:
print args[1]
return
self.prompt = line + "% "
elif len(self.prompt) > 2:
line = self.prompt[-2:]
else:
print "No statistics object is current -- cannot reload."
return 0
def help_read(self):
print "Read in profile data from a specified file."
def do_reverse(self, line):
self.stats.reverse_order()
return 0
def help_reverse(self):
print "Reverse the sort order of the profiling report."
def do_sort(self, line):
abbrevs = self.stats.get_sort_arg_defs().keys()
if line and not filter(lambda x,a=abbrevs: x not in a,line.split()):
apply(self.stats.sort_stats, line.split())
else:
print "Valid sort keys (unique prefixes are accepted):"
for (key, value) in Stats.sort_arg_dict_default.items():
print "%s -- %s" % (key, value[1])
return 0
def help_sort(self):
print "Sort profile data according to specified keys."
print "(Typing `sort' without arguments lists valid keys.)"
def complete_sort(self, text, *args):
return [a for a in Stats.sort_arg_dict_default.keys() if a.startswith(text)]
def do_stats(self, line):
return self.generic('print_stats', line)
def help_stats(self):
print "Print statistics from the current stat object."
self.generic_help()
def do_strip(self, line):
self.stats.strip_dirs()
return 0
def help_strip(self):
print "Strip leading path information from filenames in the report."
def postcmd(self, stop, line):
if stop:
return stop
return None
import sys
print "Welcome to the profile statistics browser."
if len(sys.argv) > 1:
initprofile = sys.argv[1]
else:
initprofile = None
try:
ProfileBrowser(initprofile).cmdloop()
print "Goodbye."
except KeyboardInterrupt:
pass
# That's all, folks.
| """Add together all the stats for two profile entries."""
cc, nc, tt, ct, callers = source
t_cc, t_nc, t_tt, t_ct, t_callers = target
return (cc+t_cc, nc+t_nc, tt+t_tt, ct+t_ct,
add_callers(t_callers, callers)) |
add-edit-per-diem.page.ts | // TODO: Very hard to fix this file without making massive changes
/* eslint-disable complexity */
import { Component, ElementRef, EventEmitter, OnInit, ViewChild } from '@angular/core';
import { ActivatedRoute, Router } from '@angular/router';
import { combineLatest, concat, forkJoin, from, iif, noop, Observable, of, throwError } from 'rxjs';
import { OfflineService } from 'src/app/core/services/offline.service';
import {
catchError,
concatMap,
debounceTime,
distinctUntilChanged,
filter,
finalize,
map,
shareReplay,
startWith,
switchMap,
take,
tap,
withLatestFrom,
} from 'rxjs/operators';
import { AbstractControl, FormArray, FormBuilder, FormGroup, ValidationErrors, Validators } from '@angular/forms';
import { AccountsService } from 'src/app/core/services/accounts.service';
import { DateService } from 'src/app/core/services/date.service';
import * as moment from 'moment';
import { CustomInputsService } from 'src/app/core/services/custom-inputs.service';
import { CustomFieldsService } from 'src/app/core/services/custom-fields.service';
import { cloneDeep, isEmpty, isEqual, isNumber } from 'lodash';
import { CurrencyService } from 'src/app/core/services/currency.service';
import { ReportService } from 'src/app/core/services/report.service';
import { ProjectsService } from 'src/app/core/services/projects.service';
import { TransactionService } from 'src/app/core/services/transaction.service';
import { LoaderService } from 'src/app/core/services/loader.service';
import { AuthService } from 'src/app/core/services/auth.service';
import { PolicyService } from 'src/app/core/services/policy.service';
import { DataTransformService } from 'src/app/core/services/data-transform.service';
import { FyCriticalPolicyViolationComponent } from 'src/app/shared/components/fy-critical-policy-violation/fy-critical-policy-violation.component';
import { ModalController, NavController, PopoverController } from '@ionic/angular';
import { TransactionsOutboxService } from 'src/app/core/services/transactions-outbox.service';
import { PolicyViolationComponent } from './policy-violation/policy-violation.component';
import { StatusService } from 'src/app/core/services/status.service';
import { NetworkService } from 'src/app/core/services/network.service';
import { PopupService } from 'src/app/core/services/popup.service';
import { DuplicateDetectionService } from 'src/app/core/services/duplicate-detection.service';
import { TrackingService } from '../../core/services/tracking.service';
import { CurrencyPipe } from '@angular/common';
import { TokenService } from 'src/app/core/services/token.service';
import { RecentlyUsedItemsService } from 'src/app/core/services/recently-used-items.service';
import { RecentlyUsed } from 'src/app/core/models/v1/recently_used.model';
import { ExtendedProject } from 'src/app/core/models/v2/extended-project.model';
import { CostCenter } from 'src/app/core/models/v1/cost-center.model';
import { ExpenseFieldsService } from 'src/app/core/services/expense-fields.service';
import { ModalPropertiesService } from 'src/app/core/services/modal-properties.service';
import { ViewCommentComponent } from 'src/app/shared/components/comments-history/view-comment/view-comment.component';
import { PopupAlertComponentComponent } from 'src/app/shared/components/popup-alert-component/popup-alert-component.component';
import { FyDeleteDialogComponent } from 'src/app/shared/components/fy-delete-dialog/fy-delete-dialog.component';
import { MatSnackBar } from '@angular/material/snack-bar';
import { ToastMessageComponent } from 'src/app/shared/components/toast-message/toast-message.component';
import { SnackbarPropertiesService } from 'src/app/core/services/snackbar-properties.service';
@Component({
selector: 'app-add-edit-per-diem',
templateUrl: './add-edit-per-diem.page.html',
styleUrls: ['./add-edit-per-diem.page.scss'],
})
export class | implements OnInit {
@ViewChild('duplicateInputContainer') duplicateInputContainer: ElementRef;
@ViewChild('formContainer') formContainer: ElementRef;
@ViewChild('comments') commentsContainer: ElementRef;
title: string;
activeIndex: number;
reviewList: string[];
mode = 'add';
canCreatePerDiem$: Observable<boolean>;
allowedPerDiemRateOptions$: Observable<any[]>;
paymentModes$: Observable<any[]>;
homeCurrency$: Observable<string>;
fg: FormGroup;
minDate: string;
maxDate: string;
txnFields$: Observable<any>;
subCategories$: Observable<any[]>;
isAmountDisabled = false;
etxn$: Observable<any>;
isIndividualProjectsEnabled$: Observable<boolean>;
individualProjectIds$: Observable<[]>;
isProjectsEnabled$: Observable<boolean>;
customInputs$: Observable<any>;
costCenters$: Observable<any>;
reports$: Observable<any[]>;
isBalanceAvailableInAnyAdvanceAccount$: Observable<boolean>;
paymentModeInvalid$: Observable<boolean>;
isAmountCapped$: Observable<boolean>;
isAmountDisabled$: Observable<boolean>;
isCriticalPolicyViolated$: Observable<boolean>;
projectCategoryIds$: Observable<string[]>;
filteredCategories$: Observable<any>;
isConnected$: Observable<boolean>;
invalidPaymentMode = false;
duplicates$: Observable<any>;
duplicateBoxOpen = false;
pointToDuplicates = false;
isAdvancesEnabled$: Observable<boolean>;
comments$: Observable<any>;
expenseStartTime;
policyDetails;
navigateBack = false;
savePerDiemLoader = false;
saveAndNextPerDiemLoader = false;
saveAndPrevPerDiemLoader = false;
clusterDomain: string;
initialFetch;
individualPerDiemRatesEnabled$: Observable<boolean>;
recentlyUsedValues$: Observable<RecentlyUsed>;
recentProjects: { label: string; value: ExtendedProject; selected?: boolean }[];
recentlyUsedProjects$: Observable<ExtendedProject[]>;
presetProjectId: number;
recentCostCenters: { label: string; value: CostCenter; selected?: boolean }[];
presetCostCenterId: number;
recentlyUsedCostCenters$: Observable<{ label: string; value: CostCenter; selected?: boolean }[]>;
isExpandedView = false;
isProjectVisible$: Observable<boolean>;
duplicateDetectionReasons = [
{ label: 'Different expense', value: 'Different expense' },
{ label: 'Other', value: 'Other' },
];
billableDefaultValue: boolean;
canDeleteExpense = true;
constructor(
private activatedRoute: ActivatedRoute,
private offlineService: OfflineService,
private fb: FormBuilder,
private dateService: DateService,
private accountsService: AccountsService,
private customInputsService: CustomInputsService,
private customFieldsService: CustomFieldsService,
private currencyService: CurrencyService,
private reportService: ReportService,
private projectService: ProjectsService,
private transactionsOutboxService: TransactionsOutboxService,
private transactionService: TransactionService,
private authService: AuthService,
private policyService: PolicyService,
private dataTransformService: DataTransformService,
private loaderService: LoaderService,
private router: Router,
private modalController: ModalController,
private statusService: StatusService,
private networkService: NetworkService,
private popupService: PopupService,
private duplicateDetectionService: DuplicateDetectionService,
private navController: NavController,
private trackingService: TrackingService,
private currencyPipe: CurrencyPipe,
private tokenService: TokenService,
private recentlyUsedItemsService: RecentlyUsedItemsService,
private expenseFieldsService: ExpenseFieldsService,
private popoverController: PopoverController,
private modalProperties: ModalPropertiesService,
private matSnackBar: MatSnackBar,
private snackbarProperties: SnackbarPropertiesService
) {}
ngOnInit() {
if (this.activatedRoute.snapshot.params.remove_from_report) {
this.canDeleteExpense = this.activatedRoute.snapshot.params.remove_from_report === 'true';
}
}
get minPerDiemDate() {
return this.fg.controls.from_dt.value && moment(this.fg.controls.from_dt.value).subtract(1, 'day').format('y-MM-D');
}
get showSaveAndNext() {
return this.activeIndex !== null && this.reviewList !== null && +this.activeIndex === this.reviewList.length - 1;
}
async showClosePopup() {
const isAutofilled = this.presetProjectId || this.presetCostCenterId;
if (this.fg.touched || isAutofilled) {
const unsavedChangesPopOver = await this.popoverController.create({
component: PopupAlertComponentComponent,
componentProps: {
title: 'Unsaved Changes',
message: 'You have unsaved information that will be lost if you discard this expense.',
primaryCta: {
text: 'Discard',
action: 'continue',
},
secondaryCta: {
text: 'Cancel',
action: 'cancel',
},
},
cssClass: 'pop-up-in-center',
});
await unsavedChangesPopOver.present();
const { data } = await unsavedChangesPopOver.onWillDismiss();
if (data && data.action === 'continue') {
if (this.navigateBack) {
this.navController.back();
} else {
this.goBack();
}
}
} else {
if (this.activatedRoute.snapshot.params.id) {
this.trackingService.viewExpense({ Type: 'Per Diem' });
}
if (this.navigateBack) {
this.navController.back();
} else {
this.goBack();
}
}
}
goBack() {
if (this.activatedRoute.snapshot.params.persist_filters) {
this.navController.back();
} else {
this.router.navigate(['/', 'enterprise', 'my_expenses']);
}
}
canGetDuplicates() {
return this.offlineService.getOrgSettings().pipe(
map((orgSettings) => {
const isAmountPresent = isNumber(
this.fg.controls.currencyObj.value && this.fg.controls.currencyObj.value.amount
);
return this.fg.valid && orgSettings.policies.duplicate_detection_enabled && isAmountPresent;
})
);
}
checkForDuplicates() {
const customFields$ = this.customInputs$.pipe(
take(1),
map((customInputs) =>
customInputs.map((customInput, i) => ({
id: customInput.id,
mandatory: customInput.mandatory,
name: customInput.name,
options: customInput.options,
placeholder: customInput.placeholder,
prefix: customInput.prefix,
type: customInput.type,
value: this.fg.value.custom_inputs[i].value,
}))
)
);
return this.canGetDuplicates().pipe(
switchMap((canGetDuplicates) =>
iif(
() => canGetDuplicates,
this.generateEtxnFromFg(this.etxn$, customFields$).pipe(
switchMap((etxn) => this.duplicateDetectionService.getPossibleDuplicates(etxn.tx))
),
of(null)
)
)
);
}
getPossibleDuplicates() {
return this.checkForDuplicates();
}
async trackDuplicatesShown(duplicates, etxn) {
try {
const duplicateTxnIds = duplicates.reduce((prev, cur) => prev.concat(cur.duplicate_transaction_ids), []);
const duplicateFields = duplicates.reduce((prev, cur) => prev.concat(cur.duplicate_fields), []);
await this.trackingService.duplicateDetectionAlertShown({
Page: this.mode === 'add' ? 'Add Per Diem' : 'Edit Per Diem',
ExpenseId: etxn.tx.id,
DuplicateExpenses: duplicateTxnIds,
DuplicateFields: duplicateFields,
});
} catch (err) {
// Ignore event tracking errors
}
}
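  // Re-run duplicate detection on (debounced) form changes and briefly
  // highlight the duplicates section the first time any are found.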
setupDuplicateDetection() {
this.duplicates$ = this.fg.valueChanges.pipe(
debounceTime(1000),
distinctUntilChanged((a, b) => isEqual(a, b)),
switchMap(() => this.getPossibleDuplicates())
);
this.duplicates$
.pipe(
filter((duplicates) => duplicates && duplicates.length),
take(1)
)
.subscribe((res) => {
this.pointToDuplicates = true;
setTimeout(() => {
this.pointToDuplicates = false;
}, 3000);
this.etxn$.pipe(take(1)).subscribe(async (etxn) => await this.trackDuplicatesShown(res, etxn));
});
}
showDuplicates() {
const duplicateInputContainer = this.duplicateInputContainer.nativeElement as HTMLElement;
if (duplicateInputContainer) {
duplicateInputContainer.scrollIntoView({
behavior: 'smooth',
block: 'nearest',
inline: 'start',
});
this.pointToDuplicates = false;
}
}
goToPrev() {
this.activeIndex = this.activatedRoute.snapshot.params.activeIndex;
if (this.reviewList[+this.activeIndex - 1]) {
this.transactionService.getETxn(this.reviewList[+this.activeIndex - 1]).subscribe((etxn) => {
this.goToTransaction(etxn, this.reviewList, +this.activeIndex - 1);
});
}
}
goToNext() {
this.activeIndex = this.activatedRoute.snapshot.params.activeIndex;
if (this.reviewList[+this.activeIndex + 1]) {
this.transactionService.getETxn(this.reviewList[+this.activeIndex + 1]).subscribe((etxn) => {
this.goToTransaction(etxn, this.reviewList, +this.activeIndex + 1);
});
}
}
async showCannotEditActivityDialog() {
const popupResult = await this.popupService.showPopup({
header: 'Cannot Edit Activity Expense!',
// eslint-disable-next-line max-len
message: `To edit this activity expense, you need to login to web version of Fyle app at <a href="${this.clusterDomain}">${this.clusterDomain}</a>`,
primaryCta: {
text: 'Close',
},
showCancelButton: false,
});
}
goToTransaction(expense, reviewList, activeIndex) {
let category;
if (expense.tx.org_category) {
category = expense.tx.org_category.toLowerCase();
}
if (category === 'activity') {
this.showCannotEditActivityDialog();
return;
}
if (category === 'mileage') {
this.router.navigate([
'/',
'enterprise',
'add_edit_mileage',
{
id: expense.tx.id,
txnIds: JSON.stringify(reviewList),
activeIndex,
},
]);
} else if (category === 'per diem') {
this.router.navigate([
'/',
'enterprise',
'add_edit_per_diem',
{
id: expense.tx.id,
txnIds: JSON.stringify(reviewList),
activeIndex,
},
]);
} else {
this.router.navigate([
'/',
'enterprise',
'add_edit_expense',
{
id: expense.tx.id,
txnIds: JSON.stringify(reviewList),
activeIndex,
},
]);
}
}
setupNetworkWatcher() {
const networkWatcherEmitter = new EventEmitter<boolean>();
this.networkService.connectivityWatcher(networkWatcherEmitter);
this.isConnected$ = concat(this.networkService.isOnline(), networkWatcherEmitter.asObservable()).pipe(
shareReplay(1)
);
}
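  // An advance-account payment mode is invalid when its tentative balance
  // cannot cover the entered amount.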
checkIfInvalidPaymentMode() {
return this.etxn$.pipe(
map((etxn) => {
const paymentAccount = this.fg.value.paymentMode;
const originalSourceAccountId = etxn && etxn.tx && etxn.tx.source_account_id;
let isPaymentModeInvalid = false;
if (paymentAccount && paymentAccount.acc && paymentAccount.acc.type === 'PERSONAL_ADVANCE_ACCOUNT') {
if (paymentAccount.acc.id !== originalSourceAccountId) {
isPaymentModeInvalid =
paymentAccount.acc.tentative_balance_amount <
(this.fg.controls.currencyObj.value && this.fg.controls.currencyObj.value.amount);
} else {
isPaymentModeInvalid =
paymentAccount.acc.tentative_balance_amount + etxn.tx.amount <
(this.fg.controls.currencyObj.value && this.fg.controls.currencyObj.value.amount);
}
}
return isPaymentModeInvalid;
})
);
}
getTransactionFields() {
return this.fg.valueChanges.pipe(
startWith({}),
switchMap((formValue) =>
forkJoin({
expenseFieldsMap: this.offlineService.getExpenseFieldsMap(),
perDiemCategoriesContainer: this.getPerDiemCategories(),
}).pipe(
switchMap(({ expenseFieldsMap, perDiemCategoriesContainer }) => {
const fields = ['purpose', 'cost_center_id', 'project_id', 'from_dt', 'to_dt', 'num_days', 'billable'];
return this.expenseFieldsService.filterByOrgCategoryId(
expenseFieldsMap,
fields,
formValue.sub_category || perDiemCategoriesContainer.defaultPerDiemCategory
);
})
)
),
map((expenseFieldsMap: any) => {
if (expenseFieldsMap) {
for (const tfc of Object.keys(expenseFieldsMap)) {
if (expenseFieldsMap[tfc].options && expenseFieldsMap[tfc].options.length > 0) {
expenseFieldsMap[tfc].options = expenseFieldsMap[tfc].options.map((value) => ({ label: value, value }));
}
}
}
return expenseFieldsMap;
}),
shareReplay(1)
);
}
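  // Apply org-configured default values to transaction-field controls that
  // are still empty.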
setupTfcDefaultValues() {
const tfcValues$ = this.fg.valueChanges.pipe(
startWith({}),
switchMap((formValue) =>
forkJoin({
expenseFieldsMap: this.offlineService.getExpenseFieldsMap(),
perDiemCategoriesContainer: this.getPerDiemCategories(),
}).pipe(
switchMap(({ expenseFieldsMap, perDiemCategoriesContainer }) => {
const fields = ['purpose', 'cost_center_id', 'from_dt', 'to_dt', 'num_days', 'billable'];
return this.expenseFieldsService.filterByOrgCategoryId(
expenseFieldsMap,
fields,
formValue.sub_category || perDiemCategoriesContainer.defaultPerDiemCategory
);
})
)
),
map((tfc) => this.expenseFieldsService.getDefaultTxnFieldValues(tfc))
);
tfcValues$.subscribe((defaultValues) => {
this.billableDefaultValue = defaultValues.billable;
const keyToControlMap: { [id: string]: AbstractControl } = {
purpose: this.fg.controls.purpose,
cost_center_id: this.fg.controls.costCenter,
from_dt: this.fg.controls.from_dt,
to_dt: this.fg.controls.to_dt,
num_days: this.fg.controls.num_days,
billable: this.fg.controls.billable,
};
for (const defaultValueColumn in defaultValues) {
if (defaultValues.hasOwnProperty(defaultValueColumn)) {
const control = keyToControlMap[defaultValueColumn];
if (!control.value && defaultValueColumn !== 'billable') {
control.patchValue(defaultValues[defaultValueColumn]);
} else if (
control.value === null &&
control.value === undefined &&
this.fg.controls.project.value &&
defaultValueColumn !== 'billable' &&
!control.touched
) {
control.patchValue(defaultValues[defaultValueColumn]);
}
}
}
});
}
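  // Offer personal and advance accounts (with sufficient balance) as
  // selectable payment modes.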
getPaymentModes() {
const orgSettings$ = this.offlineService.getOrgSettings();
const accounts$ = this.offlineService.getAccounts();
return forkJoin({
accounts: accounts$,
orgSettings: orgSettings$,
}).pipe(
map(({ accounts, orgSettings }) => {
const isAdvanceEnabled =
(orgSettings.advances && orgSettings.advances.enabled) ||
(orgSettings.advance_requests && orgSettings.advance_requests.enabled);
const isMultipleAdvanceEnabled =
orgSettings && orgSettings.advance_account_settings && orgSettings.advance_account_settings.multiple_accounts;
const userAccounts = this.accountsService
.filterAccountsWithSufficientBalance(
accounts.filter((account) => account.acc.type),
isAdvanceEnabled
)
.filter((userAccount) => ['PERSONAL_ACCOUNT', 'PERSONAL_ADVANCE_ACCOUNT'].includes(userAccount.acc.type));
return this.accountsService.constructPaymentModes(userAccounts, isMultipleAdvanceEnabled);
}),
map((paymentModes) =>
paymentModes.map((paymentMode: any) => ({ label: paymentMode.acc.displayName, value: paymentMode }))
)
);
}
getSubCategories() {
return this.offlineService.getAllEnabledCategories().pipe(
map((categories) => {
const parentCategoryName = 'per diem';
return categories.filter(
(orgCategory) =>
parentCategoryName.toLowerCase() === orgCategory.name.toLowerCase() &&
parentCategoryName.toLowerCase() !== orgCategory.sub_category.toLowerCase()
);
}),
shareReplay(1)
);
}
getProjectCategoryIds() {
return this.offlineService.getAllEnabledCategories().pipe(
map((categories) => {
const perDiemCategories = categories
.filter((category) => ['Per Diem'].indexOf(category.fyle_category) > -1)
.map((category) => category.id as string);
return perDiemCategories;
})
);
}
getPerDiemCategories() {
return this.offlineService.getAllEnabledCategories().pipe(
map((categories) => {
const orgCategoryName = 'per diem';
const defaultPerDiemCategory = categories.find(
(category) => category.name.toLowerCase() === orgCategoryName.toLowerCase()
);
const perDiemCategories = categories.filter((category) => ['Per Diem'].indexOf(category.fyle_category) > -1);
return {
defaultPerDiemCategory,
perDiemCategories,
};
})
);
}
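  // Construct a blank per diem expense with defaults for the current user,
  // home currency, and default per diem category.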
getNewExpense() {
return forkJoin({
categoryContainer: this.getPerDiemCategories(),
homeCurrency: this.offlineService.getHomeCurrency(),
currentEou: this.authService.getEou(),
}).pipe(
map(({ categoryContainer, homeCurrency, currentEou }) => ({
tx: {
skip_reimbursement: false,
source: 'MOBILE',
org_category_id: categoryContainer.defaultPerDiemCategory && categoryContainer.defaultPerDiemCategory.id,
org_category: categoryContainer.defaultPerDiemCategory && categoryContainer.defaultPerDiemCategory.name,
sub_category:
categoryContainer.defaultPerDiemCategory && categoryContainer.defaultPerDiemCategory.sub_category,
amount: 0,
currency: homeCurrency,
state: 'COMPLETE',
txn_dt: new Date(),
from_dt: null,
to_dt: null,
per_diem_rate_id: null,
num_days: null,
policy_amount: null,
custom_properties: [],
org_user_id: currentEou.ou.id,
},
}))
);
}
getEditExpense() {
return this.transactionService.getETxn(this.activatedRoute.snapshot.params.id).pipe(shareReplay(1));
}
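  // Restrict selectable sub-categories to those allowed for the chosen
  // project, resetting the field if the current value is no longer valid.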
setupFilteredCategories(activeCategories$: Observable<any>) {
this.filteredCategories$ = this.fg.controls.project.valueChanges.pipe(
tap(() => {
if (!this.fg.controls.project.value) {
this.fg.patchValue({ billable: false });
} else {
this.fg.patchValue({ billable: this.billableDefaultValue });
}
}),
startWith(this.fg.controls.project.value),
concatMap((project) =>
activeCategories$.pipe(
map((activeCategories) => this.projectService.getAllowedOrgCategoryIds(project, activeCategories))
)
),
map((categories) => categories.map((category) => ({ label: category.sub_category, value: category })))
);
this.filteredCategories$.subscribe((categories) => {
if (
this.fg.value.sub_category &&
this.fg.value.sub_category.id &&
!categories.some(
(category) => this.fg.value.sub_category && this.fg.value.sub_category.id === category.value.id
)
) {
this.fg.controls.sub_category.reset();
}
});
}
getTimeSpentOnPage() {
const expenseEndTime = new Date().getTime();
// Get time spent on page in seconds
return (expenseEndTime - this.expenseStartTime) / 1000;
}
getCustomInputs() {
this.initialFetch = true;
return this.fg.controls.sub_category.valueChanges.pipe(
startWith({}),
switchMap(() => {
const category = this.fg.controls.sub_category.value;
if (this.initialFetch) {
return this.etxn$.pipe(
switchMap((etxn) =>
iif(
() => etxn.tx.org_category_id,
this.offlineService
.getAllEnabledCategories()
.pipe(map((categories) => categories.find((category) => category.id === etxn.tx.org_category_id))),
this.getPerDiemCategories().pipe(map((perDiemContainer) => perDiemContainer.defaultPerDiemCategory))
)
)
);
}
if (category && !isEmpty(category)) {
return of(category);
} else {
return this.getPerDiemCategories().pipe(map((perDiemContainer) => perDiemContainer.defaultPerDiemCategory));
}
}),
switchMap((category: any) => {
const formValue = this.fg.value;
return this.offlineService
.getCustomInputs()
.pipe(
map((customFields: any) =>
this.customFieldsService.standardizeCustomFields(
formValue.custom_inputs || [],
this.customInputsService.filterByCategory(customFields, category && category.id)
)
)
);
}),
map((customFields) =>
customFields.map((customField) => {
if (customField.options) {
customField.options = customField.options.map((option) => ({ label: option, value: option }));
}
return customField;
})
),
switchMap((customFields: any[]) =>
this.isConnected$.pipe(
take(1),
map((isConnected) => {
const customFieldsFormArray = this.fg.controls.custom_inputs as FormArray;
customFieldsFormArray.clear();
for (const customField of customFields) {
customFieldsFormArray.push(
this.fb.group({
name: [customField.name],
value: [
customField.type !== 'DATE' ? customField.value : moment(customField.value).format('y-MM-DD'),
isConnected &&
customField.type !== 'BOOLEAN' &&
customField.type !== 'USER_LIST' &&
customField.mandatory &&
Validators.required,
],
})
);
}
return customFields.map((customField, i) => ({ ...customField, control: customFieldsFormArray.at(i) }));
})
)
),
shareReplay(1)
);
}
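  // Validates that the selected date is not earlier than the "from" date.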
customDateValidator(control: AbstractControl) {
if (!this.fg) {
return;
}
const fromDt = moment(new Date(this.fg.value.from_dt));
const passedInDate = control.value && moment(new Date(control.value));
if (passedInDate) {
return passedInDate.isSame(fromDt) || passedInDate.isAfter(fromDt)
? null
: {
invalidDateSelection: true,
};
}
}
ionViewWillEnter() {
this.navigateBack = this.activatedRoute.snapshot.params.navigate_back;
this.expenseStartTime = new Date().getTime();
const today = new Date();
this.minDate = moment(new Date('Jan 1, 2001')).format('y-MM-D');
this.maxDate = moment(this.dateService.addDaysToDate(today, 1)).format('y-MM-D');
from(this.tokenService.getClusterDomain()).subscribe((clusterDomain) => {
this.clusterDomain = clusterDomain;
});
this.fg = this.fb.group({
currencyObj: [
{
value: null,
disabled: true,
},
],
paymentMode: [, Validators.required],
project: [],
sub_category: [],
per_diem_rate: [, Validators.required],
purpose: [],
num_days: [, Validators.compose([Validators.required, Validators.min(0)])],
report: [],
from_dt: [],
to_dt: [, this.customDateValidator.bind(this)],
custom_inputs: new FormArray([]),
add_to_new_report: [],
duplicate_detection_reason: [],
billable: [],
costCenter: [],
});
this.title = 'Add Expense';
this.activeIndex = this.activatedRoute.snapshot.params.activeIndex;
this.reviewList =
this.activatedRoute.snapshot.params.txnIds && JSON.parse(this.activatedRoute.snapshot.params.txnIds);
this.title =
this.activeIndex > -1 && this.reviewList && this.activeIndex < this.reviewList.length ? 'Review' : 'Edit';
if (this.activatedRoute.snapshot.params.id) {
this.mode = 'edit';
}
this.isExpandedView = this.mode !== 'add';
const orgSettings$ = this.offlineService.getOrgSettings();
const perDiemRates$ = this.offlineService.getPerDiemRates();
const orgUserSettings$ = this.offlineService.getOrgUserSettings();
this.isAdvancesEnabled$ = orgSettings$.pipe(
map(
(orgSettings) =>
(orgSettings.advances && orgSettings.advances.enabled) ||
(orgSettings.advance_requests && orgSettings.advance_requests.enabled)
)
);
this.individualPerDiemRatesEnabled$ = orgSettings$.pipe(
map((orgSettings) => orgSettings.per_diem.enable_individual_per_diem_rates)
);
this.setupNetworkWatcher();
this.recentlyUsedValues$ = this.isConnected$.pipe(
take(1),
switchMap((isConnected) => {
if (isConnected) {
return this.recentlyUsedItemsService.getRecentlyUsed();
} else {
return of(null);
}
})
);
const allowedPerDiemRates$ = from(this.loaderService.showLoader())
.pipe(
switchMap(() =>
forkJoin({
orgSettings: orgSettings$,
allowedPerDiemRates: perDiemRates$.pipe(
switchMap((perDiemRates) => this.offlineService.getAllowedPerDiems(perDiemRates))
),
})
),
finalize(() => from(this.loaderService.hideLoader()))
)
.pipe(
switchMap(({ orgSettings, allowedPerDiemRates }) =>
iif(
() => allowedPerDiemRates.length > 0 || orgSettings.per_diem.enable_individual_per_diem_rates,
of(allowedPerDiemRates),
perDiemRates$
)
),
map((rates) => rates.filter((rate) => rate.active)),
map((rates) =>
rates.map((rate) => {
rate.full_name = `${rate.name} (${rate.rate} ${rate.currency} per day)`;
return rate;
})
)
);
this.canCreatePerDiem$ = from(this.loaderService.showLoader())
.pipe(
switchMap(() =>
forkJoin({
orgSettings: orgSettings$,
perDiemRates: perDiemRates$,
allowedPerDiemRates: allowedPerDiemRates$,
})
),
finalize(() => from(this.loaderService.hideLoader()))
)
.pipe(
map(({ orgSettings, perDiemRates, allowedPerDiemRates }) => {
if (orgSettings.per_diem.enable_individual_per_diem_rates) {
if (allowedPerDiemRates.length > 0 && perDiemRates.length > 0) {
return true;
} else {
return false;
}
} else {
return perDiemRates.length > 0;
}
})
);
this.txnFields$ = this.getTransactionFields();
this.paymentModes$ = this.getPaymentModes();
this.homeCurrency$ = this.offlineService.getHomeCurrency();
this.subCategories$ = this.getSubCategories();
this.setupFilteredCategories(this.subCategories$);
this.projectCategoryIds$ = this.getProjectCategoryIds();
this.isProjectVisible$ = this.projectCategoryIds$.pipe(
switchMap((projectCategoryIds) => this.offlineService.getProjectCount({ categoryIds: projectCategoryIds }))
);
this.comments$ = this.statusService.find('transactions', this.activatedRoute.snapshot.params.id);
combineLatest([this.isConnected$, this.filteredCategories$])
.pipe(distinctUntilChanged((a, b) => isEqual(a, b)))
.subscribe(([isConnected, filteredCategories]) => {
this.fg.controls.sub_category.clearValidators();
if (isConnected && filteredCategories && filteredCategories.length) {
this.fg.controls.sub_category.setValidators(Validators.required);
}
this.fg.controls.sub_category.updateValueAndValidity();
});
this.allowedPerDiemRateOptions$ = allowedPerDiemRates$.pipe(
map((allowedPerDiemRates) =>
allowedPerDiemRates.map((rate) => {
rate.readableRate = this.currencyPipe.transform(rate.rate, rate.currency, 'symbol', '1.2-2') + ' per day';
return { label: rate.name, value: rate };
})
)
);
this.isIndividualProjectsEnabled$ = orgSettings$.pipe(
map((orgSettings) => orgSettings.advanced_projects && orgSettings.advanced_projects.enable_individual_projects)
);
this.individualProjectIds$ = orgUserSettings$.pipe(
map((orgUserSettings: any) => orgUserSettings.project_ids || [])
);
this.etxn$ = iif(() => this.mode === 'add', this.getNewExpense(), this.getEditExpense());
this.isProjectsEnabled$ = orgSettings$.pipe(
map((orgSettings) => orgSettings.projects && orgSettings.projects.enabled)
);
this.customInputs$ = this.getCustomInputs();
this.costCenters$ = forkJoin({
orgSettings: orgSettings$,
orgUserSettings: orgUserSettings$,
}).pipe(
switchMap(({ orgSettings, orgUserSettings }) => {
if (orgSettings.cost_centers.enabled) {
return this.offlineService.getAllowedCostCenters(orgUserSettings);
} else {
return of([]);
}
}),
map((costCenters) =>
costCenters.map((costCenter) => ({
label: costCenter.name,
value: costCenter,
}))
)
);
this.recentlyUsedCostCenters$ = forkJoin({
costCenters: this.costCenters$,
recentValue: this.recentlyUsedValues$,
}).pipe(
concatMap(({ costCenters, recentValue }) =>
this.recentlyUsedItemsService.getRecentCostCenters(costCenters, recentValue)
)
);
this.reports$ = this.reportService
.getFilteredPendingReports({ state: 'edit' })
.pipe(map((reports) => reports.map((report) => ({ label: report.rp.purpose, value: report }))));
this.txnFields$
.pipe(
distinctUntilChanged((a, b) => isEqual(a, b)),
switchMap((txnFields) =>
this.isConnected$.pipe(
take(1),
withLatestFrom(this.costCenters$),
map(([isConnected, costCenters]) => ({
isConnected,
txnFields,
costCenters,
}))
)
)
)
.subscribe(({ isConnected, txnFields, costCenters }) => {
const keyToControlMap: { [id: string]: AbstractControl } = {
purpose: this.fg.controls.purpose,
cost_center_id: this.fg.controls.costCenter,
from_dt: this.fg.controls.from_dt,
to_dt: this.fg.controls.to_dt,
num_days: this.fg.controls.num_days,
project_id: this.fg.controls.project,
billable: this.fg.controls.billable,
};
for (const control of Object.values(keyToControlMap)) {
control.clearValidators();
control.updateValueAndValidity();
}
for (const txnFieldKey of Object.keys(txnFields)) {
const control = keyToControlMap[txnFieldKey];
if (txnFields[txnFieldKey].is_mandatory) {
if (txnFieldKey === 'num_days') {
control.setValidators(Validators.compose([Validators.required, Validators.min(0)]));
} else if (txnFieldKey === 'to_dt') {
control.setValidators(
isConnected ? Validators.compose([this.customDateValidator.bind(this), Validators.required]) : null
);
} else if (txnFieldKey === 'cost_center_id') {
control.setValidators(isConnected && costCenters && costCenters.length > 0 ? Validators.required : null);
} else {
control.setValidators(isConnected ? Validators.required : null);
}
} else {
if (txnFieldKey === 'num_days') {
control.setValidators(Validators.compose([Validators.required, Validators.min(0)]));
}
if (txnFieldKey === 'to_dt') {
control.setValidators(isConnected ? this.customDateValidator.bind(this) : null);
}
}
control.updateValueAndValidity();
}
this.fg.updateValueAndValidity();
});
this.setupTfcDefaultValues();
this.isAmountCapped$ = this.etxn$.pipe(
map((etxn) => isNumber(etxn.tx.admin_amount) || isNumber(etxn.tx.policy_amount))
);
this.isAmountDisabled$ = this.etxn$.pipe(map((etxn) => !!etxn.tx.admin_amount));
this.isCriticalPolicyViolated$ = this.etxn$.pipe(
map((etxn) => isNumber(etxn.tx.policy_amount) && etxn.tx.policy_amount < 0.0001)
);
this.getPolicyDetails();
combineLatest(this.fg.controls.from_dt.valueChanges, this.fg.controls.to_dt.valueChanges)
.pipe(distinctUntilChanged((a, b) => isEqual(a, b)))
.subscribe(([fromDt, toDt]) => {
if (fromDt && toDt) {
const fromDate = moment(new Date(fromDt));
const toDate = moment(new Date(toDt));
if (toDate.isSame(fromDate)) {
this.fg.controls.num_days.setValue(1);
} else if (toDate.isAfter(fromDate)) {
this.fg.controls.num_days.setValue(toDate.diff(fromDate, 'day') + 1);
}
}
});
combineLatest(this.fg.controls.from_dt.valueChanges, this.fg.controls.num_days.valueChanges)
.pipe(distinctUntilChanged((a, b) => isEqual(a, b)))
.subscribe(([fromDt, numDays]) => {
if (fromDt && numDays && numDays > 0) {
const fromDate = moment(this.dateService.getUTCDate(new Date(fromDt)));
this.fg.controls.to_dt.setValue(fromDate.add(+numDays - 1, 'day').format('y-MM-DD'), {
emitEvent: false,
});
}
});
combineLatest(
this.fg.controls.per_diem_rate.valueChanges,
this.fg.controls.num_days.valueChanges,
this.homeCurrency$
)
.pipe(
distinctUntilChanged((a, b) => isEqual(a, b)),
filter(([perDiemRate, numDays, homeCurrency]) => !!perDiemRate && !!numDays && !!homeCurrency),
filter(([perDiemRate, numDays, homeCurrency]) => perDiemRate.currency === homeCurrency)
)
.subscribe(([perDiemRate, numDays, homeCurrency]) => {
if (perDiemRate && numDays && homeCurrency) {
if (perDiemRate.currency === homeCurrency) {
this.fg.controls.currencyObj.setValue({
currency: perDiemRate.currency,
amount: (perDiemRate.rate * numDays).toFixed(2),
orig_currency: null,
orig_amount: null,
});
}
}
});
combineLatest(
this.fg.controls.per_diem_rate.valueChanges,
this.fg.controls.num_days.valueChanges,
this.homeCurrency$
)
.pipe(
distinctUntilChanged((a, b) => isEqual(a, b)),
filter(([perDiemRate, numDays, homeCurrency]) => !!perDiemRate && !!numDays && !!homeCurrency),
filter(([perDiemRate, numDays, homeCurrency]) => perDiemRate.currency !== homeCurrency),
switchMap(([perDiemRate, numDays, homeCurrency]) =>
this.currencyService
.getExchangeRate(perDiemRate.currency, homeCurrency)
.pipe(map((res) => [perDiemRate, numDays, homeCurrency, res]))
)
)
.subscribe(([perDiemRate, numDays, homeCurrency, exchangeRate]) => {
this.fg.controls.currencyObj.setValue({
currency: homeCurrency,
amount: (perDiemRate.rate * numDays * exchangeRate).toFixed(2),
orig_currency: perDiemRate.currency,
orig_amount: (perDiemRate.rate * numDays).toFixed(2),
});
});
this.setupDuplicateDetection();
this.isBalanceAvailableInAnyAdvanceAccount$ = this.fg.controls.paymentMode.valueChanges.pipe(
switchMap((paymentMode) => {
if (paymentMode && paymentMode.acc && paymentMode.acc.type === 'PERSONAL_ACCOUNT') {
return this.offlineService
.getAccounts()
.pipe(
map(
(accounts) =>
accounts.filter(
(account) =>
account &&
account.acc &&
account.acc.type === 'PERSONAL_ADVANCE_ACCOUNT' &&
account.acc.tentative_balance_amount > 0
).length > 0
)
);
}
return of(false);
})
);
const selectedProject$ = this.etxn$.pipe(
switchMap((etxn) => {
if (etxn.tx.project_id) {
return of(etxn.tx.project_id);
} else {
return forkJoin({
orgSettings: this.offlineService.getOrgSettings(),
orgUserSettings: this.offlineService.getOrgUserSettings(),
}).pipe(
map(({ orgSettings, orgUserSettings }) => {
if (orgSettings.projects.enabled) {
return orgUserSettings && orgUserSettings.preferences && orgUserSettings.preferences.default_project_id;
}
})
);
}
}),
switchMap((projectId) => {
if (projectId) {
return this.projectService.getbyId(projectId);
} else {
return of(null);
}
})
);
const selectedPaymentMode$ = this.etxn$.pipe(
switchMap((etxn) =>
iif(
() => etxn.tx.source_account_id,
this.paymentModes$.pipe(
map((paymentModes) =>
paymentModes
.map((res) => res.value)
.find((paymentMode) => {
if (paymentMode.acc.displayName === 'Paid by Me') {
return paymentMode.acc.id === etxn.tx.source_account_id && !etxn.tx.skip_reimbursement;
} else {
return paymentMode.acc.id === etxn.tx.source_account_id;
}
})
)
),
of(null)
)
)
);
const defaultPaymentMode$ = this.paymentModes$.pipe(
map((paymentModes) =>
paymentModes.map((res) => res.value).find((paymentMode) => paymentMode.acc.displayName === 'Paid by Me')
)
);
this.recentlyUsedProjects$ = forkJoin({
recentValues: this.recentlyUsedValues$,
perDiemCategoryIds: this.projectCategoryIds$,
eou: this.authService.getEou(),
}).pipe(
switchMap(({ recentValues, perDiemCategoryIds, eou }) =>
this.recentlyUsedItemsService.getRecentlyUsedProjects({
recentValues,
eou,
categoryIds: perDiemCategoryIds,
})
)
);
const selectedSubCategory$ = this.etxn$.pipe(
switchMap((etxn) =>
iif(
() => etxn.tx.org_category_id,
this.subCategories$.pipe(
map((subCategories) => subCategories.find((subCategory) => subCategory.id === etxn.tx.org_category_id))
),
of(null)
)
)
);
const selectedPerDiemOption$ = this.etxn$.pipe(
switchMap((etxn) =>
iif(
() => etxn.tx.per_diem_rate_id,
this.allowedPerDiemRateOptions$.pipe(
map((perDiemOptions) =>
perDiemOptions
.map((res) => res.value)
.find((perDiemOption) => perDiemOption.id === etxn.tx.per_diem_rate_id)
)
),
of(null)
)
)
);
const selectedReport$ = this.etxn$.pipe(
switchMap((etxn) =>
iif(
() => etxn.tx.report_id,
this.reports$.pipe(
map((reportOptions) =>
reportOptions.map((res) => res.value).find((reportOption) => reportOption.rp.id === etxn.tx.report_id)
)
),
of(null)
)
)
);
const selectedCostCenter$ = this.etxn$.pipe(
switchMap((etxn) => {
if (etxn.tx.cost_center_id) {
return of(etxn.tx.cost_center_id);
} else {
return forkJoin({
orgSettings: this.offlineService.getOrgSettings(),
costCenters: this.costCenters$,
}).pipe(
map(({ orgSettings, costCenters }) => {
if (orgSettings.cost_centers.enabled) {
if (costCenters.length === 1 && this.mode === 'add') {
return costCenters[0].value.id;
}
}
})
);
}
}),
switchMap((costCenterId) => {
if (costCenterId) {
return this.costCenters$.pipe(
map((costCenters) =>
costCenters.map((res) => res.value).find((costCenter) => costCenter.id === costCenterId)
)
);
} else {
return of(null);
}
})
);
const selectedCustomInputs$ = this.etxn$.pipe(
switchMap((etxn) =>
this.offlineService
.getCustomInputs()
.pipe(
map((customFields) =>
this.customFieldsService.standardizeCustomFields(
[],
this.customInputsService.filterByCategory(customFields, etxn.tx.org_category_id)
)
)
)
)
);
from(this.loaderService.showLoader())
.pipe(
switchMap(() =>
combineLatest([
this.etxn$,
selectedPaymentMode$,
selectedProject$,
selectedSubCategory$,
selectedPerDiemOption$,
this.txnFields$,
selectedReport$,
selectedCostCenter$,
selectedCustomInputs$,
defaultPaymentMode$,
orgUserSettings$,
orgSettings$,
this.recentlyUsedValues$,
this.recentlyUsedProjects$,
this.recentlyUsedCostCenters$,
])
),
take(1),
finalize(() => from(this.loaderService.hideLoader()))
)
.subscribe(
([
etxn,
paymentMode,
project,
subCategory,
perDiemRate,
txnFields,
report,
costCenter,
customInputs,
defaultPaymentMode,
orgUserSettings,
orgSettings,
recentValue,
recentProjects,
recentCostCenters,
]) => {
const customInputValues = customInputs.map((customInput) => {
const cpor =
etxn.tx.custom_properties &&
etxn.tx.custom_properties.find((customProp) => customProp.name === customInput.name);
if (customInput.type === 'DATE') {
return {
name: customInput.name,
value: (cpor && cpor.value && moment(new Date(cpor.value)).format('y-MM-DD')) || null,
};
} else {
return {
name: customInput.name,
value: (cpor && cpor.value) || null,
};
}
});
          // Check if autofill is enabled
const isAutofillsEnabled =
orgSettings.org_expense_form_autofills &&
orgSettings.org_expense_form_autofills.allowed &&
orgSettings.org_expense_form_autofills.enabled &&
orgUserSettings.expense_form_autofills.allowed &&
orgUserSettings.expense_form_autofills.enabled;
// Check if recent projects exist
const doRecentProjectIdsExist =
isAutofillsEnabled &&
recentValue &&
recentValue.recent_project_ids &&
recentValue.recent_project_ids.length > 0;
if (recentProjects && recentProjects.length > 0) {
this.recentProjects = recentProjects.map((item) => ({ label: item.project_name, value: item }));
}
/* Autofill project during these cases:
* 1. Autofills is allowed and enabled
* 2. During add expense - When project field is empty
* 3. During edit expense - When the expense is in draft state and there is no project already added
* 4. When there exists recently used project ids to auto-fill
*/
if (
doRecentProjectIdsExist &&
(!etxn.tx.id || (etxn.tx.id && etxn.tx.state === 'DRAFT' && !etxn.tx.project_id))
) {
const autoFillProject = recentProjects && recentProjects.length > 0 && recentProjects[0];
if (autoFillProject) {
project = autoFillProject;
this.presetProjectId = project.project_id;
}
}
// Check if recent cost centers exist
const doRecentCostCenterIdsExist =
isAutofillsEnabled &&
recentValue &&
recentValue.recent_cost_center_ids &&
recentValue.recent_cost_center_ids.length > 0;
if (recentCostCenters && recentCostCenters.length > 0) {
this.recentCostCenters = recentCostCenters;
}
/* Autofill cost center during these cases:
* 1. Autofills is allowed and enabled
* 2. During add expense - When cost center field is empty
* 3. During edit expense - When the expense is in draft state and there is no cost center already added - optional
* 4. When there exists recently used cost center ids to auto-fill
*/
if (
doRecentCostCenterIdsExist &&
(!etxn.tx.id || (etxn.tx.id && etxn.tx.state === 'DRAFT' && !etxn.tx.cost_center_id))
) {
const autoFillCostCenter = recentCostCenters && recentCostCenters.length > 0 && recentCostCenters[0];
if (autoFillCostCenter) {
costCenter = autoFillCostCenter.value;
this.presetCostCenterId = autoFillCostCenter.value.id;
}
}
this.fg.patchValue({
paymentMode: paymentMode || defaultPaymentMode,
project,
sub_category: subCategory,
per_diem_rate: perDiemRate,
purpose: etxn.tx.purpose,
num_days: etxn.tx.num_days,
report,
from_dt: etxn.tx.from_dt ? moment(new Date(etxn.tx.from_dt)).format('y-MM-DD') : null,
to_dt: etxn.tx.to_dt ? moment(new Date(etxn.tx.to_dt)).format('y-MM-DD') : null,
billable: etxn.tx.billable,
duplicate_detection_reason: etxn.tx.user_reason_for_duplicate_expenses,
costCenter,
});
this.initialFetch = false;
setTimeout(() => {
this.fg.controls.custom_inputs.patchValue(customInputValues);
}, 1000);
}
);
this.paymentModeInvalid$ = iif(() => this.activatedRoute.snapshot.params.id, this.etxn$, of(null)).pipe(
map((etxn) => {
if (this.fg.value.paymentMode.acc.type === 'PERSONAL_ADVANCE_ACCOUNT') {
if (
etxn &&
etxn.id &&
this.fg.value.paymentMode.acc.id === etxn.source_account_id &&
etxn.state !== 'DRAFT'
) {
return (
this.fg.value.paymentMode.acc.tentative_balance_amount + etxn.amount < this.fg.value.currencyObj.amount
);
} else {
return this.fg.value.paymentMode.acc.tentative_balance_amount < this.fg.value.currencyObj.amount;
}
} else {
return false;
}
})
);
}
generateEtxnFromFg(etxn$, standardisedCustomProperties$) {
return forkJoin({
etxn: etxn$,
customProperties: standardisedCustomProperties$,
}).pipe(
map((res) => {
const etxn: any = res.etxn;
let customProperties: any = res.customProperties;
customProperties = customProperties.map((customProperty) => {
if (customProperty.type === 'DATE') {
customProperty.value = customProperty.value && this.dateService.getUTCDate(new Date(customProperty.value));
}
return customProperty;
});
const skipReimbursement =
this.fg.value.paymentMode.acc.type === 'PERSONAL_ACCOUNT' && !this.fg.value.paymentMode.acc.isReimbursable;
const formValue = this.fg.value;
const currencyObj = this.fg.controls.currencyObj.value;
const amountData: any = {
currency: currencyObj.currency,
amount: currencyObj.amount,
orig_currency: currencyObj.orig_currency,
orig_amount: currencyObj.orig_amount,
};
return {
tx: {
...etxn.tx,
source_account_id: formValue.paymentMode.acc.id,
billable: formValue.billable,
org_category_id: (formValue.sub_category && formValue.sub_category.id) || etxn.tx.org_category_id,
skip_reimbursement: skipReimbursement,
per_diem_rate_id: formValue.per_diem_rate.id,
source: 'MOBILE',
currency: amountData.currency,
amount: parseInt(amountData.amount, 10),
orig_currency: amountData.orig_currency,
orig_amount: amountData.orig_amount,
project_id: formValue.project && formValue.project.project_id,
purpose: formValue.purpose,
custom_properties: customProperties || [],
org_user_id: etxn.tx.org_user_id,
from_dt: formValue.from_dt && this.dateService.getUTCDate(new Date(formValue.from_dt)),
to_dt: formValue.from_dt && this.dateService.getUTCDate(new Date(formValue.to_dt)),
category: null,
num_days: formValue.num_days,
cost_center_id: formValue.costCenter && formValue.costCenter.id,
cost_center_name: formValue.costCenter && formValue.costCenter.name,
cost_center_code: formValue.costCenter && formValue.costCenter.code,
user_reason_for_duplicate_expenses: formValue.duplicate_detection_reason,
},
dataUrls: [],
ou: etxn.ou,
};
})
);
}
checkPolicyViolation(etxn) {
// Prepare etxn object with just tx and ou object required for test call
return from(this.authService.getEou()).pipe(
switchMap((currentEou) => {
const policyETxn = {
tx: cloneDeep(etxn.tx),
ou: cloneDeep(etxn.ou),
};
if (!etxn.tx.id) {
policyETxn.ou = currentEou.ou;
}
        /* Adding number of attachments and sending in test call as tx_num_files
* If editing an expense with receipts, check for already uploaded receipts
*/
if (etxn.tx) {
policyETxn.tx.num_files = etxn.tx.num_files;
// Check for receipts uploaded from mobile
if (etxn.dataUrls && etxn.dataUrls.length > 0) {
policyETxn.tx.num_files = etxn.tx.num_files + etxn.dataUrls.length;
}
}
return this.offlineService.getAllEnabledCategories().pipe(
map((categories: any[]) => {
// policy engine expects org_category and sub_category fields
if (policyETxn.tx.org_category_id) {
const orgCategory = categories.find((cat) => cat.id === policyETxn.tx.org_category_id);
policyETxn.tx.org_category = orgCategory && orgCategory.name;
policyETxn.tx.sub_category = orgCategory && orgCategory.sub_category;
} else {
policyETxn.tx.org_category_id = null;
policyETxn.tx.sub_category = null;
policyETxn.tx.org_category = null;
}
// Flatten the etxn obj
return this.dataTransformService.etxnRaw(policyETxn);
})
);
}),
switchMap((policyETxn) => this.transactionService.testPolicy(policyETxn))
);
}
async continueWithCriticalPolicyViolation(criticalPolicyViolations: string[]) {
const fyCriticalPolicyViolationPopOver = await this.popoverController.create({
component: FyCriticalPolicyViolationComponent,
componentProps: {
criticalViolationMessages: criticalPolicyViolations,
},
cssClass: 'pop-up-in-center',
});
await fyCriticalPolicyViolationPopOver.present();
const { data } = await fyCriticalPolicyViolationPopOver.onWillDismiss();
return !!data;
}
async continueWithPolicyViolations(policyViolations: string[], policyActionDescription: string) {
const currencyModal = await this.modalController.create({
component: PolicyViolationComponent,
componentProps: {
policyViolationMessages: policyViolations,
policyActionDescription,
},
mode: 'ios',
presentingElement: await this.modalController.getTop(),
...this.modalProperties.getModalDefaultProperties(),
});
await currencyModal.present();
const { data } = await currencyModal.onWillDismiss();
return data;
}
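  // Run policy checks, let the user acknowledge any violations, then queue
  // the new expense through the transactions outbox.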
addExpense(redirectedFrom) {
this.savePerDiemLoader = redirectedFrom === 'SAVE_PER_DIEM';
this.saveAndNextPerDiemLoader = redirectedFrom === 'SAVE_AND_NEXT_PERDIEM';
this.saveAndPrevPerDiemLoader = redirectedFrom === 'SAVE_AND_PREV_PERDIEM';
const customFields$ = this.customInputs$.pipe(
take(1),
map((customInputs) =>
customInputs.map((customInput, i) => ({
id: customInput.id,
mandatory: customInput.mandatory,
name: customInput.name,
options: customInput.options,
placeholder: customInput.placeholder,
prefix: customInput.prefix,
type: customInput.type,
value: this.fg.value.custom_inputs[i].value,
}))
)
);
return from(this.generateEtxnFromFg(this.etxn$, customFields$)).pipe(
switchMap((etxn) =>
this.isConnected$.pipe(
take(1),
switchMap((isConnected) => {
if (isConnected) {
const policyViolations$ = this.checkPolicyViolation(etxn).pipe(shareReplay(1));
return policyViolations$.pipe(
map(this.policyService.getCriticalPolicyRules),
switchMap((criticalPolicyViolations) => {
if (criticalPolicyViolations.length > 0) {
return throwError({
type: 'criticalPolicyViolations',
policyViolations: criticalPolicyViolations,
etxn,
});
} else {
return policyViolations$;
}
}),
map((policyViolations: any) => [
this.policyService.getPolicyRules(policyViolations),
policyViolations &&
policyViolations.transaction_desired_state &&
policyViolations.transaction_desired_state.action_description,
]),
switchMap(([policyViolations, policyActionDescription]) => {
if (policyViolations.length > 0) {
return throwError({
type: 'policyViolations',
policyViolations,
policyActionDescription,
etxn,
});
} else {
return of({ etxn, comment: null });
}
})
);
} else {
return of({ etxn, comment: null });
}
})
)
),
catchError((err) => {
if (err.status === 500) {
return this.generateEtxnFromFg(this.etxn$, customFields$).pipe(map((etxn) => ({ etxn })));
}
if (err.type === 'criticalPolicyViolations') {
return from(this.loaderService.hideLoader()).pipe(
switchMap(() => this.continueWithCriticalPolicyViolation(err.policyViolations)),
switchMap((continueWithTransaction) => {
if (continueWithTransaction) {
return from(this.loaderService.showLoader()).pipe(switchMap(() => of({ etxn: err.etxn })));
} else {
return throwError('unhandledError');
}
})
);
} else if (err.type === 'policyViolations') {
return from(this.loaderService.hideLoader()).pipe(
switchMap(() => this.continueWithPolicyViolations(err.policyViolations, err.policyActionDescription)),
switchMap((continueWithTransaction) => {
if (continueWithTransaction) {
return from(this.loaderService.showLoader()).pipe(
switchMap(() => of({ etxn: err.etxn, comment: continueWithTransaction.comment }))
);
} else {
return throwError('unhandledError');
}
})
);
} else {
return throwError(err);
}
}),
switchMap(({ etxn, comment }: any) =>
from(this.authService.getEou()).pipe(
switchMap((eou) => {
const comments = [];
this.trackingService.createExpense({
Type: 'Receipt',
Amount: etxn.tx.amount,
Currency: etxn.tx.currency,
Category: etxn.tx.org_category,
Time_Spent: this.getTimeSpentOnPage() + ' secs',
Used_Autofilled_Project:
etxn.tx.project_id && this.presetProjectId && etxn.tx.project_id === this.presetProjectId,
Used_Autofilled_CostCenter:
etxn.tx.cost_center_id && this.presetCostCenterId && etxn.tx.cost_center_id === this.presetCostCenterId,
});
if (comment) {
comments.push(comment);
}
let reportId;
if (
this.fg.value.report &&
(etxn.tx.policy_amount === null || (etxn.tx.policy_amount && !(etxn.tx.policy_amount < 0.0001)))
) {
reportId = this.fg.value.report.rp.id;
}
let entry;
if (this.fg.value.add_to_new_report) {
entry = {
comments,
reportId,
};
}
if (entry) {
return from(
this.transactionsOutboxService.addEntryAndSync(etxn.tx, etxn.dataUrls, entry.comments, entry.reportId)
);
} else {
return of(
this.transactionsOutboxService.addEntry(etxn.tx, etxn.dataUrls, comments, reportId, null, null)
);
}
})
)
),
finalize(() => {
this.savePerDiemLoader = false;
this.saveAndNextPerDiemLoader = false;
this.saveAndPrevPerDiemLoader = false;
})
);
}
trackPolicyCorrections() {
this.isCriticalPolicyViolated$.subscribe((isCriticalPolicyViolated) => {
if (isCriticalPolicyViolated && this.fg.dirty) {
this.trackingService.policyCorrection({ Violation: 'Critical', Mode: 'Edit Expense' });
}
});
this.comments$
.pipe(
map((estatuses) => estatuses.filter((estatus) => estatus.st_org_user_id === 'POLICY')),
map((policyViolationComments) => policyViolationComments.length > 0)
)
.subscribe((policyViolated) => {
if (policyViolated && this.fg.dirty) {
this.trackingService.policyCorrection({ Violation: 'Regular', Mode: 'Edit Expense' });
}
});
}
editExpense(redirectedFrom) {
this.savePerDiemLoader = redirectedFrom === 'SAVE_PER_DIEM';
this.saveAndNextPerDiemLoader = redirectedFrom === 'SAVE_AND_NEXT_PERDIEM';
this.saveAndPrevPerDiemLoader = redirectedFrom === 'SAVE_AND_PREV_PERDIEM';
this.trackPolicyCorrections();
const customFields$ = this.customInputs$.pipe(
take(1),
map((customInputs) =>
customInputs.map((customInput, i) => ({
id: customInput.id,
mandatory: customInput.mandatory,
name: customInput.name,
options: customInput.options,
placeholder: customInput.placeholder,
prefix: customInput.prefix,
type: customInput.type,
value: this.fg.value.custom_inputs[i].value,
}))
)
);
return from(this.generateEtxnFromFg(this.etxn$, customFields$)).pipe(
switchMap((etxn) => {
const policyViolations$ = this.checkPolicyViolation(etxn).pipe(shareReplay(1));
return policyViolations$.pipe(
map(this.policyService.getCriticalPolicyRules),
switchMap((policyViolations) => {
if (policyViolations.length > 0) {
return throwError({
type: 'criticalPolicyViolations',
policyViolations,
etxn,
});
} else {
return policyViolations$;
}
}),
map((policyViolations: any) => [
this.policyService.getPolicyRules(policyViolations),
policyViolations &&
policyViolations.transaction_desired_state &&
policyViolations.transaction_desired_state.action_description,
]),
switchMap(([policyViolations, policyActionDescription]) => {
if (policyViolations.length > 0) {
return throwError({
type: 'policyViolations',
policyViolations,
policyActionDescription,
etxn,
});
} else {
return of({ etxn });
}
})
);
}),
catchError((err) => {
if (err.status === 500) {
return this.generateEtxnFromFg(this.etxn$, customFields$).pipe(map((etxn) => ({ etxn })));
}
if (err.type === 'criticalPolicyViolations') {
return from(this.continueWithCriticalPolicyViolation(err.policyViolations)).pipe(
switchMap((continueWithTransaction) => {
if (continueWithTransaction) {
return from(this.loaderService.showLoader()).pipe(switchMap(() => of({ etxn: err.etxn })));
} else {
return throwError('unhandledError');
}
})
);
} else if (err.type === 'policyViolations') {
return from(this.continueWithPolicyViolations(err.policyViolations, err.policyActionDescription)).pipe(
switchMap((continueWithTransaction) => {
if (continueWithTransaction) {
return from(this.loaderService.showLoader()).pipe(
switchMap(() => of({ etxn: err.etxn, comment: continueWithTransaction.comment }))
);
} else {
return throwError('unhandledError');
}
})
);
} else {
return throwError(err);
}
}),
switchMap(({ etxn, comment }: any) =>
this.etxn$.pipe(
switchMap((txnCopy) => {
if (!isEqual(etxn.tx, txnCopy)) {
// only if the form is edited
this.trackingService.editExpense({
Type: 'Per Diem',
Amount: etxn.tx.amount,
Currency: etxn.tx.currency,
Category: etxn.tx.org_category,
Time_Spent: this.getTimeSpentOnPage() + ' secs',
Used_Autofilled_Project:
etxn.tx.project_id && this.presetProjectId && etxn.tx.project_id === this.presetProjectId,
Used_Autofilled_CostCenter:
etxn.tx.cost_center_id &&
this.presetCostCenterId &&
etxn.tx.cost_center_id === this.presetCostCenterId,
});
} else {
// tracking expense closed without editing
this.trackingService.viewExpense({ Type: 'Per Diem' });
}
return this.transactionService.upsert(etxn.tx).pipe(
switchMap((txn) => this.transactionService.getETxn(txn.id)),
map((savedEtxn) => savedEtxn && savedEtxn.tx),
switchMap((tx) => {
const selectedReportId = this.fg.value.report && this.fg.value.report.rp && this.fg.value.report.rp.id;
const criticalPolicyViolated = isNumber(etxn.tx_policy_amount) && etxn.tx_policy_amount < 0.0001;
if (!criticalPolicyViolated) {
if (!txnCopy.tx.report_id && selectedReportId) {
return this.reportService.addTransactions(selectedReportId, [tx.id]).pipe(
tap(() => this.trackingService.addToExistingReportAddEditExpense()),
map(() => tx)
);
}
if (txnCopy.tx.report_id && selectedReportId && selectedReportId !== txnCopy.tx.report_id) {
return this.reportService.removeTransaction(txnCopy.tx.report_id, tx.id).pipe(
switchMap(() => this.reportService.addTransactions(selectedReportId, [tx.id])),
tap(() => this.trackingService.addToExistingReportAddEditExpense()),
map(() => tx)
);
}
if (txnCopy.tx.report_id && !selectedReportId) {
return this.reportService.removeTransaction(txnCopy.tx.report_id, tx.id).pipe(
tap(() => this.trackingService.removeFromExistingReportEditExpense()),
map(() => tx)
);
}
}
return of(null).pipe(map(() => tx));
}),
switchMap((tx) => {
const criticalPolicyViolated = isNumber(etxn.tx_policy_amount) && etxn.tx_policy_amount < 0.0001;
if (!criticalPolicyViolated && etxn.tx.user_review_needed) {
return this.transactionService.review(tx.id).pipe(map(() => tx));
}
return of(null).pipe(map(() => tx));
})
);
}),
switchMap((txn) => {
if (comment) {
return this.statusService.findLatestComment(txn.id, 'transactions', txn.org_user_id).pipe(
switchMap((result) => {
if (result !== comment) {
return this.statusService.post('transactions', txn.id, { comment }, true).pipe(map(() => txn));
} else {
return of(txn);
}
})
);
} else {
return of(txn);
}
})
)
),
finalize(() => {
this.savePerDiemLoader = false;
this.saveAndNextPerDiemLoader = false;
this.saveAndPrevPerDiemLoader = false;
})
);
}
addToNewReport(txnId: string) {
const that = this;
from(this.loaderService.showLoader())
.pipe(
switchMap(() => this.transactionService.getEtxn(txnId)),
finalize(() => from(this.loaderService.hideLoader()))
)
.subscribe((etxn) => {
const criticalPolicyViolated = isNumber(etxn.tx_policy_amount) && etxn.tx_policy_amount < 0.0001;
if (!criticalPolicyViolated) {
that.router.navigate(['/', 'enterprise', 'my_create_report', { txn_ids: JSON.stringify([txnId]) }]);
} else {
that.goBack();
}
});
}
showAddToReportSuccessToast(reportId: string) {
const toastMessageData = {
message: 'Per diem expense added to report successfully',
redirectionText: 'View Report',
};
const expensesAddedToReportSnackBar = this.matSnackBar.openFromComponent(ToastMessageComponent, {
...this.snackbarProperties.setSnackbarProperties('success', toastMessageData),
panelClass: ['msb-success-with-camera-icon'],
});
this.trackingService.showToastMessage({ ToastContent: toastMessageData.message });
expensesAddedToReportSnackBar.onAction().subscribe(() => {
this.router.navigate(['/', 'enterprise', 'my_view_report', { id: reportId, navigateBack: true }]);
});
}
savePerDiem() {
const that = this;
that
.checkIfInvalidPaymentMode()
.pipe(take(1))
.subscribe((invalidPaymentMode) => {
if (that.fg.valid && !invalidPaymentMode) {
if (that.mode === 'add') {
that.addExpense('SAVE_PER_DIEM').subscribe((res: any) => {
if (that.fg.controls.add_to_new_report.value && res && res.transaction) {
this.addToNewReport(res.transaction.id);
} else if (that.fg.value.report && that.fg.value.report.rp && that.fg.value.report.rp.id) {
that.goBack();
this.showAddToReportSuccessToast(that.fg.value.report.rp.id);
} else {
that.goBack();
}
});
} else {
that.editExpense('SAVE_PER_DIEM').subscribe((res) => {
if (that.fg.controls.add_to_new_report.value && res && res.id) {
this.addToNewReport(res.id);
} else if (that.fg.value.report && that.fg.value.report.rp && that.fg.value.report.rp.id) {
that.goBack();
this.showAddToReportSuccessToast(that.fg.value.report.rp.id);
} else {
that.goBack();
}
});
}
} else {
that.fg.markAllAsTouched();
const formContainer = that.formContainer.nativeElement as HTMLElement;
if (formContainer) {
const invalidElement = formContainer.querySelector('.ng-invalid');
if (invalidElement) {
invalidElement.scrollIntoView({
behavior: 'smooth',
});
}
}
if (invalidPaymentMode) {
that.invalidPaymentMode = true;
setTimeout(() => {
that.invalidPaymentMode = false;
}, 3000);
}
}
});
}
async reloadCurrentRoute() {
await this.router.navigateByUrl('/enterprise/my_expenses', { skipLocationChange: true });
await this.router.navigate(['/', 'enterprise', 'add_edit_per_diem']);
}
saveAndNewExpense() {
const that = this;
that
.checkIfInvalidPaymentMode()
.pipe(take(1))
.subscribe((invalidPaymentMode) => {
if (that.fg.valid && !invalidPaymentMode) {
if (that.mode === 'add') {
that.addExpense('SAVE_AND_NEW_PER_DIEM').subscribe(() => {
this.reloadCurrentRoute();
});
} else {
// to do edit
that.editExpense('SAVE_AND_NEW_PER_DIEM').subscribe(() => {
that.goBack();
});
}
} else {
that.fg.markAllAsTouched();
const formContainer = that.formContainer.nativeElement as HTMLElement;
if (formContainer) {
const invalidElement = formContainer.querySelector('.ng-invalid');
if (invalidElement) {
invalidElement.scrollIntoView({
behavior: 'smooth',
});
}
}
if (invalidPaymentMode) {
that.invalidPaymentMode = true;
setTimeout(() => {
that.invalidPaymentMode = false;
}, 3000);
}
}
});
}
saveExpenseAndGotoPrev() {
const that = this;
if (that.fg.valid) {
if (that.mode === 'add') {
that.addExpense('SAVE_AND_PREV_PERDIEM').subscribe(() => {
if (+this.activeIndex === 0) {
that.close();
} else {
that.goToPrev();
}
});
} else {
// to do edit
that.editExpense('SAVE_AND_PREV_PERDIEM').subscribe(() => {
if (+this.activeIndex === 0) {
that.close();
} else {
that.goToPrev();
}
});
}
} else {
that.fg.markAllAsTouched();
const formContainer = that.formContainer.nativeElement as HTMLElement;
if (formContainer) {
const invalidElement = formContainer.querySelector('.ng-invalid');
if (invalidElement) {
invalidElement.scrollIntoView({
behavior: 'smooth',
});
}
}
}
}
saveExpenseAndGotoNext() {
const that = this;
if (that.fg.valid) {
if (that.mode === 'add') {
that.addExpense('SAVE_AND_NEXT_PERDIEM').subscribe(() => {
if (+this.activeIndex === this.reviewList.length - 1) {
that.close();
} else {
that.goToNext();
}
});
} else {
// to do edit
that.editExpense('SAVE_AND_NEXT_PERDIEM').subscribe(() => {
if (+this.activeIndex === this.reviewList.length - 1) {
that.close();
} else {
that.goToNext();
}
});
}
} else {
that.fg.markAllAsTouched();
const formContainer = that.formContainer.nativeElement as HTMLElement;
if (formContainer) {
const invalidElement = formContainer.querySelector('.ng-invalid');
if (invalidElement) {
invalidElement.scrollIntoView({
behavior: 'smooth',
});
}
}
}
}
close() {
this.router.navigate(['/', 'enterprise', 'my_expenses']);
}
async deleteExpense(reportId?: string) {
const id = this.activatedRoute.snapshot.params.id;
const removeExpenseFromReport = this.activatedRoute.snapshot.params.remove_from_report;
const header = reportId && removeExpenseFromReport ? 'Remove Per Diem' : 'Delete Per Diem';
const body =
reportId && removeExpenseFromReport
? 'Are you sure you want to remove this Per Diem expense from this report?'
: 'Are you sure you want to delete this Per Diem expense?';
const ctaText = reportId && removeExpenseFromReport ? 'Remove' : 'Delete';
const ctaLoadingText = reportId && removeExpenseFromReport ? 'Removing' : 'Deleting';
const deletePopover = await this.popoverController.create({
component: FyDeleteDialogComponent,
cssClass: 'delete-dialog',
backdropDismiss: false,
componentProps: {
header,
body,
ctaText,
ctaLoadingText,
deleteMethod: () => {
if (reportId && removeExpenseFromReport) {
return this.reportService.removeTransaction(reportId, id);
}
return this.transactionService.delete(id);
},
},
});
await deletePopover.present();
const { data } = await deletePopover.onDidDismiss();
if (data && data.status === 'success') {
if (this.reviewList && this.reviewList.length && +this.activeIndex < this.reviewList.length - 1) {
this.reviewList.splice(+this.activeIndex, 1);
this.transactionService.getETxn(this.reviewList[+this.activeIndex]).subscribe((etxn) => {
this.goToTransaction(etxn, this.reviewList, +this.activeIndex);
});
} else {
this.router.navigate(['/', 'enterprise', 'my_expenses']);
}
} else {
if (this.mode === 'add') {
this.trackingService.clickDeleteExpense({ Type: 'Per Diem' });
}
}
}
scrollCommentsIntoView() {
if (this.commentsContainer) {
const commentsContainer = this.commentsContainer.nativeElement as HTMLElement;
if (commentsContainer) {
commentsContainer.scrollIntoView({
behavior: 'smooth',
block: 'nearest',
inline: 'start',
});
}
}
}
getFormValidationErrors() {
Object.keys(this.fg.controls).forEach((key) => {
const controlErrors: ValidationErrors = this.fg.get(key).errors;
if (controlErrors != null) {
Object.keys(controlErrors).forEach((keyError) => {
console.log('Key control: ' + key + ', keyError: ' + keyError + ', err value: ', controlErrors[keyError]);
});
}
});
}
async openCommentsModal() {
const etxn = await this.etxn$.toPromise();
const modal = await this.modalController.create({
component: ViewCommentComponent,
componentProps: {
objectType: 'transactions',
objectId: etxn.tx.id,
},
presentingElement: await this.modalController.getTop(),
...this.modalProperties.getModalDefaultProperties(),
});
await modal.present();
const { data } = await modal.onDidDismiss();
if (data && data.updated) {
this.trackingService.addComment();
} else {
this.trackingService.viewComment();
}
}
async setDuplicateBoxOpen(value) {
this.duplicateBoxOpen = value;
if (value) {
await this.trackingService.duplicateDetectionUserActionExpand({
Page: this.mode === 'add' ? 'Add Per Diem' : 'Edit Per Diem',
});
} else {
await this.trackingService.duplicateDetectionUserActionCollapse({
Page: this.mode === 'add' ? 'Add Per Diem' : 'Edit Per Diem',
});
}
}
hideFields() {
this.trackingService.hideMoreClicked({
source: 'Add Edit Per Diem page',
});
this.isExpandedView = false;
}
showFields() {
this.trackingService.showMoreClicked({
source: 'Add Edit Per Diem page',
});
this.isExpandedView = true;
}
getPolicyDetails() {
const txnId = this.activatedRoute.snapshot.params.id;
if (txnId) {
from(this.policyService.getPolicyViolationRules(txnId))
.pipe()
.subscribe((details) => {
this.policyDetails = details;
});
}
}
}
| AddEditPerDiemPage |
imageuploadcommand.js | /**
* @license Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
* For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
/* globals console */
import VirtualTestEditor from '@ckeditor/ckeditor5-core/tests/_utils/virtualtesteditor';
import Plugin from '@ckeditor/ckeditor5-core/src/plugin';
import ImageUploadCommand from '../../src/imageupload/imageuploadcommand';
import FileRepository from '@ckeditor/ckeditor5-upload/src/filerepository';
import { createNativeFileMock, UploadAdapterMock } from '@ckeditor/ckeditor5-upload/tests/_utils/mocks';
import { setData as setModelData, getData as getModelData } from '@ckeditor/ckeditor5-engine/src/dev-utils/model';
import Image from '../../src/image/imageediting';
import Paragraph from '@ckeditor/ckeditor5-paragraph/src/paragraph';
describe( 'ImageUploadCommand', () => {
let editor, command, model, fileRepository;
class | extends Plugin {
init() {
fileRepository = this.editor.plugins.get( FileRepository );
fileRepository.createUploadAdapter = loader => {
return new UploadAdapterMock( loader );
};
}
}
beforeEach( () => {
return VirtualTestEditor
.create( {
plugins: [ FileRepository, Image, Paragraph, UploadAdapterPluginMock ]
} )
.then( newEditor => {
editor = newEditor;
model = editor.model;
command = new ImageUploadCommand( editor );
const schema = model.schema;
schema.extend( 'image', { allowAttributes: 'uploadId' } );
} );
} );
afterEach( () => {
sinon.restore();
return editor.destroy();
} );
describe( 'isEnabled', () => {
it( 'should be true when the selection directly in the root', () => {
model.enqueueChange( 'transparent', () => {
setModelData( model, '[]' );
command.refresh();
expect( command.isEnabled ).to.be.true;
} );
} );
it( 'should be true when the selection is in empty block', () => {
setModelData( model, '<paragraph>[]</paragraph>' );
expect( command.isEnabled ).to.be.true;
} );
it( 'should be true when the selection directly in a paragraph', () => {
setModelData( model, '<paragraph>foo[]</paragraph>' );
expect( command.isEnabled ).to.be.true;
} );
it( 'should be true when the selection directly in a block', () => {
model.schema.register( 'block', { inheritAllFrom: '$block' } );
model.schema.extend( '$text', { allowIn: 'block' } );
editor.conversion.for( 'downcast' ).elementToElement( { model: 'block', view: 'block' } );
setModelData( model, '<block>foo[]</block>' );
expect( command.isEnabled ).to.be.true;
} );
it( 'should be true when the selection is on other image', () => {
setModelData( model, '[<image></image>]' );
expect( command.isEnabled ).to.be.true;
} );
it( 'should be false when the selection is inside other image', () => {
model.schema.register( 'caption', {
allowIn: 'image',
allowContentOf: '$block',
isLimit: true
} );
editor.conversion.for( 'downcast' ).elementToElement( { model: 'caption', view: 'figcaption' } );
setModelData( model, '<image><caption>[]</caption></image>' );
expect( command.isEnabled ).to.be.false;
} );
it( 'should be false when the selection is on other object', () => {
model.schema.register( 'object', { isObject: true, allowIn: '$root' } );
editor.conversion.for( 'downcast' ).elementToElement( { model: 'object', view: 'object' } );
setModelData( model, '[<object></object>]' );
expect( command.isEnabled ).to.be.false;
} );
it( 'should be true when the selection is inside block element inside isLimit element which allows image', () => {
model.schema.register( 'table', { allowWhere: '$block', isLimit: true, isObject: true, isBlock: true } );
model.schema.register( 'tableRow', { allowIn: 'table', isLimit: true } );
model.schema.register( 'tableCell', { allowIn: 'tableRow', isLimit: true, isSelectable: true } );
model.schema.extend( '$block', { allowIn: 'tableCell' } );
editor.conversion.for( 'downcast' ).elementToElement( { model: 'table', view: 'table' } );
editor.conversion.for( 'downcast' ).elementToElement( { model: 'tableRow', view: 'tableRow' } );
editor.conversion.for( 'downcast' ).elementToElement( { model: 'tableCell', view: 'tableCell' } );
setModelData( model, '<table><tableRow><tableCell><paragraph>foo[]</paragraph></tableCell></tableRow></table>' );
expect( command.isEnabled ).to.be.true;
} );
it( 'should be false when schema disallows image', () => {
model.schema.register( 'block', { inheritAllFrom: '$block' } );
model.schema.extend( 'paragraph', { allowIn: 'block' } );
// Block image in block.
model.schema.addChildCheck( ( context, childDefinition ) => {
if ( childDefinition.name === 'image' && context.last.name === 'block' ) {
return false;
}
} );
editor.conversion.for( 'downcast' ).elementToElement( { model: 'block', view: 'block' } );
setModelData( model, '<block><paragraph>[]</paragraph></block>' );
expect( command.isEnabled ).to.be.false;
} );
} );
describe( 'execute()', () => {
it( 'should insert image at selection position as other widgets', () => {
const file = createNativeFileMock();
setModelData( model, '<paragraph>f[o]o</paragraph>' );
command.execute( { file } );
const id = fileRepository.getLoader( file ).id;
expect( getModelData( model ) )
.to.equal( `[<image uploadId="${ id }"></image>]<paragraph>foo</paragraph>` );
} );
it( 'should use parent batch', () => {
const file = createNativeFileMock();
setModelData( model, '<paragraph>[]foo</paragraph>' );
model.change( writer => {
expect( writer.batch.operations ).to.length( 0 );
command.execute( { file } );
expect( writer.batch.operations ).to.length.above( 0 );
} );
} );
it( 'should not insert image nor crash when image could not be inserted', () => {
const file = createNativeFileMock();
model.schema.register( 'other', {
allowIn: '$root',
isLimit: true
} );
model.schema.extend( '$text', { allowIn: 'other' } );
editor.conversion.for( 'downcast' ).elementToElement( { model: 'other', view: 'p' } );
setModelData( model, '<other>[]</other>' );
command.execute( { file } );
expect( getModelData( model ) ).to.equal( '<other>[]</other>' );
} );
it( 'should not throw when upload adapter is not set (FileRepository will log a warning anyway)', () => {
const file = createNativeFileMock();
fileRepository.createUploadAdapter = undefined;
const consoleWarnStub = sinon.stub( console, 'warn' );
setModelData( model, '<paragraph>fo[]o</paragraph>' );
expect( () => {
command.execute( { file } );
} ).to.not.throw();
expect( getModelData( model ) ).to.equal( '<paragraph>fo[]o</paragraph>' );
sinon.assert.calledOnce( consoleWarnStub );
} );
} );
} );
| UploadAdapterPluginMock |
config.rs | use std::fs::File;
use std::io;
use std::path::Path;
use std::path::PathBuf;
use serde_json;
use atomic_file;
#[derive(Serialize, Deserialize, Clone)]
pub(crate) struct Config {
pub(crate) secret_store_type: SecretStoreType,
}
#[derive(Serialize, Deserialize, Copy, Clone)]
pub(crate) enum SecretStoreType {
File,
SecretService,
}
#[derive(Clone)]
pub(crate) struct ConfigFilePath(PathBuf);
impl ConfigFilePath {
pub fn from_dir(dir: &Path) -> ConfigFilePath {
ConfigFilePath(dir.join("config.json"))
}
pub fn get(&self) -> &Path {
&self.0
}
}
pub(crate) struct ConfigFile {
config: Config,
path: ConfigFilePath,
}
impl ConfigFile {
pub fn create(path: ConfigFilePath, config: Config) -> io::Result<ConfigFile> {
let config_file = ConfigFile { config, path };
config_file.save()?;
Ok(config_file)
}
pub fn load(path: ConfigFilePath) -> io::Result<Option<ConfigFile>> {
match File::open(path.get()) {
Ok(file) => serde_json::from_reader(file)
.map_err(|e| e.into())
.map(|config| Some(ConfigFile { config, path })),
Err(ref err) if err.kind() == io::ErrorKind::NotFound => Ok(None),
Err(err) => Err(err.into()),
}
}
| pub fn save(&self) -> io::Result<()> {
atomic_file::overwrite(self.path(), move |writer| {
serde_json::to_writer_pretty(writer, &self.config).map_err(|e| e.into())
})
}
fn path(&self) -> &Path {
self.path.get()
}
}
#[cfg(test)]
mod tests {
extern crate tempdir;
use super::*;
use self::tempdir::TempDir;
#[test]
fn load_with_not_existing_file_returns_none() {
let temp_dir = TempDir::new("config_tests").unwrap();
let file_path = ConfigFilePath::from_dir(temp_dir.path());
assert!(ConfigFile::load(file_path).unwrap().is_none());
}
} | pub fn config(&self) -> &Config {
&self.config
}
|
triggers-details.component.js | import React from 'react';
import { PropTypes } from 'prop-types';
import { connect } from 'react-redux';
import get from 'lodash/fp/get';
import map from 'lodash/fp/map'; | import notification from '@stackstorm/module-notification';
import setTitle from '@stackstorm/module-title';
import { Link } from 'react-router-dom';
import { Toggle } from '@stackstorm/module-forms/button.component';
import Highlight from '@stackstorm/module-highlight';
import {
PanelDetails,
DetailsHeader,
DetailsSwitch,
DetailsBody,
DetailsPanel,
DetailsPanelHeading,
DetailsPanelBody,
DetailsLine,
DetailsLineNote,
} from '@stackstorm/module-panel';
import InstancePanel from './panels/instances';
@connect(
({
instances,
triggers,
sensors,
}, props) => ({
trigger: triggers.find(trigger => props.id === trigger.ref),
sensor: sensors[props.id],
instances,
}),
(dispatch, props) => ({
onComponentUpdate: () => dispatch({
type: 'FETCH_INSTANCES',
promise: api.request({ path: '/triggerinstances', query: {
trigger_type: props.id,
limit: 10,
} }),
}),
onToggleEnable: (sensor) => dispatch({
type: 'TOGGLE_ENABLE',
promise: api.request({ method: 'put', path: `/sensortypes/${sensor.ref}`}, { ...sensor, enabled: !sensor.enabled }),
}).catch((err) => {
notification.error(`Unable to update sensor "${sensor.ref}".`, { err });
throw err;
}),
}),
(state, dispatch, props) => ({
...props,
...state,
...dispatch,
onSelect: () => dispatch.onSelect(state.trigger),
onToggleEnable: () => dispatch.onToggleEnable(state.sensor),
})
)
export default class TriggersDetails extends React.Component {
static propTypes = {
handleNavigate: PropTypes.func.isRequired,
id: PropTypes.string,
section: PropTypes.string,
trigger: PropTypes.object,
sensor: PropTypes.object,
instances: PropTypes.array,
onComponentUpdate: PropTypes.func,
onToggleEnable: PropTypes.func,
}
componentDidMount() {
this.props.onComponentUpdate && this.props.onComponentUpdate();
}
componentDidUpdate(prevProps) {
if (prevProps.id === this.props.id) {
return;
}
this.props.onComponentUpdate && this.props.onComponentUpdate();
}
handleSection(section) {
const id = this.props.trigger.ref;
return this.props.handleNavigate({ id, section });
}
handleToggleEnable() {
return this.props.onToggleEnable();
}
render() {
const { section, trigger, sensor, instances } = this.props;
if (!trigger) {
return null;
}
const parameters = flow([
get('parameters_schema.properties'),
toPairs,
map(([ key, value ]) => {
return <DetailsLine key={key} name={key} value={get('description')(value) || ''} />;
}),
])(trigger);
const payload = flow([
get('payload_schema.properties'),
toPairs,
map(([ key, value ]) => {
return <DetailsLine key={key} name={key} value={get('description')(value) || ''} />;
}),
])(trigger);
setTitle([ trigger.ref, 'Trigger Types' ]);
return (
<PanelDetails data-test="details">
<DetailsHeader
title={( <Link to={`/triggers/${trigger.ref}`}>{trigger.ref}</Link> )}
subtitle={trigger.description}
{...(sensor && { status: sensor.enabled ? 'enabled' : 'disabled' })}
/>
<DetailsSwitch
sections={[
{ label: 'General', path: 'general' },
{ label: 'Instances', path: 'instances' },
{ label: 'Code', path: 'code', className: [ 'icon-code', 'st2-details__switch-button' ] },
]}
current={section}
onChange={({ path }) => this.handleSection(path)}
/>
<DetailsBody>
{ section === 'general' ? (
<form name="form">
{ sensor ? (
<DetailsPanel>
<DetailsPanelHeading title="Sensor" />
<DetailsPanelBody>
<DetailsLine name="ref" value={sensor.ref} />
<Toggle title="enabled" value={sensor.enabled} onChange={() => this.handleToggleEnable(sensor)} />
</DetailsPanelBody>
</DetailsPanel>
) : null }
<DetailsPanel>
<DetailsPanelHeading title="Parameters" />
<DetailsPanelBody>
{ parameters.length > 0 ? (
parameters
) : (
<DetailsLineNote>
Trigger type does not have any parameters
</DetailsLineNote>
) }
</DetailsPanelBody>
</DetailsPanel>
<DetailsPanel>
<DetailsPanelHeading title="Payload" />
<DetailsPanelBody>
{ payload.length > 0 ? (
payload
) : (
<DetailsLineNote>
Trigger type does not have any payload
</DetailsLineNote>
) }
</DetailsPanelBody>
</DetailsPanel>
</form>
) : null }
{ section === 'code' ? (
<DetailsPanel data-test="trigger_code">
<Highlight lines={20} code={trigger} />
</DetailsPanel>
) : null }
{ section === 'instances' ? (
<InstancePanel instances={instances} key="panel" data-test="trigger_instances" />
) : null }
</DetailsBody>
</PanelDetails>
);
}
} | import flow from 'lodash/fp/flow';
import toPairs from 'lodash/fp/toPairs';
import api from '@stackstorm/module-api'; |
plugins.go | package api
import (
"sort"
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/bus"
m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/plugins"
"github.com/grafana/grafana/pkg/setting"
)
func GetPluginList(c *m.ReqContext) Response {
typeFilter := c.Query("type")
enabledFilter := c.Query("enabled")
embeddedFilter := c.Query("embedded")
coreFilter := c.Query("core")
pluginSettingsMap, err := plugins.GetPluginSettings(c.OrgId)
if err != nil {
return Error(500, "Failed to get list of plugins", err)
}
result := make(dtos.PluginList, 0)
for _, pluginDef := range plugins.Plugins {
// filter out app sub plugins
if embeddedFilter == "0" && pluginDef.IncludedInAppId != "" {
continue
}
// filter out core plugins
if coreFilter == "0" && pluginDef.IsCorePlugin {
continue
}
// filter on type
if typeFilter != "" && typeFilter != pluginDef.Type {
continue
}
listItem := dtos.PluginListItem{
Id: pluginDef.Id,
Name: pluginDef.Name,
Type: pluginDef.Type,
Info: &pluginDef.Info,
LatestVersion: pluginDef.GrafanaNetVersion,
HasUpdate: pluginDef.GrafanaNetHasUpdate,
DefaultNavUrl: pluginDef.DefaultNavUrl,
State: pluginDef.State,
}
if pluginSetting, exists := pluginSettingsMap[pluginDef.Id]; exists {
listItem.Enabled = pluginSetting.Enabled
listItem.Pinned = pluginSetting.Pinned
}
if listItem.DefaultNavUrl == "" || !listItem.Enabled {
listItem.DefaultNavUrl = setting.AppSubUrl + "/plugins/" + listItem.Id + "/edit"
}
// filter out disabled
if enabledFilter == "1" && !listItem.Enabled {
continue
}
// filter out built in data sources
if ds, exists := plugins.DataSources[pluginDef.Id]; exists {
if ds.BuiltIn {
continue
}
}
result = append(result, listItem)
}
sort.Sort(result)
return JSON(200, result)
}
func GetPluginSettingByID(c *m.ReqContext) Response {
pluginID := c.Params(":pluginId")
def, exists := plugins.Plugins[pluginID]
if !exists {
return Error(404, "Plugin not found, no installed plugin with that id", nil)
}
dto := &dtos.PluginSetting{
Type: def.Type,
Id: def.Id,
Name: def.Name,
Info: &def.Info,
Dependencies: &def.Dependencies,
Includes: def.Includes,
BaseUrl: def.BaseUrl,
Module: def.Module,
DefaultNavUrl: def.DefaultNavUrl,
LatestVersion: def.GrafanaNetVersion,
HasUpdate: def.GrafanaNetHasUpdate,
State: def.State,
}
query := m.GetPluginSettingByIdQuery{PluginId: pluginID, OrgId: c.OrgId}
if err := bus.Dispatch(&query); err != nil {
if err != m.ErrPluginSettingNotFound {
return Error(500, "Failed to get plugin settings", nil)
}
} else {
dto.Enabled = query.Result.Enabled
dto.Pinned = query.Result.Pinned
dto.JsonData = query.Result.JsonData
}
return JSON(200, dto)
}
func | (c *m.ReqContext, cmd m.UpdatePluginSettingCmd) Response {
pluginID := c.Params(":pluginId")
cmd.OrgId = c.OrgId
cmd.PluginId = pluginID
if _, ok := plugins.Apps[cmd.PluginId]; !ok {
return Error(404, "Plugin not installed.", nil)
}
if err := bus.Dispatch(&cmd); err != nil {
return Error(500, "Failed to update plugin setting", err)
}
return Success("Plugin settings updated")
}
func GetPluginDashboards(c *m.ReqContext) Response {
pluginID := c.Params(":pluginId")
list, err := plugins.GetPluginDashboards(c.OrgId, pluginID)
if err != nil {
if notfound, ok := err.(plugins.PluginNotFoundError); ok {
return Error(404, notfound.Error(), nil)
}
return Error(500, "Failed to get plugin dashboards", err)
}
return JSON(200, list)
}
func GetPluginMarkdown(c *m.ReqContext) Response {
pluginID := c.Params(":pluginId")
name := c.Params(":name")
content, err := plugins.GetPluginMarkdown(pluginID, name)
if err != nil {
if notfound, ok := err.(plugins.PluginNotFoundError); ok {
return Error(404, notfound.Error(), nil)
}
return Error(500, "Could not get markdown file", err)
}
resp := Respond(200, content)
resp.Header("Content-Type", "text/plain; charset=utf-8")
return resp
}
func ImportDashboard(c *m.ReqContext, apiCmd dtos.ImportDashboardCommand) Response {
cmd := plugins.ImportDashboardCommand{
OrgId: c.OrgId,
User: c.SignedInUser,
PluginId: apiCmd.PluginId,
Path: apiCmd.Path,
Inputs: apiCmd.Inputs,
Overwrite: apiCmd.Overwrite,
Dashboard: apiCmd.Dashboard,
}
if err := bus.Dispatch(&cmd); err != nil {
return Error(500, "Failed to import dashboard", err)
}
return JSON(200, cmd.Result)
}
| UpdatePluginSetting |
routines.py | from __future__ import annotations
import math
import warnings
from collections.abc import Iterable
from functools import partial, reduce, wraps
from numbers import Integral, Real
import numpy as np
from tlz import concat, interleave, sliding_window
from dask.array import chunk
from dask.array.core import (
Array,
asanyarray,
asarray,
blockwise,
broadcast_arrays,
broadcast_shapes,
broadcast_to,
concatenate,
elemwise,
from_array,
implements,
is_scalar_for_elemwise,
map_blocks,
stack,
tensordot_lookup,
)
from dask.array.creation import arange, diag, empty, indices, tri
from dask.array.einsumfuncs import einsum # noqa
from dask.array.numpy_compat import _numpy_120
from dask.array.reductions import reduction
from dask.array.ufunc import multiply, sqrt
from dask.array.utils import (
array_safe,
asarray_safe,
meta_from_array,
safe_wraps,
validate_axis,
)
from dask.array.wrap import ones
from dask.base import is_dask_collection, tokenize
from dask.core import flatten
from dask.delayed import Delayed, unpack_collections
from dask.highlevelgraph import HighLevelGraph
from dask.utils import apply, derived_from, funcname, is_arraylike, is_cupy_type
# save built-in for histogram functions which use range as a kwarg.
_range = range
@derived_from(np)
def array(x, dtype=None, ndmin=None, *, like=None):
if not _numpy_120 and like is not None:
raise RuntimeError("The use of ``like`` required NumPy >= 1.20")
x = asarray(x, like=like)
while ndmin is not None and x.ndim < ndmin:
x = x[None, :]
if dtype is not None and x.dtype != dtype:
x = x.astype(dtype)
return x
@derived_from(np)
def result_type(*args):
args = [a if is_scalar_for_elemwise(a) else a.dtype for a in args]
return np.result_type(*args)
@derived_from(np)
def atleast_3d(*arys):
new_arys = []
for x in arys:
x = asanyarray(x)
if x.ndim == 0:
x = x[None, None, None]
elif x.ndim == 1:
x = x[None, :, None]
elif x.ndim == 2:
x = x[:, :, None]
new_arys.append(x)
if len(new_arys) == 1:
return new_arys[0]
else:
return new_arys
@derived_from(np)
def atleast_2d(*arys):
new_arys = []
for x in arys:
x = asanyarray(x)
if x.ndim == 0:
x = x[None, None]
elif x.ndim == 1:
x = x[None, :]
new_arys.append(x)
if len(new_arys) == 1:
return new_arys[0]
else:
return new_arys
@derived_from(np)
def atleast_1d(*arys):
new_arys = []
for x in arys:
x = asanyarray(x)
if x.ndim == 0:
x = x[None]
new_arys.append(x)
if len(new_arys) == 1:
return new_arys[0]
else:
return new_arys
@derived_from(np)
def vstack(tup, allow_unknown_chunksizes=False):
if isinstance(tup, Array):
raise NotImplementedError(
"``vstack`` expects a sequence of arrays as the first argument"
)
tup = tuple(atleast_2d(x) for x in tup)
return concatenate(tup, axis=0, allow_unknown_chunksizes=allow_unknown_chunksizes)
@derived_from(np)
def hstack(tup, allow_unknown_chunksizes=False):
if isinstance(tup, Array):
raise NotImplementedError(
"``hstack`` expects a sequence of arrays as the first argument"
)
if all(x.ndim == 1 for x in tup):
return concatenate(
tup, axis=0, allow_unknown_chunksizes=allow_unknown_chunksizes
)
else:
return concatenate(
tup, axis=1, allow_unknown_chunksizes=allow_unknown_chunksizes
)
@derived_from(np)
def dstack(tup, allow_unknown_chunksizes=False):
if isinstance(tup, Array):
raise NotImplementedError(
"``dstack`` expects a sequence of arrays as the first argument"
)
tup = tuple(atleast_3d(x) for x in tup)
return concatenate(tup, axis=2, allow_unknown_chunksizes=allow_unknown_chunksizes)
@derived_from(np)
def swapaxes(a, axis1, axis2):
if axis1 == axis2:
return a
if axis1 < 0:
axis1 = axis1 + a.ndim
if axis2 < 0:
axis2 = axis2 + a.ndim
ind = list(range(a.ndim))
out = list(ind)
out[axis1], out[axis2] = axis2, axis1
return blockwise(np.swapaxes, out, a, ind, axis1=axis1, axis2=axis2, dtype=a.dtype)
@derived_from(np)
def transpose(a, axes=None):
if axes:
if len(axes) != a.ndim:
raise ValueError("axes don't match array")
axes = tuple(d + a.ndim if d < 0 else d for d in axes)
else:
axes = tuple(range(a.ndim))[::-1]
return blockwise(
np.transpose, axes, a, tuple(range(a.ndim)), dtype=a.dtype, axes=axes
)
def flip(m, axis=None):
"""
Reverse element order along axis.
Parameters
----------
m : array_like
Input array.
axis : None or int or tuple of ints, optional
Axis or axes to reverse element order of. None will reverse all axes.
Returns
-------
dask.array.Array
The flipped array.
"""
m = asanyarray(m)
sl = m.ndim * [slice(None)]
if axis is None:
axis = range(m.ndim)
if not isinstance(axis, Iterable):
axis = (axis,)
try:
for ax in axis:
sl[ax] = slice(None, None, -1)
except IndexError as e:
raise ValueError(
f"`axis` of {str(axis)} invalid for {str(m.ndim)}-D array"
) from e
sl = tuple(sl)
return m[sl]
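# Illustrative sketch (not part of the library): how ``flip`` behaves on a
# small chunked array. The input array and chunk size below are assumptions
# chosen only for demonstration.
#
# >>> import dask.array as da
# >>> import numpy as np
# >>> x = da.from_array(np.arange(6).reshape(2, 3), chunks=2)
# >>> da.flip(x, axis=1).compute()
# array([[2, 1, 0],
#        [5, 4, 3]])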
@derived_from(np)
def flipud(m):
return flip(m, 0)
@derived_from(np)
def fliplr(m):
return flip(m, 1)
@derived_from(np)
def rot90(m, k=1, axes=(0, 1)):
axes = tuple(axes)
if len(axes) != 2:
raise ValueError("len(axes) must be 2.")
m = asanyarray(m)
if axes[0] == axes[1] or np.absolute(axes[0] - axes[1]) == m.ndim:
raise ValueError("Axes must be different.")
if axes[0] >= m.ndim or axes[0] < -m.ndim or axes[1] >= m.ndim or axes[1] < -m.ndim:
raise ValueError(f"Axes={axes} out of range for array of ndim={m.ndim}.")
k %= 4
if k == 0:
return m[:]
if k == 2:
return flip(flip(m, axes[0]), axes[1])
axes_list = list(range(0, m.ndim))
(axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], axes_list[axes[0]])
if k == 1:
return transpose(flip(m, axes[1]), axes_list)
else:
# k == 3
return flip(transpose(m, axes_list), axes[1])
def _tensordot(a, b, axes, is_sparse):
x = max([a, b], key=lambda x: x.__array_priority__)
tensordot = tensordot_lookup.dispatch(type(x))
x = tensordot(a, b, axes=axes)
if is_sparse and len(axes[0]) == 1:
return x
else:
ind = [slice(None, None)] * x.ndim
for a in sorted(axes[0]):
ind.insert(a, None)
x = x[tuple(ind)]
return x
def _tensordot_is_sparse(x):
is_sparse = "sparse" in str(type(x._meta))
if is_sparse:
# exclude pydata sparse arrays, no workaround required for these in tensordot
is_sparse = "sparse._coo.core.COO" not in str(type(x._meta))
return is_sparse
@derived_from(np)
def tensordot(lhs, rhs, axes=2):
if not isinstance(lhs, Array):
lhs = from_array(lhs)
if not isinstance(rhs, Array):
rhs = from_array(rhs)
if isinstance(axes, Iterable):
left_axes, right_axes = axes
else:
left_axes = tuple(range(lhs.ndim - axes, lhs.ndim))
right_axes = tuple(range(0, axes))
if isinstance(left_axes, Integral):
left_axes = (left_axes,)
if isinstance(right_axes, Integral):
right_axes = (right_axes,)
if isinstance(left_axes, list):
left_axes = tuple(left_axes)
if isinstance(right_axes, list):
right_axes = tuple(right_axes)
is_sparse = _tensordot_is_sparse(lhs) or _tensordot_is_sparse(rhs)
if is_sparse and len(left_axes) == 1:
concatenate = True
else:
concatenate = False
dt = np.promote_types(lhs.dtype, rhs.dtype)
left_index = list(range(lhs.ndim))
right_index = list(range(lhs.ndim, lhs.ndim + rhs.ndim))
out_index = left_index + right_index
adjust_chunks = {}
for l, r in zip(left_axes, right_axes):
out_index.remove(right_index[r])
right_index[r] = left_index[l]
if concatenate:
out_index.remove(left_index[l])
else:
adjust_chunks[left_index[l]] = lambda c: 1
intermediate = blockwise(
_tensordot,
out_index,
lhs,
left_index,
rhs,
right_index,
dtype=dt,
concatenate=concatenate,
adjust_chunks=adjust_chunks,
axes=(left_axes, right_axes),
is_sparse=is_sparse,
)
if concatenate:
return intermediate
else:
return intermediate.sum(axis=left_axes)
@derived_from(np)
def dot(a, b):
return tensordot(a, b, axes=((a.ndim - 1,), (b.ndim - 2,)))
@derived_from(np)
def vdot(a, b):
return dot(a.conj().ravel(), b.ravel())
def _chunk_sum(a, axis=None, dtype=None, keepdims=None):
# Caution: this is not your conventional array-sum: due to the
# special nature of the preceding blockwise contraction, each
# chunk is expected to have exactly the same shape, with a size
# of 1 for the dimension given by `axis` (the reduction axis).
# This makes plain element-wise addition of the arrays possible.
# Besides, the output can simply be squeezed to drop the `axis`
# dimension when keepdims is False.
if type(a) is list:
out = reduce(partial(np.add, dtype=dtype), a)
else:
out = a
if keepdims:
return out
else:
return out.squeeze(axis[0])
def _sum_wo_cat(a, axis=None, dtype=None):
if dtype is None:
dtype = getattr(np.zeros(1, dtype=a.dtype).sum(), "dtype", object)
if a.shape[axis] == 1:
return a.squeeze(axis)
return reduction(
a, _chunk_sum, _chunk_sum, axis=axis, dtype=dtype, concatenate=False
)
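# Illustrative sketch (not part of the library): the element-wise addition that
# ``_chunk_sum`` relies on. Each "chunk" below stands for a block that already
# has size 1 along the reduction axis, so summing blocks is a plain elementwise
# add followed by squeezing that axis. The example arrays are assumptions.
#
# >>> import numpy as np
# >>> chunks = [np.array([[1, 2]]), np.array([[3, 4]])]  # both shaped (1, 2)
# >>> np.add(chunks[0], chunks[1]).squeeze(0)
# array([4, 6])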
def _matmul(a, b):
xp = np
if is_cupy_type(a):
# This branch appears to be unnecessary since cupy
# version 9.0. See the following link:
# https://github.com/dask/dask/pull/8423#discussion_r768291271
# But it remains here for backward-compatibility.
# Consider removing it in a future version of dask.
import cupy
xp = cupy
chunk = xp.matmul(a, b)
# Since we have performed the contraction via xp.matmul,
# but blockwise expects all dimensions back (including the
# contraction axis in the 2nd-to-last position of the
# output), we must put that axis back ourselves:
return chunk[..., xp.newaxis, :]
@derived_from(np)
def matmul(a, b):
a = asanyarray(a)
b = asanyarray(b)
if a.ndim == 0 or b.ndim == 0:
raise ValueError("`matmul` does not support scalars.")
a_is_1d = False
if a.ndim == 1:
a_is_1d = True
a = a[np.newaxis, :]
b_is_1d = False
if b.ndim == 1:
b_is_1d = True
b = b[:, np.newaxis]
if a.ndim < b.ndim:
a = a[(b.ndim - a.ndim) * (np.newaxis,)]
elif a.ndim > b.ndim:
b = b[(a.ndim - b.ndim) * (np.newaxis,)]
# out_ind includes all dimensions to prevent contraction
# in the blockwise below. We set the last two dimensions
# of the output to the contraction axis and the 2nd
# (last) dimension of b in that order
out_ind = tuple(range(a.ndim + 1))
# lhs_ind includes `a`/LHS dimensions
lhs_ind = tuple(range(a.ndim))
# on `b`/RHS everything above 2nd dimension, is the same
# as `a`, -2 dimension is "contracted" with the last dimension
# of `a`, last dimension of `b` is `b` specific
rhs_ind = tuple(range(a.ndim - 2)) + (lhs_ind[-1], a.ndim)
out = blockwise(
_matmul,
out_ind,
a,
lhs_ind,
b,
rhs_ind,
adjust_chunks={lhs_ind[-1]: 1},
dtype=result_type(a, b),
concatenate=False,
)
# Because contraction + concatenate in blockwise leads to high
# memory footprints, we want to avoid them. Instead we will perform
# blockwise (without contraction) followed by reduction. More about
# this issue: https://github.com/dask/dask/issues/6874
# We will also perform the reduction without concatenation
out = _sum_wo_cat(out, axis=-2)
if a_is_1d:
out = out.squeeze(-2)
if b_is_1d:
out = out.squeeze(-1)
return out
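# Illustrative sketch (not part of the library): ``matmul`` on chunked inputs,
# exercising the blockwise-then-reduce path described above. Shapes and chunk
# sizes are assumptions chosen only for the example.
#
# >>> import dask.array as da
# >>> import numpy as np
# >>> a = da.from_array(np.arange(6).reshape(2, 3), chunks=(2, 2))
# >>> b = da.from_array(np.arange(12).reshape(3, 4), chunks=(2, 2))
# >>> da.matmul(a, b).compute().shape
# (2, 4)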
@derived_from(np)
def outer(a, b):
a = a.flatten()
b = b.flatten()
dtype = np.outer(a.dtype.type(), b.dtype.type()).dtype
return blockwise(np.outer, "ij", a, "i", b, "j", dtype=dtype)
def _inner_apply_along_axis(arr, func1d, func1d_axis, func1d_args, func1d_kwargs):
return np.apply_along_axis(func1d, func1d_axis, arr, *func1d_args, **func1d_kwargs)
@derived_from(np)
def apply_along_axis(func1d, axis, arr, *args, dtype=None, shape=None, **kwargs):
"""
This is a blocked variant of :func:`numpy.apply_along_axis` implemented via
:func:`dask.array.map_blocks`
Notes
-----
If either `dtype` or `shape` is not provided, Dask attempts to
determine them by calling `func1d` on a dummy array. This may produce
incorrect values for `dtype` or `shape`, so we recommend providing them.
"""
arr = asarray(arr)
# Verify that axis is valid and throw an error otherwise
axis = len(arr.shape[:axis])
# If necessary, infer dtype and shape of the output of func1d by calling it on test data.
if shape is None or dtype is None:
test_data = np.ones((1,), dtype=arr.dtype)
test_result = np.array(func1d(test_data, *args, **kwargs))
if shape is None:
shape = test_result.shape
if dtype is None:
dtype = test_result.dtype
# Rechunk so that func1d is applied over the full axis.
arr = arr.rechunk(
arr.chunks[:axis] + (arr.shape[axis : axis + 1],) + arr.chunks[axis + 1 :]
)
# Map func1d over the data to get the result
# Adds other axes as needed.
result = arr.map_blocks(
_inner_apply_along_axis,
name=funcname(func1d) + "-along-axis",
dtype=dtype,
chunks=(arr.chunks[:axis] + shape + arr.chunks[axis + 1 :]),
drop_axis=axis,
new_axis=list(range(axis, axis + len(shape), 1)),
func1d=func1d,
func1d_axis=axis,
func1d_args=args,
func1d_kwargs=kwargs,
)
return result
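# Illustrative sketch (not part of the library): passing ``dtype`` and ``shape``
# explicitly so the output metadata is not inferred from a one-element dummy
# call, as recommended in the Notes above. The array and reducer are assumptions
# for demonstration only.
#
# >>> import dask.array as da
# >>> import numpy as np
# >>> x = da.from_array(np.arange(12).reshape(3, 4), chunks=(1, 4))
# >>> da.apply_along_axis(np.sum, 1, x, dtype=x.dtype, shape=()).compute()
# array([ 6, 22, 38])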
@derived_from(np)
def apply_over_axes(func, a, axes):
# Validate arguments
a = asarray(a)
try:
axes = tuple(axes)
except TypeError:
axes = (axes,)
sl = a.ndim * (slice(None),)
# Compute using `apply_along_axis`.
result = a
for i in axes:
result = apply_along_axis(func, i, result, 0)
# Restore original dimensionality or error.
if result.ndim == (a.ndim - 1):
result = result[sl[:i] + (None,)]
elif result.ndim != a.ndim:
raise ValueError(
"func must either preserve dimensionality of the input"
" or reduce it by one."
)
return result
@derived_from(np)
def ptp(a, axis=None):
return a.max(axis=axis) - a.min(axis=axis)
@derived_from(np)
def diff(a, n=1, axis=-1, prepend=None, append=None):
a = asarray(a)
n = int(n)
axis = int(axis)
if n == 0:
return a
if n < 0:
raise ValueError("order must be non-negative but got %d" % n)
combined = []
if prepend is not None:
prepend = asarray_safe(prepend, like=meta_from_array(a))
if prepend.ndim == 0:
shape = list(a.shape)
shape[axis] = 1
prepend = broadcast_to(prepend, tuple(shape))
combined.append(prepend)
combined.append(a)
if append is not None:
append = asarray_safe(append, like=meta_from_array(a))
if append.ndim == 0:
shape = list(a.shape)
shape[axis] = 1
append = np.broadcast_to(append, tuple(shape))
combined.append(append)
if len(combined) > 1:
a = concatenate(combined, axis)
sl_1 = a.ndim * [slice(None)]
sl_2 = a.ndim * [slice(None)]
sl_1[axis] = slice(1, None)
sl_2[axis] = slice(None, -1)
sl_1 = tuple(sl_1)
sl_2 = tuple(sl_2)
r = a
for i in range(n):
r = r[sl_1] - r[sl_2]
return r
@derived_from(np)
def ediff1d(ary, to_end=None, to_begin=None):
ary = asarray(ary)
aryf = ary.flatten()
r = aryf[1:] - aryf[:-1]
r = [r]
if to_begin is not None:
r = [asarray(to_begin).flatten()] + r
if to_end is not None:
r = r + [asarray(to_end).flatten()]
r = concatenate(r)
return r
def _gradient_kernel(x, block_id, coord, axis, array_locs, grad_kwargs):
"""
x: nd-array
array of one block
coord: 1d-array or scalar
coordinate along which the gradient is computed.
axis: int
axis along which the gradient is computed
array_locs:
actual location along axis. None if coordinate is scalar
grad_kwargs:
keyword arguments to be passed to np.gradient
"""
block_loc = block_id[axis]
if array_locs is not None:
coord = coord[array_locs[0][block_loc] : array_locs[1][block_loc]]
grad = np.gradient(x, coord, axis=axis, **grad_kwargs)
return grad
@derived_from(np)
def gradient(f, *varargs, axis=None, **kwargs):
f = asarray(f)
kwargs["edge_order"] = math.ceil(kwargs.get("edge_order", 1))
if kwargs["edge_order"] > 2:
raise ValueError("edge_order must be less than or equal to 2.")
drop_result_list = False
if axis is None:
axis = tuple(range(f.ndim))
elif isinstance(axis, Integral):
drop_result_list = True
axis = (axis,)
axis = validate_axis(axis, f.ndim)
if len(axis) != len(set(axis)):
raise ValueError("duplicate axes not allowed")
axis = tuple(ax % f.ndim for ax in axis)
if varargs == ():
varargs = (1,)
if len(varargs) == 1:
varargs = len(axis) * varargs
if len(varargs) != len(axis):
raise TypeError(
"Spacing must either be a single scalar, or a scalar / 1d-array per axis"
)
if issubclass(f.dtype.type, (np.bool8, Integral)):
f = f.astype(float)
elif issubclass(f.dtype.type, Real) and f.dtype.itemsize < 4:
f = f.astype(float)
results = []
for i, ax in enumerate(axis):
for c in f.chunks[ax]:
if np.min(c) < kwargs["edge_order"] + 1:
raise ValueError(
"Chunk size must be larger than edge_order + 1. "
"Minimum chunk for axis {} is {}. Rechunk to "
"proceed.".format(ax, np.min(c))
)
if np.isscalar(varargs[i]):
array_locs = None
else:
if isinstance(varargs[i], Array):
raise NotImplementedError("dask array coordinates are not supported.")
# coordinate position for each block taking overlap into account
chunk = np.array(f.chunks[ax])
array_loc_stop = np.cumsum(chunk) + 1
array_loc_start = array_loc_stop - chunk - 2
array_loc_stop[-1] -= 1
array_loc_start[0] = 0
array_locs = (array_loc_start, array_loc_stop)
results.append(
f.map_overlap(
_gradient_kernel,
dtype=f.dtype,
depth={j: 1 if j == ax else 0 for j in range(f.ndim)},
boundary="none",
coord=varargs[i],
axis=ax,
array_locs=array_locs,
grad_kwargs=kwargs,
)
)
if drop_result_list:
results = results[0]
return results
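# Illustrative sketch (not part of the library): ``gradient`` with an explicit
# coordinate array, the case handled by ``array_locs`` and ``map_overlap``
# above. The sample values, chunking, and axis are assumptions for the example.
#
# >>> import dask.array as da
# >>> import numpy as np
# >>> f = da.from_array(np.array([0.0, 1.0, 4.0, 9.0, 16.0]), chunks=3)
# >>> da.gradient(f, np.arange(5.0), axis=0).compute()
# array([1., 2., 4., 6., 7.])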
def _bincount_agg(bincounts, dtype, **kwargs):
if not isinstance(bincounts, list):
return bincounts
n = max(map(len, bincounts))
out = np.zeros_like(bincounts[0], shape=n, dtype=dtype)
for b in bincounts:
out[: len(b)] += b
return out
@derived_from(np)
def bincount(x, weights=None, minlength=0, split_every=None):
if x.ndim != 1:
raise ValueError("Input array must be one dimensional. Try using x.ravel()")
if weights is not None:
if weights.chunks != x.chunks:
raise ValueError("Chunks of input array x and weights must match.")
token = tokenize(x, weights, minlength)
args = [x, "i"]
if weights is not None:
meta = array_safe(np.bincount([1], weights=[1]), like=meta_from_array(x))
args.extend([weights, "i"])
else:
meta = array_safe(np.bincount([]), like=meta_from_array(x))
if minlength == 0:
output_size = (np.nan,)
else:
output_size = (minlength,)
chunked_counts = blockwise(
partial(np.bincount, minlength=minlength), "i", *args, token=token, meta=meta
)
chunked_counts._chunks = (
output_size * len(chunked_counts.chunks[0]),
*chunked_counts.chunks[1:],
)
from dask.array.reductions import _tree_reduce
output = _tree_reduce(
chunked_counts,
aggregate=partial(_bincount_agg, dtype=meta.dtype),
axis=(0,),
keepdims=True,
dtype=meta.dtype,
split_every=split_every,
concatenate=False,
)
output._chunks = (output_size, *chunked_counts.chunks[1:])
output._meta = meta
return output
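# Illustrative sketch (not part of the library): ``bincount`` with ``minlength``
# so every block reports a fixed-size count vector before the tree reduction
# combines them. Input values are assumptions chosen for demonstration only.
#
# >>> import dask.array as da
# >>> import numpy as np
# >>> x = da.from_array(np.array([0, 1, 1, 3]), chunks=2)
# >>> da.bincount(x, minlength=5).compute()
# array([1, 2, 0, 1, 0])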
@derived_from(np)
def | (a, bins, right=False):
bins = asarray_safe(bins, like=meta_from_array(a))
dtype = np.digitize(asarray_safe([0], like=bins), bins, right=False).dtype
return a.map_blocks(np.digitize, dtype=dtype, bins=bins, right=right)
def _searchsorted_block(x, y, side):
res = np.searchsorted(x, y, side=side)
# 0 is only correct for the first block of a, but blockwise doesn't have a way
# of telling which block is being operated on (unlike map_blocks),
# so set all 0 values to a special value and set back at the end of searchsorted
res[res == 0] = -1
return res[np.newaxis, :]
@derived_from(np)
def searchsorted(a, v, side="left", sorter=None):
if a.ndim != 1:
raise ValueError("Input array a must be one dimensional")
if sorter is not None:
raise NotImplementedError(
"da.searchsorted with a sorter argument is not supported"
)
# call np.searchsorted for each pair of blocks in a and v
meta = np.searchsorted(a._meta, v._meta)
out = blockwise(
_searchsorted_block,
list(range(v.ndim + 1)),
a,
[0],
v,
list(range(1, v.ndim + 1)),
side,
None,
meta=meta,
adjust_chunks={0: 1}, # one row for each block in a
)
# add offsets to take account of the position of each block within the array a
a_chunk_sizes = array_safe((0, *a.chunks[0]), like=meta_from_array(a))
a_chunk_offsets = np.cumsum(a_chunk_sizes)[:-1]
a_chunk_offsets = a_chunk_offsets[(Ellipsis,) + v.ndim * (np.newaxis,)]
a_offsets = asarray(a_chunk_offsets, chunks=1)
out = where(out < 0, out, out + a_offsets)
# combine the results from each block (of a)
out = out.max(axis=0)
# fix up any -1 values
out[out == -1] = 0
return out
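# Illustrative sketch (not part of the library): ``searchsorted`` across chunk
# boundaries, which is where the per-block offsets and the -1 placeholder above
# matter. The inputs and chunk sizes are assumptions chosen for the example.
#
# >>> import dask.array as da
# >>> import numpy as np
# >>> a = da.from_array(np.array([1, 3, 5, 7, 9]), chunks=2)
# >>> v = da.from_array(np.array([0, 4, 9]), chunks=3)
# >>> da.searchsorted(a, v).compute()
# array([0, 2, 4])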
# TODO: dask linspace doesn't support delayed values
def _linspace_from_delayed(start, stop, num=50):
linspace_name = "linspace-" + tokenize(start, stop, num)
(start_ref, stop_ref, num_ref), deps = unpack_collections([start, stop, num])
if len(deps) == 0:
return np.linspace(start, stop, num=num)
linspace_dsk = {(linspace_name, 0): (np.linspace, start_ref, stop_ref, num_ref)}
linspace_graph = HighLevelGraph.from_collections(
linspace_name, linspace_dsk, dependencies=deps
)
chunks = ((np.nan,),) if is_dask_collection(num) else ((num,),)
return Array(linspace_graph, linspace_name, chunks, dtype=float)
def _block_hist(x, bins, range=None, weights=None):
return np.histogram(x, bins, range=range, weights=weights)[0][np.newaxis]
def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):
"""
Blocked variant of :func:`numpy.histogram`.
Parameters
----------
a : dask.array.Array
Input data; the histogram is computed over the flattened
array. If the ``weights`` argument is used, the chunks of
``a`` are accessed to check chunking compatibility between
``a`` and ``weights``. If ``weights`` is ``None``, a
:py:class:`dask.dataframe.Series` object can be passed as
input data.
bins : int or sequence of scalars, optional
Either an iterable specifying the ``bins``, or the number of ``bins``
together with a ``range`` argument, is required; computing ``min`` and
``max`` over blocked arrays is an expensive operation that must be
performed explicitly.
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines a monotonically increasing array of bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
normed : bool, optional
This is equivalent to the ``density`` argument, but produces incorrect
results for unequal bin widths. It should not be used.
weights : dask.array.Array, optional
A dask.array.Array of weights, of the same block structure as ``a``. Each value in
``a`` only contributes its associated weight towards the bin count
(instead of 1). If ``density`` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the ``normed`` keyword if given.
If ``density`` is True, ``bins`` cannot be a single-number delayed
value. It must be a concrete number, or a (possibly-delayed)
array/sequence of the bin edges.
Returns
-------
hist : dask Array
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : dask Array of dtype float
Return the bin edges ``(length(hist)+1)``.
Examples
--------
Using number of bins and range:
>>> import dask.array as da
>>> import numpy as np
>>> x = da.from_array(np.arange(10000), chunks=10)
>>> h, bins = da.histogram(x, bins=10, range=[0, 10000])
>>> bins
array([ 0., 1000., 2000., 3000., 4000., 5000., 6000., 7000.,
8000., 9000., 10000.])
>>> h.compute()
array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000])
Explicitly specifying the bins:
>>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000]))
>>> bins
array([ 0, 5000, 10000])
>>> h.compute()
array([5000, 5000])
"""
if isinstance(bins, Array):
scalar_bins = bins.ndim == 0
# ^ `np.ndim` is not implemented by Dask array.
elif isinstance(bins, Delayed):
scalar_bins = bins._length is None or bins._length == 1
else:
scalar_bins = np.ndim(bins) == 0
if bins is None or (scalar_bins and range is None):
raise ValueError(
"dask.array.histogram requires either specifying "
"bins as an iterable or specifying both a range and "
"the number of bins"
)
if weights is not None and weights.chunks != a.chunks:
raise ValueError("Input array and weights must have the same chunked structure")
if normed is not False:
raise ValueError(
"The normed= keyword argument has been deprecated. "
"Please use density instead. "
"See the numpy.histogram docstring for more information."
)
if density and scalar_bins and isinstance(bins, (Array, Delayed)):
raise NotImplementedError(
"When `density` is True, `bins` cannot be a scalar Dask object. "
"It must be a concrete number or a (possibly-delayed) array/sequence of bin edges."
)
for argname, val in [("bins", bins), ("range", range), ("weights", weights)]:
if not isinstance(val, (Array, Delayed)) and is_dask_collection(val):
raise TypeError(
"Dask types besides Array and Delayed are not supported "
"for `histogram`. For argument `{}`, got: {!r}".format(argname, val)
)
if range is not None:
try:
if len(range) != 2:
raise ValueError(
f"range must be a sequence or array of length 2, but got {len(range)} items"
)
if isinstance(range, (Array, np.ndarray)) and range.shape != (2,):
raise ValueError(
f"range must be a 1-dimensional array of two items, but got an array of shape {range.shape}"
)
except TypeError:
raise TypeError(
f"Expected a sequence or array for range, not {range}"
) from None
token = tokenize(a, bins, range, weights, density)
name = "histogram-sum-" + token
if scalar_bins:
bins = _linspace_from_delayed(range[0], range[1], bins + 1)
# ^ NOTE `range[1]` is safe because of the above check, and the initial check
# that range must not be None if `scalar_bins`
else:
if not isinstance(bins, (Array, np.ndarray)):
bins = asarray(bins)
if bins.ndim != 1:
raise ValueError(
f"bins must be a 1-dimensional array or sequence, got shape {bins.shape}"
)
(bins_ref, range_ref), deps = unpack_collections([bins, range])
# Map the histogram to all bins, forming a 2D array of histograms, stacked for each chunk
if weights is None:
dsk = {
(name, i, 0): (_block_hist, k, bins_ref, range_ref)
for i, k in enumerate(flatten(a.__dask_keys__()))
}
dtype = np.histogram([])[0].dtype
else:
a_keys = flatten(a.__dask_keys__())
w_keys = flatten(weights.__dask_keys__())
dsk = {
(name, i, 0): (_block_hist, k, bins_ref, range_ref, w)
for i, (k, w) in enumerate(zip(a_keys, w_keys))
}
dtype = weights.dtype
deps = (a,) + deps
if weights is not None:
deps += (weights,)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)
# Turn graph into a 2D Array of shape (nchunks, nbins)
nchunks = len(list(flatten(a.__dask_keys__())))
nbins = bins.size - 1 # since `bins` is 1D
chunks = ((1,) * nchunks, (nbins,))
mapped = Array(graph, name, chunks, dtype=dtype)
# Sum over chunks to get the final histogram
n = mapped.sum(axis=0)
# We need to replicate normed and density options from numpy
if density is not None:
if density:
db = asarray(np.diff(bins).astype(float), chunks=n.chunks)
return n / db / n.sum(), bins
else:
return n, bins
else:
return n, bins
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, density=None):
"""Blocked variant of :func:`numpy.histogram2d`.
Parameters
----------
x : dask.array.Array
An array containing the `x`-coordinates of the points to be
histogrammed.
y : dask.array.Array
An array containing the `y`-coordinates of the points to be
histogrammed.
bins : sequence of arrays describing bin edges, int, or sequence of ints
The bin specification. See the `bins` argument description for
:py:func:`histogramdd` for a complete description of all
possible bin configurations (this function is a 2D specific
version of histogramdd).
range : tuple of pairs, optional.
The leftmost and rightmost edges of the bins along each
dimension when integers are passed to `bins`; of the form:
((xmin, xmax), (ymin, ymax)).
normed : bool, optional
An alias for the density argument that behaves identically. To
avoid confusion with the broken argument in the `histogram`
function, `density` should be preferred.
weights : dask.array.Array, optional
An array of values weighing each sample in the input data. The
chunks of the weights must be identical to the chunking along
the 0th (row) axis of the data sample.
density : bool, optional
If False (the default) return the number of samples in each
bin. If True, the returned array represents the probability
density function at each bin.
Returns
-------
dask.array.Array
The values of the histogram.
dask.array.Array
The edges along the `x`-dimension.
dask.array.Array
The edges along the `y`-dimension.
See Also
--------
histogram
histogramdd
Examples
--------
>>> import dask.array as da
>>> x = da.array([2, 4, 2, 4, 2, 4])
>>> y = da.array([2, 2, 4, 4, 2, 4])
>>> bins = 2
>>> range = ((0, 6), (0, 6))
>>> h, xedges, yedges = da.histogram2d(x, y, bins=bins, range=range)
>>> h
dask.array<sum-aggregate, shape=(2, 2), dtype=float64, chunksize=(2, 2), chunktype=numpy.ndarray>
>>> xedges
dask.array<array, shape=(3,), dtype=float64, chunksize=(3,), chunktype=numpy.ndarray>
>>> h.compute()
array([[2., 1.],
[1., 2.]])
"""
counts, edges = histogramdd(
(x, y),
bins=bins,
range=range,
normed=normed,
weights=weights,
density=density,
)
return counts, edges[0], edges[1]
def _block_histogramdd_rect(sample, bins, range, weights):
"""Call numpy.histogramdd for a blocked/chunked calculation.
Slurps the result into an additional outer axis; this new axis
will be used to stack chunked calls of the numpy function and add
them together later.
Returns
-------
:py:object:`np.ndarray`
NumPy array with an additional outer dimension.
"""
return np.histogramdd(sample, bins, range=range, weights=weights)[0:1]
def _block_histogramdd_multiarg(*args):
"""Call numpy.histogramdd for a multi argument blocked/chunked calculation.
Slurps the result into an additional outer axis; this new axis
will be used to stack chunked calls of the numpy function and add
them together later.
The last three arguments _must be_ (bins, range, weights).
The difference between this function and
_block_histogramdd_rect is that here we expect the sample
to be composed of multiple arguments (multiple 1D arrays, each one
representing a coordinate), while _block_histogramdd_rect
expects a single rectangular (2D array where columns are
coordinates) sample.
"""
bins, range, weights = args[-3:]
sample = args[:-3]
return np.histogramdd(sample, bins=bins, range=range, weights=weights)[0:1]
def histogramdd(sample, bins, range=None, normed=None, weights=None, density=None):
"""Blocked variant of :func:`numpy.histogramdd`.
Chunking of the input data (``sample``) is only allowed along the
0th (row) axis (the axis corresponding to the total number of
samples). Data chunked along the 1st (column) axis is not
compatible with this function. If weights are used, they must be
chunked along the 0th axis identically to the input sample.
An example setup for a three dimensional histogram, where the
sample shape is ``(8, 3)`` and weights are shape ``(8,)``, sample
chunks would be ``((4, 4), (3,))`` and the weights chunks would be
``((4, 4),)``, giving a table of the structure:
+-------+-----------------------+-----------+
| | sample (8 x 3) | weights |
+=======+=====+=====+=====+=====+=====+=====+
| chunk | row | `x` | `y` | `z` | row | `w` |
+-------+-----+-----+-----+-----+-----+-----+
| | 0 | 5 | 6 | 6 | 0 | 0.5 |
| +-----+-----+-----+-----+-----+-----+
| | 1 | 8 | 9 | 2 | 1 | 0.8 |
| 0 +-----+-----+-----+-----+-----+-----+
| | 2 | 3 | 3 | 1 | 2 | 0.3 |
| +-----+-----+-----+-----+-----+-----+
| | 3 | 2 | 5 | 6 | 3 | 0.7 |
+-------+-----+-----+-----+-----+-----+-----+
| | 4 | 3 | 1 | 1 | 4 | 0.3 |
| +-----+-----+-----+-----+-----+-----+
| | 5 | 3 | 2 | 9 | 5 | 1.3 |
| 1 +-----+-----+-----+-----+-----+-----+
| | 6 | 8 | 1 | 5 | 6 | 0.8 |
| +-----+-----+-----+-----+-----+-----+
| | 7 | 3 | 5 | 3 | 7 | 0.7 |
+-------+-----+-----+-----+-----+-----+-----+
If the sample 0th dimension and weight 0th (row) dimension are
chunked differently, a ``ValueError`` will be raised. If
coordinate groupings ((x, y, z) trios) are separated by a chunk
boundary, then a ``ValueError`` will be raised. We suggest that you
rechunk your data if it is of that form.
The chunks property of the data (and optional weights) are used to
check for compatibility with the blocked algorithm (as described
above); therefore, you must call `to_dask_array` on a collection
from ``dask.dataframe``, i.e. :class:`dask.dataframe.Series` or
:class:`dask.dataframe.DataFrame`.
The function is also compatible with `x`, `y`, and `z` being
individual 1D arrays with equal chunking. In that case, the data
should be passed as a tuple: ``histogramdd((x, y, z), ...)``
Parameters
----------
sample : dask.array.Array (N, D) or sequence of dask.array.Array
Multidimensional data to be histogrammed.
Note the unusual interpretation of a sample when it is a
sequence of dask Arrays:
* When a (N, D) dask Array, each row is an entry in the sample
(coordinate in D dimensional space).
* When a sequence of dask Arrays, each element in the sequence
is the array of values for a single coordinate.
bins : sequence of arrays describing bin edges, int, or sequence of ints
The bin specification.
The possible binning configurations are:
* A sequence of arrays describing the monotonically increasing
bin edges along each dimension.
* A single int describing the total number of bins that will
be used in each dimension (this requires the ``range``
argument to be defined).
* A sequence of ints describing the total number of bins to be
used in each dimension (this requires the ``range`` argument
to be defined).
When bins are described by arrays, the rightmost edge is
included. Bins described by arrays also allows for non-uniform
bin widths.
range : sequence of pairs, optional
A sequence of length D, each a (min, max) tuple giving the
outer bin edges to be used if the edges are not given
explicitly in `bins`. If defined, this argument is required to
have an entry for each dimension. Unlike
:func:`numpy.histogramdd`, if `bins` does not define bin
edges, this argument is required (this function will not
automatically use the min and max of the values in a given
dimension because the input data may be lazy in dask).
normed : bool, optional
An alias for the density argument that behaves identically. To
avoid confusion with the broken argument to `histogram`,
`density` should be preferred.
weights : dask.array.Array, optional
An array of values weighing each sample in the input data. The
chunks of the weights must be identical to the chunking along
the 0th (row) axis of the data sample.
density : bool, optional
If ``False`` (default), the returned array represents the
number of samples in each bin. If ``True``, the returned array
represents the probability density function at each bin.
See Also
--------
histogram
Returns
-------
dask.array.Array
The values of the histogram.
list(dask.array.Array)
Sequence of arrays representing the bin edges along each
dimension.
Examples
--------
Computing the histogram in 5 blocks using different bin edges
along each dimension:
>>> import dask.array as da
>>> x = da.random.uniform(0, 1, size=(1000, 3), chunks=(200, 3))
>>> edges = [
... np.linspace(0, 1, 5), # 4 bins in 1st dim
... np.linspace(0, 1, 6), # 5 in the 2nd
... np.linspace(0, 1, 4), # 3 in the 3rd
... ]
>>> h, edges = da.histogramdd(x, bins=edges)
>>> result = h.compute()
>>> result.shape
(4, 5, 3)
Defining the bins by total number and their ranges, along with
using weights:
>>> bins = (4, 5, 3)
>>> ranges = ((0, 1),) * 3 # expands to ((0, 1), (0, 1), (0, 1))
>>> w = da.random.uniform(0, 1, size=(1000,), chunks=x.chunksize[0])
>>> h, edges = da.histogramdd(x, bins=bins, range=ranges, weights=w)
>>> np.isclose(h.sum().compute(), w.sum().compute())
True
Using a sequence of 1D arrays as the input:
>>> x = da.array([2, 4, 2, 4, 2, 4])
>>> y = da.array([2, 2, 4, 4, 2, 4])
>>> z = da.array([4, 2, 4, 2, 4, 2])
>>> bins = ([0, 3, 6],) * 3
>>> h, edges = da.histogramdd((x, y, z), bins)
>>> h
dask.array<sum-aggregate, shape=(2, 2, 2), dtype=float64, chunksize=(2, 2, 2), chunktype=numpy.ndarray>
>>> edges[0]
dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>
>>> h.compute()
array([[[0., 2.],
[0., 1.]],
<BLANKLINE>
[[1., 0.],
[2., 0.]]])
>>> edges[0].compute()
array([0, 3, 6])
>>> edges[1].compute()
array([0, 3, 6])
>>> edges[2].compute()
array([0, 3, 6])
"""
# logic used in numpy.histogramdd to handle normed/density.
if normed is None:
if density is None:
density = False
elif density is None:
# an explicit normed argument was passed, alias it to the new name
density = normed
else:
raise TypeError("Cannot specify both 'normed' and 'density'")
# check if any dask collections (dc) were passed to bins= or
# range=; these are unsupported.
dc_bins = is_dask_collection(bins)
if isinstance(bins, (list, tuple)):
dc_bins = dc_bins or any([is_dask_collection(b) for b in bins])
dc_range = (
any([is_dask_collection(r) for r in range]) if range is not None else False
)
if dc_bins or dc_range:
raise NotImplementedError(
"Passing dask collections to bins=... or range=... is not supported."
)
# generate token and name for task
token = tokenize(sample, bins, range, weights, density)
name = f"histogramdd-sum-{token}"
# N == total number of samples
# D == total number of dimensions
if hasattr(sample, "shape"):
if len(sample.shape) != 2:
raise ValueError("Single array input to histogramdd should be columnar")
else:
_, D = sample.shape
n_chunks = sample.numblocks[0]
rectangular_sample = True
# Require data to be chunked along the first axis only.
if sample.shape[1:] != sample.chunksize[1:]:
raise ValueError("Input array can only be chunked along the 0th axis.")
elif isinstance(sample, (tuple, list)):
rectangular_sample = False
D = len(sample)
n_chunks = sample[0].numblocks[0]
for i in _range(1, D):
if sample[i].chunks != sample[0].chunks:
raise ValueError("All coordinate arrays must be chunked identically.")
else:
raise ValueError(
"Incompatible sample. Must be a 2D array or a sequence of 1D arrays."
)
# Require only Array or Delayed objects for bins, range, and weights.
for argname, val in [("bins", bins), ("range", range), ("weights", weights)]:
if not isinstance(val, (Array, Delayed)) and is_dask_collection(val):
raise TypeError(
"Dask types besides Array and Delayed are not supported "
"for `histogramdd`. For argument `{}`, got: {!r}".format(argname, val)
)
# Require that the chunking of the sample and weights are compatible.
if weights is not None:
if rectangular_sample and weights.chunks[0] != sample.chunks[0]:
raise ValueError(
"Input array and weights must have the same shape "
"and chunk structure along the first dimension."
)
elif not rectangular_sample and weights.numblocks[0] != n_chunks:
raise ValueError(
"Input arrays and weights must have the same shape "
"and chunk structure."
)
# if bins is a list or tuple, make sure its length is the same
# as the number of dimensions.
if isinstance(bins, (list, tuple)):
if len(bins) != D:
raise ValueError(
"The dimension of bins must be equal to the dimension of the sample."
)
# if range is defined, check that it's the right length and also a
# sequence of pairs.
if range is not None:
if len(range) != D:
raise ValueError(
"range argument requires one entry, a min max pair, per dimension."
)
if not all(len(r) == 2 for r in range):
raise ValueError("range argument should be a sequence of pairs")
# If bins is a single int, create a tuple of len `D` containing `bins`.
if isinstance(bins, int):
bins = (bins,) * D
# we will return the edges to mimic the NumPy API (we also use the
# edges later as a way to calculate the total number of bins).
if all(isinstance(b, int) for b in bins) and all(len(r) == 2 for r in range):
edges = [np.linspace(r[0], r[1], b + 1) for b, r in zip(bins, range)]
else:
edges = [np.asarray(b) for b in bins]
if rectangular_sample:
deps = (sample,)
else:
deps = tuple(sample)
if weights is not None:
w_keys = flatten(weights.__dask_keys__())
deps += (weights,)
dtype = weights.dtype
else:
w_keys = (None,) * n_chunks
dtype = np.histogramdd([])[0].dtype
# This tuple of zeros represents the chunk index along the columns
# (we only allow chunking along the rows).
column_zeros = tuple(0 for _ in _range(D))
# With dsk below, we will construct a (D + 1) dimensional array
# stacked for each chunk. For example, if the histogram is going
# to be 3 dimensions, this creates a stack of cubes (1 cube for
# each sample chunk) that will be collapsed into a final cube (the
# result). Depending on the input data, we can do this in two ways
#
# 1. The rectangular case: when the sample is a single 2D array
# where each column in the sample represents a coordinate of
# the sample).
#
# 2. The sequence-of-arrays case, when the sample is a tuple or
# list of arrays, with each array in that sequence representing
# the entirety of one coordinate of the complete sample.
if rectangular_sample:
sample_keys = flatten(sample.__dask_keys__())
dsk = {
(name, i, *column_zeros): (_block_histogramdd_rect, k, bins, range, w)
for i, (k, w) in enumerate(zip(sample_keys, w_keys))
}
else:
sample_keys = [
list(flatten(sample[i].__dask_keys__())) for i in _range(len(sample))
]
fused_on_chunk_keys = [
tuple(sample_keys[j][i] for j in _range(D)) for i in _range(n_chunks)
]
dsk = {
(name, i, *column_zeros): (
_block_histogramdd_multiarg,
*(*k, bins, range, w),
)
for i, (k, w) in enumerate(zip(fused_on_chunk_keys, w_keys))
}
graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)
all_nbins = tuple((b.size - 1,) for b in edges)
stacked_chunks = ((1,) * n_chunks, *all_nbins)
mapped = Array(graph, name, stacked_chunks, dtype=dtype)
# Finally, sum over the chunk axis to get the final D-dimensional
# result array.
n = mapped.sum(axis=0)
if density:
# compute array of values to divide by the bin width along
# each dimension.
width_divider = np.ones(n.shape)
for i in _range(D):
shape = np.ones(D, int)
shape[i] = width_divider.shape[i]
width_divider *= np.diff(edges[i]).reshape(shape)
width_divider = asarray(width_divider, chunks=n.chunks)
return n / width_divider / n.sum(), edges
return n, [asarray(entry) for entry in edges]
@derived_from(np)
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
# This was copied almost verbatim from np.cov
# See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
# or NUMPY_LICENSE.txt within this directory
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be integer")
# Handles complex arrays too
m = asarray(m)
if y is None:
dtype = np.result_type(m, np.float64)
else:
y = asarray(y)
dtype = np.result_type(m, y, np.float64)
X = array(m, ndmin=2, dtype=dtype)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
N = X.shape[1]
axis = 0
else:
N = X.shape[0]
axis = 1
# check ddof
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
fact = 0.0
if y is not None:
y = array(y, ndmin=2, dtype=dtype)
X = concatenate((X, y), axis)
X = X - X.mean(axis=1 - axis, keepdims=True)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
@derived_from(np)
def corrcoef(x, y=None, rowvar=1):
c = cov(x, y, rowvar)
if c.shape == ():
return c / c
d = diag(c)
d = d.reshape((d.shape[0], 1))
sqr_d = sqrt(d)
return (c / sqr_d) / sqr_d.T
@implements(np.round, np.round_)
@derived_from(np)
def round(a, decimals=0):
return a.map_blocks(np.round, decimals=decimals, dtype=a.dtype)
@implements(np.ndim)
@derived_from(np)
def ndim(a):
return a.ndim
@implements(np.iscomplexobj)
@derived_from(np)
def iscomplexobj(x):
return issubclass(x.dtype.type, np.complexfloating)
def _unique_internal(ar, indices, counts, return_inverse=False):
"""
Helper/wrapper function for :func:`numpy.unique`.
Uses :func:`numpy.unique` to find the unique values for the array chunk.
Given this chunk may not represent the whole array, also take the
``indices`` and ``counts`` that are in 1-to-1 correspondence to ``ar``
and reduce them in the same fashion as ``ar`` is reduced. Namely sum
any counts that correspond to the same value and take the smallest
index that corresponds to the same value.
To handle the inverse mapping from the unique values to the original
array, simply return a NumPy array created with ``arange`` with enough
values to correspond 1-to-1 to the unique values. While there is more
work needed to be done to create the full inverse mapping for the
original array, this provides enough information to generate the
inverse mapping in Dask.
Since Dask prefers to have one array returned from functions like
``blockwise``, some formatting is done to stuff all of the resulting arrays
into one big NumPy structured array. Dask is then able to handle this
object and can split it apart into the separate results on the Dask side,
which then can be passed back to this function in concatenated chunks for
further reduction or can be returned to the user to perform other forms of
analysis.
By handling the problem in this way, it does not matter where a chunk
is in a larger array or how big it is. The chunk can still be computed
in the same way. Also it does not matter if the chunk is the result of
other chunks being run through this function multiple times. The end
result will still be just as accurate using this strategy.
"""
return_index = indices is not None
return_counts = counts is not None
u = np.unique(ar)
dt = [("values", u.dtype)]
if return_index:
dt.append(("indices", np.intp))
if return_inverse:
dt.append(("inverse", np.intp))
if return_counts:
dt.append(("counts", np.intp))
r = np.empty(u.shape, dtype=dt)
r["values"] = u
if return_inverse:
r["inverse"] = np.arange(len(r), dtype=np.intp)
if return_index or return_counts:
for i, v in enumerate(r["values"]):
m = ar == v
if return_index:
indices[m].min(keepdims=True, out=r["indices"][i : i + 1])
if return_counts:
counts[m].sum(keepdims=True, out=r["counts"][i : i + 1])
return r
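# Sketch of the structured result for a made-up chunk: for ar=[1, 1, 2] with
# indices=[0, 1, 2] and counts=[1, 1, 1], the record array holds
# values=[1, 2], indices=[0, 2] (smallest index per value) and counts=[2, 1].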
def unique_no_structured_arr(
ar, return_index=False, return_inverse=False, return_counts=False
):
# A simplified version of `unique`, that allows computing unique for array
# types that don't support structured arrays (such as cupy.ndarray), but
# can only compute values at the moment.
if (
return_index is not False
or return_inverse is not False
or return_counts is not False
):
raise ValueError(
"dask.array.unique does not support `return_index`, `return_inverse` "
"or `return_counts` with array types that don't support structured "
"arrays."
)
ar = ar.ravel()
args = [ar, "i"]
meta = meta_from_array(ar)
out = blockwise(np.unique, "i", *args, meta=meta)
out._chunks = tuple((np.nan,) * len(c) for c in out.chunks)
out_parts = [out]
name = "unique-aggregate-" + out.name
dsk = {
(name, 0): (
(np.unique,)
+ tuple(
(np.concatenate, o.__dask_keys__())
if hasattr(o, "__dask_keys__")
else o
for o in out_parts
)
)
}
dependencies = [o for o in out_parts if hasattr(o, "__dask_keys__")]
graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
chunks = ((np.nan,),)
out = Array(graph, name, chunks, meta=meta)
result = [out]
if len(result) == 1:
result = result[0]
else:
result = tuple(result)
return result
@derived_from(np)
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
# Test whether the downstream library supports structured arrays. If the
# `np.empty_like` call raises a `TypeError`, the downstream library (e.g.,
# CuPy) doesn't support it. In that case we return the
# `unique_no_structured_arr` implementation, otherwise (e.g., NumPy) just
# continue as normal.
try:
meta = meta_from_array(ar)
np.empty_like(meta, dtype=[("a", int), ("b", float)])
except TypeError:
return unique_no_structured_arr(
ar,
return_index=return_index,
return_inverse=return_inverse,
return_counts=return_counts,
)
ar = ar.ravel()
# Run unique on each chunk and collect results in a Dask Array of
# unknown size.
args = [ar, "i"]
out_dtype = [("values", ar.dtype)]
if return_index:
args.extend([arange(ar.shape[0], dtype=np.intp, chunks=ar.chunks[0]), "i"])
out_dtype.append(("indices", np.intp))
else:
args.extend([None, None])
if return_counts:
args.extend([ones((ar.shape[0],), dtype=np.intp, chunks=ar.chunks[0]), "i"])
out_dtype.append(("counts", np.intp))
else:
args.extend([None, None])
out = blockwise(_unique_internal, "i", *args, dtype=out_dtype, return_inverse=False)
out._chunks = tuple((np.nan,) * len(c) for c in out.chunks)
# Take the results from the unique chunks and do the following.
#
# 1. Collect all results as arguments.
# 2. Concatenate each result into one big array.
# 3. Pass all results as arguments to the internal unique again.
#
# TODO: This should be replaced with a tree reduction using this strategy.
# xref: https://github.com/dask/dask/issues/2851
out_parts = [out["values"]]
if return_index:
out_parts.append(out["indices"])
else:
out_parts.append(None)
if return_counts:
out_parts.append(out["counts"])
else:
out_parts.append(None)
name = "unique-aggregate-" + out.name
dsk = {
(name, 0): (
(_unique_internal,)
+ tuple(
(np.concatenate, o.__dask_keys__())
if hasattr(o, "__dask_keys__")
else o
for o in out_parts
)
+ (return_inverse,)
)
}
out_dtype = [("values", ar.dtype)]
if return_index:
out_dtype.append(("indices", np.intp))
if return_inverse:
out_dtype.append(("inverse", np.intp))
if return_counts:
out_dtype.append(("counts", np.intp))
dependencies = [o for o in out_parts if hasattr(o, "__dask_keys__")]
graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
chunks = ((np.nan,),)
out = Array(graph, name, chunks, out_dtype)
# Split out all results to return to the user.
result = [out["values"]]
if return_index:
result.append(out["indices"])
if return_inverse:
# Using the returned unique values and arange of unknown length, find
# each value matching a unique value and replace it with its
# corresponding index or `0`. There should be only one entry for this
# index in axis `1` (the one of unknown length). Reduce axis `1`
# through summing to get an array with known dimensionality and the
# mapping of the original values.
mtches = (ar[:, None] == out["values"][None, :]).astype(np.intp)
result.append((mtches * out["inverse"]).sum(axis=1))
if return_counts:
result.append(out["counts"])
if len(result) == 1:
result = result[0]
else:
result = tuple(result)
return result
def _isin_kernel(element, test_elements, assume_unique=False):
values = np.in1d(element.ravel(), test_elements, assume_unique=assume_unique)
return values.reshape(element.shape + (1,) * test_elements.ndim)
@safe_wraps(getattr(np, "isin", None))
def isin(element, test_elements, assume_unique=False, invert=False):
element = asarray(element)
test_elements = asarray(test_elements)
element_axes = tuple(range(element.ndim))
test_axes = tuple(i + element.ndim for i in range(test_elements.ndim))
mapped = blockwise(
_isin_kernel,
element_axes + test_axes,
element,
element_axes,
test_elements,
test_axes,
adjust_chunks={axis: lambda _: 1 for axis in test_axes},
dtype=bool,
assume_unique=assume_unique,
)
result = mapped.any(axis=test_axes)
if invert:
result = ~result
return result
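# Illustrative usage with made-up values, assuming ``import dask.array as da``:
# each block of ``element`` is compared with each block of ``test_elements``
# and the boolean results are reduced with ``any`` over the test axes.
#   >>> da.isin(da.arange(5, chunks=2), [1, 3]).compute()
#   array([False,  True, False,  True, False])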
@derived_from(np)
def roll(array, shift, axis=None):
result = array
if axis is None:
result = ravel(result)
if not isinstance(shift, Integral):
raise TypeError(
"Expect `shift` to be an instance of Integral when `axis` is None."
)
shift = (shift,)
axis = (0,)
else:
try:
len(shift)
except TypeError:
shift = (shift,)
try:
len(axis)
except TypeError:
axis = (axis,)
if len(shift) != len(axis):
raise ValueError("Must have the same number of shifts as axes.")
for i, s in zip(axis, shift):
s = -s
s %= result.shape[i]
sl1 = result.ndim * [slice(None)]
sl2 = result.ndim * [slice(None)]
sl1[i] = slice(s, None)
sl2[i] = slice(None, s)
sl1 = tuple(sl1)
sl2 = tuple(sl2)
result = concatenate([result[sl1], result[sl2]], axis=i)
result = result.reshape(array.shape)
# Ensure that the output is always a new array object
result = result.copy() if result is array else result
return result
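# Illustrative usage with made-up values (matches np.roll), assuming
# ``import dask.array as da``:
#   >>> da.roll(da.arange(5, chunks=2), 2).compute()
#   array([3, 4, 0, 1, 2])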
@derived_from(np)
def shape(array):
return array.shape
@derived_from(np)
def union1d(ar1, ar2):
return unique(concatenate((ar1.ravel(), ar2.ravel())))
@derived_from(np)
def ravel(array_like):
return asanyarray(array_like).reshape((-1,))
@derived_from(np)
def expand_dims(a, axis):
if type(axis) not in (tuple, list):
axis = (axis,)
out_ndim = len(axis) + a.ndim
axis = validate_axis(axis, out_ndim)
shape_it = iter(a.shape)
shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
return a.reshape(shape)
@derived_from(np)
def squeeze(a, axis=None):
if axis is None:
axis = tuple(i for i, d in enumerate(a.shape) if d == 1)
elif not isinstance(axis, tuple):
axis = (axis,)
if any(a.shape[i] != 1 for i in axis):
raise ValueError("cannot squeeze axis with size other than one")
axis = validate_axis(axis, a.ndim)
sl = tuple(0 if i in axis else slice(None) for i, s in enumerate(a.shape))
a = a[sl]
return a
@derived_from(np)
def compress(condition, a, axis=None):
if not is_arraylike(condition):
# Allow `condition` to be anything array-like, otherwise ensure `condition`
# is a numpy array.
condition = np.asarray(condition)
condition = condition.astype(bool)
a = asarray(a)
if condition.ndim != 1:
raise ValueError("Condition must be one dimensional")
if axis is None:
a = a.ravel()
axis = 0
axis = validate_axis(axis, a.ndim)
# Treat `condition` as filled with `False` (if it is too short)
a = a[
tuple(
slice(None, len(condition)) if i == axis else slice(None)
for i in range(a.ndim)
)
]
# Use `condition` to select along 1 dimension
a = a[tuple(condition if i == axis else slice(None) for i in range(a.ndim))]
return a
@derived_from(np)
def extract(condition, arr):
condition = asarray(condition).astype(bool)
arr = asarray(arr)
return compress(condition.ravel(), arr.ravel())
@derived_from(np)
def take(a, indices, axis=0):
axis = validate_axis(axis, a.ndim)
if isinstance(a, np.ndarray) and isinstance(indices, Array):
return _take_dask_array_from_numpy(a, indices, axis)
else:
return a[(slice(None),) * axis + (indices,)]
def _take_dask_array_from_numpy(a, indices, axis):
assert isinstance(a, np.ndarray)
assert isinstance(indices, Array)
return indices.map_blocks(
lambda block: np.take(a, block, axis), chunks=indices.chunks, dtype=a.dtype
)
@derived_from(np)
def around(x, decimals=0):
return map_blocks(partial(np.around, decimals=decimals), x, dtype=x.dtype)
def _asarray_isnull(values):
import pandas as pd
return np.asarray(pd.isnull(values))
def isnull(values):
"""pandas.isnull for dask arrays"""
# eagerly raise ImportError, if pandas isn't available
import pandas as pd # noqa
return elemwise(_asarray_isnull, values, dtype="bool")
def notnull(values):
"""pandas.notnull for dask arrays"""
return ~isnull(values)
@derived_from(np)
def isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
func = partial(np.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)
return elemwise(func, arr1, arr2, dtype="bool")
@derived_from(np)
def allclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
return isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=equal_nan).all()
def variadic_choose(a, *choices):
return np.choose(a, choices)
@derived_from(np)
def choose(a, choices):
return elemwise(variadic_choose, a, *choices)
def _isnonzero_vec(v):
return bool(np.count_nonzero(v))
_isnonzero_vec = np.vectorize(_isnonzero_vec, otypes=[bool])
def isnonzero(a):
if a.dtype.kind in {"U", "S"}:
# NumPy treats all-whitespace strings as falsy (like in `np.nonzero`),
# but not in `.astype(bool)`. To match the behavior of numpy at least until
# 1.19, we use `_isnonzero_vec`. When NumPy changes behavior, we should just
# use the try block below.
# https://github.com/numpy/numpy/issues/9875
return a.map_blocks(_isnonzero_vec, dtype=bool)
try:
np.zeros(tuple(), dtype=a.dtype).astype(bool)
except ValueError:
######################################################
# Handle special cases where conversion to bool does #
# not work correctly. #
# #
# xref: https://github.com/numpy/numpy/issues/9479 #
######################################################
return a.map_blocks(_isnonzero_vec, dtype=bool)
else:
return a.astype(bool)
@derived_from(np)
def argwhere(a):
a = asarray(a)
nz = isnonzero(a).flatten()
ind = indices(a.shape, dtype=np.intp, chunks=a.chunks)
if ind.ndim > 1:
ind = stack([ind[i].ravel() for i in range(len(ind))], axis=1)
ind = compress(nz, ind, axis=0)
return ind
@derived_from(np)
def where(condition, x=None, y=None):
if (x is None) != (y is None):
raise ValueError("either both or neither of x and y should be given")
if (x is None) and (y is None):
return nonzero(condition)
if np.isscalar(condition):
dtype = result_type(x, y)
x = asarray(x)
y = asarray(y)
shape = broadcast_shapes(x.shape, y.shape)
out = x if condition else y
return broadcast_to(out, shape).astype(dtype)
else:
return elemwise(np.where, condition, x, y)
@derived_from(np)
def count_nonzero(a, axis=None):
return isnonzero(asarray(a)).astype(np.intp).sum(axis=axis)
@derived_from(np)
def flatnonzero(a):
return argwhere(asarray(a).ravel())[:, 0]
@derived_from(np)
def nonzero(a):
ind = argwhere(a)
if ind.ndim > 1:
return tuple(ind[:, i] for i in range(ind.shape[1]))
else:
return (ind,)
def _unravel_index_kernel(indices, func_kwargs):
return np.stack(np.unravel_index(indices, **func_kwargs))
@derived_from(np)
def unravel_index(indices, shape, order="C"):
if shape and indices.size:
unraveled_indices = tuple(
indices.map_blocks(
_unravel_index_kernel,
dtype=np.intp,
chunks=(((len(shape),),) + indices.chunks),
new_axis=0,
func_kwargs={"shape": shape, "order": order},
)
)
else:
unraveled_indices = tuple(empty((0,), dtype=np.intp, chunks=1) for i in shape)
return unraveled_indices
@wraps(np.ravel_multi_index)
def ravel_multi_index(multi_index, dims, mode="raise", order="C"):
if np.isscalar(dims):
dims = (dims,)
if is_dask_collection(dims) or any(is_dask_collection(d) for d in dims):
raise NotImplementedError(
f"Dask types are not supported in the `dims` argument: {dims!r}"
)
if is_arraylike(multi_index):
index_stack = asarray(multi_index)
else:
multi_index_arrs = broadcast_arrays(*multi_index)
index_stack = stack(multi_index_arrs)
if not np.isnan(index_stack.shape).any() and len(index_stack) != len(dims):
raise ValueError(
f"parameter multi_index must be a sequence of length {len(dims)}"
)
if not np.issubdtype(index_stack.dtype, np.signedinteger):
raise TypeError("only int indices permitted")
return index_stack.map_blocks(
np.ravel_multi_index,
dtype=np.intp,
chunks=index_stack.chunks[1:],
drop_axis=0,
dims=dims,
mode=mode,
order=order,
)
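# Illustrative usage with made-up values (C-order flattening of shape (2, 3)),
# assuming ``import dask.array as da`` and ``import numpy as np``:
#   >>> mi = da.from_array(np.array([[0, 1], [1, 2]]), chunks=(2, 1))
#   >>> da.ravel_multi_index(mi, dims=(2, 3)).compute()
#   array([1, 5])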
def _int_piecewise(x, *condlist, **kwargs):
return np.piecewise(
x, list(condlist), kwargs["funclist"], *kwargs["func_args"], **kwargs["func_kw"]
)
@derived_from(np)
def piecewise(x, condlist, funclist, *args, **kw):
return map_blocks(
_int_piecewise,
x,
*condlist,
dtype=x.dtype,
name="piecewise",
funclist=funclist,
func_args=args,
func_kw=kw,
)
def _select(*args, **kwargs):
"""
This is a version of :func:`numpy.select` that accepts an arbitrary number of arguments and
splits them in half to create ``condlist`` and ``choicelist`` params.
"""
split_at = len(args) // 2
condlist = args[:split_at]
choicelist = args[split_at:]
return np.select(condlist, choicelist, **kwargs)
@derived_from(np)
def select(condlist, choicelist, default=0):
# Make the same checks that np.select does.
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError("list of cases must be same length as list of conditions")
if len(condlist) == 0:
raise ValueError("select with an empty condition list is not possible")
choicelist = [asarray(choice) for choice in choicelist]
try:
intermediate_dtype = result_type(*choicelist)
except TypeError as e:
msg = "Choicelist elements do not have a common dtype."
raise TypeError(msg) from e
blockwise_shape = tuple(range(choicelist[0].ndim))
condargs = [arg for elem in condlist for arg in (elem, blockwise_shape)]
choiceargs = [arg for elem in choicelist for arg in (elem, blockwise_shape)]
return blockwise(
_select,
blockwise_shape,
*condargs,
*choiceargs,
dtype=intermediate_dtype,
name="select",
default=default,
)
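# Illustrative usage with made-up values, assuming ``import dask.array as da``:
# element-wise, the value comes from the first matching condition, falling
# back to ``default`` where no condition holds.
#   >>> x = da.arange(4, chunks=2)
#   >>> da.select([x < 2, x > 2], [x, x ** 2], default=-1).compute()
#   array([ 0,  1, -1,  9])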
def _partition(total: int, divisor: int) -> tuple[tuple[int, ...], tuple[int, ...]]:
"""Given a total and a divisor, return two tuples: A tuple containing `divisor`
repeated the number of times it divides `total`, and a length-1 or empty tuple
containing the remainder when `total` is divided by `divisor`. If `divisor` factors
`total`, i.e. if the remainder is 0, then `remainder` is empty.
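Examples
--------
>>> _partition(7, 3)
((3, 3), (1,))
>>> _partition(6, 3)
((3, 3), ())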
"""
multiples = (divisor,) * (total // divisor)
remainder = (total % divisor,) if total % divisor else ()
return multiples, remainder
def aligned_coarsen_chunks(chunks: list[int], multiple: int) -> tuple[int, ...]:
"""
Returns a new chunking aligned with the coarsening multiple.
Any excess is at the end of the array.
Examples
--------
>>> aligned_coarsen_chunks(chunks=(1, 2, 3), multiple=4)
(4, 2)
>>> aligned_coarsen_chunks(chunks=(1, 20, 3, 4), multiple=4)
(4, 20, 4)
>>> aligned_coarsen_chunks(chunks=(20, 10, 15, 23, 24), multiple=10)
(20, 10, 20, 20, 20, 2)
"""
overflow = np.array(chunks) % multiple
excess = overflow.sum()
new_chunks = np.array(chunks) - overflow
# valid chunks are those that are already factorizable by `multiple`
chunk_validity = new_chunks == chunks
valid_inds, invalid_inds = np.where(chunk_validity)[0], np.where(~chunk_validity)[0]
# sort the invalid chunks by size (ascending), then concatenate the results of
# sorting the valid chunks by size (ascending)
chunk_modification_order = [
*invalid_inds[np.argsort(new_chunks[invalid_inds])],
*valid_inds[np.argsort(new_chunks[valid_inds])],
]
partitioned_excess, remainder = _partition(excess, multiple)
# add elements of the partitioned excess to the smallest invalid chunks,
# then smallest valid chunks if needed.
for idx, extra in enumerate(partitioned_excess):
new_chunks[chunk_modification_order[idx]] += extra
# create excess chunk with remainder, if any remainder exists
new_chunks = np.array([*new_chunks, *remainder])
# remove 0-sized chunks
new_chunks = new_chunks[new_chunks > 0]
return tuple(new_chunks)
@wraps(chunk.coarsen)
def coarsen(reduction, x, axes, trim_excess=False, **kwargs):
if not trim_excess and not all(x.shape[i] % div == 0 for i, div in axes.items()):
msg = f"Coarsening factors {axes} do not align with array shape {x.shape}."
raise ValueError(msg)
if reduction.__module__.startswith("dask."):
reduction = getattr(np, reduction.__name__)
new_chunks = {}
for i, div in axes.items():
aligned = aligned_coarsen_chunks(x.chunks[i], div)
if aligned != x.chunks[i]:
new_chunks[i] = aligned
if new_chunks:
x = x.rechunk(new_chunks)
name = "coarsen-" + tokenize(reduction, x, axes, trim_excess)
dsk = {
(name,)
+ key[1:]: (apply, chunk.coarsen, [reduction, key, axes, trim_excess], kwargs)
for key in flatten(x.__dask_keys__())
}
chunks = tuple(
tuple(int(bd // axes.get(i, 1)) for bd in bds) for i, bds in enumerate(x.chunks)
)
meta = reduction(np.empty((1,) * x.ndim, dtype=x.dtype), **kwargs)
graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
return Array(graph, name, chunks, meta=meta)
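# Illustrative usage with made-up values, assuming ``import dask.array as da``
# and ``import numpy as np``: reduce non-overlapping windows of size 2 along
# axis 0 with np.sum.
#   >>> da.coarsen(np.sum, da.arange(8, chunks=4), {0: 2}).compute()
#   array([ 1,  5,  9, 13])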
def split_at_breaks(array, breaks, axis=0):
"""Split an array into a list of arrays (using slices) at the given breaks
>>> split_at_breaks(np.arange(6), [3, 5])
[array([0, 1, 2]), array([3, 4]), array([5])]
"""
padded_breaks = concat([[None], breaks, [None]])
slices = [slice(i, j) for i, j in sliding_window(2, padded_breaks)]
preslice = (slice(None),) * axis
split_array = [array[preslice + (s,)] for s in slices]
return split_array
@derived_from(np)
def insert(arr, obj, values, axis):
# axis is a required argument here to avoid needing to deal with the numpy
# default case (which reshapes the array to make it flat)
axis = validate_axis(axis, arr.ndim)
if isinstance(obj, slice):
obj = np.arange(*obj.indices(arr.shape[axis]))
obj = np.asarray(obj)
scalar_obj = obj.ndim == 0
if scalar_obj:
obj = np.atleast_1d(obj)
obj = np.where(obj < 0, obj + arr.shape[axis], obj)
if (np.diff(obj) < 0).any():
raise NotImplementedError(
"da.insert only implemented for monotonic ``obj`` argument"
)
split_arr = split_at_breaks(arr, np.unique(obj), axis)
if getattr(values, "ndim", 0) == 0:
# we need to turn values into a dask array
name = "values-" + tokenize(values)
dtype = getattr(values, "dtype", type(values))
values = Array({(name,): values}, name, chunks=(), dtype=dtype)
values_shape = tuple(
len(obj) if axis == n else s for n, s in enumerate(arr.shape)
)
values = broadcast_to(values, values_shape)
elif scalar_obj:
values = values[(slice(None),) * axis + (None,)]
values_chunks = tuple(
values_bd if axis == n else arr_bd
for n, (arr_bd, values_bd) in enumerate(zip(arr.chunks, values.chunks))
)
values = values.rechunk(values_chunks)
counts = np.bincount(obj)[:-1]
values_breaks = np.cumsum(counts[counts > 0])
split_values = split_at_breaks(values, values_breaks, axis)
interleaved = list(interleave([split_arr, split_values]))
interleaved = [i for i in interleaved if i.nbytes]
return concatenate(interleaved, axis=axis)
@derived_from(np)
def delete(arr, obj, axis):
"""
NOTE: If ``obj`` is a dask array it is implicitly computed when this function
is called.
"""
# axis is a required argument here to avoid needing to deal with the numpy
# default case (which reshapes the array to make it flat)
axis = validate_axis(axis, arr.ndim)
if isinstance(obj, slice):
tmp = np.arange(*obj.indices(arr.shape[axis]))
obj = tmp[::-1] if obj.step and obj.step < 0 else tmp
else:
obj = np.asarray(obj)
obj = np.where(obj < 0, obj + arr.shape[axis], obj)
obj = np.unique(obj)
target_arr = split_at_breaks(arr, obj, axis)
target_arr = [
arr[
tuple(slice(1, None) if axis == n else slice(None) for n in range(arr.ndim))
]
if i != 0
else arr
for i, arr in enumerate(target_arr)
]
return concatenate(target_arr, axis=axis)
@derived_from(np)
def append(arr, values, axis=None):
# based on numpy.append
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(asanyarray(values))
axis = arr.ndim - 1
return concatenate((arr, values), axis=axis)
def _average(a, axis=None, weights=None, returned=False, is_masked=False):
# This was minimally modified from numpy.average
# See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
# or NUMPY_LICENSE.txt within this directory
# Wrapper used by da.average or da.ma.average.
a = asanyarray(a)
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.size / avg.size)
else:
wgt = asanyarray(weights)
if issubclass(a.dtype.type, (np.integer, np.bool_)):
result_dtype = result_type(a.dtype, wgt.dtype, "f8")
else:
result_dtype = result_type(a.dtype, wgt.dtype)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights differ."
)
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ."
)
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis."
)
# setup wgt to broadcast along axis
wgt = broadcast_to(wgt, (a.ndim - 1) * (1,) + wgt.shape)
wgt = wgt.swapaxes(-1, axis)
if is_masked:
from dask.array.ma import getmaskarray
wgt = wgt * (~getmaskarray(a))
scl = wgt.sum(axis=axis, dtype=result_dtype)
avg = multiply(a, wgt, dtype=result_dtype).sum(axis) / scl
if returned:
if scl.shape != avg.shape:
scl = broadcast_to(scl, avg.shape).copy()
return avg, scl
else:
return avg
@derived_from(np)
def average(a, axis=None, weights=None, returned=False):
return _average(a, axis, weights, returned, is_masked=False)
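# Illustrative usage with made-up values (matches np.average), assuming
# ``import dask.array as da`` and ``import numpy as np``:
#   >>> a = da.from_array(np.array([[1.0, 2.0], [3.0, 4.0]]), chunks=1)
#   >>> da.average(a, axis=0, weights=np.array([1.0, 3.0])).compute()
#   array([2.5, 3.5])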
@derived_from(np)
def tril(m, k=0):
m = asarray_safe(m, like=m)
mask = tri(
*m.shape[-2:],
k=k,
dtype=bool,
chunks=m.chunks[-2:],
like=meta_from_array(m) if _numpy_120 else None,
)
return where(mask, m, np.zeros_like(m, shape=(1,)))
@derived_from(np)
def triu(m, k=0):
m = asarray_safe(m, like=m)
mask = tri(
*m.shape[-2:],
k=k - 1,
dtype=bool,
chunks=m.chunks[-2:],
like=meta_from_array(m) if _numpy_120 else None,
)
return where(mask, np.zeros_like(m, shape=(1,)), m)
@derived_from(np)
def tril_indices(n, k=0, m=None, chunks="auto"):
return nonzero(tri(n, m, k=k, dtype=bool, chunks=chunks))
@derived_from(np)
def tril_indices_from(arr, k=0):
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1], chunks=arr.chunks)
@derived_from(np)
def triu_indices(n, k=0, m=None, chunks="auto"):
return nonzero(~tri(n, m, k=k - 1, dtype=bool, chunks=chunks))
@derived_from(np)
def triu_indices_from(arr, k=0):
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1], chunks=arr.chunks)
| digitize |
create_verify_scheme.go | package dypnsapi
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// CreateVerifyScheme invokes the dypnsapi.CreateVerifyScheme API synchronously
// api document: https://help.aliyun.com/api/dypnsapi/createverifyscheme.html
func (client *Client) CreateVerifyScheme(request *CreateVerifySchemeRequest) (response *CreateVerifySchemeResponse, err error) {
response = CreateCreateVerifySchemeResponse()
err = client.DoAction(request, response)
return
}
// CreateVerifySchemeWithChan invokes the dypnsapi.CreateVerifyScheme API asynchronously
// api document: https://help.aliyun.com/api/dypnsapi/createverifyscheme.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) CreateVerifySchemeWithChan(request *CreateVerifySchemeRequest) (<-chan *CreateVerifySchemeResponse, <-chan error) {
responseChan := make(chan *CreateVerifySchemeResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.CreateVerifyScheme(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// CreateVerifySchemeWithCallback invokes the dypnsapi.CreateVerifyScheme API asynchronously
// api document: https://help.aliyun.com/api/dypnsapi/createverifyscheme.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) CreateVerifySchemeWithCallback(request *CreateVerifySchemeRequest, callback func(response *CreateVerifySchemeResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *CreateVerifySchemeResponse
var err error
defer close(result)
response, err = client.CreateVerifyScheme(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// CreateVerifySchemeRequest is the request struct for api CreateVerifyScheme
type CreateVerifySchemeRequest struct {
*requests.RpcRequest
ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"`
BundleId string `position:"Query" name:"BundleId"`
AppName string `position:"Query" name:"AppName"`
PackSign string `position:"Query" name:"PackSign"`
PackName string `position:"Query" name:"PackName"`
ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"`
OsType string `position:"Query" name:"OsType"`
OwnerId requests.Integer `position:"Query" name:"OwnerId"`
SchemeName string `position:"Query" name:"SchemeName"`
}
// CreateVerifySchemeResponse is the response struct for api CreateVerifyScheme
type CreateVerifySchemeResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
Code string `json:"Code" xml:"Code"`
Message string `json:"Message" xml:"Message"`
GateVerifySchemeDTO GateVerifySchemeDTO `json:"GateVerifySchemeDTO" xml:"GateVerifySchemeDTO"`
}
// CreateCreateVerifySchemeRequest creates a request to invoke CreateVerifyScheme API
func CreateCreateVerifySchemeRequest() (request *CreateVerifySchemeRequest) {
request = &CreateVerifySchemeRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("Dypnsapi", "2017-05-25", "CreateVerifyScheme", "dypns", "openAPI")
request.Method = requests.POST
return
}
// CreateCreateVerifySchemeResponse creates a response to parse from CreateVerifyScheme response
func CreateCreateVerifySchemeResponse() (response *CreateVerifySchemeResponse) | {
response = &CreateVerifySchemeResponse{
BaseResponse: &responses.BaseResponse{},
}
return
} |
|
accessKey.ts | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
import * as pulumi from "@pulumi/pulumi";
import * as utilities from "../utilities";
/**
* Provides an IAM access key. This is a set of credentials that allow API requests to be made as an IAM user.
*
*
* > This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/iam_access_key.html.markdown.
*/
export class AccessKey extends pulumi.CustomResource {
/**
* Get an existing AccessKey resource's state with the given name, ID, and optional extra
* properties used to qualify the lookup.
*
* @param name The _unique_ name of the resulting resource.
* @param id The _unique_ provider ID of the resource to lookup.
* @param state Any extra arguments used during the lookup.
*/
public static get(name: string, id: pulumi.Input<pulumi.ID>, state?: AccessKeyState, opts?: pulumi.CustomResourceOptions): AccessKey {
return new AccessKey(name, <any>state, { ...opts, id: id });
}
/** @internal */
public static readonly __pulumiType = 'aws:iam/accessKey:AccessKey';
/**
* Returns true if the given object is an instance of AccessKey. This is designed to work even
* when multiple copies of the Pulumi SDK have been loaded into the same process.
*/
public static isInstance(obj: any): obj is AccessKey {
if (obj === undefined || obj === null) {
return false;
}
return obj['__pulumiType'] === AccessKey.__pulumiType;
}
/**
* The encrypted secret, base64 encoded, if `pgpKey` was specified.
* > **NOTE:** The encrypted secret may be decrypted using the command line,
*/
public /*out*/ readonly encryptedSecret!: pulumi.Output<string>;
/**
* The fingerprint of the PGP key used to encrypt
* the secret
*/
public /*out*/ readonly keyFingerprint!: pulumi.Output<string>;
/**
* Either a base-64 encoded PGP public key, or a
* keybase username in the form `keybase:some_person_that_exists`, for use
* in the `encryptedSecret` output attribute.
*/
public readonly pgpKey!: pulumi.Output<string | undefined>;
/**
* The secret access key. Note that this will be written
* to the state file. If you use this, please protect your backend state file
* judiciously. Alternatively, you may supply a `pgpKey` instead, which will
* prevent the secret from being stored in plaintext, at the cost of preventing
* the use of the secret key in automation.
*/
public /*out*/ readonly secret!: pulumi.Output<string>;
/**
* **DEPRECATED** The secret access key converted into an SES SMTP | /**
* The secret access key converted into an SES SMTP
* password by applying [AWS's documented Sigv4 conversion
* algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert).
* As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region)
*/
public /*out*/ readonly sesSmtpPasswordV4!: pulumi.Output<string>;
/**
* The access key status to apply. Defaults to `Active`.
* Valid values are `Active` and `Inactive`.
*/
public readonly status!: pulumi.Output<string>;
/**
* The IAM user to associate with this access key.
*/
public readonly user!: pulumi.Output<string>;
/**
* Create a AccessKey resource with the given unique name, arguments, and options.
*
* @param name The _unique_ name of the resource.
* @param args The arguments to use to populate this resource's properties.
* @param opts A bag of options that control this resource's behavior.
*/
constructor(name: string, args: AccessKeyArgs, opts?: pulumi.CustomResourceOptions)
constructor(name: string, argsOrState?: AccessKeyArgs | AccessKeyState, opts?: pulumi.CustomResourceOptions) {
let inputs: pulumi.Inputs = {};
if (opts && opts.id) {
const state = argsOrState as AccessKeyState | undefined;
inputs["encryptedSecret"] = state ? state.encryptedSecret : undefined;
inputs["keyFingerprint"] = state ? state.keyFingerprint : undefined;
inputs["pgpKey"] = state ? state.pgpKey : undefined;
inputs["secret"] = state ? state.secret : undefined;
inputs["sesSmtpPassword"] = state ? state.sesSmtpPassword : undefined;
inputs["sesSmtpPasswordV4"] = state ? state.sesSmtpPasswordV4 : undefined;
inputs["status"] = state ? state.status : undefined;
inputs["user"] = state ? state.user : undefined;
} else {
const args = argsOrState as AccessKeyArgs | undefined;
if (!args || args.user === undefined) {
throw new Error("Missing required property 'user'");
}
inputs["pgpKey"] = args ? args.pgpKey : undefined;
inputs["status"] = args ? args.status : undefined;
inputs["user"] = args ? args.user : undefined;
inputs["encryptedSecret"] = undefined /*out*/;
inputs["keyFingerprint"] = undefined /*out*/;
inputs["secret"] = undefined /*out*/;
inputs["sesSmtpPassword"] = undefined /*out*/;
inputs["sesSmtpPasswordV4"] = undefined /*out*/;
}
if (!opts) {
opts = {}
}
if (!opts.version) {
opts.version = utilities.getVersion();
}
super(AccessKey.__pulumiType, name, inputs, opts);
}
}
/**
* Input properties used for looking up and filtering AccessKey resources.
*/
export interface AccessKeyState {
/**
* The encrypted secret, base64 encoded, if `pgpKey` was specified.
* > **NOTE:** The encrypted secret may be decrypted using the command line,
*/
readonly encryptedSecret?: pulumi.Input<string>;
/**
* The fingerprint of the PGP key used to encrypt
* the secret
*/
readonly keyFingerprint?: pulumi.Input<string>;
/**
* Either a base-64 encoded PGP public key, or a
* keybase username in the form `keybase:some_person_that_exists`, for use
* in the `encryptedSecret` output attribute.
*/
readonly pgpKey?: pulumi.Input<string>;
/**
* The secret access key. Note that this will be written
* to the state file. If you use this, please protect your backend state file
* judiciously. Alternatively, you may supply a `pgpKey` instead, which will
* prevent the secret from being stored in plaintext, at the cost of preventing
* the use of the secret key in automation.
*/
readonly secret?: pulumi.Input<string>;
/**
* **DEPRECATED** The secret access key converted into an SES SMTP
* password by applying [AWS's documented conversion algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert).
*
* @deprecated AWS SigV2 for SES SMTP passwords is deprecated.
Use 'ses_smtp_password_v4' for region-specific AWS SigV4 signed SES SMTP password instead.
*/
readonly sesSmtpPassword?: pulumi.Input<string>;
/**
* The secret access key converted into an SES SMTP
* password by applying [AWS's documented Sigv4 conversion
* algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert).
* As SigV4 is region specific, valid Provider regions are `ap-south-1`, `ap-southeast-2`, `eu-central-1`, `eu-west-1`, `us-east-1` and `us-west-2`. See current [AWS SES regions](https://docs.aws.amazon.com/general/latest/gr/rande.html#ses_region)
*/
readonly sesSmtpPasswordV4?: pulumi.Input<string>;
/**
* The access key status to apply. Defaults to `Active`.
* Valid values are `Active` and `Inactive`.
*/
readonly status?: pulumi.Input<string>;
/**
* The IAM user to associate with this access key.
*/
readonly user?: pulumi.Input<string>;
}
/**
* The set of arguments for constructing an AccessKey resource.
*/
export interface AccessKeyArgs {
/**
* Either a base-64 encoded PGP public key, or a
* keybase username in the form `keybase:some_person_that_exists`, for use
* in the `encryptedSecret` output attribute.
*/
readonly pgpKey?: pulumi.Input<string>;
/**
* The access key status to apply. Defaults to `Active`.
* Valid values are `Active` and `Inactive`.
*/
readonly status?: pulumi.Input<string>;
/**
* The IAM user to associate with this access key.
*/
readonly user: pulumi.Input<string>;
} | * password by applying [AWS's documented conversion
*/
public /*out*/ readonly sesSmtpPassword!: pulumi.Output<string>; |
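// A hedged usage sketch (not part of the generated SDK): the property docs above
// suggest supplying `pgpKey` so that only `encryptedSecret` is written to state.
// The resource names and the keybase handle below are placeholders, not values
// taken from this file.
//
// import * as aws from "@pulumi/aws";
//
// const deployUser = new aws.iam.User("deployUser");
// const deployKey = new aws.iam.AccessKey("deployKey", {
//     user: deployUser.name,
//     pgpKey: "keybase:some_person_that_exists",
// });
// export const encryptedSecret = deployKey.encryptedSecret;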
peopleManager.d.ts | import * as SP from "gd-sprest-def/lib/SP/UserProfiles/entitytypes";
import { ITargetInfoProps } from "../utils";
/**
* #### REST API
* _api/SP.UserProfiles.PeopleManager
*/
export const PeopleManager: IPeopleManager;
/**
* People Manager
* @category People Manager
*/
export interface IPeopleManager { | */
(targetInfo?: ITargetInfoProps): SP.IPeopleManager;
} | /**
* Creates an instance of the people manager library.
* @param targetInfo - (Optional) The target information. |
elo.py | """
Elo Rating Calculator
"""
from whist.core.scoring.score_card import ScoreCard
from whist.core.scoring.team import Team
from whist.core.user.player import Player
# pylint: disable=too-few-public-methods
class EloRater:
"""
Static class that calculates the Elo rating for players after several hands have been played.
"""
@staticmethod
def rate(teams: list[Team], scores: ScoreCard) -> None:
"""
Calculates the new rating of each player after several hands have been played.
:param teams: the two teams whose players' ratings are updated in place
:type teams: list[Team]
:param scores: the score card of the hands played
:type scores: ScoreCard
:return: None
:rtype: None
""" | won = scores.won(team)
player.rating += round(k_factor * delta * won)
@staticmethod
def _k_factor(player: Player) -> int:
if player.rating > 2400 and player.games > 30:
return 10
if player.rating < 2300 and player.games < 30:
return 40
return 20
@staticmethod
def _score_delta(team: Team, opponent: Team, scores: ScoreCard) -> float:
num_games = len(scores)
num_wins = scores.score(team)
expected_score = EloRater._expected_score(team, opponent)
return num_wins - num_games * expected_score
@staticmethod
def _expected_score(team: Team, opponent: Team) -> float:
q_a = EloRater._team_quotient(team)
q_b = EloRater._team_quotient(opponent)
return q_a / (q_a + q_b)
@staticmethod
def _team_quotient(team: Team):
return 10 ** (team.rating / 400) | delta = EloRater._score_delta(teams[0], teams[1], scores)
for team in teams:
for player in team.players:
k_factor = EloRater._k_factor(player) |
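# A hedged illustration, separate from the module above: `_expected_score` and
# `_team_quotient` follow the standard Elo expectation q_a / (q_a + q_b) with
# q = 10 ** (rating / 400); the classic single-game update is then
# K * (actual - expected). The numbers below are made up purely to show the math.
def _example_expected_score(rating_a: float, rating_b: float) -> float:
    """Win probability of a player rated `rating_a` against one rated `rating_b`."""
    q_a = 10 ** (rating_a / 400)
    q_b = 10 ** (rating_b / 400)
    return q_a / (q_a + q_b)


if __name__ == "__main__":
    expected = _example_expected_score(1600, 1500)
    # A 1600-rated player is expected to score ~0.64 against a 1500-rated one,
    # so with K = 20 a win would add roughly 20 * (1 - 0.64), about 7 points.
    print(round(expected, 2), round(20 * (1 - expected)))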
lib.py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The library and base Map for defining full maps.
To define your own map just import this library and subclass Map. It will be
automatically registered for creation by `get`.
class NewMap(lib.Map):
prefix = "map_dir"
filename = "map_name"
players = 3
You can build a hierarchy of classes to make your definitions less verbose.
To use a map, either import the map module and instantiate the map directly, or
import the maps lib and use `get`. Using `get` from this lib will work, but only
if you've imported the map module somewhere.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import os
class DuplicateMapException(Exception):
pass
class NoMapException(Exception):
pass
class Map(object):
"""Base map object to configure a map. To define a map just subclass this.
Properties:
directory: Directory for the map
filename: Actual filename. You can skip the ".SC2Map" file ending.
download: Where to download the map.
game_steps_per_episode: Game steps per episode, independent of the step_mul.
0 (default) means no limit.
step_mul: How many game steps per agent step?
score_index: Which score to give for this map. -1 means the win/loss
reward. >=0 is the index into score_cumulative.
score_multiplier: A score multiplier to allow making small scores good.
players: Max number of players for this map.
"""
directory = ""
filename = None
download = None
game_steps_per_episode = 0
step_mul = 8
score_index = -1
score_multiplier = 1
players = None
@property
def path(self):
"""The full path to the map file: directory, filename and file ending."""
if self.filename:
map_path = os.path.join(self.directory, self.filename)
if not map_path.endswith(".SC2Map"):
map_path += ".SC2Map"
return map_path
def data(self, run_config):
"""Return the map data."""
try:
return run_config.map_data(self.path)
except (IOError, OSError) as e: # Catch both for python 2/3 compatibility.
if self.download and hasattr(e, "filename"):
logging.error("Error reading map '%s' from: %s", self.name, e.filename)
logging.error("Download the map from: %s", self.download)
raise
@property
def name(self):
|
def __str__(self):
return "\n".join([
self.name,
" %s" % self.path,
" players: %s, score_index: %s, score_multiplier: %s" % (
self.players, self.score_index, self.score_multiplier),
" step_mul: %s, game_steps_per_episode: %s" % (
self.step_mul, self.game_steps_per_episode),
])
@classmethod
def all_subclasses(cls):
"""An iterator over all subclasses of `cls`."""
for s in cls.__subclasses__():
yield s
for c in s.all_subclasses():
yield c
def get_maps():
"""Get the full dict of maps {map_name: map_class}."""
maps = {}
for mp in Map.all_subclasses():
if mp.filename:
map_name = mp.__name__
if map_name in maps:
raise DuplicateMapException("Duplicate map found: " + map_name)
maps[map_name] = mp
return maps
def get(map_name):
"""Get an instance of a map by name. Errors if the map doesn't exist."""
if isinstance(map_name, Map):
return map_name
# Get the list of maps. This isn't at module scope to avoid problems of maps
# being defined after this module is imported.
maps = get_maps()
map_class = maps.get(map_name)
if map_class:
return map_class()
raise NoMapException("Map doesn't exist: %s" % map_name)
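# A hedged sketch, not part of the original module: subclassing `Map` registers the
# class via `Map.__subclasses__`, after which `get` can instantiate it by class name.
# The directory/filename values below are examples only.
class _ExampleMap(Map):
    directory = "Melee"
    filename = "Simple64"
    players = 2


if __name__ == "__main__":
    example = get("_ExampleMap")
    print(example.path)  # e.g. "Melee/Simple64.SC2Map"
    print(example)       # name, path, players, score settings, step settings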
| return self.__class__.__name__ |
pelog_he.js | {"frequencies":[261.6255653006,283.17034563789,338.50336851425,364.68988616898,389.06292924114,420.13030572059,493.31307433255,523.2511306012],"description":"Observed Javanese Pelog scale, Helmholtz/Ellis p. 518, nr.96"} | ||
App.tsx | import React from "react";
import Editor from "./Editor";
import { AppContextProvider } from "./lib/AppContext";
import Parsed from "./Parsed";
import Rendered from "./Rendered";
export default function App() {
return (
<AppContextProvider>
<Editor />
<div id="output">
<Rendered />
<Parsed />
</div>
</AppContextProvider> | } | ); |
lib.rs | // Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! The blockchain explorer allows getting information about blocks and transactions in the blockchain.
//! It allows requesting transactions from a block together with their execution statuses,
//! iterating over blocks, etc.
//!
//! This crate is distinct from the [explorer *service*][explorer-service] crate. While this crate
//! provides Rust language APIs for retrieving info from the blockchain, the explorer service
//! translates these APIs into REST and WebSocket endpoints. Correspondingly, this crate is
//! primarily useful for Rust-language client apps. Another use case is testing; the [testkit]
//! returns [`BlockWithTransactions`] from its `create_block*` methods and re-exports the entire
//! crate as `explorer`.
//!
//! See the examples in the crate for usage.
//!
//! [explorer-service]: https://docs.rs/exonum-explorer-service/
//! [`BlockWithTransactions`]: struct.BlockWithTransactions.html
//! [testkit]: https://docs.rs/exonum-testkit/latest/exonum_testkit/struct.TestKit.html
use chrono::{DateTime, Utc};
use exonum::{
blockchain::{Block, CallInBlock, Schema, TxLocation},
crypto::Hash,
helpers::Height,
merkledb::{ListProof, MapProof, ObjectHash, Snapshot},
messages::{AnyTx, Precommit, Verified},
runtime::{ExecutionError, ExecutionErrorSerde, ExecutionStatus},
};
use serde::{Serialize, Serializer};
use serde_derive::*;
use std::{
cell::{Ref, RefCell},
collections::{BTreeMap, Bound},
fmt,
ops::{Index, RangeBounds},
slice,
time::UNIX_EPOCH,
};
pub mod api;
/// Ending height of the range (exclusive), given the a priori max height.
fn end_height(bound: Bound<&Height>, max: Height) -> Height {
use std::cmp::min;
let inner_end = match bound {
Bound::Included(height) => height.next(),
Bound::Excluded(height) => *height,
Bound::Unbounded => max.next(),
};
min(inner_end, max.next())
}
/// Information about a block in the blockchain.
///
/// # JSON presentation
///
/// JSON object with the following fields:
///
/// | Name | Equivalent type | Description |
/// |------|-------|--------|
/// | `block` | [`Block`] | Block header as recorded in the blockchain |
/// | `precommits` | `Vec<`[`Precommit`]`>` | Precommits authorizing the block |
/// | `txs` | `Vec<`[`Hash`]`>` | Hashes of transactions in the block |
///
/// [`Block`]: https://docs.rs/exonum/latest/exonum/blockchain/struct.Block.html
/// [`Precommit`]: https://docs.rs/exonum/latest/exonum/messages/struct.Precommit.html
/// [`Hash`]: https://docs.rs/exonum-crypto/latest/exonum_crypto/struct.Hash.html
#[derive(Debug)]
pub struct BlockInfo<'a> {
header: Block,
explorer: &'a BlockchainExplorer<'a>,
precommits: RefCell<Option<Vec<Verified<Precommit>>>>,
txs: RefCell<Option<Vec<Hash>>>,
}
impl<'a> BlockInfo<'a> {
fn new(explorer: &'a BlockchainExplorer<'_>, height: Height) -> Self {
let schema = explorer.schema;
let hashes = schema.block_hashes_by_height();
let blocks = schema.blocks();
let block_hash = hashes
.get(height.0)
.unwrap_or_else(|| panic!("Block not found, height: {:?}", height));
let header = blocks
.get(&block_hash)
.unwrap_or_else(|| panic!("Block not found, hash: {:?}", block_hash));
BlockInfo {
explorer,
header,
precommits: RefCell::new(None),
txs: RefCell::new(None),
}
}
/// Returns block header as recorded in the blockchain.
pub fn header(&self) -> &Block {
&self.header
}
/// Extracts the header discarding all other information.
pub fn into_header(self) -> Block {
self.header
}
/// Returns the height of this block.
///
/// This method is equivalent to calling `block.header().height()`.
pub fn height(&self) -> Height {
self.header.height
}
/// Returns the number of transactions in this block.
pub fn len(&self) -> usize {
self.header.tx_count as usize
}
/// Is this block empty (i.e., contains no transactions)?
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns a list of precommits for this block.
pub fn precommits(&self) -> Ref<'_, [Verified<Precommit>]> {
if self.precommits.borrow().is_none() {
let precommits = self.explorer.precommits(&self.header);
*self.precommits.borrow_mut() = Some(precommits);
}
Ref::map(self.precommits.borrow(), |cache| {
cache.as_ref().unwrap().as_ref()
})
}
/// Lists hashes of transactions included in this block.
pub fn transaction_hashes(&self) -> Ref<'_, [Hash]> {
if self.txs.borrow().is_none() {
let txs = self.explorer.transaction_hashes(&self.header);
*self.txs.borrow_mut() = Some(txs);
}
Ref::map(self.txs.borrow(), |cache| cache.as_ref().unwrap().as_ref())
}
/// Returns a transaction with the specified index in the block.
pub fn transaction(&self, index: usize) -> Option<CommittedTransaction> {
self.transaction_hashes()
.get(index)
.map(|hash| self.explorer.committed_transaction(hash, None))
}
/// Returns the proof for the execution status of a call within this block.
///
/// Note that if the call did not result in an error or did not happen at all, the returned
/// proof will not contain entries. To distinguish between two cases, one can inspect
/// the number of transactions in the block or IDs of the active services when the block
/// was executed.
pub fn error_proof(&self, call_location: CallInBlock) -> MapProof<CallInBlock, ExecutionError> {
self.explorer
.schema
.call_errors(self.header.height)
.get_proof(call_location)
}
/// Iterates over transactions in the block.
pub fn iter(&self) -> Transactions<'_, '_> {
Transactions {
block: self,
ptr: 0,
len: self.len(),
}
}
/// Loads transactions, errors and precommits for the block.
pub fn with_transactions(self) -> BlockWithTransactions {
let (explorer, header, precommits, transactions) =
(self.explorer, self.header, self.precommits, self.txs);
let precommits = precommits
.into_inner()
.unwrap_or_else(|| explorer.precommits(&header));
let transactions = transactions
.into_inner()
.unwrap_or_else(|| explorer.transaction_hashes(&header))
.iter()
.map(|tx_hash| explorer.committed_transaction(tx_hash, None))
.collect();
let errors: Vec<_> = self
.explorer
.schema
.call_errors(header.height)
.iter()
.map(|(location, error)| ErrorWithLocation { location, error })
.collect();
BlockWithTransactions {
header,
precommits,
transactions,
errors,
}
}
}
impl<'a> Serialize for BlockInfo<'a> {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
use serde::ser::SerializeStruct;
let mut s = serializer.serialize_struct("BlockInfo", 3)?;
s.serialize_field("block", &self.header)?;
s.serialize_field("precommits", &*self.precommits())?;
s.serialize_field("txs", &*self.transaction_hashes())?;
s.end()
}
}
/// Iterator over transactions in a block.
#[derive(Debug)]
pub struct Transactions<'r, 'a> {
block: &'r BlockInfo<'a>,
ptr: usize,
len: usize,
}
impl<'a, 'r> Iterator for Transactions<'a, 'r> {
type Item = CommittedTransaction;
fn next(&mut self) -> Option<CommittedTransaction> {
if self.ptr == self.len {
None
} else {
let transaction = self.block.transaction(self.ptr);
self.ptr += 1;
transaction
}
}
}
impl<'a, 'r: 'a> IntoIterator for &'r BlockInfo<'a> {
type Item = CommittedTransaction;
type IntoIter = Transactions<'a, 'r>;
fn into_iter(self) -> Transactions<'a, 'r> {
self.iter()
}
}
/// Information about a block in the blockchain with info on transactions eagerly loaded.
#[derive(Debug, Serialize, Deserialize)]
pub struct BlockWithTransactions {
/// Block header as recorded in the blockchain.
#[serde(rename = "block")]
pub header: Block,
/// Precommits.
pub precommits: Vec<Verified<Precommit>>,
/// Transactions in the order they appear in the block.
pub transactions: Vec<CommittedTransaction>,
/// Errors that have occurred within the block.
pub errors: Vec<ErrorWithLocation>,
}
/// Execution error together with its location within the block.
#[derive(Debug, Serialize, Deserialize)]
pub struct ErrorWithLocation {
/// Location of the error.
pub location: CallInBlock,
/// Error data.
#[serde(with = "ExecutionErrorSerde")]
pub error: ExecutionError,
}
impl fmt::Display for ErrorWithLocation {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "In {}: {}", self.location, self.error)
}
}
impl BlockWithTransactions {
/// Returns the height of this block.
///
/// This method is equivalent to calling `block.header.height()`.
pub fn height(&self) -> Height {
self.header.height
}
/// Returns the number of transactions in this block.
pub fn len(&self) -> usize {
self.transactions.len()
}
/// Is this block empty (i.e., contains no transactions)?
pub fn is_empty(&self) -> bool {
self.transactions.is_empty()
}
/// Iterates over transactions in the block.
pub fn iter(&self) -> EagerTransactions<'_> {
self.transactions.iter()
}
/// Returns errors converted into a map. Note that this is potentially a costly operation.
pub fn error_map(&self) -> BTreeMap<CallInBlock, &ExecutionError> {
self.errors.iter().map(|e| (e.location, &e.error)).collect()
}
}
/// Iterator over transactions in [`BlockWithTransactions`].
///
/// [`BlockWithTransactions`]: struct.BlockWithTransactions.html
pub type EagerTransactions<'a> = slice::Iter<'a, CommittedTransaction>;
impl Index<usize> for BlockWithTransactions {
type Output = CommittedTransaction;
fn index(&self, index: usize) -> &CommittedTransaction {
self.transactions.get(index).unwrap_or_else(|| {
panic!(
"Index exceeds number of transactions in block {}",
self.len()
);
})
}
}
/// Returns a transaction in the block by its hash. Beware that this is a slow operation
/// (linear w.r.t. the number of transactions in a block).
impl Index<Hash> for BlockWithTransactions {
type Output = CommittedTransaction;
fn index(&self, index: Hash) -> &CommittedTransaction {
self.transactions
.iter()
.find(|&tx| tx.message.object_hash() == index)
.unwrap_or_else(|| {
panic!("No transaction with hash {} in the block", index);
})
}
}
impl<'a> IntoIterator for &'a BlockWithTransactions {
type Item = &'a CommittedTransaction;
type IntoIter = EagerTransactions<'a>;
fn into_iter(self) -> EagerTransactions<'a> {
self.iter()
}
}
/// Information about a particular transaction in the blockchain.
///
/// # JSON presentation
///
/// | Name | Equivalent type | Description |
/// |------|-------|--------|
/// | `message` | `Verified<AnyTx>` | Transaction as recorded in the blockchain |
/// | `location` | [`TxLocation`] | Location of the transaction in the block |
/// | `location_proof` | [`ListProof`]`<`[`Hash`]`>` | Proof of transaction inclusion into a block |
/// | `status` | (custom; see below) | Execution status |
/// | `time` | [`DateTime`]`<`[`Utc`]`>` | Commitment time* |
///
/// \* By commitment time we mean an approximate commitment time of the block
/// which includes the transaction. This time is a median time of the precommit local times
/// of each validator.
///
/// ## `status` field
///
/// The `status` field is a more readable representation of the [`ExecutionStatus`] type.
///
/// For successfully executed transactions, `status` is equal to
///
/// ```json
/// { "type": "success" }
/// ```
///
/// For transactions that cause an [`ExecutionError`], `status` contains the error code
/// and an optional description, i.e., has the following type in the [TypeScript] notation:
///
/// ```typescript
/// type Error = {
/// type: 'service_error' | 'core_error' | 'common_error' | 'runtime_error' | 'unexpected_error',
/// code?: number,
/// description?: string,
/// runtime_id: number,
/// call_site?: CallSite,
/// };
///
/// type CallSite = MethodCallSite | HookCallSite;
///
/// type MethodCallSite = {
/// call_type: 'method',
/// instance_id: number,
/// interface?: string,
/// method_id: number,
/// };
///
/// type HookCallSite = {
/// call_type: 'constructor' | 'before_transactions' | 'after_transactions',
/// instance_id: number,
/// };
/// ```
///
/// Explanations:
///
/// - `Error.type` determines the component responsible for the error. Usually, errors
/// are generated by the service code, but they can also be caused by the dispatch logic,
/// runtime associated with the service, or come from another source (`unexpected_error`s).
/// - `Error.code` is the error code. For service errors, this code is specific
/// to the service instance (which can be obtained from `call_site`), and for runtime errors -
/// to the runtime. For core errors, the codes are fixed; their meaning can be found
/// in the [`CoreError`] docs. The code is present for all error types except
/// `unexpected_error`s, in which the code is always absent.
/// Besides types listed above, there is also a set of errors that can occur within any context,
/// which are organized in the [`CommonError`].
/// - `Error.description` is an optional human-readable description of the error.
/// - `Error.runtime_id` is the numeric ID of the runtime in which the error has occurred. Note
/// that the runtime is defined for all error types, not just `runtime_error`s, since
/// for any request it's possible to say which runtime is responsible for its processing.
/// - `Error.call_site` provides most precise known location of the call in which the error
/// has occurred.
///
/// [`TxLocation`]: https://docs.rs/exonum/latest/exonum/blockchain/struct.TxLocation.html
/// [`ListProof`]: https://docs.rs/exonum-merkledb/latest/exonum_merkledb/indexes/proof_list/struct.ListProof.html
/// [`Hash`]: https://docs.rs/exonum-crypto/latest/exonum_crypto/struct.Hash.html
/// [`ExecutionStatus`]: https://docs.rs/exonum/latest/exonum/runtime/struct.ExecutionStatus.html
/// [`ExecutionError`]: https://docs.rs/exonum/latest/exonum/runtime/struct.ExecutionError.html
/// [`CoreError`]: https://docs.rs/exonum/latest/exonum/runtime/enum.CoreError.html
/// [`CommonError`]: https://docs.rs/exonum/latest/exonum/runtime/enum.CommonError.html
/// [TypeScript]: https://www.typescriptlang.org/
/// [`DateTime`]: https://docs.rs/chrono/0.4.10/chrono/struct.DateTime.html
/// [`Utc`]: https://docs.rs/chrono/0.4.10/chrono/offset/struct.Utc.html
#[derive(Debug, Serialize, Deserialize)]
pub struct CommittedTransaction {
message: Verified<AnyTx>,
location: TxLocation,
location_proof: ListProof<Hash>,
status: ExecutionStatus,
time: DateTime<Utc>,
}
impl CommittedTransaction {
/// Returns the content of the transaction.
pub fn message(&self) -> &Verified<AnyTx> {
&self.message
}
/// Returns the transaction location in block.
pub fn location(&self) -> &TxLocation {
&self.location
}
/// Returns a proof that transaction is recorded in the blockchain.
pub fn location_proof(&self) -> &ListProof<Hash> {
&self.location_proof
}
/// Returns the status of the transaction execution.
pub fn status(&self) -> Result<(), &ExecutionError> {
self.status.0.as_ref().map(drop)
}
/// Returns an approximate commit time of the block which includes this transaction.
pub fn time(&self) -> &DateTime<Utc> {
&self.time
}
}
/// Information about the transaction.
///
/// Values of this type are returned by the `transaction()` method of the `BlockchainExplorer`.
///
/// # JSON presentation
///
/// ## Committed transactions
///
/// Committed transactions are represented just like a `CommittedTransaction`,
/// with the additional `type` field equal to `"committed"`.
///
/// ## Transaction in pool
///
/// Transactions in pool are represented with a 2-field object:
///
/// - `type` field contains transaction type (`"in_pool"`).
/// - `message` is the full transaction message serialized to the hexadecimal form.
///
/// # Examples
///
/// ```
/// use exonum_explorer::TransactionInfo;
/// use exonum::{crypto::gen_keypair, runtime::InstanceId};
/// # use exonum_derive::*;
/// # use serde_derive::*;
/// # use serde_json::json;
///
/// /// Service interface.
/// #[exonum_interface]
/// trait ServiceInterface<Ctx> {
/// type Output;
/// #[interface_method(id = 0)]
/// fn create_wallet(&self, ctx: Ctx, username: String) -> Self::Output;
/// }
///
/// # fn main() {
/// // Create a signed transaction.
/// let keypair = gen_keypair();
/// const SERVICE_ID: InstanceId = 100;
/// let tx = keypair.create_wallet(SERVICE_ID, "Alice".to_owned());
/// // This transaction in pool will be represented as follows:
/// let json = json!({
/// "type": "in_pool",
/// "message": tx,
/// });
/// let parsed: TransactionInfo = serde_json::from_value(json).unwrap();
/// assert!(parsed.is_in_pool());
/// # }
/// ```
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum TransactionInfo {
/// Transaction is in the memory pool, but not yet committed to the blockchain.
InPool {
/// A content of the uncommitted transaction.
message: Verified<AnyTx>,
},
/// Transaction is already committed to the blockchain.
Committed(CommittedTransaction),
}
impl TransactionInfo {
/// Returns the content of this transaction.
pub fn message(&self) -> &Verified<AnyTx> {
match *self {
TransactionInfo::InPool { ref message } => message,
TransactionInfo::Committed(ref tx) => tx.message(),
}
}
/// Is this an in-pool transaction?
pub fn is_in_pool(&self) -> bool {
match *self {
TransactionInfo::InPool { .. } => true,
_ => false,
}
}
/// Is this a committed transaction?
pub fn is_committed(&self) -> bool {
match *self {
TransactionInfo::Committed(_) => true,
_ => false,
}
}
/// Returns a reference to the inner committed transaction if this transaction is committed.
/// For transactions in pool, returns `None`.
pub fn as_committed(&self) -> Option<&CommittedTransaction> {
match *self {
TransactionInfo::Committed(ref tx) => Some(tx),
_ => None,
}
}
}
/// Blockchain explorer.
///
/// # Notes
///
/// The explorer wraps a specific [`Snapshot`] of the blockchain state; that is,
/// all calls to the methods of an explorer instance are guaranteed to be consistent.
///
/// [`Snapshot`]: https://docs.rs/exonum-merkledb/latest/exonum_merkledb/trait.Snapshot.html
#[derive(Debug, Copy, Clone)]
pub struct BlockchainExplorer<'a> {
schema: Schema<&'a dyn Snapshot>,
}
impl<'a> BlockchainExplorer<'a> {
/// Creates a new `BlockchainExplorer` instance from the provided snapshot.
pub fn new(snapshot: &'a dyn Snapshot) -> Self {
BlockchainExplorer {
schema: Schema::new(snapshot),
}
}
/// Creates a new `BlockchainExplorer` instance from the core schema.
pub fn from_schema(schema: Schema<&'a dyn Snapshot>) -> Self {
BlockchainExplorer { schema }
}
/// Returns information about the transaction identified by the hash.
pub fn transaction(&self, tx_hash: &Hash) -> Option<TransactionInfo> {
let message = self.transaction_without_proof(tx_hash)?;
if self.schema.transactions_pool().contains(tx_hash) {
return Some(TransactionInfo::InPool { message });
}
let tx = self.committed_transaction(tx_hash, Some(message));
Some(TransactionInfo::Committed(tx))
}
/// Returns the status of a call in a block.
///
/// # Return value
///
/// This method will return `Ok(())` both if the call completed successfully and if it
/// was not performed at all. The caller is responsible for distinguishing these two outcomes.
pub fn call_status(
&self,
block_height: Height,
call_location: CallInBlock,
) -> Result<(), ExecutionError> {
match self.schema.call_errors(block_height).get(&call_location) {
None => Ok(()),
Some(e) => Err(e),
}
}
/// Return transaction message without proof.
pub fn transaction_without_proof(&self, tx_hash: &Hash) -> Option<Verified<AnyTx>> {
self.schema.transactions().get(tx_hash)
}
fn precommits(&self, block: &Block) -> Vec<Verified<Precommit>> {
self.schema
.precommits(&block.object_hash())
.iter()
.collect()
}
fn transaction_hashes(&self, block: &Block) -> Vec<Hash> {
let tx_hashes_table = self.schema.block_transactions(block.height);
tx_hashes_table.iter().collect()
}
/// Retrieves a transaction that is known to be committed.
fn committed_transaction(
&self,
tx_hash: &Hash,
maybe_content: Option<Verified<AnyTx>>,
) -> CommittedTransaction {
let location = self
.schema
.transactions_locations()
.get(tx_hash)
.unwrap_or_else(|| panic!("Location not found for transaction hash {:?}", tx_hash));
let location_proof = self
.schema
.block_transactions(location.block_height())
.get_proof(u64::from(location.position_in_block()));
let block_precommits = self
.schema
.block_and_precommits(location.block_height())
.unwrap();
let time = median_precommits_time(&block_precommits.precommits);
// Unwrap is OK here, because we already know that transaction is committed.
let status = self.schema.transaction_result(location).unwrap();
CommittedTransaction {
message: maybe_content.unwrap_or_else(|| {
self.schema
.transactions()
.get(tx_hash)
.expect("BUG: Cannot find transaction in database")
}),
location,
location_proof,
status: ExecutionStatus(status),
time,
}
}
/// Return the height of the blockchain.
pub fn height(&self) -> Height {
self.schema.height()
}
/// Returns block information for the specified height or `None` if there is no such block.
pub fn | (&self, height: Height) -> Option<BlockInfo<'_>> {
if self.height() >= height {
Some(BlockInfo::new(self, height))
} else {
None
}
}
/// Return a block together with its transactions at the specified height, or `None`
/// if there is no such block.
pub fn block_with_txs(&self, height: Height) -> Option<BlockWithTransactions> {
let txs_table = self.schema.block_transactions(height);
let block_proof = self.schema.block_and_precommits(height);
let errors = self.schema.call_errors(height);
block_proof.map(|proof| BlockWithTransactions {
header: proof.block,
precommits: proof.precommits,
transactions: txs_table
.iter()
.map(|tx_hash| self.committed_transaction(&tx_hash, None))
.collect(),
errors: errors
.iter()
.map(|(location, error)| ErrorWithLocation { location, error })
.collect(),
})
}
/// Iterates over blocks in the blockchain.
pub fn blocks<R: RangeBounds<Height>>(&self, heights: R) -> Blocks<'_> {
use std::cmp::max;
let max_height = self.schema.height();
let ptr = match heights.start_bound() {
Bound::Included(height) => *height,
Bound::Excluded(height) => height.next(),
Bound::Unbounded => Height(0),
};
Blocks {
explorer: self,
ptr,
back: max(ptr, end_height(heights.end_bound(), max_height)),
}
}
}
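// A hedged sketch, not part of the original crate: given a `snapshot` obtained
// elsewhere (for example from a node database or from the testkit), the explorer
// can walk the most recent blocks and inspect their committed transactions.
#[allow(dead_code)]
fn print_recent_blocks_example(snapshot: &dyn Snapshot) {
    let explorer = BlockchainExplorer::new(snapshot);
    // Newest blocks first; `Blocks` is a double-ended iterator.
    for block in explorer.blocks(..).rev().take(10) {
        println!("height {}: {} transaction(s)", block.height().0, block.len());
        for tx in &block {
            println!("  committed at {}", tx.time());
        }
    }
}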
/// Iterator over blocks in the blockchain.
pub struct Blocks<'a> {
explorer: &'a BlockchainExplorer<'a>,
ptr: Height,
back: Height,
}
impl<'a> fmt::Debug for Blocks<'a> {
fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
formatter
.debug_struct("Blocks")
.field("ptr", &self.ptr)
.field("back", &self.back)
.finish()
}
}
impl<'a> Iterator for Blocks<'a> {
type Item = BlockInfo<'a>;
fn next(&mut self) -> Option<BlockInfo<'a>> {
if self.ptr == self.back {
return None;
}
let block = BlockInfo::new(self.explorer, self.ptr);
self.ptr = self.ptr.next();
Some(block)
}
fn size_hint(&self) -> (usize, Option<usize>) {
let exact = (self.back.0 - self.ptr.0) as usize;
(exact, Some(exact))
}
fn count(self) -> usize {
(self.back.0 - self.ptr.0) as usize
}
fn nth(&mut self, n: usize) -> Option<BlockInfo<'a>> {
if self.ptr.0 + n as u64 >= self.back.0 {
self.ptr = self.back;
None
} else {
self.ptr = Height(self.ptr.0 + n as u64);
let block = BlockInfo::new(self.explorer, self.ptr);
self.ptr = self.ptr.next();
Some(block)
}
}
}
impl<'a> DoubleEndedIterator for Blocks<'a> {
fn next_back(&mut self) -> Option<BlockInfo<'a>> {
if self.ptr == self.back {
return None;
}
self.back = self.back.previous();
Some(BlockInfo::new(self.explorer, self.back))
}
}
/// Calculates a median time from precommits.
pub fn median_precommits_time(precommits: &[Verified<Precommit>]) -> DateTime<Utc> {
if precommits.is_empty() {
UNIX_EPOCH.into()
} else {
let mut times: Vec<_> = precommits.iter().map(|p| p.payload().time()).collect();
times.sort();
times[times.len() / 2]
}
}
| block |
models.py | from django.db import models
from django.contrib.auth.models import User
from simple_history.models import HistoricalRecords
from django.core.validators import MinValueValidator
from preferences.models import PaymentMethod
from django.core.exceptions import ValidationError
class Category(models.Model):
"""
A product category
"""
class Meta:
verbose_name="Catégorie"
name = models.CharField(max_length=100, verbose_name="Nom", unique=True)
"""
The name of the category
"""
order = models.IntegerField(default=0)
def __str__(self):
return self.name
@property
def active_products(self):
"""
Return active producs of this category
"""
return self.product_set.filter(is_active=True)
@property
def active_stock_products(self):
"""
Return active products that use stocks
"""
return self.product_set.filter(is_active=True).filter(use_stocks=True)
class Product(models.Model):
"""
Stores a product.
"""
DRAFT_NONE = 0
DRAFT_PINTE = 1
DRAFT_DEMI = 2
DRAFT_GALOPIN = 3
DRAFT_TYPES = (
(DRAFT_NONE, "Pas une bière pression"),
(DRAFT_PINTE, "Pinte"),
(DRAFT_DEMI, "Demi"),
(DRAFT_GALOPIN, "Galopin"),
)
class Meta:
verbose_name = "Produit"
name = models.CharField(max_length=255, verbose_name="Nom", unique=True)
"""
The name of the product.
"""
amount = models.DecimalField(max_digits=5, decimal_places=2, verbose_name="Prix de vente", validators=[MinValueValidator(0)])
"""
The price of the product.
"""
stock = models.IntegerField(default=0, verbose_name="Stock")
"""
Number of product
"""
category = models.ForeignKey('Category', on_delete=models.PROTECT, verbose_name="Catégorie")
"""
The category of the product
"""
needQuantityButton = models.BooleanField(default=False, verbose_name="Bouton quantité")
"""
If True, a javascript quantity button will be displayed
"""
is_active = models.BooleanField(default=True, verbose_name="Actif")
"""
If True, will be displayed on the :func:`gestion.views.manage` view.
"""
volume = models.PositiveIntegerField(default=0)
"""
The volume, if relevant, of the product
"""
deg = models.DecimalField(default=0,max_digits=5, decimal_places=2, verbose_name="Degré", validators=[MinValueValidator(0)])
"""
Degree of alcohol, if relevant
"""
adherentRequired = models.BooleanField(default=True, verbose_name="Adhérent requis")
"""
If True, only adherents will be able to buy this product
"""
showingMultiplier = models.PositiveIntegerField(default=1)
"""
On the graphs on :func:`users.views.profile` view, the number of total consumptions is divided by the showingMultiplier
"""
draft_category = models.IntegerField(choices=DRAFT_TYPES, default=DRAFT_NONE, verbose_name="Type de pression")
use_stocks = models.BooleanField(default=True, verbose_name="Utiliser les stocks ?")
history = HistoricalRecords()
def __str__(self):
if self.draft_category == self.DRAFT_NONE:
return self.name + " (" + str(self.amount) + " €)"
else:
return self.name + " (" + str(self.amount) + " €, " + str(self.deg) + "°)"
def user_ranking(self, pk):
"""
Return the user ranking for the product
"""
user = User.objects.get(pk=pk)
consumptions = Consumption.objects.filter(customer=user).filter(product=self)
if consumptions:
return (user, consumptions[0].quantity)
else:
return (user, 0)
@property
def ranking(self):
"""
Get the first 25 users with :func:`~gestion.models.user_ranking`
"""
users = User.objects.all()
ranking = [self.user_ranking(user.pk) for user in users]
ranking.sort(key=lambda x:x[1], reverse=True)
return ranking[0:25]
def isPinte(id):
product = Product.objects.get(id=id)
if product.draft_category != Product.DRAFT_PINTE:
raise ValidationError(
('%(product)s n\'est pas une pinte'),
params={'product': product},
)
def isDemi(id):
product = Product.objects.get(id=id)
if product.draft_category != Product.DRAFT_DEMI:
raise ValidationError(
('%(product)s n\'est pas un demi'),
params={'product': product},
)
def isGalopin(id):
product = Product.objects.get(id=id)
if product.draft_category != Product.DRAFT_GALOPIN:
raise ValidationError(
('%(product)s n\'est pas un galopin'),
params={'product': product},
)
class Keg(models.Model):
"""
Stores a keg.
"""
class Meta:
verbose_name = "Fût"
permissions = (
("open_keg", "Peut percuter les fûts"),
("close_keg", "Peut fermer les fûts")
)
name = models.CharField(max_length=255, unique=True, verbose_name="Nom")
"""
The name of the keg.
"""
stockHold = models.IntegerField(default=0, verbose_name="Stock en soute")
"""
The number of this keg in the hold.
"""
amount = models.DecimalField(max_digits=7, decimal_places=2, verbose_name="Prix du fût", validators=[MinValueValidator(0)])
"""
The price of the keg.
"""
capacity = models.IntegerField(default=30, verbose_name="Capacité (L)")
"""
The capacity, in liters, of the keg.
"""
pinte = models.ForeignKey(Product, on_delete=models.PROTECT, related_name="futp", validators=[isPinte])
"""
The related :class:`~gestion.models.Product` for pint.
"""
demi = models.ForeignKey(Product, on_delete=models.PROTECT, related_name="futd", validators=[isDemi])
"""
The related :class:`~gestion.models.Product` for demi.
"""
galopin = models.ForeignKey(Product, on_delete=models.PROTECT, related_name="futg", validators=[isGalopin],null=True, blank=True)
"""
The related :class:`~gestion.models.Product` for galopin.
"""
is_active = models.BooleanField(default=False, verbose_name="Actif")
"""
If True, will be displayed on :func:`~gestion.views.manage` view
"""
deg = models.DecimalField(default=0,max_digits=5, decimal_places=2, verbose_name="Degré", validators=[MinValueValidator(0)])
history = HistoricalRecords()
def __str__(self):
return self.name
class KegHistory(models.Model):
"""
Stores a keg history, related to :class:`~gestion.models.Keg`.
"""
class Meta:
verbose_name = "Historique de fût"
keg = models.ForeignKey(Keg, on_delete=models.PROTECT, verbose_name="Fût")
"""
The :class:`~gestion.models.Keg` instance.
"""
openingDate = models.DateTimeField(auto_now_add=True, verbose_name="Date ouverture")
"""
The date when the keg was opened.
"""
quantitySold = models.DecimalField(decimal_places=2, max_digits=5, default=0, verbose_name="Quantité vendue")
"""
The quantity, in liters, sold.
"""
amountSold = models.DecimalField(decimal_places=2, max_digits=5, default=0, verbose_name="Somme vendue")
"""
The quantity, in euros, sold.
"""
closingDate = models.DateTimeField(null=True, blank=True, verbose_name="Date fermeture")
"""
The date when the keg was closed
"""
isCurrentKegHistory = models.BooleanField(default=True, verbose_name="Actuel")
"""
If True, it corresponds to the current Keg history of :class:`~gestion.models.Keg` instance.
"""
history = HistoricalRecords()
def __str__(self):
res = "Fût de " + str(self.keg) + " (" + str(self.openingDate) + " - "
if(self.closingDate):
res += str(self.closingDate) + ")"
else:
res += "?)"
return res
class Reload(models.Model):
"""
Stores reloads.
"""
class Meta:
verbose_name = "Rechargement"
| customer = models.ForeignKey(User, on_delete=models.PROTECT, related_name="reload_taken", verbose_name="Client")
"""
Client (:class:`django.contrib.auth.models.User`).
"""
amount = models.DecimalField(max_digits=7, decimal_places=2, verbose_name="Montant", validators=[MinValueValidator(0)])
"""
Amount of the reload.
"""
PaymentMethod = models.ForeignKey(PaymentMethod, on_delete=models.PROTECT, verbose_name="Moyen de paiement")
"""
:class:`Payment Method <preferences.models.PaymentMethod>` of the reload.
"""
coopeman = models.ForeignKey(User, on_delete=models.PROTECT, related_name="reload_realized")
"""
Coopeman (:class:`django.contrib.auth.models.User`) who collected the reload.
"""
date = models.DateTimeField(auto_now_add=True)
"""
Date of the reload.
"""
history = HistoricalRecords()
def __str__(self):
return "Rechargement effectue par {0} le {1} ({2} euros, coopeman : {3})".format(self.customer, self.date, self.amount, self.coopeman)
class Refund(models.Model):
"""
Stores refunds.
"""
class Meta:
verbose_name = "Remboursement"
date = models.DateTimeField(auto_now_add=True)
"""
Date of the refund
"""
customer = models.ForeignKey(User, on_delete=models.PROTECT, related_name="refund_taken", verbose_name="Client")
"""
Client (:class:`django.contrib.auth.models.User`).
"""
amount = models.DecimalField(max_digits=7, decimal_places=2, verbose_name="Montant", validators=[MinValueValidator(0)])
"""
Amount of the refund.
"""
coopeman = models.ForeignKey(User, on_delete=models.PROTECT, related_name="refund_realized")
"""
Coopeman (:class:`django.contrib.auth.models.User`) who realized the refund.
"""
history = HistoricalRecords()
def __str__(self):
return "{0} remboursé de {1} le {2} (effectué par {3})".format(self.customer, self.amount, self.date, self.coopeman)
class Menu(models.Model):
"""
Stores menus.
"""
name = models.CharField(max_length=255, verbose_name="Nom")
"""
Name of the menu.
"""
amount = models.DecimalField(max_digits=7, decimal_places=2, verbose_name="Montant", validators=[MinValueValidator(0)])
"""
Price of the menu.
"""
articles = models.ManyToManyField(Product, verbose_name="Produits")
"""
Stores :class:`Products <gestion.models.Product>` contained in the menu
"""
is_active = models.BooleanField(default=False, verbose_name="Actif")
"""
If True, the menu will be displayed on the :func:`gestion.views.manage` view
"""
history = HistoricalRecords()
def __str__(self):
return self.name
@property
def adherent_required(self):
"""
Test if the menu contains a restricted :class:`~gestion.models.Product`
"""
res = False
for article in self.articles.all():
res = res or article.adherentRequired
return res
class MenuHistory(models.Model):
"""
Stores MenuHistory related to :class:`~gestion.models.Menu`.
"""
class Meta:
verbose_name = "Historique de menu"
customer = models.ForeignKey(User, on_delete=models.PROTECT, related_name="menu_taken", verbose_name="Client")
"""
Client (:class:`django.contrib.auth.models.User`).
"""
quantity = models.PositiveIntegerField(default=0, verbose_name="Quantité")
"""
Number of menus purchased.
"""
paymentMethod = models.ForeignKey(PaymentMethod, on_delete=models.PROTECT, verbose_name="Moyen de paiement")
"""
:class:`Payment Method <preferences.models.PaymentMethod>` of the Menu purchased.
"""
date = models.DateTimeField(auto_now_add=True)
"""
Date of the purchase.
"""
menu = models.ForeignKey(Menu, on_delete=models.PROTECT)
"""
:class:`gestion.models.Menu` purchased.
"""
amount = models.DecimalField(max_digits=7, decimal_places=2, default=0, verbose_name="Montant")
"""
Price of the purchase.
"""
coopeman = models.ForeignKey(User, on_delete=models.PROTECT, related_name="menu_selled")
"""
Coopeman (:class:`django.contrib.auth.models.User`) who collected the money.
"""
history = HistoricalRecords()
def __str__(self):
return "{2} a consommé {0} {1}".format(self.quantity, self.menu, self.customer)
class ConsumptionHistory(models.Model):
"""
Stores consumption history related to Product
"""
class Meta:
verbose_name = "Consommation"
customer = models.ForeignKey(User, on_delete=models.PROTECT, related_name="consumption_taken", verbose_name="Client")
"""
Client (:class:`django.contrib.auth.models.User`).
"""
quantity = models.PositiveIntegerField(default=0, verbose_name="Quantité")
"""
Quantity of :attr:`gestion.models.ConsumptionHistory.product` taken.
"""
paymentMethod = models.ForeignKey(PaymentMethod, on_delete=models.PROTECT, verbose_name="Moyen de paiement")
"""
:class:`Payment Method <preferences.models.PaymentMethod>` of the product purchased.
"""
date = models.DateTimeField(auto_now_add=True)
"""
Date of the purchase.
"""
product = models.ForeignKey(Product, on_delete=models.PROTECT, verbose_name="Produit")
"""
:class:`gestion.models.Product` purchased.
"""
amount = models.DecimalField(max_digits=7, decimal_places=2, default=0, verbose_name="Montant")
"""
Price of the purchase.
"""
coopeman = models.ForeignKey(User, on_delete=models.PROTECT, related_name="consumption_selled")
"""
Coopeman (:class:`django.contrib.auth.models.User`) who collected the money.
"""
history = HistoricalRecords()
def __str__(self):
return "{0} {1} consommé par {2} le {3} (encaissé par {4})".format(self.quantity, self.product, self.customer, self.date, self.coopeman)
class Consumption(models.Model):
"""
Stores total consumptions.
"""
class Meta:
verbose_name = "Consommation totale"
customer = models.ForeignKey(User, on_delete=models.PROTECT, related_name="consumption_global_taken", verbose_name="Client")
"""
Client (:class:`django.contrib.auth.models.User`).
"""
product = models.ForeignKey(Product, on_delete=models.PROTECT, verbose_name="Produit")
"""
A :class:`gestion.models.Product` instance.
"""
quantity = models.PositiveIntegerField(default=0, verbose_name="Quantité")
"""
The total number of :attr:`gestion.models.Consumption.product` consumed by the :attr:`gestion.models.Consumption.consumer`.
"""
history = HistoricalRecords()
def __str__(self):
return "Consommation de " + str(self.customer) + " concernant le produit " + str(self.product)
class Pinte(models.Model):
"""
Stores a physical pinte
"""
current_owner = models.ForeignKey(User, on_delete=models.PROTECT, null=True, default=None, related_name="pintes_owned_currently")
"""
The current owner (:class:`django.contrib.auth.models.User`).
"""
previous_owner = models.ForeignKey(User, on_delete=models.PROTECT, null=True, default=None, related_name="pintes_owned_previously")
"""
The previous owner (:class:`django.contrib.auth.models.User`).
"""
last_update_date = models.DateTimeField(auto_now=True)
"""
The last update date
"""
history = HistoricalRecords() | |
log.py | """
Logging module for printing status during an exploit, and internally
within ``pwntools``.
Exploit Developers
------------------
By using the standard ``from pwn import *``, an object named ``log`` will
be inserted into the global namespace. You can use this to print out
status messages during exploitation.
For example,::
log.info('Hello, world!')
prints::
[*] Hello, world!
Additionally, there are some nifty mechanisms for performing status updates
on a running job (e.g. when brute-forcing).::
p = log.progress('Working')
p.status('Reticulating splines')
time.sleep(1)
p.success('Got a shell!')
The verbosity of logging can be most easily controlled by setting
``log_level`` on the global ``context`` object.::
log.info("No you see me")
context.log_level = 'error'
log.info("Now you don't")
The purpose of this attribute is to control what gets printed to the screen,
not what gets emitted. This means that you can put all logging events into
a log file, while only wanting to see a small subset of them on your screen.
Pwnlib Developers
-----------------
A module-specific logger can be imported into the module via::
from pwnlib.log import getLogger
log = getLogger(__name__)
This provides an easy way to filter logging programmatically
or via a configuration file for debugging.
When using ``progress``, you should use the ``with``
keyword to manage scoping, to ensure the spinner stops if an
exception is thrown.
Technical details
-----------------
Familiarity with the :mod:`logging` module is assumed.
A pwnlib root logger named 'pwnlib' is created and a custom handler and
formatter is installed for it. The handler determines its logging level from
:data:`context.log_level`.
Ideally :data:`context.log_level` should only affect which records will be
emitted by the handler such that e.g. logging to a file will not be changed by
it. But for performance reasons it is not feasible to log everything in the normal
case. In particular there are tight loops inside :mod:`pwnlib.tubes.tube`, which
we would like to be able to debug, but if we are not debugging them, they should
not spit out messages (even to a log file). For this reason there are a few places
inside pwnlib, that will not even emit a record without :data:`context.log_level`
being set to `logging.DEBUG` or below.
Log records created by ``Progress`` and ``Logger`` objects will set
``'pwnlib_msgtype'`` on the ``extra`` field to signal which kind of message was
generated. This information is used by the formatter to prepend a symbol to the
message, e.g. ``'[+] '`` in ``'[+] got a shell!'``
This field is ignored when using the ``logging`` module's standard formatters.
All status updates (which are not dropped due to throttling) on progress loggers
result in a log record being created. The ``extra`` field then carries a
reference to the ``Progress`` logger as ``'pwnlib_progress'``.
If the custom handler determines that :data:`term.term_mode` is enabled, log
records that have a ``'pwnlib_progress'`` in their ``extra`` field will not
result in a message being emitted but rather an animated progress line (with a
spinner!) being created. Note that other handlers will still see a meaningful
log record.
The custom handler will only handle log records with a level of at least
:data:`context.log_level`. Thus if e.g. the level for the
``'pwnlib.tubes.ssh'`` is set to ``'DEBUG'`` no additional output will show up
unless :data:`context.log_level` is also set to ``'DEBUG'``. Other handlers
will however see the extra log records generated by the ``'pwnlib.tubes.ssh'``
logger.
"""
from __future__ import absolute_import
from __future__ import division
import logging
import os
import random
import re
import six
import sys
import threading
import time
from pwnlib import term
from pwnlib.config import register_config
from pwnlib.context import Thread
from pwnlib.context import context
from pwnlib.exception import PwnlibException
from pwnlib.term import spinners
from pwnlib.term import text
__all__ = [
'getLogger', 'install_default_handler', 'rootlogger'
]
# list of prefixes to use for the different message types. note that the `text`
# module won't add any escape codes if `pwnlib.context.log_console.isatty()` is `False`
_msgtype_prefixes = {
'status' : [text.magenta, 'x'],
'success' : [text.bold_green, '+'],
'failure' : [text.bold_red, '-'],
'debug' : [text.bold_red, 'DEBUG'],
'info' : [text.bold_blue, '*'],
'warning' : [text.bold_yellow, '!'],
'error' : [text.on_red, 'ERROR'],
'exception' : [text.on_red, 'ERROR'],
'critical' : [text.on_red, 'CRITICAL'],
'info_once' : [text.bold_blue, '*'],
'warning_once' : [text.bold_yellow, '!'],
}
def read_log_config(settings):
log = getLogger(__name__)
for key, value in settings.items():
if '.' not in key:
log.warn("Invalid configuration option %r in section %r" % (key, 'log'))
continue
msgtype, key = key.split('.', 1)
if key == 'color':
current = _msgtype_prefixes[msgtype][0]
_msgtype_prefixes[msgtype][0] = getattr(text, value, current)
elif key == 'symbol':
_msgtype_prefixes[msgtype][1] = value
else:
log.warn("Unknown configuration option %r in section %r" % (key, 'log'))
register_config('log', read_log_config)
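# A hedged illustration of the configuration shape consumed above (the option values
# are made up): a "[log]" section in the pwntools config file whose keys look like
# "<msgtype>.color" or "<msgtype>.symbol", e.g.
#
#   [log]
#   success.symbol = $
#   status.color = bold_magenta
#
# `read_log_config` would receive {'success.symbol': '$', 'status.color': 'bold_magenta'}
# and rewrite the corresponding entries of `_msgtype_prefixes`.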
# the text decoration to use for spinners. the spinners themselves can be found
# in the `pwnlib.term.spinners` module
_spinner_style = text.bold_blue
class Progress(object):
"""
Progress logger used to generate log records associated with some running
job. Instances can be used as context managers which will automatically
declare the running job a success upon exit or a failure upon a thrown
exception. After :meth:`success` or :meth:`failure` is called the status
can no longer be updated.
This class is intended for internal use. Progress loggers should be created
using :meth:`Logger.progress`.
"""
def __init__(self, logger, msg, status, level, args, kwargs):
self._logger = logger
self._msg = msg
self._status = status
self._level = level
self._stopped = False
self.last_status = 0
self.rate = kwargs.pop('rate', 0)
self._log(status, args, kwargs, 'status')
# it is a common use case to create a logger and then immediately update
# its status line, so we reset `last_status` to accommodate this pattern
self.last_status = 0
def _log(self, status, args, kwargs, msgtype):
# Logs are strings, not bytes. Handle Python3 bytes() objects.
status = six.ensure_text(status)
# this progress logger is stopped, so don't generate any more records
if self._stopped:
return
msg = self._msg
if msg and status:
msg += ': '
msg += status
self._logger._log(self._level, msg, args, kwargs, msgtype, self)
def status(self, status, *args, **kwargs):
"""status(status, *args, **kwargs)
Logs a status update for the running job.
If the progress logger is animated the status line will be updated in
place.
Status updates are throttled at one update per 100ms.
"""
now = time.time()
if (now - self.last_status) > self.rate:
self.last_status = now
self._log(status, args, kwargs, 'status')
def success(self, status = 'Done', *args, **kwargs):
"""success(status = 'Done', *args, **kwargs)
Logs that the running job succeeded. No further status updates are
allowed.
If the Logger is animated, the animation is stopped.
"""
self._log(status, args, kwargs, 'success')
self._stopped = True
def failure(self, status = 'Failed', *args, **kwargs):
"""failure(message)
Logs that the running job failed. No further status updates are
allowed.
If the Logger is animated, the animation is stopped.
"""
self._log(status, args, kwargs, 'failure')
self._stopped = True
def __enter__(self):
return self
def __exit__(self, exc_typ, exc_val, exc_tb):
# if the progress logger is already stopped these are no-ops
if exc_typ is None:
self.success()
else:
self.failure()
class Logger(object):
"""
A class akin to the :class:`logging.LoggerAdapter` class. All public
methods defined on :class:`logging.Logger` instances are defined on this
class.
Also adds some ``pwnlib`` flavor:
* :meth:`progress` (alias :meth:`waitfor`)
* :meth:`success`
* :meth:`failure`
* :meth:`indented`
* :meth:`info_once`
* :meth:`warning_once` (alias :meth:`warn_once`)
Adds ``pwnlib``-specific information for coloring, indentation and progress
logging via log records ``extra`` field.
Loggers instantiated with :func:`getLogger` will be of this class.
"""
_one_time_infos = set()
_one_time_warnings = set()
def __init__(self, logger=None):
if logger is None:
# This is a minor hack to permit user-defined classes which inherit
# from a tube (which do not actually reside in the pwnlib library)
# to receive logging abilities that behave as they would expect from
# the rest of the library
module = self.__module__
if not module.startswith('pwnlib'):
module = 'pwnlib.' + module
# - end hack -
logger_name = '%s.%s.%s' % (module, self.__class__.__name__, id(self))
logger = logging.getLogger(logger_name)
self._logger = logger
def _getlevel(self, levelString):
if isinstance(levelString, six.integer_types):
return levelString
return logging._levelNames[levelString.upper()]
def _log(self, level, msg, args, kwargs, msgtype, progress = None):
# Logs are strings, not bytes. Handle Python3 bytes() objects.
msg = six.ensure_text(msg)
extra = kwargs.get('extra', {})
extra.setdefault('pwnlib_msgtype', msgtype)
extra.setdefault('pwnlib_progress', progress)
kwargs['extra'] = extra
self._logger.log(level, msg, *args, **kwargs)
def progress(self, message, status = '', *args, **kwargs):
"""progress(message, status = '', *args, level = logging.INFO, **kwargs) -> Progress
Creates a new progress logger which creates log records with log level
`level`.
Progress status can be updated using :meth:`Progress.status` and stopped
using :meth:`Progress.success` or :meth:`Progress.failure`.
If `term.term_mode` is enabled the progress logger will be animated.
The progress manager also functions as a context manager. Using context
managers ensures that animations stop even if an exception is raised.
.. code-block:: python
with log.progress('Trying something...') as p:
for i in range(10):
p.status("At %i" % i)
time.sleep(0.5)
x = 1/0
"""
level = self._getlevel(kwargs.pop('level', logging.INFO))
return Progress(self, message, status, level, args, kwargs)
def waitfor(self, *args, **kwargs):
"""Alias for :meth:`progress`."""
return self.progress(*args, **kwargs)
def indented(self, message, *args, **kwargs):
"""indented(message, *args, level = logging.INFO, **kwargs)
Log a message but don't put a line prefix on it.
Arguments:
            level(int): Alternate log level at which to log the indented
message. Defaults to :const:`logging.INFO`.
"""
level = self._getlevel(kwargs.pop('level', logging.INFO))
self._log(level, message, args, kwargs, 'indented')
def success(self, message, *args, **kwargs):
"""success(message, *args, **kwargs)
Logs a success message.
"""
self._log(logging.INFO, message, args, kwargs, 'success')
def failure(self, message, *args, **kwargs):
"""failure(message, *args, **kwargs)
Logs a failure message.
"""
self._log(logging.INFO, message, args, kwargs, 'failure')
def info_once(self, message, *args, **kwargs):
"""info_once(message, *args, **kwargs)
Logs an info message. The same message is never printed again.
"""
m = message % args
if m not in self._one_time_infos:
if self.isEnabledFor(logging.INFO):
self._one_time_infos.add(m)
self._log(logging.INFO, message, args, kwargs, 'info_once')
def warning_once(self, message, *args, **kwargs):
"""warning_once(message, *args, **kwargs)
Logs a warning message. The same message is never printed again.
"""
m = message % args
if m not in self._one_time_warnings:
if self.isEnabledFor(logging.WARNING):
self._one_time_warnings.add(m)
self._log(logging.WARNING, message, args, kwargs, 'warning_once')
def warn_once(self, *args, **kwargs):
"""Alias for :meth:`warning_once`."""
return self.warning_once(*args, **kwargs)
# logging functions also exposed by `logging.Logger`
def debug(self, message, *args, **kwargs):
"""debug(message, *args, **kwargs)
Logs a debug message.
"""
self._log(logging.DEBUG, message, args, kwargs, 'debug')
def info(self, message, *args, **kwargs):
"""info(message, *args, **kwargs)
Logs an info message.
"""
self._log(logging.INFO, message, args, kwargs, 'info')
def hexdump(self, message, *args, **kwargs):
# cyclic dependencies FTW!
# TODO: Move pwnlib.util.fiddling.hexdump into a new module.
import pwnlib.util.fiddling
self.info(pwnlib.util.fiddling.hexdump(message, *args, **kwargs))
def warning(self, message, *args, **kwargs):
"""warning(message, *args, **kwargs)
Logs a warning message.
"""
self._log(logging.WARNING, message, args, kwargs, 'warning')
def warn(self, *args, **kwargs):
"""Alias for :meth:`warning`."""
return self.warning(*args, **kwargs)
def error(self, message, *args, **kwargs):
"""error(message, *args, **kwargs)
To be called outside an exception handler.
Logs an error message, then raises a ``PwnlibException``.
"""
self._log(logging.ERROR, message, args, kwargs, 'error')
raise PwnlibException(message % args)
def exception(self, message, *args, **kwargs):
"""exception(message, *args, **kwargs)
To be called from an exception handler.
        Logs an error message, then re-raises the current exception.
"""
kwargs["exc_info"] = 1
self._log(logging.ERROR, message, args, kwargs, 'exception')
raise
def critical(self, message, *args, **kwargs):
"""critical(message, *args, **kwargs)
Logs a critical message.
"""
self._log(logging.CRITICAL, message, args, kwargs, 'critical')
def log(self, level, message, *args, **kwargs):
"""log(level, message, *args, **kwargs)
Logs a message with log level `level`. The ``pwnlib`` formatter will
        use the default :mod:`logging` formatter to format this message.
"""
self._log(level, message, args, kwargs, None)
def isEnabledFor(self, level):
"""isEnabledFor(level) -> bool
See if the underlying logger is enabled for the specified level.
"""
effectiveLevel = self._logger.getEffectiveLevel()
if effectiveLevel == 1:
effectiveLevel = context.log_level
return effectiveLevel <= level
def setLevel(self, level):
"""setLevel(level)
Set the logging level for the underlying logger.
"""
with context.local(log_level=level):
self._logger.setLevel(context.log_level)
def addHandler(self, handler):
"""addHandler(handler)
Add the specified handler to the underlying logger.
"""
self._logger.addHandler(handler)
def removeHandler(self, handler):
"""removeHandler(handler)
Remove the specified handler from the underlying logger.
"""
self._logger.removeHandler(handler)
@property
def level(self):
return self._logger.level
@level.setter
def level(self, value):
with context.local(log_level=value):
self._logger.level = context.log_level
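# A minimal usage sketch of the Logger API above (illustrative; the logger name
# and messages are hypothetical, and `getLogger` is defined later in this module):
#
#     log = getLogger('pwnlib.example')
#     log.info('starting up')                # rendered by Formatter, e.g. "[*] starting up"
#     log.warning_once('printed only once')  # repeated identical calls are suppressed
#     with log.progress('Working') as p:     # animated spinner if term.term_mode is on
#         for i in range(3):
#             p.status('step %i', i)
#     # leaving the `with` block calls p.success(), or p.failure() on an exception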
class Handler(logging.StreamHandler):
"""
A custom handler class. This class will report whatever
:data:`context.log_level` is currently set to as its log level.
    If :data:`term.term_mode` is enabled, log records originating from a progress
    logger are not emitted; instead an animated progress line is created.
An instance of this handler is added to the ``'pwnlib'`` logger.
"""
@property
def stream(self):
return context.log_console
@stream.setter
def stream(self, value):
pass
def emit(self, record):
"""
Emit a log record or create/update an animated progress logger
depending on whether :data:`term.term_mode` is enabled.
"""
# We have set the root 'pwnlib' logger to have a logLevel of 1,
# when logging has been enabled via install_default_handler.
#
# If the level is 1, we should only process the record if
        # context.log_level does not exceed the record's log level.
#
# If the level is not 1, somebody else expressly set the log
# level somewhere on the tree, and we should use that value.
level = logging.getLogger(record.name).getEffectiveLevel()
if level == 1:
level = context.log_level
if level > record.levelno:
return
progress = getattr(record, 'pwnlib_progress', None)
# if the record originates from a `Progress` object and term handling
# is enabled we can have animated spinners! so check that
if progress is None or not term.term_mode:
super(Handler, self).emit(record)
return
# yay, spinners!
# since we want to be able to update the spinner we overwrite the
# message type so that the formatter doesn't output a prefix symbol
msgtype = record.pwnlib_msgtype
record.pwnlib_msgtype = 'animated'
msg = "%s\n" % self.format(record)
# we enrich the `Progress` object to keep track of the spinner
if not hasattr(progress, '_spinner_handle'):
spinner_handle = term.output('')
msg_handle = term.output(msg)
stop = threading.Event()
def spin():
'''Wheeeee!'''
state = 0
states = random.choice(spinners.spinners)
while True:
prefix = '[%s] ' % _spinner_style(states[state])
spinner_handle.update(prefix)
state = (state + 1) % len(states)
if stop.wait(0.1):
break
t = Thread(target = spin)
t.daemon = True
t.start()
progress._spinner_handle = spinner_handle
progress._msg_handle = msg_handle
progress._stop_event = stop
progress._spinner_thread = t
else:
progress._msg_handle.update(msg)
# if the message type was not a status message update, then we should
# stop the spinner
if msgtype != 'status':
progress._stop_event.set()
progress._spinner_thread.join()
style, symb = _msgtype_prefixes[msgtype]
prefix = '[%s] ' % style(symb)
progress._spinner_handle.update(prefix)
class Formatter(logging.Formatter):
"""
Logging formatter which performs custom formatting for log records
containing the ``'pwnlib_msgtype'`` attribute. Other records are formatted
using the `logging` modules default formatter.
If ``'pwnlib_msgtype'`` is set, it performs the following actions:
* A prefix looked up in `_msgtype_prefixes` is prepended to the message.
* The message is prefixed such that it starts on column four.
* If the message spans multiple lines they are split, and all subsequent
lines are indented.
This formatter is used by the handler installed on the ``'pwnlib'`` logger.
"""
# Indentation from the left side of the terminal.
    # All log messages will be indented at least this far.
indent = ' '
# Newline, followed by an indent. Used to wrap multiple lines.
nlindent = '\n' + indent
def format(self, record):
# use the default formatter to actually format the record
msg = super(Formatter, self).format(record)
# then put on a prefix symbol according to the message type
msgtype = getattr(record, 'pwnlib_msgtype', None)
# if 'pwnlib_msgtype' is not set (or set to `None`) we just return the
# message as it is
if msgtype is None:
return msg
if msgtype in _msgtype_prefixes:
style, symb = _msgtype_prefixes[msgtype]
prefix = '[%s] ' % style(symb)
elif msgtype == 'indented':
prefix = self.indent
elif msgtype == 'animated':
# the handler will take care of updating the spinner, so we will
# not include it here
prefix = ''
else:
# this should never happen
prefix = '[?] '
msg = prefix + msg
msg = self.nlindent.join(msg.splitlines())
return msg
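# Illustrative effect of Formatter.format (the exact prefix symbol comes from
# `_msgtype_prefixes`; the example below assumes the usual '[*]' prefix for info):
#
#     a record with pwnlib_msgtype='info' and message "line one\nline two"
#     is rendered roughly as
#
#         [*] line one
#             line two
#
#     i.e. a prefix before the first line and four-space indentation for the rest.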
# the `logging` module keeps a dictionary of loggers, so multiple calls to
# `getLogger` with the same name wrap the same underlying logger
def getLogger(name):
return Logger(logging.getLogger(name))
class LogfileHandler(logging.FileHandler):
def __init__(self):
super(LogfileHandler, self).__init__('', delay=1)
@property
def stream(self):
return context.log_file
@stream.setter
def stream(self, value):
pass
def handle(self, *a, **kw):
if self.stream.name is not None:
super(LogfileHandler, self).handle(*a, **kw)
iso_8601 = '%Y-%m-%dT%H:%M:%S'
fmt = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
log_file = LogfileHandler()
log_file.setFormatter(logging.Formatter(fmt, iso_8601))
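# Note: file logging is driven entirely by the context (a sketch, assuming the
# usual pwntools `context.log_file` setting):
#
#     context.log_file = 'exploit.log'   # from here on, records are also written to the file
#
# While no log file is configured, `LogfileHandler.stream.name` is None and
# handle() silently drops every record, so this handler is a no-op by default.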
#
# The root 'pwnlib' logger is declared here. To change the target of all
# 'pwntools'-specific logging, only this logger needs to be changed.
#
# Logging cascades upward through the hierarchy,
# so the only point that should ever need to be
# modified is the root 'pwnlib' logger.
#
# For example:
#     for handler in list(rootlogger.handlers):
#         rootlogger.removeHandler(handler)
#     rootlogger.addHandler(myCoolPitchingHandler)
#
rootlogger = getLogger('pwnlib')
console = Handler()
formatter = Formatter()
console.setFormatter(formatter)
def install_default_handler():
| '''install_default_handler()
Instantiates a :class:`Handler` and :class:`Formatter` and installs them for
    the ``pwnlib`` root logger. This function is automatically called when
importing :mod:`pwn`.
'''
logger = logging.getLogger('pwnlib')
if console not in logger.handlers:
logger.addHandler(console)
logger.addHandler(log_file)
logger.setLevel(1) |
|
operations.rs | #![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, API_VERSION};
#[derive(Clone)]
pub struct Client {
endpoint: String,
credential: std::sync::Arc<dyn azure_core::TokenCredential>,
scopes: Vec<String>,
pipeline: azure_core::pipeline::Pipeline,
}
#[derive(Clone)]
pub struct ClientBuilder {
credential: std::sync::Arc<dyn azure_core::TokenCredential>,
endpoint: Option<String>,
scopes: Option<Vec<String>>,
}
pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD;
impl ClientBuilder {
pub fn new(credential: std::sync::Arc<dyn azure_core::TokenCredential>) -> Self {
Self {
credential,
endpoint: None,
scopes: None,
}
}
pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
self.endpoint = Some(endpoint.into());
self
}
pub fn scopes(mut self, scopes: &[&str]) -> Self {
self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect());
self
}
pub fn build(self) -> Client {
let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned());
let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]);
Client::new(endpoint, self.credential, scopes)
}
}
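// A minimal construction sketch (the credential value is a placeholder for any
// type implementing azure_core::TokenCredential):
//
//     let credential: std::sync::Arc<dyn azure_core::TokenCredential> =
//         std::sync::Arc::new(my_credential);
//     let client = ClientBuilder::new(credential)
//         .endpoint(DEFAULT_ENDPOINT) // optional; this is already the default
//         .build();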
impl Client {
pub(crate) fn endpoint(&self) -> &str {
self.endpoint.as_str()
}
pub(crate) fn token_credential(&self) -> &dyn azure_core::TokenCredential {
self.credential.as_ref()
}
pub(crate) fn scopes(&self) -> Vec<&str> {
self.scopes.iter().map(String::as_str).collect()
}
pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> Result<azure_core::Response, azure_core::Error> {
let mut context = azure_core::Context::default();
let mut request = request.into();
self.pipeline.send(&mut context, &mut request).await
}
pub fn new(endpoint: impl Into<String>, credential: std::sync::Arc<dyn azure_core::TokenCredential>, scopes: Vec<String>) -> Self {
let endpoint = endpoint.into();
let pipeline = azure_core::pipeline::Pipeline::new(
option_env!("CARGO_PKG_NAME"),
option_env!("CARGO_PKG_VERSION"),
azure_core::ClientOptions::default(),
Vec::new(),
Vec::new(),
);
Self {
endpoint,
credential,
scopes,
pipeline,
}
}
pub fn application(&self) -> application::Client {
application::Client(self.clone())
}
pub fn application_package(&self) -> application_package::Client {
application_package::Client(self.clone())
}
pub fn batch_account(&self) -> batch_account::Client {
batch_account::Client(self.clone())
}
pub fn certificate(&self) -> certificate::Client {
certificate::Client(self.clone())
}
pub fn location(&self) -> location::Client {
location::Client(self.clone())
}
pub fn operations(&self) -> operations::Client {
operations::Client(self.clone())
}
pub fn pool(&self) -> pool::Client {
pool::Client(self.clone())
}
}
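// Each accessor above hands out a thin per-operation-group client holding a clone
// of this `Client`. An illustrative call chain (argument values are placeholders,
// and the enclosing function must convert batch_account::get::Error, e.g. via the
// aggregate `Error` enum below):
//
//     let account = client
//         .batch_account()
//         .get("my-resource-group", "my-batch-account", "<subscription-id>")
//         .into_future()
//         .await?;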
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
#[error(transparent)]
BatchAccount_Get(#[from] batch_account::get::Error),
#[error(transparent)]
BatchAccount_Create(#[from] batch_account::create::Error),
#[error(transparent)]
BatchAccount_Update(#[from] batch_account::update::Error),
#[error(transparent)]
BatchAccount_Delete(#[from] batch_account::delete::Error),
#[error(transparent)]
BatchAccount_List(#[from] batch_account::list::Error),
#[error(transparent)]
BatchAccount_ListByResourceGroup(#[from] batch_account::list_by_resource_group::Error),
#[error(transparent)]
BatchAccount_SynchronizeAutoStorageKeys(#[from] batch_account::synchronize_auto_storage_keys::Error),
#[error(transparent)]
BatchAccount_RegenerateKey(#[from] batch_account::regenerate_key::Error),
#[error(transparent)]
BatchAccount_GetKeys(#[from] batch_account::get_keys::Error),
#[error(transparent)]
ApplicationPackage_Activate(#[from] application_package::activate::Error),
#[error(transparent)]
Application_Get(#[from] application::get::Error),
#[error(transparent)]
Application_Create(#[from] application::create::Error),
#[error(transparent)]
Application_Update(#[from] application::update::Error),
#[error(transparent)]
Application_Delete(#[from] application::delete::Error),
#[error(transparent)]
ApplicationPackage_Get(#[from] application_package::get::Error),
#[error(transparent)]
ApplicationPackage_Create(#[from] application_package::create::Error),
#[error(transparent)]
ApplicationPackage_Delete(#[from] application_package::delete::Error),
#[error(transparent)]
Application_List(#[from] application::list::Error),
#[error(transparent)]
Location_GetQuotas(#[from] location::get_quotas::Error),
#[error(transparent)]
Operations_List(#[from] operations::list::Error),
#[error(transparent)]
Location_CheckNameAvailability(#[from] location::check_name_availability::Error),
#[error(transparent)]
Certificate_ListByBatchAccount(#[from] certificate::list_by_batch_account::Error),
#[error(transparent)]
Certificate_Get(#[from] certificate::get::Error),
#[error(transparent)]
Certificate_Create(#[from] certificate::create::Error),
#[error(transparent)]
Certificate_Update(#[from] certificate::update::Error),
#[error(transparent)]
Certificate_Delete(#[from] certificate::delete::Error),
#[error(transparent)]
Certificate_CancelDeletion(#[from] certificate::cancel_deletion::Error),
#[error(transparent)]
Pool_ListByBatchAccount(#[from] pool::list_by_batch_account::Error),
#[error(transparent)]
Pool_Get(#[from] pool::get::Error),
#[error(transparent)]
Pool_Create(#[from] pool::create::Error),
#[error(transparent)]
Pool_Update(#[from] pool::update::Error),
#[error(transparent)]
Pool_Delete(#[from] pool::delete::Error),
#[error(transparent)]
Pool_DisableAutoScale(#[from] pool::disable_auto_scale::Error),
#[error(transparent)]
Pool_StopResize(#[from] pool::stop_resize::Error),
}
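// Each variant wraps one operation's error type via `#[from]`, so `?` converts
// automatically in code whose error type is this aggregate `Error`. A hedged
// sketch (function and argument values are hypothetical):
//
//     async fn fetch_account(client: &Client) -> Result<models::BatchAccount, Error> {
//         Ok(client
//             .batch_account()
//             .get("rg", "account", "subscription-id")
//             .into_future()
//             .await?) // batch_account::get::Error -> Error::BatchAccount_Get
//     }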
pub mod batch_account {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn create(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
parameters: impl Into<models::BatchAccountCreateParameters>,
subscription_id: impl Into<String>,
) -> create::Builder {
create::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
}
}
pub fn update(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
parameters: impl Into<models::BatchAccountUpdateParameters>,
subscription_id: impl Into<String>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
}
}
pub fn delete(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn list(&self, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
}
}
pub fn list_by_resource_group(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list_by_resource_group::Builder {
list_by_resource_group::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn synchronize_auto_storage_keys(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> synchronize_auto_storage_keys::Builder {
synchronize_auto_storage_keys::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn regenerate_key(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
parameters: impl Into<models::BatchAccountRegenerateKeyParameters>,
subscription_id: impl Into<String>,
) -> regenerate_key::Builder {
regenerate_key::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
}
}
#[doc = "Gets the account keys for the specified Batch account."]
pub fn get_keys(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get_keys::Builder {
get_keys::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
subscription_id: subscription_id.into(),
}
}
}
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::BatchAccount, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BatchAccount =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::BatchAccount),
Accepted202,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) parameters: models::BatchAccountCreateParameters,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BatchAccount =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) parameters: models::BatchAccountUpdateParameters,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::BatchAccount, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BatchAccount =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
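    // `delete` may complete asynchronously, so its `Response` enum above distinguishes
    // the possible success statuses. A hedged usage sketch (error conversion and
    // argument values elided):
    //
    //     match client.batch_account().delete(rg, name, sub).into_future().await? {
    //         delete::Response::Ok200 => { /* deleted */ }
    //         delete::Response::Accepted202 => { /* accepted; completion happens later */ }
    //         delete::Response::NoContent204 => { /* no content returned */ }
    //     }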
pub mod list {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::BatchAccountListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Batch/batchAccounts",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BatchAccountListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_by_resource_group {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::BatchAccountListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BatchAccountListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod synchronize_auto_storage_keys {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/syncAutoStorageKeys",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::NO_CONTENT => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod regenerate_key {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) parameters: models::BatchAccountRegenerateKeyParameters,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::BatchAccountKeys, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/regenerateKeys",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BatchAccountKeys =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get_keys {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::BatchAccountKeys, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/listKeys",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BatchAccountKeys =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod application_package {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn activate(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
application_id: impl Into<String>,
version: impl Into<String>,
parameters: impl Into<models::ActivateApplicationPackageParameters>,
subscription_id: impl Into<String>,
) -> activate::Builder {
activate::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
application_id: application_id.into(),
version: version.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
}
}
pub fn get(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
application_id: impl Into<String>,
version: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
application_id: application_id.into(),
version: version.into(),
subscription_id: subscription_id.into(),
}
}
pub fn create(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
application_id: impl Into<String>,
version: impl Into<String>,
subscription_id: impl Into<String>,
) -> create::Builder {
create::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
application_id: application_id.into(),
version: version.into(),
subscription_id: subscription_id.into(),
}
}
pub fn delete(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
application_id: impl Into<String>,
version: impl Into<String>,
subscription_id: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
application_id: application_id.into(),
version: version.into(),
subscription_id: subscription_id.into(),
}
}
}
pub mod activate {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) application_id: String,
pub(crate) version: String,
pub(crate) parameters: models::ActivateApplicationPackageParameters,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
                    let url_str = &format!(
                        "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/applications/{}/versions/{}/activate",
                        self.client.endpoint(),
                        &self.subscription_id,
                        &self.resource_group_name,
                        &self.account_name,
                        &self.application_id,
                        &self.version
                    );
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::NO_CONTENT => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct | {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) application_id: String,
pub(crate) version: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ApplicationPackage, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/applications/{}/versions/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.application_id,
&self.version
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ApplicationPackage =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) application_id: String,
pub(crate) version: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ApplicationPackage, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/applications/{}/versions/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.application_id,
&self.version
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ApplicationPackage =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) application_id: String,
pub(crate) version: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/applications/{}/versions/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.application_id,
&self.version
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::NO_CONTENT => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod application {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
application_id: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
application_id: application_id.into(),
subscription_id: subscription_id.into(),
}
}
pub fn create(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
application_id: impl Into<String>,
subscription_id: impl Into<String>,
) -> create::Builder {
create::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
application_id: application_id.into(),
subscription_id: subscription_id.into(),
parameters: None,
}
}
pub fn update(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
application_id: impl Into<String>,
parameters: impl Into<models::ApplicationUpdateParameters>,
subscription_id: impl Into<String>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
application_id: application_id.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
}
}
pub fn delete(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
application_id: impl Into<String>,
subscription_id: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
application_id: application_id.into(),
subscription_id: subscription_id.into(),
}
}
pub fn list(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
subscription_id: subscription_id.into(),
maxresults: None,
}
}
}
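// Usage sketch for this operation group's builders: required identifiers are supplied up front
// and optional values are set fluently before awaiting. For example, `create` takes its
// `ApplicationCreateParameters` body through the optional `parameters` setter (see
// `create::Builder` below). The accessor that yields this client from the top-level `Client`
// is assumed here, and all literal identifiers are placeholders:
//
//     let params: models::ApplicationCreateParameters = todo!("built elsewhere");
//     let app = client
//         .application()                                      // accessor name assumed
//         .create("my-rg", "my-batch-account", "my-app", "subscription-id")
//         .parameters(params)                                 // optional request body
//         .into_future()
//         .await?;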
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) application_id: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Application, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/applications/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.application_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Application =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) application_id: String,
pub(crate) subscription_id: String,
pub(crate) parameters: Option<models::ApplicationCreateParameters>,
}
impl Builder {
pub fn parameters(mut self, parameters: impl Into<models::ApplicationCreateParameters>) -> Self {
self.parameters = Some(parameters.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Application, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/applications/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.application_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = if let Some(parameters) = &self.parameters {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(parameters).map_err(Error::Serialize)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Application =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) application_id: String,
pub(crate) parameters: models::ApplicationUpdateParameters,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/applications/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.application_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::NO_CONTENT => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) application_id: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<(), Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/applications/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.application_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::NO_CONTENT => Ok(()),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) subscription_id: String,
pub(crate) maxresults: Option<i32>,
}
impl Builder {
pub fn maxresults(mut self, maxresults: i32) -> Self {
self.maxresults = Some(maxresults);
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ListApplicationsResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/applications",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(maxresults) = &self.maxresults {
url.query_pairs_mut().append_pair("maxresults", &maxresults.to_string());
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ListApplicationsResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
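// Note on listing: `list` resolves to a single `models::ListApplicationsResult` page, and
// `maxresults` only caps the page size; this generated builder does not follow continuation
// links itself, so if the result carries a next link the caller issues the follow-up request.
// Illustrative call with the same placeholder conventions as above:
//
//     let page = client
//         .application()                       // accessor name assumed
//         .list("my-rg", "my-batch-account", "subscription-id")
//         .maxresults(10)
//         .into_future()
//         .await?;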
}
pub mod location {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get_quotas(&self, location_name: impl Into<String>, subscription_id: impl Into<String>) -> get_quotas::Builder {
get_quotas::Builder {
client: self.0.clone(),
location_name: location_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn check_name_availability(
&self,
location_name: impl Into<String>,
subscription_id: impl Into<String>,
parameters: impl Into<models::CheckNameAvailabilityParameters>,
) -> check_name_availability::Builder {
check_name_availability::Builder {
client: self.0.clone(),
location_name: location_name.into(),
subscription_id: subscription_id.into(),
parameters: parameters.into(),
}
}
}
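// Usage sketch: unlike the optional-body builders elsewhere in this file, `check_name_availability`
// requires its `CheckNameAvailabilityParameters` body up front (note the argument order: location,
// subscription, then parameters). The accessor name and identifiers are placeholders:
//
//     let params: models::CheckNameAvailabilityParameters = todo!("name to probe, built elsewhere");
//     let result = client
//         .location()                          // accessor name assumed
//         .check_name_availability("westus2", "subscription-id", params)
//         .into_future()
//         .await?;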
pub mod get_quotas {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::BatchLocationQuota, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Batch/locations/{}/quotas",
self.client.endpoint(),
&self.subscription_id,
&self.location_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::BatchLocationQuota =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod check_name_availability {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) location_name: String,
pub(crate) subscription_id: String,
pub(crate) parameters: models::CheckNameAvailabilityParameters,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::CheckNameAvailabilityResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Batch/locations/{}/checkNameAvailability",
self.client.endpoint(),
&self.subscription_id,
&self.location_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CheckNameAvailabilityResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod operations {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self) -> list::Builder {
list::Builder { client: self.0.clone() }
}
}
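// Usage sketch: the operations listing takes no scoping arguments because it targets the
// provider-wide `/providers/Microsoft.Batch/operations` path (see `url_str` below). Assuming
// an accessor on the top-level `Client` for this group:
//
//     let ops = client.operations().list().into_future().await?;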
pub mod list {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationListResult, Error>> {
Box::pin(async move {
let url_str = &format!("{}/providers/Microsoft.Batch/operations", self.client.endpoint());
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod certificate {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list_by_batch_account(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list_by_batch_account::Builder {
list_by_batch_account::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
subscription_id: subscription_id.into(),
maxresults: None,
select: None,
filter: None,
}
}
pub fn get(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
certificate_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
certificate_name: certificate_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn create(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
certificate_name: impl Into<String>,
parameters: impl Into<models::CertificateCreateOrUpdateParameters>,
subscription_id: impl Into<String>,
) -> create::Builder {
create::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
certificate_name: certificate_name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
if_match: None,
if_none_match: None,
}
}
pub fn update(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
certificate_name: impl Into<String>,
parameters: impl Into<models::CertificateCreateOrUpdateParameters>,
subscription_id: impl Into<String>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
certificate_name: certificate_name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
if_match: None,
}
}
pub fn delete(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
certificate_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
certificate_name: certificate_name.into(),
subscription_id: subscription_id.into(),
}
}
#[doc = "Cancels a failed deletion of a certificate from the specified account."]
pub fn cancel_deletion(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
certificate_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> cancel_deletion::Builder {
cancel_deletion::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
certificate_name: certificate_name.into(),
subscription_id: subscription_id.into(),
}
}
}
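// Usage sketch: `create` and `update` accept optional ETag preconditions through `if_match` /
// `if_none_match`, forwarded as the corresponding HTTP headers (see the builders below). For
// example, to create a certificate only when it does not already exist; the accessor name and
// every literal identifier are placeholders:
//
//     let params: models::CertificateCreateOrUpdateParameters = todo!("built elsewhere");
//     let cert = client
//         .certificate()                       // accessor name assumed
//         .create("my-rg", "my-batch-account", "sha1-0123456789abcdef0123456789abcdef01234567", params, "subscription-id")
//         .if_none_match("*")                  // only create when absent
//         .into_future()
//         .await?;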
pub mod list_by_batch_account {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) subscription_id: String,
pub(crate) maxresults: Option<i32>,
pub(crate) select: Option<String>,
pub(crate) filter: Option<String>,
}
impl Builder {
pub fn maxresults(mut self, maxresults: i32) -> Self {
self.maxresults = Some(maxresults);
self
}
pub fn select(mut self, select: impl Into<String>) -> Self {
self.select = Some(select.into());
self
}
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ListCertificatesResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/certificates",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(maxresults) = &self.maxresults {
url.query_pairs_mut().append_pair("maxresults", &maxresults.to_string());
}
if let Some(select) = &self.select {
url.query_pairs_mut().append_pair("$select", select);
}
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ListCertificatesResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) certificate_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Certificate, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/certificates/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.certificate_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Certificate =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) certificate_name: String,
pub(crate) parameters: models::CertificateCreateOrUpdateParameters,
pub(crate) subscription_id: String,
pub(crate) if_match: Option<String>,
pub(crate) if_none_match: Option<String>,
}
impl Builder {
pub fn if_match(mut self, if_match: impl Into<String>) -> Self {
self.if_match = Some(if_match.into());
self
}
pub fn if_none_match(mut self, if_none_match: impl Into<String>) -> Self {
self.if_none_match = Some(if_none_match.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Certificate, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/certificates/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.certificate_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
if let Some(if_match) = &self.if_match {
req_builder = req_builder.header("If-Match", if_match);
}
if let Some(if_none_match) = &self.if_none_match {
req_builder = req_builder.header("If-None-Match", if_none_match);
}
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Certificate =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) certificate_name: String,
pub(crate) parameters: models::CertificateCreateOrUpdateParameters,
pub(crate) subscription_id: String,
pub(crate) if_match: Option<String>,
}
impl Builder {
pub fn if_match(mut self, if_match: impl Into<String>) -> Self {
self.if_match = Some(if_match.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Certificate, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/certificates/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.certificate_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
if let Some(if_match) = &self.if_match {
req_builder = req_builder.header("If-Match", if_match);
}
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Certificate =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
Accepted202,
}
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) certificate_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/certificates/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.certificate_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
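// Note: unlike most operations in this file, the delete above reports its success status through
// the `delete::Response` enum rather than returning `()`. A 202 means the service accepted a
// long-running deletion that this generated client does not poll for the caller. Illustrative
// handling, with the usual placeholder accessor and identifiers:
//
//     match client
//         .certificate()
//         .delete("my-rg", "my-batch-account", "cert-name", "subscription-id")
//         .into_future()
//         .await?
//     {
//         delete::Response::Ok200 | delete::Response::NoContent204 => { /* gone */ }
//         delete::Response::Accepted202 => { /* deletion still in progress */ }
//     }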
pub mod cancel_deletion {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) certificate_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Certificate, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/certificates/{}/cancelDelete",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.certificate_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Certificate =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod pool {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list_by_batch_account(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list_by_batch_account::Builder {
list_by_batch_account::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
subscription_id: subscription_id.into(),
maxresults: None,
select: None,
filter: None,
}
}
pub fn get(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
pool_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
pool_name: pool_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn create(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
pool_name: impl Into<String>,
parameters: impl Into<models::Pool>,
subscription_id: impl Into<String>,
) -> create::Builder {
create::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
pool_name: pool_name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
if_match: None,
if_none_match: None,
}
}
pub fn update(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
pool_name: impl Into<String>,
parameters: impl Into<models::Pool>,
subscription_id: impl Into<String>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
pool_name: pool_name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
if_match: None,
}
}
pub fn delete(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
pool_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
pool_name: pool_name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn disable_auto_scale(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
pool_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> disable_auto_scale::Builder {
disable_auto_scale::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
pool_name: pool_name.into(),
subscription_id: subscription_id.into(),
}
}
#[doc = "Stops an ongoing resize operation on the pool."]
pub fn stop_resize(
&self,
resource_group_name: impl Into<String>,
account_name: impl Into<String>,
pool_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> stop_resize::Builder {
stop_resize::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
account_name: account_name.into(),
pool_name: pool_name.into(),
subscription_id: subscription_id.into(),
}
}
}
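// Usage sketch: pool listing supports server-side shaping through `maxresults`, `select`, and
// `filter`, forwarded as the `maxresults`, `$select`, and `$filter` query parameters (see the
// builder below). The accessor name, identifiers, and query strings are all illustrative:
//
//     let pools = client
//         .pool()                              // accessor name assumed
//         .list_by_batch_account("my-rg", "my-batch-account", "subscription-id")
//         .maxresults(50)
//         .select("properties/allocationState")
//         .filter("properties/provisioningState eq 'Succeeded'")
//         .into_future()
//         .await?;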
pub mod list_by_batch_account {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) subscription_id: String,
pub(crate) maxresults: Option<i32>,
pub(crate) select: Option<String>,
pub(crate) filter: Option<String>,
}
impl Builder {
pub fn maxresults(mut self, maxresults: i32) -> Self {
self.maxresults = Some(maxresults);
self
}
pub fn select(mut self, select: impl Into<String>) -> Self {
self.select = Some(select.into());
self
}
pub fn filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::ListPoolsResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/pools",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
if let Some(maxresults) = &self.maxresults {
url.query_pairs_mut().append_pair("maxresults", &maxresults.to_string());
}
if let Some(select) = &self.select {
url.query_pairs_mut().append_pair("$select", select);
}
if let Some(filter) = &self.filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ListPoolsResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) pool_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Pool, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/pools/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.pool_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Pool =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) pool_name: String,
pub(crate) parameters: models::Pool,
pub(crate) subscription_id: String,
pub(crate) if_match: Option<String>,
pub(crate) if_none_match: Option<String>,
}
impl Builder {
pub fn if_match(mut self, if_match: impl Into<String>) -> Self {
self.if_match = Some(if_match.into());
self
}
pub fn if_none_match(mut self, if_none_match: impl Into<String>) -> Self {
self.if_none_match = Some(if_none_match.into());
self
}
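            // Usage sketch (illustrative): however the `Builder` is obtained from the
            // pools operations client, the optional ETag preconditions chain before the
            // request is sent:
            //
            //     let pool = builder
            //         .if_none_match("*") // only create the pool if it does not exist yet
            //         .into_future()
            //         .await?;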
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Pool, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/pools/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.pool_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
if let Some(if_match) = &self.if_match {
req_builder = req_builder.header("If-Match", if_match);
}
if let Some(if_none_match) = &self.if_none_match {
req_builder = req_builder.header("If-None-Match", if_none_match);
}
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Pool =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) pool_name: String,
pub(crate) parameters: models::Pool,
pub(crate) subscription_id: String,
pub(crate) if_match: Option<String>,
}
impl Builder {
pub fn if_match(mut self, if_match: impl Into<String>) -> Self {
self.if_match = Some(if_match.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Pool, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/pools/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.pool_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
if let Some(if_match) = &self.if_match {
req_builder = req_builder.header("If-Match", if_match);
}
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Pool =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
Accepted202,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) pool_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/pools/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.pool_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod disable_auto_scale {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) pool_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Pool, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/pools/{}/disableAutoScale",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.pool_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Pool =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod stop_resize {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::CloudError,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) account_name: String,
pub(crate) pool_name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Pool, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Batch/batchAccounts/{}/pools/{}/stopResize",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.account_name,
&self.pool_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::Pool =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CloudError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
| Builder |
base_polling.py | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import abc
import base64
import json
from typing import TYPE_CHECKING, Optional, Any, Union
from ..exceptions import HttpResponseError, DecodeError
from . import PollingMethod
from ..pipeline.policies._utils import get_retry_after
if TYPE_CHECKING:
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import (
HttpResponse,
AsyncHttpResponse,
HttpRequest,
)
ResponseType = Union[HttpResponse, AsyncHttpResponse]
PipelineResponseType = PipelineResponse[HttpRequest, ResponseType]
try:
ABC = abc.ABC
except AttributeError: # Python 2.7, abc exists, but not ABC
ABC = abc.ABCMeta("ABC", (object,), {"__slots__": ()}) # type: ignore
_FINISHED = frozenset(["succeeded", "canceled", "failed"])
_FAILED = frozenset(["canceled", "failed"])
_SUCCEEDED = frozenset(["succeeded"])
def _finished(status):
if hasattr(status, "value"):
status = status.value
return str(status).lower() in _FINISHED
def _failed(status):
if hasattr(status, "value"):
status = status.value
return str(status).lower() in _FAILED
def _succeeded(status):
if hasattr(status, "value"):
status = status.value
return str(status).lower() in _SUCCEEDED
class BadStatus(Exception):
pass
class BadResponse(Exception):
pass
class OperationFailed(Exception):
pass
def _as_json(response):
# type: (ResponseType) -> dict
"""Assuming this is not empty, return the content as JSON.
    Result/exceptions are not determined if you call this method without testing _is_empty.
:raises: DecodeError if response body contains invalid json data.
"""
try:
return json.loads(response.text())
except ValueError:
raise DecodeError("Error occurred in deserializing the response body.")
def _raise_if_bad_http_status_and_method(response):
# type: (ResponseType) -> None
"""Check response status code is valid.
Must be 200, 201, 202, or 204.
:raises: BadStatus if invalid status.
"""
code = response.status_code
if code in {200, 201, 202, 204}:
return
raise BadStatus(
"Invalid return status {!r} for {!r} operation".format(
code, response.request.method
)
)
def _is_empty(response):
# type: (ResponseType) -> bool
"""Check if response body contains meaningful content.
:rtype: bool
"""
return not bool(response.body())
class LongRunningOperation(ABC):
"""LongRunningOperation
Provides default logic for interpreting operation responses
and status updates.
:param azure.core.pipeline.PipelineResponse response: The initial pipeline response.
    :param callable deserialization_callback: The deserialization callback.
:param dict lro_options: LRO options.
:param kwargs: Unused for now
"""
@abc.abstractmethod
def can_poll(self, pipeline_response):
# type: (PipelineResponseType) -> bool
"""Answer if this polling method could be used.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_polling_url(self):
# type: () -> str
"""Return the polling URL.
"""
raise NotImplementedError()
@abc.abstractmethod
def set_initial_status(self, pipeline_response):
# type: (PipelineResponseType) -> str
"""Process first response after initiating long running operation.
:param azure.core.pipeline.PipelineResponse response: initial REST call response.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_status(self, pipeline_response):
# type: (PipelineResponseType) -> str
"""Return the status string extracted from this response."""
raise NotImplementedError()
@abc.abstractmethod
def get_final_get_url(self, pipeline_response):
# type: (PipelineResponseType) -> Optional[str]
"""If a final GET is needed, returns the URL.
:rtype: str
"""
raise NotImplementedError()
class OperationResourcePolling(LongRunningOperation):
"""Implements a operation resource polling, typically from Operation-Location.
:param str operation_location_header: Name of the header to return operation format (default 'operation-location')
"""
def __init__(self, operation_location_header="operation-location"):
self._operation_location_header = operation_location_header
# Store the initial URLs
self._async_url = None
self._location_url = None
self._request = None
def can_poll(self, pipeline_response):
"""Answer if this polling method could be used.
"""
response = pipeline_response.http_response
return self._operation_location_header in response.headers
def get_polling_url(self):
# type: () -> str
"""Return the polling URL.
"""
return self._async_url
def get_final_get_url(self, pipeline_response):
# type: (PipelineResponseType) -> Optional[str]
"""If a final GET is needed, returns the URL.
:rtype: str
"""
response = pipeline_response.http_response
if not _is_empty(response):
body = _as_json(response)
# https://github.com/microsoft/api-guidelines/blob/vNext/Guidelines.md#target-resource-location
resource_location = body.get("resourceLocation")
if resource_location:
return resource_location
if self._request.method in {"PUT", "PATCH"}:
return self._request.url
if self._request.method == "POST" and self._location_url:
return self._location_url
return None
def set_initial_status(self, pipeline_response):
# type: (PipelineResponseType) -> str
"""Process first response after initiating long running operation.
:param azure.core.pipeline.PipelineResponse response: initial REST call response.
"""
self._request = pipeline_response.http_response.request
response = pipeline_response.http_response
self._set_async_url_if_present(response)
if response.status_code in {200, 201, 202, 204} and self._async_url:
return "InProgress"
raise OperationFailed("Operation failed or canceled")
def _set_async_url_if_present(self, response):
# type: (ResponseType) -> None
self._async_url = response.headers[self._operation_location_header]
location_url = response.headers.get("location")
if location_url:
self._location_url = location_url
def get_status(self, pipeline_response):
# type: (PipelineResponseType) -> str
"""Process the latest status update retrieved from an "Operation-Location" header.
:param azure.core.pipeline.PipelineResponse response: The response to extract the status.
:raises: BadResponse if response has no body, or body does not contain status.
"""
response = pipeline_response.http_response
if _is_empty(response):
raise BadResponse(
"The response from long running operation does not contain a body."
)
body = _as_json(response)
status = body.get("status")
if not status:
raise BadResponse("No status found in body")
return status
class LocationPolling(LongRunningOperation):
"""Implements a Location polling.
"""
def __init__(self):
self._location_url = None
def can_poll(self, pipeline_response):
# type: (PipelineResponseType) -> bool
"""Answer if this polling method could be used.
"""
response = pipeline_response.http_response
return "location" in response.headers
def get_polling_url(self):
# type: () -> str
"""Return the polling URL.
"""
return self._location_url
def get_final_get_url(self, pipeline_response):
# type: (PipelineResponseType) -> Optional[str]
"""If a final GET is needed, returns the URL.
:rtype: str
"""
return None
def set_initial_status(self, pipeline_response):
# type: (PipelineResponseType) -> str
"""Process first response after initiating long running operation.
:param azure.core.pipeline.PipelineResponse response: initial REST call response.
"""
response = pipeline_response.http_response
self._location_url = response.headers["location"]
if response.status_code in {200, 201, 202, 204} and self._location_url:
return "InProgress"
raise OperationFailed("Operation failed or canceled")
def get_status(self, pipeline_response):
# type: (PipelineResponseType) -> str
"""Process the latest status update retrieved from a 'location' header.
:param azure.core.pipeline.PipelineResponse response: latest REST call response.
:raises: BadResponse if response has no body and not status 202.
"""
response = pipeline_response.http_response
if "location" in response.headers:
self._location_url = response.headers["location"]
return "InProgress" if response.status_code == 202 else "Succeeded"
class StatusCheckPolling(LongRunningOperation):
"""Should be the fallback polling, that don't poll but exit successfully
if not other polling are detected and status code is 2xx.
"""
def can_poll(self, pipeline_response):
# type: (PipelineResponseType) -> bool
"""Answer if this polling method could be used.
"""
return True
def get_polling_url(self):
# type: () -> str
"""Return the polling URL.
"""
raise ValueError("This polling doesn't support polling")
def set_initial_status(self, pipeline_response):
# type: (PipelineResponseType) -> str
"""Process first response after initiating long running
operation and set self.status attribute.
:param azure.core.pipeline.PipelineResponse response: initial REST call response.
"""
return "Succeeded"
def get_status(self, pipeline_response):
# type: (PipelineResponseType) -> str
return "Succeeded"
def get_final_get_url(self, pipeline_response):
# type: (PipelineResponseType) -> Optional[str]
"""If a final GET is needed, returns the URL.
:rtype: str
"""
return None
class LROBasePolling(PollingMethod): # pylint: disable=too-many-instance-attributes
"""A base LRO poller.
    This assumes a basic flow:
    - I analyze the response to decide the polling approach
    - I poll
    - I ask for the final resource depending on the polling approach
    If your polling needs are more specific, you could implement a PollingMethod directly
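    A rough usage sketch (``client``, ``initial_response`` and ``deserialize`` are
    placeholders for objects created elsewhere):
        polling = LROBasePolling(timeout=5)
        polling.initialize(client, initial_response, deserialize)
        polling.run()
        resource = polling.resource()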
"""
def __init__(
self,
timeout=30,
lro_algorithms=None,
lro_options=None,
path_format_arguments=None,
**operation_config
):
self._lro_algorithms = lro_algorithms or [
OperationResourcePolling(),
LocationPolling(),
StatusCheckPolling(),
]
self._timeout = timeout
        self._client = None  # Will hold the PipelineClient
self._operation = None # Will hold an instance of LongRunningOperation
self._initial_response = None # Will hold the initial response
self._pipeline_response = None # Will hold latest received response
self._deserialization_callback = None # Will hold the deserialization callback
self._operation_config = operation_config
self._lro_options = lro_options
self._path_format_arguments = path_format_arguments
self._status = None
def status(self):
"""Return the current status as a string.
:rtype: str
"""
if not self._operation:
raise ValueError(
"set_initial_status was never called. Did you give this instance to a poller?"
)
return self._status
def finished(self):
"""Is this polling finished?
:rtype: bool
"""
return _finished(self.status())
def resource(self):
"""Return the built resource.
"""
return self._parse_resource(self._pipeline_response)
@property
def _transport(self):
return self._client._pipeline._transport # pylint: disable=protected-access
def initialize(self, client, initial_response, deserialization_callback):
"""Set the initial status of this LRO.
:param initial_response: The initial response of the poller
:raises: HttpResponseError if initial status is incorrect LRO state
"""
self._client = client
self._pipeline_response = self._initial_response = initial_response
self._deserialization_callback = deserialization_callback
for operation in self._lro_algorithms:
if operation.can_poll(initial_response):
self._operation = operation
break
else:
raise BadResponse("Unable to find status link for polling.")
try:
_raise_if_bad_http_status_and_method(self._initial_response.http_response)
self._status = self._operation.set_initial_status(initial_response)
except BadStatus as err:
self._status = "Failed"
raise HttpResponseError(response=initial_response.http_response, error=err)
except BadResponse as err:
self._status = "Failed"
raise HttpResponseError(
response=initial_response.http_response, message=str(err), error=err
)
except OperationFailed as err:
raise HttpResponseError(response=initial_response.http_response, error=err)
def get_continuation_token(self):
        # type: () -> str
import pickle
return base64.b64encode(pickle.dumps(self._initial_response)).decode('ascii')
@classmethod
def from_continuation_token(cls, continuation_token, **kwargs):
        # type: (str, Any) -> Tuple
try:
client = kwargs["client"]
except KeyError:
raise ValueError("Need kwarg 'client' to be recreated from continuation_token")
try:
deserialization_callback = kwargs["deserialization_callback"]
except KeyError:
raise ValueError("Need kwarg 'deserialization_callback' to be recreated from continuation_token")
import pickle
initial_response = pickle.loads(base64.b64decode(continuation_token)) # nosec
# Restore the transport in the context
initial_response.context.transport = client._pipeline._transport # pylint: disable=protected-access
return client, initial_response, deserialization_callback
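    # Continuation-token round trip, roughly (``client`` and ``deserialize`` are
    # illustrative names, not part of this module):
    #   token = polling_method.get_continuation_token()
    #   client, initial_response, cb = LROBasePolling.from_continuation_token(
    #       token, client=client, deserialization_callback=deserialize)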
def run(self):
try:
self._poll()
except BadStatus as err:
self._status = "Failed"
raise HttpResponseError(
response=self._pipeline_response.http_response, error=err
)
except BadResponse as err:
self._status = "Failed"
raise HttpResponseError(
response=self._pipeline_response.http_response,
message=str(err),
error=err,
)
except OperationFailed as err:
raise HttpResponseError(
response=self._pipeline_response.http_response, error=err
)
def _poll(self):
"""Poll status of operation so long as operation is incomplete and
we have an endpoint to query.
:raises: OperationFailed if operation status 'Failed' or 'Canceled'.
:raises: BadStatus if response status invalid.
:raises: BadResponse if response invalid.
"""
while not self.finished():
self._delay()
self.update_status()
if _failed(self.status()):
raise OperationFailed("Operation failed or canceled")
final_get_url = self._operation.get_final_get_url(self._pipeline_response)
if final_get_url:
self._pipeline_response = self.request_status(final_get_url)
_raise_if_bad_http_status_and_method(self._pipeline_response.http_response)
def _parse_resource(self, pipeline_response):
# type: (PipelineResponseType) -> Optional[Any]
"""Assuming this response is a resource, use the deserialization callback to parse it.
If body is empty, assuming no resource to return.
"""
response = pipeline_response.http_response
if not _is_empty(response):
return self._deserialization_callback(pipeline_response)
return None
def _sleep(self, delay):
self._transport.sleep(delay)
def _extract_delay(self):
if self._pipeline_response is None:
return None
delay = get_retry_after(self._pipeline_response)
if delay:
return delay
return self._timeout
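    # Note: _extract_delay prefers the server-provided Retry-After value
    # (e.g. "Retry-After: 10" results in a 10 second sleep) and falls back to the
    # configured timeout (30 seconds by default).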
def _delay(self):
"""Check for a 'retry-after' header to set timeout,
otherwise use configured timeout.
"""
delay = self._extract_delay()
self._sleep(delay)
def update_status(self):
"""Update the current status of the LRO.
"""
self._pipeline_response = self.request_status(self._operation.get_polling_url())
_raise_if_bad_http_status_and_method(self._pipeline_response.http_response)
self._status = self._operation.get_status(self._pipeline_response)
def _get_request_id(self):
return self._pipeline_response.http_response.request.headers[
"x-ms-client-request-id"
]
def request_status(self, status_link):
|
__all__ = [
'BadResponse',
'BadStatus',
'OperationFailed',
'LongRunningOperation',
'OperationResourcePolling',
'LocationPolling',
'StatusCheckPolling',
'LROBasePolling',
]
| """Do a simple GET to this status link.
        This method re-injects 'x-ms-client-request-id'.
:rtype: azure.core.pipeline.PipelineResponse
"""
if self._path_format_arguments:
status_link = self._client.format_url(status_link, **self._path_format_arguments)
request = self._client.get(status_link)
# Re-inject 'x-ms-client-request-id' while polling
if "request_id" not in self._operation_config:
self._operation_config["request_id"] = self._get_request_id()
return self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **self._operation_config
) |
apicheck.py | from lcu_driver import Connector
import json
connector = Connector()
@connector.ready
async def connect(connection):
print("LCU API is ready to be used.")
# check if the user is already logged into his account
summoner = await connection.request("get", "/lol-summoner/v1/current-summoner")
if summoner.status != 200:
print(
"Please login into your account to change your icon and restart the script..."
)
else:
data = await summoner.json()
summonerId = data['summonerId']
#request = f"/lol-perks/v1/perks"
request = "/lol-perks/v1/pages"
#request = f"/lol-perks/v1/currentpage"
request_type = "get" | with open("temp.json", "w+") as f:
json.dump(save, f, indent=4)
connector.start() | summoner_spells = await connection.request(request_type, request)
save = await summoner_spells.json() |
tpl.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-08-07 22:27:07
import time
import config
from .basedb import BaseDB
class TPLDB(BaseDB):
'''
tpl db
id, userid, siteurl, sitename, banner, disabled, public, fork, har, tpl, variables, interval, note, ctime, mtime, atime, last_success
'''
__tablename__ = 'tpl'
def __init__(self, host=config.mysql.host, port=config.mysql.port,
database=config.mysql.database, user=config.mysql.user, passwd=config.mysql.passwd, auth_plugin=config.mysql.auth_plugin):
import mysql.connector
self.conn = mysql.connector.connect(user=user, password=passwd, host=host, port=port,
database=database, auth_plugin=auth_plugin, autocommit=True)
def add(self, userid, har, tpl, variables, interval=None):
now = time.time()
insert = dict(
userid = userid,
siteurl = None,
sitename = None,
banner = None,
disabled = 0,
public = 0,
fork = None,
har = har,
tpl = tpl,
variables = variables,
interval = interval,
ctime = now,
mtime = now,
atime = now,
last_success = None,
)
return self._insert(**insert)
def | (self, id, **kwargs):
return self._update(where="id=%s" % self.placeholder, where_values=(id, ), **kwargs)
def get(self, id, fields=None):
for tpl in self._select2dic(what=fields, where='id=%s' % self.placeholder, where_values=(id, )):
return tpl
def delete(self, id):
self._delete(where="id=%s" % self.placeholder, where_values=(id, ))
def incr_success(self, id):
self._execute('UPDATE %s SET success_count=success_count+1, last_success=%d WHERE `id`=%d' % (
self.escape(self.__tablename__), time.time(), int(id)))
def incr_failed(self, id):
self._execute('UPDATE %s SET failed_count=failed_count+1 WHERE `id`=%d' % (
self.escape(self.__tablename__), int(id)))
def list(self, fields=None, limit=None, **kwargs):
where = '1=1'
where_values = []
for key, value in kwargs.items():
if value is None:
where += ' and %s is %s' % (self.escape(key), self.placeholder)
else:
where += ' and %s = %s' % (self.escape(key), self.placeholder)
where_values.append(value)
for tpl in self._select2dic(what=fields, where=where, where_values=where_values, limit=limit):
yield tpl
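    # Usage sketch (``tpldb`` is a TPLDB instance created elsewhere): iterate
    # public, enabled templates.
    #   for tpl in tpldb.list(public=1, disabled=0, limit=10):
    #       print(tpl['id'])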
| mod |
EPG.py | """
Implements wrapper class and methods to work with Brightcove's EPG API.
See: https://apis.support.brightcove.com/epg/getting-started/overview-epg-api.html
"""
from requests.models import Response
from .Base import Base
from .OAuth import OAuth
class EPG(Base):
"""
Class to wrap the Brightcove EPG API calls. Inherits from Base.
Attributes:
-----------
base_url (str)
Base URL for API calls.
Methods:
--------
GetAllCPChannels(self, account_id: str='') -> Response
Get a list of all Cloud Playout channels for an account.
GetEPG(self, channel_id: str, query: str='', account_id: str='') -> Response
Get EPG for a specific channel.
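    Example (sketch; ``oauth`` is an OAuth instance created elsewhere and the
    channel ID is a placeholder):
        epg_api = EPG(oauth=oauth)
        channels_response = epg_api.GetAllCPChannels()
        epg_response = epg_api.GetEPG(channel_id='<channel id>')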
"""
# base URL for all API calls
    base_url = 'https://cm.cloudplayout.brightcove.com/accounts/{account_id}'
def __init__(self, oauth: OAuth, query: str='') -> None:
"""
Args:
oauth (OAuth): OAuth instance to use for the API calls.
query (str, optional): Default search query for this instance.
"""
super().__init__(oauth=oauth, query=query)
def GetAllCPChannels(self, account_id: str='') -> Response:
|
def GetEPG(self, channel_id: str, query: str='', account_id: str='') -> Response:
"""
Get EPG for a specific channel.
Args:
channel_id (str): Channel ID to get the EPG for.
query (str, optional): Search query string. Defaults to ''.
account_id (str, optional): Video Cloud account ID. Defaults to ''
Returns:
Response: API response as requests Response object.
"""
base = 'https://sm.cloudplayout.brightcove.com/accounts/{account_id}'
query = query or self.search_query
url = f'{base}/channels/{channel_id}/epg?{query}'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url=url, headers=self.oauth.headers)
| """
Get a list of all Cloud Playout channels for an account.
Args:
account_id (str, optional): Video Cloud account ID. Defaults to ''
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/cp_channels'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url=url, headers=self.oauth.headers) |
files.rs | // Copyright 2020 - developers of the `grammers` project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::types::{Media, Uploaded};
use crate::utils::{generate_random_id, AsyncMutex};
use crate::Client;
use futures_util::future::try_join_all;
use grammers_mtsender::InvocationError;
use grammers_tl_types as tl;
use std::time::Duration;
use std::{io::SeekFrom, path::Path, sync::Arc};
use tokio::sync::mpsc::unbounded_channel;
use tokio::{
fs,
io::{self, AsyncRead, AsyncReadExt, AsyncSeekExt, AsyncWriteExt},
};
pub const MIN_CHUNK_SIZE: i32 = 4 * 1024;
pub const MAX_CHUNK_SIZE: i32 = 512 * 1024;
const BIG_FILE_SIZE: usize = 10 * 1024 * 1024;
const WORKER_COUNT: usize = 4;
const RATE_LIMIT_DELAY: Duration = Duration::from_millis(250);
const RATE_LIMIT_RETRIES: usize = 3;
pub struct DownloadIter {
client: Client,
done: bool,
request: tl::functions::upload::GetFile,
}
impl DownloadIter {
fn new(client: &Client, media: &Media) -> Self {
DownloadIter::new_from_file_location(client, media.to_input_location().unwrap())
}
fn new_from_location(client: &Client, location: tl::enums::InputFileLocation) -> Self {
DownloadIter::new_from_file_location(client, location)
}
fn new_from_file_location(client: &Client, location: tl::enums::InputFileLocation) -> Self {
// TODO let users tweak all the options from the request
// TODO cdn support
Self {
client: client.clone(),
done: false,
request: tl::functions::upload::GetFile {
precise: false,
cdn_supported: false,
location,
offset: 0,
limit: MAX_CHUNK_SIZE,
},
}
}
/// Changes the chunk size, in bytes, used to make requests. Useful if you only need to get a
/// small part of a file. By default, `MAX_CHUNK_SIZE` is used.
///
/// # Panics
///
    /// Panics if `size` is not divisible by `MIN_CHUNK_SIZE`, or if `size` is not contained in
/// the range `MIN_CHUNK_SIZE..=MAX_CHUNK_SIZE`.
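    ///
    /// A minimal sketch (assumes `media` and `client` as in the other examples):
    ///
    /// ```
    /// # async fn f(media: grammers_client::types::Media, mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// // Download in 32 KiB chunks instead of the default `MAX_CHUNK_SIZE`.
    /// let mut download = client.iter_download(&media).chunk_size(32 * 1024);
    /// while let Some(_chunk) = download.next().await? {
    ///     // process each 32 KiB (or smaller, final) chunk here
    /// }
    /// # Ok(())
    /// # }
    /// ```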
pub fn chunk_size(mut self, size: i32) -> Self {
assert!(MIN_CHUNK_SIZE <= size && size <= MAX_CHUNK_SIZE && size % MIN_CHUNK_SIZE == 0);
self.request.limit = size as i32;
self
}
/// Skips `n` chunks to start downloading a different offset from the file. If you want to
/// skip less data, modify the `chunk_size` before calling this method, and then reset it to
/// any value you want.
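    ///
    /// A minimal sketch (assumes `media` and `client` as in the other examples): start
    /// three chunks into the file instead of at the beginning.
    ///
    /// ```
    /// # async fn f(media: grammers_client::types::Media, mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
    /// let mut download = client.iter_download(&media).skip_chunks(3);
    /// let _chunk_at_offset = download.next().await?;
    /// # Ok(())
    /// # }
    /// ```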
pub fn skip_chunks(mut self, n: i32) -> Self {
self.request.offset += self.request.limit * n;
self
}
/// Fetch and return the next chunk.
pub async fn next(&mut self) -> Result<Option<Vec<u8>>, InvocationError> {
if self.done {
return Ok(None);
}
use tl::enums::upload::File;
// TODO handle FILE_MIGRATE and maybe FILEREF_UPGRADE_NEEDED
let mut retries = 0;
loop {
break match self.client.invoke(&self.request).await {
Ok(File::File(f)) => {
if f.bytes.len() < self.request.limit as usize {
self.done = true;
if f.bytes.is_empty() {
return Ok(None);
}
}
self.request.offset += self.request.limit;
Ok(Some(f.bytes))
}
Ok(File::CdnRedirect(_)) => {
panic!("API returned File::CdnRedirect even though cdn_supported = false");
}
// Rate limit hit
Err(InvocationError::Rpc(err))
if err.code == 420 && retries < RATE_LIMIT_RETRIES =>
{
tokio::time::sleep(RATE_LIMIT_DELAY).await;
retries += 1;
continue;
}
Err(e) => Err(e),
};
}
}
}
/// Method implementations related to uploading or downloading files.
impl Client {
/// Returns a new iterator over the contents of a media document that will be downloaded.
///
/// # Examples
///
/// ```
/// # async fn f(media: grammers_client::types::Media, mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// let mut file_bytes = Vec::new();
/// let mut download = client.iter_download(&media);
///
/// while let Some(chunk) = download.next().await? {
/// file_bytes.extend(chunk);
/// }
///
/// // The file is now downloaded in-memory, inside `file_bytes`!
/// # Ok(())
/// # }
/// ```
pub fn iter_download(&self, media: &Media) -> DownloadIter {
DownloadIter::new(self, media)
}
/// Downloads a media file into the specified path.
///
/// If the file already exists, it will be overwritten.
///
/// This is a small wrapper around [`Client::iter_download`] for the common case of
/// wanting to save the file locally.
///
/// # Examples
///
/// ```
/// # async fn f(media: grammers_client::types::Media, mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// client.download_media(&media, "/home/username/photos/holidays.jpg").await?;
/// # Ok(())
/// # }
/// ```
pub async fn download_media<P: AsRef<Path>>(
&self,
media: &Media,
path: P,
) -> Result<(), io::Error> |
pub(crate) async fn download_media_at_location<P: AsRef<Path>>(
&self,
location: tl::enums::InputFileLocation,
path: P,
) -> Result<(), io::Error> {
let mut download = DownloadIter::new_from_location(self, location);
Client::load(path, &mut download).await
}
async fn load<P: AsRef<Path>>(path: P, download: &mut DownloadIter) -> Result<(), io::Error> {
let mut file = fs::File::create(path).await?;
while let Some(chunk) = download
.next()
.await
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?
{
file.write_all(&chunk).await?;
}
Ok(())
}
/// Downloads a `Document` to specified path using multiple connections
async fn download_media_concurrent<P: AsRef<Path>>(
&self,
media: &Media,
path: P,
workers: usize,
) -> Result<(), io::Error> {
let document = match media {
Media::Document(document) => document,
_ => panic!("Only Document type is supported!"),
};
let size = document.size();
let location = media.to_input_location().unwrap();
// Allocate
let mut file = fs::File::create(path).await?;
file.set_len(size as u64).await?;
file.seek(SeekFrom::Start(0)).await?;
// Start workers
let (tx, mut rx) = unbounded_channel();
let part_index = Arc::new(tokio::sync::Mutex::new(0));
let mut tasks = vec![];
for _ in 0..workers {
let location = location.clone();
let tx = tx.clone();
let part_index = part_index.clone();
let client = self.clone();
let task = tokio::task::spawn(async move {
let mut retry_offset = None;
let mut retry_counter = 0;
loop {
// Calculate file offset
let offset = {
if let Some(offset) = retry_offset {
retry_offset = None;
offset
} else {
retry_counter = 0;
let mut i = part_index.lock().await;
*i += 1;
MAX_CHUNK_SIZE * (*i - 1)
}
};
if offset > size {
break;
}
// Fetch from telegram
let res = client
.invoke(&tl::functions::upload::GetFile {
precise: true,
cdn_supported: false,
location: location.clone(),
offset,
limit: MAX_CHUNK_SIZE,
})
.await;
match res {
Ok(tl::enums::upload::File::File(file)) => {
tx.send((offset as u64, file.bytes)).unwrap();
}
Ok(tl::enums::upload::File::CdnRedirect(_)) => {
panic!(
"API returned File::CdnRedirect even though cdn_supported = false"
);
}
Err(InvocationError::Rpc(err)) => {
// Retry on rate limit
if err.code == 420 {
tokio::time::sleep(RATE_LIMIT_DELAY).await;
retry_offset = Some(offset);
retry_counter += 1;
if retry_counter <= RATE_LIMIT_RETRIES {
continue;
}
}
return Err(InvocationError::Rpc(err));
}
Err(e) => return Err(e),
}
}
Ok::<(), InvocationError>(())
});
tasks.push(task);
}
drop(tx);
// File write loop
let mut pos = 0;
while let Some((offset, data)) = rx.recv().await {
if offset != pos {
file.seek(SeekFrom::Start(offset)).await?;
}
file.write_all(&data).await?;
pos = offset + data.len() as u64;
}
        // Check if all tasks finished successfully
for task in tasks {
task.await?
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
}
Ok(())
}
/// Uploads an async stream to Telegram servers.
///
/// The file is not sent to any chat, but can be used as media when sending messages for a
/// certain period of time (less than a day). You can use this uploaded file multiple times.
///
/// Refer to [`InputMessage`] to learn more uses for `uploaded_file`.
///
/// The stream size must be known beforehand. If this is not possible, you might need to
/// process the entire async stream to determine its size, and then use the size and the
/// downloaded buffer.
///
/// The stream size may be less or equal to the actual length of the stream, but not more.
/// If it's less, you may continue to read from the stream after the method returns.
/// If it's more, the method will fail because it does not have enough data to read.
///
/// Note that Telegram uses the file name in certain methods, for example, to make sure the
    /// file is an image when trying to send the file as photo media, so it is important that
/// the file name at least uses the right extension, even if the name is a dummy value.
/// If the input file name is empty, the non-empty dummy value "a" will be used instead.
/// Because it has no extension, you may not be able to use the file in certain methods.
///
/// # Examples
///
/// ```
/// # async fn f(chat: grammers_client::types::Chat, client: grammers_client::Client, some_vec: &[u8]) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_client::InputMessage;
///
/// // In-memory `Vec<u8>` buffers can be used as async streams
/// let size = some_vec.len();
/// let mut stream = std::io::Cursor::new(some_vec);
/// let uploaded_file = client.upload_stream(&mut stream, size, "sleep.jpg".to_string()).await?;
///
/// client.send_message(&chat, InputMessage::text("Zzz...").photo(uploaded_file)).await?;
/// # Ok(())
/// # }
/// ```
///
/// [`InputMessage`]: crate::types::InputMessage
pub async fn upload_stream<S: AsyncRead + Unpin>(
&self,
stream: &mut S,
size: usize,
name: String,
) -> Result<Uploaded, io::Error> {
let file_id = generate_random_id();
let name = if name.is_empty() {
"a".to_string()
} else {
name
};
let big_file = size > BIG_FILE_SIZE;
let parts = PartStream::new(stream, size);
let total_parts = parts.total_parts();
if big_file {
let parts = Arc::new(parts);
let mut tasks = Vec::with_capacity(WORKER_COUNT);
for _ in 0..WORKER_COUNT {
let handle = self.clone();
let parts = Arc::clone(&parts);
let task = async move {
while let Some((part, bytes)) = parts.next_part().await? {
let ok = handle
.invoke(&tl::functions::upload::SaveBigFilePart {
file_id,
file_part: part,
file_total_parts: total_parts,
bytes,
})
.await
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
if !ok {
return Err(io::Error::new(
io::ErrorKind::Other,
"server failed to store uploaded data",
));
}
}
Ok(())
};
tasks.push(task);
}
try_join_all(tasks).await?;
Ok(Uploaded::from_raw(
tl::types::InputFileBig {
id: file_id,
parts: total_parts,
name,
}
.into(),
))
} else {
let mut md5 = md5::Context::new();
while let Some((part, bytes)) = parts.next_part().await? {
md5.consume(&bytes);
let ok = self
.invoke(&tl::functions::upload::SaveFilePart {
file_id,
file_part: part,
bytes,
})
.await
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
if !ok {
return Err(io::Error::new(
io::ErrorKind::Other,
"server failed to store uploaded data",
));
}
}
Ok(Uploaded::from_raw(
tl::types::InputFile {
id: file_id,
parts: total_parts,
name,
md5_checksum: format!("{:x}", md5.compute()),
}
.into(),
))
}
}
/// Uploads a local file to Telegram servers.
///
/// The file is not sent to any chat, but can be used as media when sending messages for a
/// certain period of time (less than a day). You can use this uploaded file multiple times.
///
/// Refer to [`InputMessage`] to learn more uses for `uploaded_file`.
///
/// If you need more control over the uploaded data, such as performing only a partial upload
/// or with a different name, use [`Client::upload_stream`] instead.
///
/// # Examples
///
/// ```
/// # async fn f(chat: grammers_client::types::Chat, mut client: grammers_client::Client) -> Result<(), Box<dyn std::error::Error>> {
/// use grammers_client::InputMessage;
///
/// let uploaded_file = client.upload_file("/home/username/photos/holidays.jpg").await?;
///
/// client.send_message(&chat, InputMessage::text("Check this out!").photo(uploaded_file)).await?;
/// # Ok(())
/// # }
/// ```
///
/// [`InputMessage`]: crate::InputMessage
pub async fn upload_file<P: AsRef<Path>>(&self, path: P) -> Result<Uploaded, io::Error> {
let path = path.as_ref();
let mut file = fs::File::open(path).await?;
let size = file.seek(SeekFrom::End(0)).await? as usize;
file.seek(SeekFrom::Start(0)).await?;
// File name will only be `None` for `..` path, and directories cannot be uploaded as
// files, so it's fine to unwrap.
let name = path.file_name().unwrap().to_string_lossy().to_string();
self.upload_stream(&mut file, size, name).await
}
}
struct PartStreamInner<'a, S: AsyncRead + Unpin> {
stream: &'a mut S,
current_part: i32,
}
struct PartStream<'a, S: AsyncRead + Unpin> {
inner: AsyncMutex<PartStreamInner<'a, S>>,
total_parts: i32,
}
impl<'a, S: AsyncRead + Unpin> PartStream<'a, S> {
fn new(stream: &'a mut S, size: usize) -> Self {
let total_parts = ((size + MAX_CHUNK_SIZE as usize - 1) / MAX_CHUNK_SIZE as usize) as i32;
Self {
inner: AsyncMutex::new(
"upload_stream",
PartStreamInner {
stream,
current_part: 0,
},
),
total_parts,
}
}
fn total_parts(&self) -> i32 {
self.total_parts
}
async fn next_part(&self) -> Result<Option<(i32, Vec<u8>)>, io::Error> {
let mut lock = self.inner.lock("read part").await;
if lock.current_part >= self.total_parts {
return Ok(None);
}
let mut read = 0;
let mut buffer = vec![0; MAX_CHUNK_SIZE as usize];
while read != buffer.len() {
let n = lock.stream.read(&mut buffer[read..]).await?;
if n == 0 {
if lock.current_part == self.total_parts - 1 {
break;
} else {
return Err(io::Error::new(
io::ErrorKind::UnexpectedEof,
"reached EOF before reaching the last file part",
));
}
}
read += n;
}
let bytes = if read == buffer.len() {
buffer
} else {
buffer[..read].to_vec()
};
let res = Ok(Some((lock.current_part, bytes)));
lock.current_part += 1;
res
}
}
| {
// Concurrent downloader
if let Media::Document(document) = media {
if document.size() as usize > BIG_FILE_SIZE {
return self
.download_media_concurrent(media, path, WORKER_COUNT)
.await;
}
}
let mut download = self.iter_download(media);
Client::load(path, &mut download).await
} |
index.tsx | import dva, { Router } from 'dva';
import createLoading from 'dva-loading';
import ZhiQue from './ZhiQue';
// Initialize the dva app
const app = dva({
// eslint-disable-next-line global-require
history: require('history').createBrowserHistory(),
});
// Save the dvaApp instance to the window object so it can be accessed elsewhere
window.dvaApp = app; |
app.router(ZhiQue as Router);
app.use(createLoading());
app.model(require('./models/global').default);
app.start('#root'); | |
flight_dispatcher.rs | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use common_base::tokio::sync::mpsc::Sender;
use common_base::tokio::sync::*;
use common_base::TrySpawn;
use common_datablocks::DataBlock;
use common_datavalues::DataSchemaRef;
use common_exception::ErrorCode;
use common_exception::Result;
use common_exception::ToErrorCode;
use common_infallible::RwLock;
use common_tracing::tracing;
use common_tracing::tracing::Instrument;
use common_tracing::tracing::Span;
use tokio_stream::StreamExt;
use crate::api::rpc::flight_scatter::FlightScatter;
use crate::api::rpc::flight_scatter_broadcast::BroadcastFlightScatter;
use crate::api::rpc::flight_scatter_hash::HashFlightScatter;
use crate::api::rpc::flight_tickets::StreamTicket;
use crate::api::FlightAction;
use crate::pipelines::processors::PipelineBuilder;
use crate::sessions::QueryContext;
use crate::sessions::SessionRef;
struct StreamInfo {
#[allow(unused)]
schema: DataSchemaRef,
tx: mpsc::Sender<Result<DataBlock>>,
rx: mpsc::Receiver<Result<DataBlock>>,
}
pub struct DatabendQueryFlightDispatcher {
streams: Arc<RwLock<HashMap<String, StreamInfo>>>,
stages_notify: Arc<RwLock<HashMap<String, Arc<Notify>>>>,
abort: Arc<AtomicBool>,
}
pub type DatabendQueryFlightDispatcherRef = Arc<DatabendQueryFlightDispatcher>;
impl DatabendQueryFlightDispatcher {
pub fn create() -> DatabendQueryFlightDispatcher {
DatabendQueryFlightDispatcher {
streams: Arc::new(RwLock::new(HashMap::new())),
stages_notify: Arc::new(RwLock::new(HashMap::new())),
abort: Arc::new(AtomicBool::new(false)),
}
}
    /// Marks the dispatcher as aborted; new sessions will be rejected.
pub fn abort(&self) {
self.abort.store(true, Ordering::Relaxed)
}
pub fn is_aborted(&self) -> bool {
self.abort.load(Ordering::Relaxed)
}
#[tracing::instrument(level = "debug", skip_all)]
pub fn get_stream(&self, ticket: &StreamTicket) -> Result<mpsc::Receiver<Result<DataBlock>>> {
let stage_name = format!("{}/{}", ticket.query_id, ticket.stage_id);
if let Some(notify) = self.stages_notify.write().remove(&stage_name) {
notify.notify_waiters();
}
let stream_name = format!("{}/{}", stage_name, ticket.stream);
match self.streams.write().remove(&stream_name) {
Some(stream_info) => Ok(stream_info.rx),
None => Err(ErrorCode::NotFoundStream("Stream is not found")),
}
}
#[tracing::instrument(level = "debug", skip_all, fields(session.id = session.get_id().as_str()))]
pub async fn broadcast_action(&self, session: SessionRef, action: FlightAction) -> Result<()> {
let query_id = action.get_query_id();
let stage_id = action.get_stage_id();
let action_sinks = action.get_sinks();
let data_schema = action.get_plan().schema();
self.create_stage_streams(&query_id, &stage_id, &data_schema, &action_sinks);
match action.get_sinks().len() {
            0 => Err(ErrorCode::LogicalError("flight action must have at least one sink")),
1 => self.one_sink_action(session, &action).await,
_ => {
self.action_with_scatter::<BroadcastFlightScatter>(session, &action)
.await
}
}
}
#[tracing::instrument(level = "debug", skip_all, fields(session.id = session.get_id().as_str()))]
pub async fn shuffle_action(&self, session: SessionRef, action: FlightAction) -> Result<()> {
let query_id = action.get_query_id();
let stage_id = action.get_stage_id();
let action_sinks = action.get_sinks();
let data_schema = action.get_plan().schema();
self.create_stage_streams(&query_id, &stage_id, &data_schema, &action_sinks);
match action.get_sinks().len() {
0 => Err(ErrorCode::LogicalError("")),
1 => self.one_sink_action(session, &action).await,
_ => {
self.action_with_scatter::<HashFlightScatter>(session, &action)
.await
}
}
}
#[tracing::instrument(level = "debug", skip_all, fields(session.id = session.get_id().as_str()))]
async fn one_sink_action(&self, session: SessionRef, action: &FlightAction) -> Result<()> {
let query_context = session.create_context().await?;
let action_context = QueryContext::new(query_context.clone());
let pipeline_builder = PipelineBuilder::create(action_context.clone());
let query_plan = action.get_plan();
action_context.attach_query_plan(&query_plan);
let mut pipeline = pipeline_builder.build(&query_plan)?;
let action_sinks = action.get_sinks();
let action_query_id = action.get_query_id();
let action_stage_id = action.get_stage_id();
assert_eq!(action_sinks.len(), 1);
let stage_name = format!("{}/{}", action_query_id, action_stage_id);
let stages_notify = self.stages_notify.clone();
let stream_name = format!("{}/{}", stage_name, action_sinks[0]);
let tx_ref = self.streams.read().get(&stream_name).map(|x| x.tx.clone());
let tx = tx_ref.ok_or_else(|| ErrorCode::NotFoundStream("Not found stream"))?;
query_context.try_spawn(
async move {
let _session = session;
wait_start(stage_name, stages_notify).await;
match pipeline.execute().await {
Err(error) => {
tx.send(Err(error)).await.ok();
}
Ok(mut abortable_stream) => {
while let Some(item) = abortable_stream.next().await {
if let Err(error) = tx.send(item).await {
tracing::error!(
"Cannot push data when run_action_without_scatters. {}",
error
);
break;
}
}
}
};
}
.instrument(Span::current()),
)?;
Ok(())
}
#[tracing::instrument(level = "debug", skip_all, fields(session.id = session.get_id().as_str()))]
async fn action_with_scatter<T>(
&self,
session: SessionRef,
action: &FlightAction,
) -> Result<()>
where
T: FlightScatter + Send + 'static,
|
fn create_stage_streams(
&self,
query_id: &str,
stage_id: &str,
schema: &DataSchemaRef,
streams_name: &[String],
) {
let stage_name = format!("{}/{}", query_id, stage_id);
self.stages_notify
.write()
.insert(stage_name.clone(), Arc::new(Notify::new()));
let mut streams = self.streams.write();
for stream_name in streams_name {
let (tx, rx) = mpsc::channel(5);
let stream_name = format!("{}/{}", stage_name, stream_name);
streams.insert(stream_name, StreamInfo {
schema: schema.clone(),
tx,
rx,
});
}
}
}
async fn wait_start(stage_name: String, stages_notify: Arc<RwLock<HashMap<String, Arc<Notify>>>>) {
let notify = {
let stages_notify = stages_notify.read();
stages_notify.get(&stage_name).map(Arc::clone)
};
if let Some(notify) = notify {
notify.notified().await;
}
}
| {
let query_context = session.create_context().await?;
let action_context = QueryContext::new(query_context.clone());
let pipeline_builder = PipelineBuilder::create(action_context.clone());
let query_plan = action.get_plan();
action_context.attach_query_plan(&query_plan);
let mut pipeline = pipeline_builder.build(&query_plan)?;
let action_query_id = action.get_query_id();
let action_stage_id = action.get_stage_id();
let sinks_tx = {
let action_sinks = action.get_sinks();
assert!(action_sinks.len() > 1);
let mut sinks_tx = Vec::with_capacity(action_sinks.len());
for sink in &action_sinks {
let stream_name = format!("{}/{}/{}", action_query_id, action_stage_id, sink);
match self.streams.read().get(&stream_name) {
Some(stream) => sinks_tx.push(stream.tx.clone()),
None => {
return Err(ErrorCode::NotFoundStream(format!(
"Not found stream {}",
stream_name
)))
}
}
}
Result::Ok(sinks_tx)
}?;
let stage_name = format!("{}/{}", action_query_id, action_stage_id);
let stages_notify = self.stages_notify.clone();
let flight_scatter = T::try_create(
action.get_plan().schema(),
action.get_scatter_expression(),
action.get_sinks().len(),
)?;
query_context.try_spawn(
async move {
let _session = session;
wait_start(stage_name, stages_notify).await;
let sinks_tx_ref = &sinks_tx;
let forward_blocks = async move {
let mut abortable_stream = pipeline.execute().await?;
while let Some(item) = abortable_stream.next().await {
let forward_blocks = flight_scatter.execute(&item?)?;
assert_eq!(forward_blocks.len(), sinks_tx_ref.len());
for (index, forward_block) in forward_blocks.iter().enumerate() {
let tx: &Sender<Result<DataBlock>> = &sinks_tx_ref[index];
tx.send(Ok(forward_block.clone()))
.await
.map_err_to_code(ErrorCode::LogicalError, || {
"Cannot push data when run_action"
})?;
}
}
Result::Ok(())
};
if let Err(error) = forward_blocks.await {
for tx in &sinks_tx {
if !tx.is_closed() {
let send_error_message = tx.send(Err(error.clone()));
let _ignore_send_error = send_error_message.await;
}
}
}
}
.instrument(Span::current()),
)?;
Ok(())
} |
utils.rs | use serde::de::Error as DeError;
use serde::de::MapAccess;
use serde::ser::{SerializeSeq, Serialize, Serializer};
use std::{
collections::HashMap,
hash::Hash,
};
use super::prelude::*;
#[cfg(feature = "cache")]
use crate::internal::prelude::*;
#[cfg(all(feature = "cache", feature = "model"))]
use super::permissions::Permissions;
#[cfg(all(feature = "cache", feature = "model"))]
use crate::cache::Cache;
pub fn default_true() -> bool {
true
}
pub fn deserialize_emojis<'de, D: Deserializer<'de>>(
deserializer: D)
-> StdResult<HashMap<EmojiId, Emoji>, D::Error> {
let vec: Vec<Emoji> = Deserialize::deserialize(deserializer)?;
let mut emojis = HashMap::new();
for emoji in vec {
emojis.insert(emoji.id, emoji);
}
Ok(emojis)
}
pub fn serialize_emojis<S: Serializer>(
emojis: &HashMap<EmojiId, Emoji>,
serializer: S) -> StdResult<S::Ok, S::Error> {
let mut seq = serializer.serialize_seq(Some(emojis.len()))?;
for emoji in emojis.values() {
seq.serialize_element(emoji)?;
}
seq.end()
}
pub fn deserialize_guild_channels<'de, D: Deserializer<'de>>(
deserializer: D)
-> StdResult<HashMap<ChannelId, GuildChannel>, D::Error> {
let vec: Vec<GuildChannel> = Deserialize::deserialize(deserializer)?;
let mut map = HashMap::new();
for channel in vec {
map.insert(channel.id, channel);
}
Ok(map)
}
pub fn deserialize_members<'de, D: Deserializer<'de>>(
deserializer: D)
-> StdResult<HashMap<UserId, Member>, D::Error> {
let vec: Vec<Member> = Deserialize::deserialize(deserializer)?;
let mut members = HashMap::new();
for member in vec {
let user_id = member.user.id;
members.insert(user_id, member);
}
Ok(members)
}
pub fn deserialize_presences<'de, D: Deserializer<'de>>(
deserializer: D)
-> StdResult<HashMap<UserId, Presence>, D::Error> {
let vec: Vec<Presence> = Deserialize::deserialize(deserializer)?;
let mut presences = HashMap::new();
for presence in vec {
presences.insert(presence.user_id, presence);
}
Ok(presences)
}
pub fn serialize_presences<S: Serializer>(
presences: &HashMap<UserId, Presence>,
serializer: S
) -> StdResult<S::Ok, S::Error> {
let mut seq = serializer.serialize_seq(Some(presences.len()))?;
for presence in presences.values() {
seq.serialize_element(presence)?;
}
seq.end()
}
pub fn deserialize_private_channels<'de, D: Deserializer<'de>>(
deserializer: D)
-> StdResult<HashMap<ChannelId, Channel>, D::Error> {
let vec: Vec<Channel> = Deserialize::deserialize(deserializer)?;
let mut private_channels = HashMap::new();
for private_channel in vec {
let id = match private_channel {
Channel::Private(ref channel) => channel.id,
Channel::Guild(_) => unreachable!("Guild private channel decode"),
Channel::Category(_) => unreachable!("Channel category private channel decode"),
};
private_channels.insert(id, private_channel);
}
Ok(private_channels)
}
pub fn serialize_private_channels<S: Serializer>(
private_channels: &HashMap<ChannelId, Channel>,
serializer: S
) -> StdResult<S::Ok, S::Error> {
let mut seq = serializer.serialize_seq(Some(private_channels.len()))?;
for private_channel in private_channels.values() {
seq.serialize_element(private_channel)?;
}
seq.end()
}
pub fn deserialize_roles<'de, D: Deserializer<'de>>(
deserializer: D)
-> StdResult<HashMap<RoleId, Role>, D::Error> {
let vec: Vec<Role> = Deserialize::deserialize(deserializer)?;
let mut roles = HashMap::new();
for role in vec {
roles.insert(role.id, role);
}
Ok(roles)
}
pub fn serialize_roles<S: Serializer>(
roles: &HashMap<RoleId, Role>,
serializer: S
) -> StdResult<S::Ok, S::Error> {
let mut seq = serializer.serialize_seq(Some(roles.len()))?;
for role in roles.values() {
seq.serialize_element(role)?;
}
seq.end()
}
pub fn deserialize_single_recipient<'de, D: Deserializer<'de>>(
deserializer: D)
-> StdResult<User, D::Error> {
let mut users: Vec<User> = Deserialize::deserialize(deserializer)?;
let user = if users.is_empty() {
return Err(DeError::custom("Expected a single recipient"));
} else {
users.remove(0)
};
Ok(user)
}
pub fn serialize_single_recipient<S: Serializer>(
user: &User,
serializer: S,
) -> StdResult<S::Ok, S::Error> {
let mut seq = serializer.serialize_seq(Some(1))?;
seq.serialize_element(user)?;
seq.end()
}
pub fn deserialize_u16<'de, D: Deserializer<'de>>(deserializer: D) -> StdResult<u16, D::Error> {
deserializer.deserialize_any(U16Visitor)
}
pub fn deserialize_u64<'de, D: Deserializer<'de>>(deserializer: D) -> StdResult<u64, D::Error> {
deserializer.deserialize_any(U64Visitor)
}
#[allow(clippy::trivially_copy_pass_by_ref)]
pub fn serialize_u64<S: Serializer>(data: &u64, ser: S) -> StdResult<S::Ok, S::Error> {
ser.serialize_str(&data.to_string())
}
pub fn deserialize_voice_states<'de, D: Deserializer<'de>>(
deserializer: D)
-> StdResult<HashMap<UserId, VoiceState>, D::Error> {
let vec: Vec<VoiceState> = Deserialize::deserialize(deserializer)?;
let mut voice_states = HashMap::new();
for voice_state in vec {
voice_states.insert(voice_state.user_id, voice_state);
}
Ok(voice_states)
}
pub fn serialize_gen_map<K: Eq + Hash, S: Serializer, V: Serialize>(
map: &HashMap<K, V>,
serializer: S,
) -> StdResult<S::Ok, S::Error> {
let mut seq = serializer.serialize_seq(Some(map.len()))?;
for value in map.values() {
seq.serialize_element(&value)?;
}
seq.end()
}
#[cfg(all(feature = "cache", feature = "model"))]
pub async fn user_has_perms( | cache: impl AsRef<Cache>,
channel_id: ChannelId,
guild_id: Option<GuildId>,
mut permissions: Permissions
) -> Result<bool> {
let cache = cache.as_ref();
let channel = match cache.channel(channel_id).await {
Some(channel) => channel,
None => return Err(Error::Model(ModelError::ChannelNotFound)),
};
// Both users in DMs, all users in groups, and maybe all channels in categories
// will have the same permissions.
//
// The only exception to this is when the current user is blocked by
// the recipient in a DM channel, preventing the current user
// from sending messages.
//
// Since serenity can't _reasonably_ check and keep track of these,
// just assume that all permissions are granted and return `true`.
let (guild_id, guild_channel) = match channel {
Channel::Guild(channel) => (channel.guild_id, channel),
Channel::Category(_) => return Ok(true),
Channel::Private(_) => match guild_id {
Some(_) => return Err(Error::Model(ModelError::InvalidChannelType)),
None => return Ok(true),
}
};
let guild = match cache.guild(guild_id).await {
Some(guild) => guild,
None => return Err(Error::Model(ModelError::GuildNotFound)),
};
let member = match guild.members.get(&cache.current_user().await.id) {
Some(member) => member,
None => return Err(Error::Model(ModelError::MemberNotFound)),
};
let perms = guild.user_permissions_in(&guild_channel, member)?;
permissions.remove(perms);
Ok(permissions.is_empty())
}
macro_rules! num_visitors {
($($visitor:ident: $type:ty),*) => {
$(
#[derive(Debug)]
pub struct $visitor;
impl<'de> Visitor<'de> for $visitor {
type Value = $type;
fn expecting(&self, formatter: &mut Formatter<'_>) -> FmtResult {
formatter.write_str("identifier")
}
fn visit_str<E: DeError>(self, v: &str) -> StdResult<Self::Value, E> {
v.parse::<$type>().map_err(|_| {
let mut s = String::with_capacity(32);
s.push_str("Unknown ");
s.push_str(stringify!($type));
s.push_str(" value: ");
s.push_str(v);
DeError::custom(s)
})
}
fn visit_i64<E: DeError>(self, v: i64) -> StdResult<Self::Value, E> { Ok(v as $type) }
fn visit_u64<E: DeError>(self, v: u64) -> StdResult<Self::Value, E> { Ok(v as $type) }
// This is called when serde_json's `arbitrary_precision` feature is enabled.
fn visit_map<A: MapAccess<'de>>(self, mut map: A) -> StdResult<Self::Value, A::Error> {
struct Id {
num: $type,
}
struct StrVisitor;
impl<'de> Visitor<'de> for StrVisitor {
type Value = $type;
fn expecting(&self, formatter: &mut Formatter<'_>) -> FmtResult {
formatter.write_str("string")
}
fn visit_str<E: DeError>(self, s: &str) -> StdResult<Self::Value, E> { s.parse().map_err(E::custom) }
fn visit_string<E: DeError>(self, s: String) -> StdResult<Self::Value, E> { s.parse().map_err(E::custom) }
}
impl<'de> Deserialize<'de> for Id {
fn deserialize<D: Deserializer<'de>>(des: D) -> StdResult<Self, D::Error> {
Ok(Id { num: des.deserialize_str(StrVisitor)? })
}
}
map.next_value::<Id>().map(|id| id.num)
}
}
)*
}
}
num_visitors!(U16Visitor: u16, U64Visitor: u64); | |
index.js | const jwt = require('jsonwebtoken');
const defaultconfig = {
secret: 'abcdefghijklmnopqrstuvwxyz0123456789',
};
function PluginJWT(config={}) {
PluginJWT.config = {
...defaultconfig,
...config,
};
return build;
}
PluginJWT.getConfig = function getConfig(name) {
return !!name ? PluginJWT.config[name] : PluginJWT.config;
}
function build(req, res) {
const tokenRaw = req.headers['authorization'] || req.headers['Authorization'];
if (tokenRaw) {
const tokenSplitted = tokenRaw.split(' '); | req.user = jwt.verify(token, PluginJWT.getConfig('secret'))
}
return;
}
req.error = "not authorized";
};
module.exports = PluginJWT; | if (tokenSplitted.length < 2) {
req.error = "invalid token";
} else {
const token = tokenSplitted[1]; |
If.tsx | import React from 'react';
interface IProps {
condition: boolean; | }
function If({ children, condition, else: elseElement = null }: IProps): JSX.Element | null {
if (condition) {
return children;
}
return elseElement;
}
export default If; | children: React.ReactElement;
else?: React.ReactElement | null | undefined; |
unmarshal.go | package misc
import (
"fmt"
"io/ioutil"
"net/http"
"net/url"
"github.com/BurntSushi/toml"
"github.com/hashicorp/hcl/v2/hclsimple"
)
// UnmarshalHCL decodes the hcl file read from path
// then unmarshal it into the given interface
func UnmarshalHCL(path string, v interface{}) error {
var err error
var data []byte
// Treat the path as a remote URL only when it parses and carries an http(s) scheme;
// otherwise fall back to reading it as a local file.
if u, err := url.Parse(path); err == nil && (u.Scheme == "http" || u.Scheme == "https") {
resp, err := http.Get(path)
if err != nil {
return fmt.Errorf("failed to load hcl from %s: %s", path, err)
}
defer resp.Body.Close()
if data, err = ioutil.ReadAll(resp.Body); err != nil {
return fmt.Errorf("failed to load hcl from %s: %s", path, err)
}
} else {
source, err := NewReadCloser(path)
if err != nil {
return err
}
defer source.Close()
data, err = ioutil.ReadAll(source)
if err != nil {
return err
}
}
err = hclsimple.Decode("source.hcl", data, nil, v)
if err != nil {
return fmt.Errorf("failed to decode hcl: %s: %s", path, err)
}
return nil
}
// UnmarshalTOML decodes the toml file read from path
// then unmarshal it into the given interface
func | (path string, v interface{}) error {
source, err := NewReadCloser(path)
if err != nil {
return err
}
defer source.Close()
_, err = toml.DecodeReader(source, v)
if err != nil {
return fmt.Errorf("failed to decode toml: %s: %s", path, err)
}
return nil
}
| UnmarshalTOML |
transform_mesh_w_4x4mat.py | ######################################################################## | import numpy
import vtk
from vtk_py import *
########################################################################
def transform_mesh_w_4x4mat(infilename, outfilename, matrix):
ugrid = vtk.vtkUnstructuredGrid()
if(infilename[len(infilename)-4:len(infilename)] == ".vtu"):
ugrid = readXMLUGrid(infilename)
elif(infilename[len(infilename)-4:len(infilename)] == ".vtk"):
ugrid = readUGrid(infilename)
rot_mat = vtk.vtkMatrix4x4()
rot_mat.DeepCopy(matrix)
transform = vtk.vtkTransform()
transform.SetMatrix(rot_mat)
transform.Update()
transformfilter = vtk.vtkTransformFilter()
if (vtk.vtkVersion.GetVTKMajorVersion() >= 6):
transformfilter.SetInputData(ugrid)
else:
transformfilter.SetInput(ugrid)
transformfilter.SetTransform(transform)
transformfilter.Update()
if(outfilename[len(outfilename)-4:len(outfilename)] == ".vtu"):
writeXMLUGrid(transformfilter.GetOutput(), outfilename)
elif(outfilename[len(outfilename)-4:len(outfilename)] == ".vtk"):
writeUGrid(transformfilter.GetOutput(), outfilename)
return transformfilter.GetOutput()
if (__name__ == "__main__"):
print len(sys.argv)
assert (len(sys.argv) == 19), 'Number of arguments must be 18: infilename, outfilename, and 16 matrix entries.'
infilename = sys.argv[1]
outfilename = sys.argv[2]
matrix = map(float,sys.argv[3:19])
print matrix
transform_mesh_w_4x4mat(infilename, outfilename, matrix) |
import sys |
get.rs | use snafu::{OptionExt, ResultExt};
use std::path::Path;
mod merge_json;
use merge_json::merge_json;
/// Fetches the given prefixes from the API and merges them into a single Value. (It's not
/// expected that given prefixes would overlap, but if they do, later ones take precedence.)
pub async fn | <P>(socket_path: P, prefixes: Vec<String>) -> Result<serde_json::Value>
where
P: AsRef<Path>,
{
let mut results: Vec<serde_json::Value> = Vec::with_capacity(prefixes.len());
// Fetch all given prefixes into separate Values.
for prefix in prefixes {
let uri = format!("/?prefix={}", prefix);
let method = "GET";
let (_status, body) = crate::raw_request(&socket_path, &uri, method, None)
.await
.context(error::Request { uri, method })?;
let value = serde_json::from_str(&body).context(error::ResponseJson { body })?;
results.push(value);
}
// Merge results together.
results
.into_iter()
.reduce(|mut merge_into, merge_from| {
merge_json(&mut merge_into, merge_from);
merge_into
})
.context(error::NoPrefixes)
}
/// Fetches the given URI from the API and returns the result as an untyped Value.
pub async fn get_uri<P>(socket_path: P, uri: String) -> Result<serde_json::Value>
where
P: AsRef<Path>,
{
let method = "GET";
let (_status, body) = crate::raw_request(&socket_path, &uri, method, None)
.await
.context(error::Request { uri, method })?;
serde_json::from_str(&body).context(error::ResponseJson { body })
}
mod error {
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility = "pub(super)")]
pub enum Error {
#[snafu(display("Must give prefixes to query"))]
NoPrefixes,
#[snafu(display("Failed {} request to '{}': {}", method, uri, source))]
Request {
method: String,
uri: String,
source: crate::Error,
},
#[snafu(display("Response contained invalid JSON '{}' - {}", body, source))]
ResponseJson {
body: String,
source: serde_json::Error,
},
}
}
pub use error::Error;
pub type Result<T> = std::result::Result<T, error::Error>;
| get_prefixes |
passwordcheck.js | function arePasswordsEqual(original, newPassword) {
if (original == newPassword) {
return true;
} | else {
return false;
}
} |
|
testProject.meta.js | Bridge.assembly("TestProject", function ($asm, globals) {
"use strict";
var $m = Bridge.setMetadata,
$n = [System,Test.BridgeIssues.N1092,Test.BridgeIssues.N772,TestProject2,TestProject1];
$m($n[1].App, function () { return {"att":1048577,"a":2,"m":[{"a":2,"isSynthetic":true,"n":".ctor","t":1,"sn":"ctor"},{"a":2,"n":"Main1","is":true,"t":8,"sn":"Main1","rt":$n[0].Void}]}; }); | $m($n[4].TestClassA, function () { return {"att":1048576,"a":4,"m":[{"a":2,"isSynthetic":true,"n":".ctor","t":1,"sn":"ctor"},{"a":2,"n":"Value1","t":16,"rt":$n[0].Int32,"g":{"a":2,"n":"get_Value1","t":8,"rt":$n[0].Int32,"fg":"Value1"},"s":{"a":2,"n":"set_Value1","t":8,"p":[$n[0].Int32],"rt":$n[0].Void,"fs":"Value1"},"fn":"Value1"}]}; });
}); | $m($n[2].App, function () { return {"att":1048577,"a":2,"m":[{"a":2,"isSynthetic":true,"n":".ctor","t":1,"sn":"ctor"},{"a":2,"n":"Main1","is":true,"t":8,"sn":"Main1","rt":$n[0].Void}]}; });
$m($n[3].TestClassB, function () { return {"att":1048576,"a":4,"m":[{"a":2,"isSynthetic":true,"n":".ctor","t":1,"sn":"ctor"},{"a":2,"n":"Value1","t":16,"rt":$n[0].Int32,"g":{"a":2,"n":"get_Value1","t":8,"rt":$n[0].Int32,"fg":"Value1"},"s":{"a":2,"n":"set_Value1","t":8,"p":[$n[0].Int32],"rt":$n[0].Void,"fs":"Value1"},"fn":"Value1"}]}; }); |
26.remove-duplicates-from-sorted-array.py | class Solution(object):
def | (self, nums: List[int]) -> int:
i = 0
for j in range(len(nums)):
if (i == 0 or nums[i - 1] < nums[j]):
nums[i] = nums[j]
i += 1
return i
class Solution2:
def removeDuplicates(self, nums: List[int]) -> int:
slow = fast = 0
while fast < len(nums):
while fast + 1 < len(nums) and nums[fast] == nums[fast + 1]:
fast += 1
if nums[slow] < nums[fast]:
nums[slow], nums[fast] = nums[fast], nums[slow]
slow += 1
fast += 1
return slow | removeDuplicates |
Application.js | import React, { Component, PropTypes } from 'react'
import ApplicationLayout from 'template-ui/lib/components/layout2/ApplicationLayout'
import IconText from 'template-ui/lib/components/IconText'
import ListMenu from 'template-ui/lib/components/ListMenu'
import IconMenu from 'template-ui/lib/components/IconMenu'
import GoogleUserChip from 'template-ui/lib/components/widgets/GoogleUserChip'
import horizontal from 'template-ui/lib/components/theme/horizontal.css'
import apptheme from './theme/application.css'
class ApplicationComponent extends Component {
render() {
const mainMenu = (
<ListMenu
options={ this.props.menuOptions }
onClick={ this.props.onMenuClick }
/>
)
const appbarMenu = (
<div className={ horizontal.center }>
{
this.props.user ? ( | user={ this.props.user }
/>
) : null
}
<IconMenu
options={ this.props.dropdownOptions }
onClick={ this.props.onOptionClick }
/>
</div>
)
const applicationProps = {
...this.props,
menu: mainMenu,
appbar: appbarMenu,
}
return (
<ApplicationLayout {...applicationProps} />
)
}
}
export default ApplicationComponent
/*
<IconText
icon='person'
text={ this.props.username }
/>
*/ | <GoogleUserChip
whiteText |
index.html.ts | // metadata
export const version = "0.5.16"
export const title = "Interface"
export const description = "An example of interface in Solidity"
const html = `<p>An interface defines constraints so that any contract that implements it can communicate with another contract that requires these constraints.</p>
<p>Interface</p>
<ul>
<li>cannot have any functions implemented</li>
<li>cannot inherit from other contracts and interfaces</li>
<li>all declared functions must be external</li>
<li>cannot declare a constructor</li>
<li>cannot declare state variables</li>
</ul>
<pre><code class="language-solidity">pragma solidity ^0.5.16;
/* | enum FoodType { MEAT, PLANT, ANY }
struct Position {
uint x;
uint y;
}
event Talk(string message);
// All functions must be external
function move(uint x, uint y) external;
function eat(FoodType foodType) external returns (bool);
function talk(string calldata message) external;
}
contract Wolf is Animal {
Position position;
function talk(string calldata message) external {
emit Talk("Woof!");
emit Talk(message);
}
function move(uint x, uint y) external {
position.x = x;
position.y = y;
}
function eat(FoodType foodType) external returns (bool) {
return foodType == FoodType.MEAT;
}
}
contract Zoo {
function feed(address _animal, Animal.FoodType foodType) public returns (bool) {
Animal animal = Animal(_animal);
animal.talk("I'm hungry");
return animal.eat(foodType);
}
}</code></pre>
`
export default html | Here is an example where the Zoo contract calls a contract that implements
the Animal interface.
*/
interface Animal { |
app.js | const express = require('express');
const bodyParser = require('body-parser');
const api = require('./notifications.js');
| console.debug(...args);
}
}
const router = express.Router();
router.get('/', (req, res, next) => {
const msg = api.next();
if (!msg) {
debug('No new notifications');
return res.status(204).end();
}
res.json(msg);
});
router.post('/', bodyParser.json(), (req, res) => {
if (!req.body || !req.body.message) {
return res.status(400).end();
}
api.add({
title: req.body.title,
message: req.body.message,
date: new Date()
});
res.status(201).end();
});
router.use((req, res, next) => {
res.status(404).send('Not found');
});
router.use((err, req, res, next) => {
console.error(err);
res.status(500).send('Server error');
});
module.exports = router; | function debug (...args) {
if (process.env.DEBUG) { |
index.test.tsx | import { act } from "react-dom/test-utils";
import { mount } from "enzyme";
import fetchMock from "fetch-mock";
import { advanceTo, advanceBy, clear } from "jest-date-mock";
import { useInView } from "react-intersection-observer";
import toDiffableHtml from "diffable-html";
import { MockAlertGroup, MockAlert } from "__fixtures__/Alerts";
import {
EmptyHistoryResponse,
RainbowHistoryResponse,
FailedHistoryResponse,
} from "__fixtures__/AlertHistory";
import type {
APIAlertGroupT,
APIGridT,
HistoryResponseT,
LabelsT,
} from "Models/APITypes";
import { AlertHistory } from ".";
let group: APIAlertGroupT;
let grid: APIGridT;
const MockGroup = (groupName: string, sharedLabels: LabelsT = []) => {
const group = MockAlertGroup(
[
{ name: "alertname", value: "Fake Alert" },
{ name: "groupName", value: groupName },
],
[],
[],
sharedLabels,
{}
);
return group;
};
const MockAlerts = (alertCount: number) => {
for (let i = 1; i <= alertCount; i++) {
const alert = MockAlert(
[],
[{ name: "instance", value: `instance${i}` }],
"active"
);
const startsAt = new Date();
alert.startsAt = startsAt.toISOString();
for (let j = 0; j < alert.alertmanager.length; j++) {
alert.alertmanager[j].startsAt = startsAt.toISOString();
alert.alertmanager[j].source = "http://prometheus.example.com/graph";
}
group.alerts.push(alert);
}
};
beforeEach(() => {
jest.useFakeTimers();
advanceTo(new Date(Date.UTC(2000, 1, 1, 0, 0, 0)));
group = MockGroup("fakeGroup");
grid = {
labelName: "foo",
labelValue: "bar",
alertGroups: [],
totalGroups: 0,
stateCount: {
active: 0,
suppressed: 0,
unprocessed: 0,
},
};
});
afterEach(() => {
fetchMock.resetHistory();
fetchMock.reset();
clear();
});
describe("<AlertHistory />", () => {
it("send a correct payload with empty grid", async () => {
fetchMock.resetHistory();
fetchMock.mock(
"*",
{
headers: { "Content-Type": "application/json" },
body: JSON.stringify(EmptyHistoryResponse),
},
{
overwriteRoutes: true,
}
);
grid.labelName = "";
grid.labelValue = "";
MockAlerts(3);
const tree = mount(<AlertHistory group={group} grid={grid}></AlertHistory>);
await act(async () => {
await fetchMock.flush(true);
});
expect(fetchMock.calls()).toHaveLength(1);
expect(fetchMock.calls()[0][1]?.body).toStrictEqual(
JSON.stringify({
sources: [
"https://secure.example.com/graph",
"http://plain.example.com/",
],
labels: { alertname: "Fake Alert", groupName: "fakeGroup" },
})
);
tree.unmount();
});
it("send a correct payload with non-empty grid", async () => {
fetchMock.resetHistory();
fetchMock.mock(
"*",
{
headers: { "Content-Type": "application/json" },
body: JSON.stringify(EmptyHistoryResponse),
},
{
overwriteRoutes: true,
}
);
MockAlerts(3);
const tree = mount(<AlertHistory group={group} grid={grid}></AlertHistory>);
await act(async () => {
await fetchMock.flush(true);
});
expect(fetchMock.calls()).toHaveLength(1);
expect(fetchMock.calls()[0][1]?.body).toStrictEqual(
JSON.stringify({
sources: [
"https://secure.example.com/graph",
"http://plain.example.com/",
],
labels: { alertname: "Fake Alert", groupName: "fakeGroup", foo: "bar" },
})
);
tree.unmount();
});
it("send a correct payload with @cluster grid", async () => {
fetchMock.resetHistory();
fetchMock.mock(
"*",
{
headers: { "Content-Type": "application/json" },
body: JSON.stringify(EmptyHistoryResponse),
},
{
overwriteRoutes: true,
}
);
grid.labelName = "@cluster";
grid.labelValue = "prod";
MockAlerts(3);
const tree = mount(<AlertHistory group={group} grid={grid}></AlertHistory>);
await act(async () => {
await fetchMock.flush(true);
});
expect(fetchMock.calls()).toHaveLength(1);
expect(fetchMock.calls()[0][1]?.body).toStrictEqual(
JSON.stringify({
sources: [
"https://secure.example.com/graph",
"http://plain.example.com/",
],
labels: { alertname: "Fake Alert", groupName: "fakeGroup" },
})
);
tree.unmount();
});
it("send a correct payload with shared labels", async () => {
fetchMock.resetHistory();
fetchMock.mock(
"*",
{
headers: { "Content-Type": "application/json" },
body: JSON.stringify(EmptyHistoryResponse),
},
{
overwriteRoutes: true,
}
);
MockAlerts(3);
group = MockGroup("fakeGroup", [
{ name: "shared1", value: "value1" },
{ name: "shared2", value: "value2" },
]);
const tree = mount(<AlertHistory group={group} grid={grid}></AlertHistory>);
await act(async () => {
await fetchMock.flush(true);
});
expect(fetchMock.calls()).toHaveLength(1);
expect(fetchMock.calls()[0][1]?.body).toStrictEqual(
JSON.stringify({
sources: [
"https://secure.example.com/graph",
"http://plain.example.com/",
],
labels: {
alertname: "Fake Alert",
groupName: "fakeGroup",
shared1: "value1",
shared2: "value2",
foo: "bar",
},
})
);
tree.unmount();
});
it("matches snapshot with empty response", async () => {
fetchMock.resetHistory();
fetchMock.mock(
"*",
{
headers: { "Content-Type": "application/json" },
body: JSON.stringify(EmptyHistoryResponse),
},
{
overwriteRoutes: true,
}
);
MockAlerts(3);
const tree = mount(<AlertHistory group={group} grid={grid}></AlertHistory>);
await act(async () => {
await fetchMock.flush(true);
});
expect(fetchMock.calls()).toHaveLength(1);
expect(toDiffableHtml(tree.html())).toMatchSnapshot();
tree.unmount();
});
it("matches snapshot with rainbow response", async () => {
fetchMock.resetHistory();
fetchMock.mock(
"*",
{
headers: { "Content-Type": "application/json" },
body: JSON.stringify(RainbowHistoryResponse),
},
{
overwriteRoutes: true,
}
);
MockAlerts(3);
const tree = mount(<AlertHistory group={group} grid={grid}></AlertHistory>);
await act(async () => {
await fetchMock.flush(true);
});
expect(fetchMock.calls()).toHaveLength(1);
expect(toDiffableHtml(tree.html())).toMatchSnapshot();
tree.unmount();
});
it("doesn't fetch when not in view", async () => {
fetchMock.resetHistory();
fetchMock.mock(
"*",
{
headers: { "Content-Type": "application/json" },
body: JSON.stringify(RainbowHistoryResponse),
},
{
overwriteRoutes: true,
}
);
(useInView as jest.MockedFunction<typeof useInView>).mockReturnValue([
jest.fn(),
false,
] as any);
MockAlerts(3);
const tree = mount(<AlertHistory group={group} grid={grid}></AlertHistory>);
await act(async () => {
await fetchMock.flush(true);
});
tree.unmount();
expect(fetchMock.calls()).toHaveLength(0);
});
it("fetches an update after 300 seconds", async () => {
fetchMock.resetHistory();
fetchMock.mock(
"*",
{
headers: { "Content-Type": "application/json" },
body: JSON.stringify(RainbowHistoryResponse),
},
{
overwriteRoutes: true,
}
);
const inView = true;
(useInView as jest.MockedFunction<typeof useInView>).mockReturnValue([
jest.fn(),
inView,
] as any);
MockAlerts(3);
const tree = mount(<AlertHistory group={group} grid={grid}></AlertHistory>);
await act(async () => {
await fetchMock.flush(true);
});
expect(fetchMock.calls()).toHaveLength(1);
await act(async () => {
advanceBy(1000 * 299);
jest.advanceTimersByTime(1000 * 299);
await fetchMock.flush(true);
});
expect(fetchMock.calls()).toHaveLength(1);
await act(async () => {
advanceBy(1000 * 2);
jest.advanceTimersByTime(1000 * 2);
await fetchMock.flush(true);
});
expect(fetchMock.calls()).toHaveLength(2);
tree.unmount();
});
it("handles responses with errors", async () => {
fetchMock.resetHistory();
fetchMock.mock(
"*",
{
headers: { "Content-Type": "application/json" },
body: JSON.stringify(FailedHistoryResponse),
},
{
overwriteRoutes: true,
}
);
MockAlerts(3);
const tree = mount(<AlertHistory group={group} grid={grid}></AlertHistory>);
await act(async () => {
await fetchMock.flush(true);
});
expect(fetchMock.calls()).toHaveLength(1);
expect(toDiffableHtml(tree.html())).toMatchSnapshot();
tree.unmount();
});
it("handles fetch errors", async () => {
fetchMock.resetHistory();
fetchMock.mock(
"*",
{
status: 500,
body: "Error",
},
{
overwriteRoutes: true,
}
);
MockAlerts(3);
const tree = mount(<AlertHistory group={group} grid={grid}></AlertHistory>);
await act(async () => {
await fetchMock.flush(true);
});
expect(fetchMock.calls()).toHaveLength(1);
expect(toDiffableHtml(tree.html())).toMatchSnapshot();
tree.unmount();
});
interface testCasesT {
title: string;
response: HistoryResponseT;
values: string[];
}
const testCases: testCasesT[] = [
{
title: "EmptyHistoryResponse",
response: EmptyHistoryResponse,
values: new Array(24).fill("inactive"),
},
{
title: "RainbowHistoryResponse",
response: RainbowHistoryResponse,
values: [
"inactive",
"firing firing-1",
"firing firing-2",
"firing firing-3",
"firing firing-4",
"firing firing-5",
"inactive",
"firing firing-1",
"firing firing-2",
"firing firing-3",
"firing firing-4",
"firing firing-5",
"inactive",
"firing firing-1",
"firing firing-2",
"firing firing-3",
"firing firing-4",
"firing firing-5",
"inactive",
"firing firing-1",
"firing firing-2",
"firing firing-3",
"firing firing-4",
"firing firing-5",
],
},
{
title: "FailedHistoryResponse",
response: FailedHistoryResponse,
values: ["error"],
},
{
title: "Single alert",
response: {
error: "",
samples: [
...Array(12).fill({ timestamp: "", value: 0 }),
{ timestamp: "", value: 1 },
...Array(11).fill({ timestamp: "", value: 0 }),
],
},
values: [
...new Array(12).fill("inactive"),
"firing firing-1",
...new Array(11).fill("inactive"),
],
},
{
title: "2 alerts in a single hour",
response: {
error: "",
samples: [
{ timestamp: "", value: 2 },
...Array(23).fill({ timestamp: "", value: 0 }),
],
},
values: ["firing firing-2", ...new Array(23).fill("inactive")],
},
{
title: "5 alerts in a single hour",
response: {
error: "",
samples: [
{ timestamp: "", value: 5 },
...Array(23).fill({ timestamp: "", value: 0 }),
],
},
values: ["firing firing-5", ...new Array(23).fill("inactive")],
},
{
title: "20 alerts in a single hour",
response: {
error: "",
samples: [
{ timestamp: "", value: 20 },
...Array(23).fill({ timestamp: "", value: 0 }),
],
},
values: ["firing firing-5", ...new Array(23).fill("inactive")],
},
];
for (const testCase of testCases) {
const g = MockGroup("fakeGroup");
for (let i = 1; i <= 5; i++) {
const alert = MockAlert(
[],
[{ name: "instance", value: `instance${i}` }],
"active"
);
const startsAt = new Date();
alert.startsAt = startsAt.toISOString();
alert.alertmanager.push(alert.alertmanager[0]);
for (let j = 0; j < alert.alertmanager.length; j++) {
alert.alertmanager[j].startsAt = startsAt.toISOString();
alert.alertmanager[j].source = "http://prometheus.example.com/graph";
}
g.alerts.push(alert);
}
const gr = {
labelName: "foo",
labelValue: "bar",
alertGroups: [],
totalGroups: 0,
stateCount: { | };
it(`${testCase.title}`, async () => {
fetchMock.resetHistory();
fetchMock.mock(
"*",
{
headers: { "Content-Type": "application/json" },
body: JSON.stringify(testCase.response),
},
{
overwriteRoutes: true,
}
);
const tree = mount(<AlertHistory group={g} grid={gr}></AlertHistory>);
await act(async () => {
await fetchMock.flush(true);
});
tree.update();
const rects = tree.find("rect").map((r) => r.props().className);
expect(rects).toStrictEqual(testCase.values);
tree.unmount();
});
}
}); | active: 0,
suppressed: 0,
unprocessed: 0,
}, |
latency_pipe.py | import sys
sys.path.append("../")
import json
import os
import argparse
import torch
import shutil
import numpy as np
num_loop = 1
#for i in range(num_loop):
# op = 'CUDA_VISIBLE_DEVICES=2 python ar_test.py -i 0 -em test -analyze -ns -write_time -beam_size 1'
# os.system(op)
'''
for bs in [1, 5, 6]:
op = 'CUDA_VISIBLE_DEVICES=0 python ar_test.py -i 1 -em test -analyze -write_time -beam_size %d'%bs
os.system(op)
for iteration in range(1, 8):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=0 python test_nar.py --index 1 -beam_alpha 1.35 -em test -nd -paradigm mp -print_latency -write_time -lbs 6 -s 100 ' + ' -i %d'%iteration
os.system(op)
for iteration in range(1, 8):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=0 python test_nar.py --index -1 -beam_alpha 1.35 -em test -nd -paradigm mp -print_latency -write_time -lbs 6 ' + ' -i %d'%iteration
os.system(op)
#myset = [[5, 1], [5, 2], [5, 3], [5, 4], [5, 5], [4, 4], [3, 3], [2, 2], [1, 1]]
myset = [[3, 4], [3, 5]]
for item in myset:
i, lbs = item
op = 'CUDA_VISIBLE_DEVICES=0 python test_nar.py --index 1 -beam_alpha 1.35 -em test -nd -paradigm mp -s 100 -print_latency -write_time' + ' -i %d'%i + ' -lbs %d'%lbs
os.system(op)
for q in range(4, 0, -1):
for iteration in range(2):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=0 python test_nar.py --index 1 -beam_alpha 1.35 -em test -nd -paradigm ef -s 100 -print_latency -write_time -lbs 6 ' + ' -i %d'%iteration + ' -q %d'%q
os.system(op)
'''
for bs in [1, 5, 6]:
op = 'CUDA_VISIBLE_DEVICES=0 python ar_test.py -i 0 -em test -analyze -write_time -beam_size %d'%bs
os.system(op)
for iteration in range(1, 8):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=0 python test_nar.py --index 0 -em test -nd -paradigm mp -print_latency -write_time -lbs 6 -s 100 ' + ' -i %d'%iteration
os.system(op)
for iteration in range(1, 8):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=0 python test_nar.py --index -2 -em test -nd -paradigm mp -print_latency -write_time -lbs 6 ' + ' -i %d'%iteration
os.system(op)
#myset = [[5, 1], [5, 2], [5, 3], [5, 4], [5, 5], [4, 4], [3, 3], [2, 2], [1, 1]]
myset = [[1,1], [1, 2],[1, 3], [1, 4],[1, 5]]
for item in myset:
i, lbs = item
op = 'CUDA_VISIBLE_DEVICES=0 python test_nar.py --index 0 -em test -nd -paradigm mp -s 100 -print_latency -write_time' + ' -i %d'%i + ' -lbs %d'%lbs
os.system(op)
for q in range(4, 0, -1):
for iteration in range(2):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=0 python test_nar.py --index 0 -em test -nd -paradigm ef -s 100 -print_latency -write_time -lbs 6 ' + ' -i %d'%iteration + ' -q %d'%q
os.system(op)
'''
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=0 python ar_test.py -i 0 -em test -analyze -ns -write_time -beam_size 1'
os.system(op)
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=0 python ar_test.py -i 0 -em test -analyze -ns -write_time'
os.system(op)
for iteration in range(1, 8):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=0 python test_nar.py -ns --index 0 -em test -nd -paradigm mp -s 100 -print_latency -write_time' + ' -i %d'%iteration
os.system(op)
for iteration in range(1, 8):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=0 python test_nar.py -ns --index -2 -em test -nd -paradigm mp -s 100 -print_latency -write_time' + ' -i %d'%iteration
os.system(op)
'''
'''
myset = [[5, 1], [5, 2], [5, 3], [5, 4], [5, 5], [5, 6], [4, 4], [3, 3], [2, 2], [1, 1]]
for item in myset:
i, lbs = item
op = 'CUDA_VISIBLE_DEVICES=0 python test_nar.py --index 0 -em test -nd -paradigm mp -s 100 -print_latency -write_time' + ' -i %d'%i + ' -lbs %d'%lbs
os.system(op)
for q in range(4, 0, -1):
for iteration in range(2):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=0 python test_nar.py --index 0 -em test -nd -paradigm ef -s 100 -print_latency -write_time' + ' -i %d'%iteration + ' -q %d'%q
os.system(op)
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=3 python ar_test.py -i 1 -em test -analyze -ns -write_time -beam_size 1'
os.system(op)
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=3 python ar_test.py -i 1 -em test -analyze -ns -write_time'
os.system(op)
for iteration in range(1, 7):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=3 python test_nar.py --index 0 -em test -nd -paradigm mp -s 100 -print_latency -write_time -ns' + ' -i %d'%iteration | for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=3 python test_nar.py --index 0 -em test -nd -paradigm mp -print_latency -write_time -ns' + ' -i %d'%iteration
os.system(op)
for iteration in range(5, 7):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=3 python test_nar.py --index 1 -beam_alpha 1.15 -em test -nd -paradigm mp -print_latency -write_time -ns' + ' -i %d'%iteration
os.system(op)
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=3 python test_nar.py --index 1 -beam_alpha 1.15 -em test -nd -paradigm ef -print_latency -write_time -ns'
os.system(op)
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=3 python test_nar.py --index 1 -beam_alpha 1.15 -em test -nd -paradigm ef -s 100 -print_latency -write_time -ns'
os.system(op)
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=3 python test_nar.py --index 1 -beam_alpha 1.15 -em test -nd -paradigm ef -s 100 -print_latency -write_time -ns -q 2'
os.system(op)
for iteration in range(1, 7):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=3 python test_nar.py --index 0 -em test -nd -paradigm mp -s 100 -print_latency -write_time -i 5 ' + ' -lbs %d'%iteration
if i > 0:
op += " -ns "
os.system(op)
for iteration in range(3, 7):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=3 python test_nar.py --index 1 -em test -nd -paradigm mp -beam_alpha 1.15 -s 100 -print_latency -write_time -i 5 ' + ' -lbs %d'%iteration
if i > 0:
op += " -ns "
os.system(op)
for iteration in range(1, 7):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=3 python test_nar.py --index -1 -em test -nd -paradigm mp -beam_alpha 1.15 -print_latency -write_time -i 5 ' + ' -lbs %d'%iteration
if i > 0:
op += " -ns "
os.system(op)
for iteration in range(1, 7):
for i in range(num_loop):
op = 'CUDA_VISIBLE_DEVICES=3 python test_nar.py --index 0 -s 100 -em test -nd -paradigm mp -print_latency -write_time -lbs 5 ' + ' -i %d'%iteration
#if i > 0:
# op += " -ns "
os.system(op)
''' | os.system(op)
for iteration in range(5, 7): |
alarm.py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.ceilometer import alarm
from heat.engine import support
COMMON_GNOCCHI_PROPERTIES = (
COMPARISON_OPERATOR, EVALUATION_PERIODS, GRANULARITY,
AGGREGATION_METHOD, THRESHOLD,
) = (
'comparison_operator', 'evaluation_periods', 'granularity',
'aggregation_method', 'threshold',
)
common_gnocchi_properties_schema = {
COMPARISON_OPERATOR: properties.Schema(
properties.Schema.STRING,
_('Operator used to compare specified statistic with threshold.'),
constraints=[
constraints.AllowedValues(['ge', 'gt', 'eq', 'ne', 'lt',
'le']),
],
update_allowed=True
),
EVALUATION_PERIODS: properties.Schema(
properties.Schema.INTEGER,
_('Number of periods to evaluate over.'),
update_allowed=True
),
AGGREGATION_METHOD: properties.Schema(
properties.Schema.STRING,
_('The aggregation method to compare to the threshold'),
constraints=[
constraints.AllowedValues(['mean', 'sum', 'last', 'max', 'min',
'std', 'median', 'first', 'count']),
],
update_allowed=True
),
GRANULARITY: properties.Schema(
properties.Schema.INTEGER,
_('The time range in seconds.'),
update_allowed=True
),
THRESHOLD: properties.Schema(
properties.Schema.NUMBER,
_('Threshold to evaluate against.'),
required=True,
update_allowed=True
),
}
class CeilometerGnocchiResourcesAlarm(alarm.BaseCeilometerAlarm):
"""A resource allowing for the watch of some specified resource.
An alarm that evaluates a threshold against a metric of the
specified resource.
"""
support_status = support.SupportStatus(version='2015.1')
PROPERTIES = (
METRIC, RESOURCE_ID, RESOURCE_TYPE
) = (
'metric', 'resource_id', 'resource_type'
)
PROPERTIES += COMMON_GNOCCHI_PROPERTIES
properties_schema = {
METRIC: properties.Schema(
properties.Schema.STRING,
_('Metric name watched by the alarm.'),
required=True,
update_allowed=True
),
RESOURCE_ID: properties.Schema(
properties.Schema.STRING,
_('Id of a resource'),
required=True,
update_allowed=True
),
RESOURCE_TYPE: properties.Schema(
properties.Schema.STRING,
_('Resource type'),
required=True,
update_allowed=True
),
}
properties_schema.update(common_gnocchi_properties_schema)
properties_schema.update(alarm.common_properties_schema)
ceilometer_alarm_type = 'gnocchi_resources_threshold'
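# Illustrative sketch (not part of the original module): in a Heat template, this resource
# type could be declared roughly as below. Property names mirror the schema above; the
# concrete values (metric name, threshold, etc.) are hypothetical.
#
#   cpu_high_alarm:
#     type: OS::Ceilometer::GnocchiResourcesAlarm
#     properties:
#       metric: cpu_util
#       resource_id: { get_resource: my_server }
#       resource_type: instance
#       aggregation_method: mean
#       granularity: 300
#       evaluation_periods: 1
#       threshold: 80
#       comparison_operator: gt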
class CeilometerGnocchiAggregationByMetricsAlarm(
CeilometerGnocchiResourcesAlarm):
"""A resource that implements alarm with specified metrics.
An alarm that evaluates a threshold against an aggregation of a
user-specified list of metric ids.
"""
support_status = support.SupportStatus(version='2015.1')
| PROPERTIES += COMMON_GNOCCHI_PROPERTIES
properties_schema = {
METRICS: properties.Schema(
properties.Schema.LIST,
_('A list of metric ids.'),
required=True,
update_allowed=True,
),
}
properties_schema.update(common_gnocchi_properties_schema)
properties_schema.update(alarm.common_properties_schema)
ceilometer_alarm_type = 'gnocchi_aggregation_by_metrics_threshold'
class CeilometerGnocchiAggregationByResourcesAlarm(
CeilometerGnocchiResourcesAlarm):
"""A resource that implements alarm as an aggregation of resources alarms.
An alarm that aggregates a metric across all resources matching a query
and compares the result against the threshold; when the condition is met,
the alarm is activated.
"""
support_status = support.SupportStatus(version='2015.1')
PROPERTIES = (
METRIC, QUERY, RESOURCE_TYPE
) = (
'metric', 'query', 'resource_type'
)
PROPERTIES += COMMON_GNOCCHI_PROPERTIES
properties_schema = {
METRIC: properties.Schema(
properties.Schema.STRING,
_('Metric name watched by the alarm.'),
required=True,
update_allowed=True
),
QUERY: properties.Schema(
properties.Schema.STRING,
_('The query to filter the metrics'),
required=True,
update_allowed=True
),
RESOURCE_TYPE: properties.Schema(
properties.Schema.STRING,
_('Resource type'),
required=True,
update_allowed=True
),
}
properties_schema.update(common_gnocchi_properties_schema)
properties_schema.update(alarm.common_properties_schema)
ceilometer_alarm_type = 'gnocchi_aggregation_by_resources_threshold'
def resource_mapping():
return {
'OS::Ceilometer::GnocchiResourcesAlarm':
CeilometerGnocchiResourcesAlarm,
'OS::Ceilometer::GnocchiAggregationByMetricsAlarm':
CeilometerGnocchiAggregationByMetricsAlarm,
'OS::Ceilometer::GnocchiAggregationByResourcesAlarm':
CeilometerGnocchiAggregationByResourcesAlarm,
} | PROPERTIES = (METRICS,) = ('metrics',) |
tools.py | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 15 21:50:58 2018
@author: USER
"""
# Codes are free to use. Do whatever you want
from __future__ import absolute_import
"""Read raw data"""
####################### LIBRARY #############################
# exceptions library
from exceptions import (Data_Format_Exception,
Data_Match_Exception)
# Python stdlib imports
import datetime
from math import factorial
# data processing library
import numpy as np
# pyrod library
####################### CONSTANT ############################
# constant
####################### FUNCTIONS ###########################
'.......................optimise.........................'
# f - fitting data
# y - experiment data
# mask - mask data
def R_square(f, y, mask):
if not len(f) == len(y) == len(mask):
raise Data_Match_Exception('Please input equal length')
def nplist(data):
# check and transform data
try:
# check np array
if isinstance(data, np.ndarray):
pass
# check list
elif isinstance(data, list):
rl = np.array(data)
# check np mat
elif isinstance(data, np.matrix):
rl = np.asarray(data).reshape(-1)
# for other unpackable datatypes
else:
# init a list first
l = []
# unpack raw data with for
for e in data:
l.append(e)
# trans to np array
rl = np.array(l)
# unknown type
except Data_Format_Exception:
print('unknown data type')
return rl
# transform to np array; apply mask
rf, ry = nplist(f)*nplist(mask), nplist(y)*nplist(mask)
# calculate r square
ss_tot = np.sum((ry - np.sum(ry)/len(ry))**2)
ss_res = np.sum((ry - rf)**2)
r2 = 1 - ss_res/ss_tot
return r2
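# Illustrative example (values below are made up): entries whose mask is 0 are zeroed in
# both the fitted and the experimental data before the R-squared statistic is computed.
#   r2 = R_square(f=[1.0, 2.1, 2.9], y=[1.0, 2.0, 3.0], mask=[1, 1, 1])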
def opt_step_brute(func,x0_range,grid_size = 10,step = 2):
|
'......................smooth.........................'
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
"""
Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving average techniques.
References
----------
.. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
Cambridge University Press ISBN-13: 9780521880688
"""
# integer value
try:
window_size = np.abs(np.int(window_size))
order = np.abs(np.int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order+1)
half_window = (window_size -1) // 2
# precompute coefficients
b = np.mat([[k**i for i in order_range] for k in range(-half_window, half_window+1)])
m = np.linalg.pinv(b).A[deriv] * rate**deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs( y[1:half_window+1][::-1] - y[0] )
lastvals = y[-1] + np.abs(y[-half_window-1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve( m[::-1], y, mode='valid')
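# Illustrative usage sketch (the signal below is synthetic, not from the original module):
#   t = np.linspace(-4, 4, 500)
#   noisy = np.exp(-t ** 2) + np.random.normal(0, 0.05, t.shape)
#   smoothed = savitzky_golay(noisy, window_size=31, order=4)
# window_size must be an odd positive integer of at least order + 2.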
######################## CLASSES #############################
| """
Brute method is much too slow and big.
However, it's useful and simple. To improve it, we apply it in steps.
x0_range: range of variable, [x1-,x1+],[x2-,x2+]
currently, only two axes are available
"""
# current step is 3
step = 3
# grid_size and step have to be integer
try:
grid_size = int(grid_size)
step = int(step)
except ValueError:
raise ValueError("grid_size and step have to be of type int")
# one dimensional step brute method
if len(x0_range) == 1:
# store func(grid_data) result
grid_list0 = []
x0 = np.linspace(x0_range[0][0],x0_range[0][1],grid_size)
# func(grid_data)
for px in range(grid_size):
grid_list0.append(func(x0[px]))
# store min in step1
min_idx = np.argmin(grid_list0)
# continue step2
grid_list1 = []
x1 = x0[min_idx]
delta = (abs(x0_range[0][1] - x0_range[0][0]))/grid_size
x2 = np.linspace(x1-delta,x1+delta,grid_size)
for sx in range(grid_size):
grid_list1.append(func(x2[sx]))
min_step2 = x2[np.argmin(grid_list1)]
elif len(x0_range) == 2:
# step1: grid the x0_range
min_step1 = []
au = np.linspace(x0_range[0][0],x0_range[0][1],grid_size)
av = np.linspace(x0_range[1][0],x0_range[1][1],grid_size)
# find minimum in xu and xv grid
def grid_min(xu,xv):
x0_grid = np.meshgrid(xu, xv)
#grid list
grid_list = np.mat(np.zeros([grid_size**2,3]))
idx = 0
# pu -- for position in u axes
for pu in range(grid_size):
# pv -- for position in v axes
for pv in range(grid_size):
grid_list[idx,0] = x0_grid[0][pu,pv]
grid_list[idx,1] = x0_grid[1][pu,pv]
grid_list[idx,2] = func([x0_grid[0][pu,pv],
x0_grid[1][pu,pv]])
idx = idx + 1
# find the minimum in step1
min_idx = np.argmin(grid_list[:,2])
return grid_list[min_idx,:]
# append the first minimum before rocking
min_step1.append(grid_min(au,av))
# start rocking, try to avoid a local minimum
bu = au - (au[1]-au[0])/2
bv = av - (av[1]-av[0])/2
min_step1.append(grid_min(bu,bv))
# step 2
# step 2 new x range
u_min = np.min([min_step1[0][0,0],
min_step1[1][0,0]])
u_max = np.max([min_step1[0][0,0],
min_step1[1][0,0]])
deta_u = u_max - u_min
v_min = np.min([min_step1[0][0,1],
min_step1[1][0,1]])
v_max = np.max([min_step1[0][0,1],
min_step1[1][0,1]])
deta_v = v_max - v_min
# new u and v
cu = np.linspace(u_min-deta_u, u_min+deta_u, grid_size)
cv = np.linspace(v_min-deta_v, v_min+deta_v, grid_size)
min_step2 = grid_min(cu,cv).tolist()
return min_step2 |
ts_protobuf.rs | // Copyright 2020 Contributors to the Parsec project.
// SPDX-License-Identifier: Apache-2.0
#![allow(
non_snake_case,
non_camel_case_types,
non_upper_case_globals,
clippy::unseparated_literal_suffix,
// There is an issue where long double become u128 in extern blocks. Check this issue:
// https://github.com/rust-lang/rust-bindgen/issues/1549
improper_ctypes,
missing_debug_implementations,
trivial_casts,
clippy::all,
unused,
unused_qualifications
)]
use zeroize::Zeroize;
include!(concat!(env!("OUT_DIR"), "/ts_crypto.rs"));
/// Trait for associating an Opcode with each operation type
/// and obtaining it in a generic way.
pub trait GetOpcode {
fn opcode(&self) -> Opcode;
}
macro_rules! opcode_impl {
($type:ty, $opcode:ident) => {
impl GetOpcode for $type {
fn opcode(&self) -> Opcode {
Opcode::$opcode
}
}
};
($type_in:ty, $type_out:ty, $opcode:ident) => {
impl GetOpcode for $type_in {
fn opcode(&self) -> Opcode {
Opcode::$opcode
}
}
impl GetOpcode for $type_out {
fn opcode(&self) -> Opcode {
Opcode::$opcode
}
}
};
}
opcode_impl!(OpenKeyIn, OpenKeyOut, OpenKey);
opcode_impl!(CloseKeyIn, CloseKey);
opcode_impl!(GenerateKeyIn, GenerateKeyOut, GenerateKey);
opcode_impl!(DestroyKeyIn, DestroyKeyOut, DestroyKey);
opcode_impl!(SignHashIn, SignHashOut, SignHash);
opcode_impl!(VerifyHashIn, VerifyHashOut, VerifyHash);
opcode_impl!(ImportKeyIn, ImportKeyOut, ImportKey);
opcode_impl!(ExportPublicKeyIn, ExportPublicKeyOut, ExportPublicKey);
/// Trait allowing the handle of opened-key-dependent operations
/// to be set in a generic way.
pub trait SetHandle {
fn set_handle(&mut self, handle: u32);
}
macro_rules! set_handle_impl {
($type:ty) => {
impl SetHandle for $type {
fn set_handle(&mut self, handle: u32) {
self.handle = handle;
}
}
};
}
set_handle_impl!(DestroyKeyIn);
set_handle_impl!(SignHashIn);
set_handle_impl!(VerifyHashIn);
set_handle_impl!(AsymmetricEncryptIn);
set_handle_impl!(AsymmetricDecryptIn);
set_handle_impl!(ExportPublicKeyIn);
impl Drop for ImportKeyIn {
fn drop(&mut self) |
}
impl Drop for SignHashIn {
fn drop(&mut self) {
self.hash.zeroize();
}
}
impl Drop for VerifyHashIn {
fn drop(&mut self) {
self.hash.zeroize();
self.signature.zeroize();
}
}
| {
self.data.zeroize();
} |
js-handle.js | function ajaxConfirmSave()
{
if(confirm( 'คุณต้องการบันทึก การแก้ไข&&ข้อเสนอแนะ ของความเสี่ยงนี้.' ))
{
//var id=$('#myForm input[name="js_id"]').val();
//$('#myForm input[name="_method"]').val('PATCH');
//$('#myForm').attr("action", '../../headrmreview/'+id);
//alert('WWW');
$('#myForm').submit();
return true;
}
else
{
return false;
}
}
function loaddatable()
{
var rows_selected = [];
var table =$('#example2').DataTable({
"dom": '<"top"flB>r | "bottom"ip><"clear">',
buttons: [
{ extend: 'copy', className: 'btn btn-primary' },
{ extend: 'excel', className: 'btn btn-primary' },
{
extend: 'pdfHtml5',
orientation: 'landscape',
pageSize: 'A3' ,
className: 'btn btn-primary'
},
{ extend: 'print', className: 'btn btn-primary' }
],
"scrollX": true,
"scrollY": true,
'columnDefs':
[
{
'targets': 0,
'searchable':true,
'orderable':true,
//'className': 'dt-body-center'
'className': 'text-center'
}
,
{
'targets': 1,
'searchable':true,
'orderable':true,
'className': 'text-left'
}
,
{
'targets': 2,
'searchable':true,
'orderable':true,
'className': 'text-left'
}
,
{
'targets': 3,
'searchable':true,
'orderable':true,
'className': 'text-left'
}
,
{
'targets': 4,
'searchable':true,
'orderable':true,
'className': 'text-left'
}
,
{
'targets': 4,
'searchable':false,
'orderable':true,
'className': 'text-left'
}
,
{
'targets': 5,
'searchable':false,
'orderable':false,
'className': 'text-left'
}
],
'rowCallback': function(row, data, dataIndex){
// Get row ID
var rowId = data[0];
// If row ID is in the list of selected row IDs
if($.inArray(rowId, rows_selected) !== -1){
$(row).find('input[type="checkbox"]').prop('checked', true);
$(row).addClass('selected');
}
},
"order": [[ 0, "desc" ]],
'lengthMenu': [[10, 25, 50,100,150,200,300,-1], [10, 25, 50,100,150,200,300, "All" ]],
'responsive': true ,
//paging: false,
//searching: false,
/*
"language": {
"url": "//cdn.datatables.net/plug-ins/9dcbecd42ad/i18n/German.json"
}
*/
"language": {
//"url": 'lang/th.json'
//"url": '../../js/lang/th.json'
"url": 'js/lang/th.json'
}
});
}
| t<"clear">< |
io_loop.rs | use crate::{
buffer::Buffer,
channels::Channels,
connection_status::ConnectionState,
frames::Frames,
heartbeat::Heartbeat,
internal_rpc::InternalRPC,
protocol::{self, AMQPError, AMQPHardError},
reactor::{ReactorBuilder, ReactorHandle, Slot},
socket_state::SocketState,
tcp::HandshakeResult,
thread::ThreadHandle,
Configuration, ConnectionStatus, Error, PromiseResolver, Result, TcpStream,
};
use amq_protocol::frame::{gen_frame, parse_frame, AMQPFrame, GenError};
use log::{debug, error, trace};
use std::{
collections::VecDeque,
convert::TryFrom,
io::{self, Write},
sync::Arc,
thread::Builder as ThreadBuilder,
time::Duration,
};
const FRAMES_STORAGE: usize = 32;
#[derive(Debug, PartialEq)]
enum Status {
Initial,
SocketConnected,
SocketWritable,
Connected,
Stop,
}
pub struct IoLoop {
connection_status: ConnectionStatus,
configuration: Configuration,
channels: Channels,
internal_rpc: InternalRPC,
frames: Frames,
heartbeat: Heartbeat,
socket_state: SocketState,
reactor: Box<dyn ReactorHandle + Send>,
reactor_thread_handle: ThreadHandle,
connection_io_loop_handle: ThreadHandle,
stream: TcpStream,
slot: Slot,
status: Status,
frame_size: usize,
receive_buffer: Buffer,
send_buffer: Buffer,
serialized_frames: VecDeque<(u64, Option<PromiseResolver<()>>)>,
}
impl IoLoop {
#[allow(clippy::too_many_arguments)]
pub(crate) fn new(
connection_status: ConnectionStatus,
configuration: Configuration,
channels: Channels,
internal_rpc: InternalRPC,
frames: Frames,
socket_state: SocketState,
connection_io_loop_handle: ThreadHandle,
stream: HandshakeResult,
reactor_builder: &dyn ReactorBuilder,
) -> Result<Self> {
let mut stream = TcpStream::try_from(stream)?;
let heartbeat = Heartbeat::new(channels.clone());
let mut reactor = reactor_builder.build(heartbeat.clone())?;
let reactor_handle = reactor.handle();
let frame_size = std::cmp::max(
protocol::constants::FRAME_MIN_SIZE as usize,
configuration.frame_max() as usize,
);
let slot = reactor.register(stream.inner_mut(), socket_state.handle())?;
let reactor_thread_handle = reactor.start()?;
Ok(Self {
connection_status,
configuration,
channels,
internal_rpc,
frames,
heartbeat,
socket_state,
reactor: reactor_handle,
reactor_thread_handle,
connection_io_loop_handle,
stream,
slot,
status: Status::Initial,
frame_size,
receive_buffer: Buffer::with_capacity(FRAMES_STORAGE * frame_size),
send_buffer: Buffer::with_capacity(FRAMES_STORAGE * frame_size),
serialized_frames: VecDeque::default(),
})
}
    fn ensure_connected(&mut self) -> Result<bool> {
match self.stream.inner().peer_addr() {
Ok(peer) => {
debug!("Connecting to {}", peer);
self.status = Status::SocketConnected;
Ok(true)
}
Err(err) => {
if let io::ErrorKind::NotConnected = err.kind() {
Ok(false)
} else {
Err(err.into())
}
}
}
}
fn ensure_writable(&mut self) -> Result<bool> {
match self.stream.inner().is_writable() {
Ok(()) => {
self.status = Status::SocketWritable;
Ok(true)
}
Err(err) => {
if let io::ErrorKind::NotConnected | io::ErrorKind::WouldBlock = err.kind() {
Ok(false)
} else {
Err(err.into())
}
}
}
}
fn finish_setup(&mut self) -> Result<bool> {
if self.connection_status.connected() {
let frame_max = self.configuration.frame_max() as usize;
self.frame_size = std::cmp::max(self.frame_size, frame_max);
self.receive_buffer.grow(FRAMES_STORAGE * self.frame_size);
self.send_buffer.grow(FRAMES_STORAGE * self.frame_size);
let heartbeat = self.configuration.heartbeat();
if heartbeat != 0 {
let heartbeat = Duration::from_millis(u64::from(heartbeat) * 500); // * 1000 (ms) / 2 (half the negotiated timeout)
self.heartbeat.set_timeout(heartbeat);
self.reactor.start_heartbeat();
}
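            // Editor's note (worked example, not in the original source): with a
            // negotiated heartbeat of 60s, the timer above fires every
            // 60 * 500ms = 30s, i.e. half the negotiated timeout, leaving margin
            // before the peer considers the connection dead.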
self.status = Status::Connected;
}
Ok(true)
}
fn ensure_setup(&mut self) -> Result<bool> {
loop {
let success = match self.status {
Status::Initial => self.ensure_connected(),
Status::SocketConnected => self.ensure_writable(),
Status::SocketWritable => return self.finish_setup(),
Status::Connected => return Ok(true),
Status::Stop => return Ok(false),
}?;
if !success {
return Ok(false);
}
}
}
fn has_data(&self) -> bool {
self.frames.has_pending()
|| self.send_buffer.available_data() > 0
|| !self.serialized_frames.is_empty()
}
fn can_write(&mut self) -> bool {
self.socket_state.writable() && self.has_data() && !self.connection_status.blocked()
}
fn can_read(&mut self) -> bool {
self.socket_state.readable() && self.receive_buffer.available_space() > 0
}
fn can_parse(&self) -> bool {
self.receive_buffer.available_data() > 0
}
fn should_continue(&self) -> bool {
(self.status != Status::Connected
|| self.connection_status.connected()
|| self.connection_status.closing())
&& self.status != Status::Stop
&& !self.connection_status.errored()
}
pub fn start(mut self) -> Result<()> {
let waker = self.socket_state.handle();
self.connection_io_loop_handle.clone().register(
ThreadBuilder::new()
.name("lapin-io-loop".to_owned())
.spawn(move || {
while self.should_continue() {
if let Err(err) = self.run() {
self.critical_error(err)?;
}
}
self.heartbeat.cancel();
self.reactor.shutdown()?;
self.reactor_thread_handle.wait("reactor")?;
self.internal_rpc.stop_executor()
})?,
);
waker.wake();
Ok(())
}
fn poll_internal_rpc(&self) -> Result<()> {
self.internal_rpc.poll(&self.channels)
}
fn poll_socket_events(&mut self) -> Result<()> {
self.socket_state.poll_events();
if self.socket_state.error() {
self.critical_error(io::Error::from(io::ErrorKind::ConnectionAborted).into())?;
}
if self.socket_state.closed() {
self.critical_error(io::Error::from(io::ErrorKind::ConnectionReset).into())?;
}
self.poll_internal_rpc()
}
fn run(&mut self) -> Result<()> {
trace!("io_loop run");
self.poll_socket_events()?;
if !self.ensure_setup()? {
return Ok(());
}
trace!(
"io_loop do_run; can_read={}, can_write={}, has_data={}",
self.socket_state.readable(),
self.socket_state.writable(),
self.has_data()
);
if !self.can_read() && !self.can_write() {
self.socket_state.wait();
}
self.poll_socket_events()?;
if self.stream.is_handshaking() {
self.stream.handshake()?;
if self.stream.is_handshaking() {
// We hit WOULDBLOCK while handshaking, wait for the next socket event
return Ok(());
}
}
self.write()?;
if self.connection_status.closed() {
self.status = Status::Stop;
}
if self.should_continue() {
self.read()?;
}
self.handle_frames()?;
trace!(
"io_loop do_run done; can_read={}, can_write={}, has_data={}, status={:?}",
self.socket_state.readable(),
self.socket_state.writable(),
self.has_data(),
self.status
);
self.poll_internal_rpc()
}
fn critical_error(&mut self, error: Error) -> Result<()> {
if let Some(resolver) = self.connection_status.connection_resolver() {
resolver.swear(Err(error.clone()));
}
self.status = Status::Stop;
self.channels.set_connection_error(error.clone());
for (_, resolver) in std::mem::take(&mut self.serialized_frames) {
if let Some(resolver) = resolver {
resolver.swear(Err(error.clone()));
}
}
self.reactor.shutdown()?;
Err(error)
}
fn handle_read_result(&mut self, result: Result<()>) -> Result<()> {
if let Err(e) = self
.socket_state
.handle_read_result(result, &*self.reactor, self.slot)
{
error!("error reading: {:?}", e);
self.critical_error(e)?;
}
self.poll_internal_rpc()
}
fn handle_write_result(&mut self, result: Result<()>) -> Result<()> {
if let Err(e) = self
.socket_state
.handle_write_result(result, &*self.reactor, self.slot)
{
error!("error writing: {:?}", e);
self.critical_error(e)?;
}
self.poll_internal_rpc()
}
fn flush(&mut self) -> Result<()> {
self.stream.flush()?;
self.poll_internal_rpc()
}
fn write(&mut self) -> Result<()> {
if self.socket_state.writable() {
let res = self.flush();
self.handle_write_result(res)?;
}
while self.can_write() {
let res = self.write_to_stream();
self.handle_write_result(res)?;
}
self.poll_internal_rpc()
}
fn read(&mut self) -> Result<()> {
while self.can_read() {
let res = self.read_from_stream();
self.handle_read_result(res)?;
}
self.poll_internal_rpc()
}
fn write_to_stream(&mut self) -> Result<()> {
self.flush()?;
self.serialize()?;
let sz = self.send_buffer.write_to(&mut self.stream)?;
if sz > 0 {
self.heartbeat.update_last_write();
trace!("wrote {} bytes", sz);
self.send_buffer.consume(sz);
let mut written = sz as u64;
while written > 0 {
if let Some((to_write, resolver)) = self.serialized_frames.pop_front() {
if written < to_write {
self.serialized_frames
.push_front((to_write - written, resolver));
trace!("{} to write to complete this frame", to_write - written);
written = 0;
} else {
if let Some(resolver) = resolver {
resolver.swear(Ok(()));
}
written -= to_write;
}
} else {
error!(
"We've written {} but didn't expect to write anything",
written
);
break;
}
}
if self.send_buffer.available_data() > 0 {
// We didn't write all the data yet
trace!("Still {} to write", self.send_buffer.available_data());
}
self.flush()?;
} else {
error!("Socket was writable but we wrote 0, marking as wouldblock");
self.handle_write_result(Err(io::Error::from(io::ErrorKind::WouldBlock).into()))?;
}
self.poll_internal_rpc()
}
fn read_from_stream(&mut self) -> Result<()> {
match self.connection_status.state() {
ConnectionState::Closed => Ok(()),
ConnectionState::Error => Err(Error::InvalidConnectionState(ConnectionState::Error)),
_ => {
let sz = self.receive_buffer.read_from(&mut self.stream)?;
if sz > 0 {
trace!("read {} bytes", sz);
self.receive_buffer.fill(sz);
} else {
error!("Socket was readable but we read 0, marking as wouldblock");
self.handle_read_result(
Err(io::Error::from(io::ErrorKind::WouldBlock).into()),
)?;
}
self.poll_internal_rpc()
}
}
}
fn serialize(&mut self) -> Result<()> {
while let Some((next_msg, resolver)) = self.frames.pop(self.channels.flow()) {
trace!("will write to buffer: {}", next_msg);
let checkpoint = self.send_buffer.checkpoint();
let res = gen_frame(&next_msg)((&mut self.send_buffer).into());
match res.map(|w| w.into_inner().1) {
Ok(sz) => self.serialized_frames.push_back((sz, resolver)),
Err(e) => {
self.send_buffer.rollback(checkpoint);
match e {
GenError::BufferTooSmall(_) => {
// Requeue msg
self.frames.retry((next_msg, resolver));
break;
}
e => {
error!("error generating frame: {:?}", e);
self.critical_error(Error::SerialisationError(Arc::new(e)))?;
}
}
}
}
}
self.poll_internal_rpc()
}
fn handle_frames(&mut self) -> Result<()> {
while self.can_parse() {
if let Some(frame) = self.parse()? {
self.channels.handle_frame(frame)?;
} else {
break;
}
}
self.poll_internal_rpc()
}
fn parse(&mut self) -> Result<Option<AMQPFrame>> {
match parse_frame(self.receive_buffer.parsing_context()) {
Ok((i, f)) => {
let consumed = self.receive_buffer.offset(i);
let frame_max = self.configuration.frame_max() as usize;
if frame_max > 0 && consumed > frame_max {
error!("received large ({} bytes) frame", consumed);
let error = AMQPError::new(
AMQPHardError::FRAMEERROR.into(),
format!("frame too large: {} bytes", consumed).into(),
);
self.internal_rpc.handle().close_connection(
error.get_id(),
error.get_message().to_string(),
0,
0,
);
self.poll_internal_rpc()?;
self.critical_error(Error::ProtocolError(error))?;
}
self.receive_buffer.consume(consumed);
Ok(Some(f))
}
Err(e) => {
if !e.is_incomplete() {
error!("parse error: {:?}", e);
self.critical_error(Error::ParsingError(e))?;
}
Ok(None)
}
}
}
}
test_rts.rs | // Copyright (C) 2017-2018 Baidu, Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Baidu, Inc., nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use sgx_types::*;
use std::vec::Vec;
use std::string::String;
use sgx_trts::trts::*;
use sgx_trts::veh::*;
use sgx_trts::ascii::AsciiExt;
use sgx_trts::c_str::*;
use sgx_trts::error;
use sgx_trts::memchr;
use sgx_trts::libc;
use sgx_trts::enclave::*;
use core::mem;
global_ctors_object! {
VARNAME, func_name = {()}
}
// veh
extern "C" fn sample_exception_handler(_ : *mut sgx_exception_info_t) -> uint32_t {
0
}
pub fn test_rsgx_get_thread_policy() {
assert_eq!(rsgx_get_thread_policy(), SgxThreadPolicy::Bound);
}
pub fn test_trts_sizes() {
//Only during dev
//assert_eq!(mem::size_of::<global_data_t>(), 1488);
//assert_eq!(mem::size_of::<thread_data_t>(), 160);
}
pub fn test_register_first_exception_handler() {
let handle = rsgx_register_exception_handler(1, sample_exception_handler);
assert!(handle.is_some());
assert_eq!(rsgx_unregister_exception_handler(handle.unwrap()), true);
}
pub fn test_register_last_exception_handler() {
let handle = rsgx_register_exception_handler(0, sample_exception_handler);
assert!(handle.is_some());
assert_eq!(rsgx_unregister_exception_handler(handle.unwrap()), true);
}
pub fn test_register_multiple_exception_handler() {
let mut handler_vec : Vec<exception_handle> = Vec::new();
let ntest:usize = 100;
for i in 0..ntest {
let handle = rsgx_register_exception_handler(i as uint32_t % 2,
sample_exception_handler);
assert!(handle.is_some());
handler_vec.push(handle.unwrap());
}
for i in 0..ntest {
let h = handler_vec[i];
assert_eq!(rsgx_unregister_exception_handler(h), true);
}
for i in 0..ntest {
let h = handler_vec[i];
assert_eq!(rsgx_unregister_exception_handler(h), false);
}
}
// trts
pub fn test_read_rand(){
let mut rand_arr = [0; 100];
assert_eq!(rsgx_read_rand(&mut rand_arr[..]), Ok(()));
let x = rand_arr.iter().fold(0, |sum, &x| sum ^ x);
// Cannot all be zero
assert_ne!(x, 0);
}
pub fn test_data_is_within_enclave() {
#[derive(Clone, Copy)]
struct SampleDs{
x : i32,
y : i32,
z : [i32;100],
};
unsafe impl marker::ContiguousMemory for SampleDs{};
let mut sample_object : SampleDs = SampleDs{ x : 0, y : 0, z : [0;100]};
sample_object.x = 100;
sample_object.y = 100;
sample_object.z[0] = 100;
assert_eq!(rsgx_data_is_within_enclave(&sample_object), true);
let ooo;
unsafe {
let ppp = 0xdeadbeafdeadbeaf as * const u8;
ooo = &*ppp;
}
assert_eq!(rsgx_data_is_within_enclave(ooo), false);
}
pub fn test_slice_is_within_enclave() {
let one_array = [0; 100];
assert_eq!(rsgx_slice_is_within_enclave(&one_array[..]), true);
// TODO: Not compiling
//let mut ooo : &[u8];
//unsafe {
// let ppp = 0xdeadbeafdeadbeaf as * const [u8];
// ooo = &*ppp;
//}
//assert_eq!(rsgx_slice_is_within_enclave(ooo), false);
}
pub fn test_raw_is_within_enclave(){
assert_eq!(rsgx_raw_is_within_enclave(test_raw_is_within_enclave as * const u8,
10),
true);
assert_eq!(rsgx_raw_is_within_enclave(0xdeadbeafdeadbeaf as * const u8,
10),
false);
}
pub fn test_data_is_outside_enclave() {
#[derive(Clone, Copy)]
struct SampleDs{
x : i32,
y : i32,
z : [i32;100],
};
unsafe impl marker::ContiguousMemory for SampleDs{};
let mut sample_object : SampleDs = SampleDs{ x : 0, y : 0, z : [0;100]};
sample_object.x = 100;
sample_object.y = 100;
sample_object.z[0] = 100;
assert_eq!(rsgx_data_is_outside_enclave(&sample_object), false);
let ooo;
unsafe {
let ppp = 0xdeadbeafdeadbeaf as * const u8;
ooo = &*ppp;
}
assert_eq!(rsgx_data_is_outside_enclave(ooo), true);
}
pub fn test_slice_is_outside_enclave() {
let one_array = [0; 100];
assert_eq!(rsgx_slice_is_outside_enclave(&one_array[..]), false);
// TODO: Not compiling
//let mut ooo : &[u8];
//unsafe {
// let ppp = 0xdeadbeafdeadbeaf as * const [u8];
// ooo = &*ppp;
//}
//assert_eq!(rsgx_slice_is_within_enclave(ooo), true);
}
pub fn test_raw_is_outside_enclave(){
assert_eq!(rsgx_raw_is_outside_enclave(test_raw_is_outside_enclave as * const u8,
10),
false);
assert_eq!(rsgx_raw_is_outside_enclave(0xdeadbeafdeadbeaf as * const u8,
10),
true);
}
// macros
pub fn test_global_ctors_object (){
assert_eq!(VARNAME(), ());
}
// oom
// I don't think we can test oom
// error
pub fn test_error() {
// XXX: Top 11 should be the same in all unix?
let errorinfo_vec:Vec<(i32,&'static str)> = vec![
(1 , "Operation not permitted"),
(2 , "No such file or directory"),
(3 , "No such process"),
(4 , "Interrupted system call"),
(5 , "Input/output error"),
(6 , "Device not configured"),
(7 , "Argument list too long"),
(8 , "Exec format error"),
(9 , "Bad file descriptor"),
(10, "No child processes"),
(11, "Resource deadlock avoided"),
];
for case in errorinfo_vec {
let mut buf:[i8;64] = [0;64];
error::set_errno(case.0);
unsafe { error::error_string(error::errno(),&mut buf[..]);}
let answer:Vec<u8> = buf.iter().map(|&x| x as u8).collect();
let ans_str = String::from_utf8(answer).unwrap();
assert_eq!(ans_str.trim_matches('\0'),
case.1);
}
}
// libc
pub fn test_rts_libc_memchr(){
let test_str = "abcdedfg";
assert_eq!(
unsafe {
libc::memchr(
test_str.as_ptr() as * const u8,
'd' as u8,
test_str.len())},
test_str[3..].as_ptr());
assert_eq!(
unsafe {
libc::memchr(
"abcdefg".as_ptr() as * const u8,
'z' as u8,
test_str.len())},
0 as * const u8);
}
pub fn test_rts_libc_memrchr(){
let test_str = "abcdedfg";
assert_eq!(
unsafe {
libc::memrchr(
test_str.as_ptr() as * const u8,
'd' as u8,
test_str.len())},
test_str[5..].as_ptr());
assert_eq!(
unsafe {
libc::memrchr(
"abcdefg".as_ptr() as * const u8,
'z' as u8,
test_str.len())},
0 as * const u8);
}
// memchr
pub fn test_rts_memchr_memchr(){
let test_str = "abcdedfg".as_bytes();
let needle = 'd' as u8;
assert_eq!(memchr::memchr(needle, test_str), Some(3));
let needle = 'z' as u8;
assert_eq!(memchr::memchr(needle, test_str), None);
}
pub fn test_rts_memchr_memrchr(){
let test_str = "abcdedfg".as_bytes();
let needle = 'd' as u8;
assert_eq!(memchr::memrchr(needle, test_str), Some(5));
let needle = 'z' as u8;
assert_eq!(memchr::memrchr(needle, test_str), None);
}
// ascii
pub fn test_ascii(){
assert_eq!("café".to_ascii_uppercase(), "CAFÉ");
assert_eq!("café".to_ascii_uppercase(), "CAFé");
let ascii = 'a';
let non_ascii = '❤';
let int_ascii = 97;
assert!(ascii.is_ascii());
assert!(!non_ascii.is_ascii());
assert!(int_ascii.is_ascii());
assert_eq!('A', ascii.to_ascii_uppercase());
assert_eq!('❤', non_ascii.to_ascii_uppercase());
assert_eq!(65, int_ascii.to_ascii_uppercase());
let ascii = 'A';
let non_ascii = '❤';
let int_ascii = 65;
assert_eq!('a', ascii.to_ascii_lowercase());
assert_eq!('❤', non_ascii.to_ascii_lowercase());
assert_eq!(97, int_ascii.to_ascii_lowercase());
let ascii1 = 'A';
let ascii2 = 'a';
let ascii3 = 'A';
let ascii4 = 'z';
assert!(ascii1.eq_ignore_ascii_case(&ascii2));
assert!(ascii1.eq_ignore_ascii_case(&ascii3));
assert!(!ascii1.eq_ignore_ascii_case(&ascii4));
let mut ascii = 'a';
ascii.make_ascii_uppercase();
assert_eq!('A', ascii);
let mut ascii = 'A';
ascii.make_ascii_lowercase();
assert_eq!('a', ascii);
}
// c_str
pub fn test_cstr(){
let c_string = CString::new("foo").unwrap();
let ptr = c_string.into_raw();
unsafe {
assert_eq!(b'f', *ptr as u8);
assert_eq!(b'o', *ptr.offset(1) as u8);
assert_eq!(b'o', *ptr.offset(2) as u8);
assert_eq!(b'\0', *ptr.offset(3) as u8);
// retake pointer to free memory
let _ = CString::from_raw(ptr);
}
let c_string = CString::new("foo").unwrap();
let bytes = c_string.into_bytes();
assert_eq!(bytes, vec![b'f', b'o', b'o']);
let c_string = CString::new("foo").unwrap();
let bytes = c_string.into_bytes_with_nul();
assert_eq!(bytes, vec![b'f', b'o', b'o', b'\0']);
let c_string = CString::new("foo").unwrap();
let bytes = c_string.as_bytes();
assert_eq!(bytes, &[b'f', b'o', b'o']);
let c_string = CString::new("foo").unwrap();
let bytes = c_string.as_bytes_with_nul();
assert_eq!(bytes, &[b'f', b'o', b'o', b'\0']);
let c_string = CString::new(b"foo".to_vec()).unwrap();
let c_str = c_string.as_c_str();
assert_eq!(c_str, CStr::from_bytes_with_nul(b"foo\0").unwrap());
let c_string = CString::new(b"foo".to_vec()).unwrap();
let boxed = c_string.into_boxed_c_str();
assert_eq!(&*boxed, CStr::from_bytes_with_nul(b"foo\0").unwrap());
let c_str = CStr::from_bytes_with_nul(b"foo\0").unwrap();
assert_eq!(c_str.to_bytes(), b"foo");
let c_str = CStr::from_bytes_with_nul(b"foo\0").unwrap();
assert_eq!(c_str.to_bytes_with_nul(), b"foo\0");
let c_str = CStr::from_bytes_with_nul(b"foo\0").unwrap();
assert_eq!(c_str.to_str(), Ok("foo"));
let c_str = CStr::from_bytes_with_nul(b"Hello World\0").unwrap();
assert_eq!(c_str.to_string_lossy(), Cow::Borrowed("Hello World"));
use std::borrow::Cow;
use std::ffi::CStr;
let c_str = CStr::from_bytes_with_nul(b"Hello \xF0\x90\x80World\0").unwrap();
assert_eq!(
c_str.to_string_lossy(),
Cow::Owned(String::from("Hello �World")) as Cow<str>
);
}
acf-taxonomy.js | (function($){
// taxonomy
acf.fields.taxonomy = acf.field.extend({
type: 'taxonomy',
$el: null,
actions: {
'ready': 'render',
'append': 'render',
'remove': 'remove'
},
events: {
'click a[data-name="add"]': 'add_term'
},
focus: function(){
// $el
this.$el = this.$field.find('.acf-taxonomy-field');
// get options
this.o = acf.get_data(this.$el, {
save: '',
type: '',
taxonomy: ''
});
// extra
this.o.key = this.$field.data('key');
},
render: function(){
// attempt select2
var $select = this.$field.find('select');
// bail early if no select field
if( !$select.exists() ) return;
// select2 options
var args = acf.get_data( $select );
// customize args
args = acf.parse_args(args, {
'pagination': true,
'ajax_action': 'acf/fields/taxonomy/query',
'key': this.o.key
});
// add select2
acf.select2.init( $select, args );
},
remove: function(){
// attempt select2
var $select = this.$field.find('select');
// validate ui
if( !$select.exists() ) return false;
// remove select2
acf.select2.destroy( $select );
},
add_term: function( e ){
// reference
var self = this;
// open popup
acf.open_popup({
title: e.$el.attr('title') || e.$el.data('title'),
loading: true,
height: 220
});
// AJAX data
var ajax_data = acf.prepare_for_ajax({
action: 'acf/fields/taxonomy/add_term',
field_key: this.o.key
});
// get HTML
$.ajax({
url: acf.get('ajaxurl'),
data: ajax_data,
type: 'post',
dataType: 'html',
success: function(html){
self.add_term_confirm( html );
}
});
},
add_term_confirm: function( html ){
// reference
var self = this;
// update popup
acf.update_popup({
content : html
});
// focus
$('#acf-popup input[name="term_name"]').focus();
// events
$('#acf-popup form').on('submit', function( e ){
// prevent default
e.preventDefault();
// submit
self.add_term_submit( $(this ));
});
},
add_term_submit: function( $form ){
// reference
var self = this;
// vars
var $submit = $form.find('.acf-submit'),
$name = $form.find('input[name="term_name"]'),
$parent = $form.find('select[name="term_parent"]');
// basic validation
if( $name.val() === '' ) {
$name.focus();
return false;
}
// show loading
$submit.find('button').attr('disabled', 'disabled');
$submit.find('.acf-spinner').addClass('is-active');
// vars
var ajax_data = acf.prepare_for_ajax({
action: 'acf/fields/taxonomy/add_term',
field_key: this.o.key,
term_name: $name.val(),
term_parent: $parent.exists() ? $parent.val() : 0
});
// save term
$.ajax({
url: acf.get('ajaxurl'),
data: ajax_data,
type: 'post',
dataType: 'json',
success: function( json ){
// vars
var message = acf.get_ajax_message(json);
// success
if( acf.is_ajax_success(json) ) {
// clear name
$name.val('');
// update term lists
self.append_new_term( json.data );
}
// message
if( message.text ) {
$submit.find('span').html( message.text );
}
},
complete: function(){
// reset button
$submit.find('button').removeAttr('disabled');
// hide loading
$submit.find('.acf-spinner').removeClass('is-active');
// remove message
$submit.find('span').delay(1500).fadeOut(250, function(){
$(this).html('');
$(this).show();
});
// focus
$name.focus();
}
});
},
append_new_term: function( term ){
// vars
var item = {
id: term.term_id,
text: term.term_label
};
// append to all taxonomy lists
$('.acf-taxonomy-field[data-taxonomy="' + this.o.taxonomy + '"]').each(function(){
// vars
var type = $(this).data('type');
// bail early if not checkbox/radio
if( type == 'radio' || type == 'checkbox' ) {
// allow
} else {
return;
}
// vars
var $hidden = $(this).children('input[type="hidden"]'),
$ul = $(this).find('ul:first'),
name = $hidden.attr('name');
// allow multiple selection
if( type == 'checkbox' ) {
name += '[]';
}
// create new li
var $li = $([
'<li data-id="' + term.term_id + '">',
'<label>',
'<input type="' + type + '" value="' + term.term_id + '" name="' + name + '" /> ',
'<span>' + term.term_label + '</span>',
'</label>',
'</li>'
].join(''));
// find parent
if( term.term_parent ) {
// vars
var $parent = $ul.find('li[data-id="' + term.term_parent + '"]');
// update vars
$ul = $parent.children('ul');
// create ul
if( !$ul.exists() ) {
$ul = $('<ul class="children acf-bl"></ul>');
$parent.append( $ul );
}
}
// append
$ul.append( $li );
});
// append to select
$('#acf-popup #term_parent').each(function(){
// vars
var $option = $('<option value="' + term.term_id + '">' + term.term_label + '</option>');
if( term.term_parent ) {
$(this).children('option[value="' + term.term_parent + '"]').after( $option );
} else {
$(this).append( $option );
}
});
// set value
switch( this.o.type ) {
// select
case 'select':
//this.$el.children('input').select2('data', item);
// vars
var $select = this.$el.children('select');
acf.select2.add_value($select, term.term_id, term.term_label);
break;
case 'multi_select':
/*
// vars
var $input = this.$el.children('input'),
value = $input.select2('data') || [];
// append
                value.push( item );
// update
$input.select2('data', value);
*/
// vars
var $select = this.$el.children('select');
acf.select2.add_value($select, term.term_id, term.term_label);
break;
case 'checkbox':
case 'radio':
// scroll to view
var $holder = this.$el.find('.categorychecklist-holder'),
$li = $holder.find('li[data-id="' + term.term_id + '"]'),
offet = $holder.get(0).scrollTop + ( $li.offset().top - $holder.offset().top );
// check input
$li.find('input').prop('checked', true);
// scroll to bottom
$holder.animate({scrollTop: offet}, '250');
break;
}
}
});
})(jQuery);
organization.go | package ccv2
import (
"encoding/json"
"code.cloudfoundry.org/cli/api/cloudcontroller"
"code.cloudfoundry.org/cli/api/cloudcontroller/ccerror"
"code.cloudfoundry.org/cli/api/cloudcontroller/ccv2/internal"
)
// Organization represents a Cloud Controller Organization.
type Organization struct {
GUID string
Name string
QuotaDefinitionGUID string
DefaultIsolationSegmentGUID string
}
// UnmarshalJSON helps unmarshal a Cloud Controller Organization response.
func (org *Organization) UnmarshalJSON(data []byte) error {
var ccOrg struct {
Metadata internal.Metadata `json:"metadata"`
Entity struct {
Name string `json:"name"`
QuotaDefinitionGUID string `json:"quota_definition_guid"`
DefaultIsolationSegmentGUID string `json:"default_isolation_segment_guid"`
} `json:"entity"`
}
if err := json.Unmarshal(data, &ccOrg); err != nil {
return err
}
org.GUID = ccOrg.Metadata.GUID
org.Name = ccOrg.Entity.Name
org.QuotaDefinitionGUID = ccOrg.Entity.QuotaDefinitionGUID
org.DefaultIsolationSegmentGUID = ccOrg.Entity.DefaultIsolationSegmentGUID
return nil
}
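// exampleUnmarshalOrganization is an editor-added sketch (not part of the original
// package) showing the metadata/entity layout the UnmarshalJSON above expects from
// a CC v2 response; the GUID and name values are made up.
func exampleUnmarshalOrganization() (Organization, error) {
	raw := []byte(`{"metadata":{"guid":"org-guid"},"entity":{"name":"my-org","quota_definition_guid":"quota-guid"}}`)
	var org Organization
	err := json.Unmarshal(raw, &org) // dispatches to (*Organization).UnmarshalJSON
	return org, err
}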
//go:generate go run $GOPATH/src/code.cloudfoundry.org/cli/util/codegen/generate.go Organization codetemplates/delete_async_by_guid.go.template delete_organization.go
//go:generate go run $GOPATH/src/code.cloudfoundry.org/cli/util/codegen/generate.go Organization codetemplates/delete_async_by_guid_test.go.template delete_organization_test.go
// GetOrganization returns an Organization associated with the provided guid.
func (client *Client) GetOrganization(guid string) (Organization, Warnings, error) {
request, err := client.newHTTPRequest(requestOptions{
RequestName: internal.GetOrganizationRequest,
URIParams: Params{"organization_guid": guid},
})
if err != nil {
return Organization{}, nil, err
}
var org Organization
response := cloudcontroller.Response{
Result: &org,
}
err = client.connection.Make(request, &response)
return org, response.Warnings, err
}
// GetOrganizations returns back a list of Organizations based off of the
// provided queries.
func (client *Client) GetOrganizations(queries []Query) ([]Organization, Warnings, error) {
request, err := client.newHTTPRequest(requestOptions{
RequestName: internal.GetOrganizationsRequest,
Query: FormatQueryParameters(queries),
})
if err != nil {
return nil, nil, err
}
var fullOrgsList []Organization
warnings, err := client.paginate(request, Organization{}, func(item interface{}) error {
		if org, ok := item.(Organization); ok {
			fullOrgsList = append(fullOrgsList, org)
		} else {
return ccerror.UnknownObjectInListError{
Expected: Organization{},
Unexpected: item,
}
}
return nil
})
return fullOrgsList, warnings, err
}
event_handler.py | import numpy as np
import pyqtgraph as pg
from qtpy import QtGui
from __code._utilities.parent import Parent
from __code.radial_profile.display import Display
class EventHandler(Parent):
def file_index_changed(self):
file_index = self.parent.ui.slider.value()
live_image = self.parent.get_selected_image(file_index)
_view = self.parent.ui.image_view.getView()
_view_box = _view.getViewBox()
_state = _view_box.getState()
first_update = False
if self.parent.histogram_level == []:
first_update = True
_histo_widget = self.parent.ui.image_view.getHistogramWidget()
self.parent.histogram_level = _histo_widget.getLevels()
_image = np.transpose(live_image)
self.parent.ui.image_view.setImage(_image)
self.parent.live_image = _image
_view_box.setState(_state)
if not first_update:
_histo_widget.setLevels(self.parent.histogram_level[0], self.parent.histogram_level[1])
def guide_color_changed(self):
red = self.parent.ui.guide_red_slider.value()
green = self.parent.ui.guide_green_slider.value()
blue = self.parent.ui.guide_blue_slider.value()
alpha = self.parent.ui.guide_alpha_slider.value()
self.parent.guide_color_slider['red'] = red
self.parent.guide_color_slider['green'] = green
self.parent.guide_color_slider['blue'] = blue
self.parent.guide_color_slider['alpha'] = alpha
self.circle_center_changed()
self.parent.ui.image_view.removeItem(self.parent.line_view_binning)
o_display = Display(parent=self.parent)
o_display.grid()
    def circle_center_changed(self):
        if self.parent.ui.sector_full_circle.isChecked():
            if self.parent.sector_g:
                self.parent.ui.image_view.removeItem(self.parent.sector_g)
            return
x0 = float(self.parent.ui.circle_x.text())
y0 = float(self.parent.ui.circle_y.text())
from_angle = np.float(str(self.parent.ui.sector_from_value.text()))
to_angle = np.float(str(self.parent.ui.sector_to_value.text()))
self.calculate_corners_angles()
self.update_angle_label_position()
[y1, x1] = self.calculate_sector_xy_position(angle=from_angle, x0=x0, y0=y0)
[y2, x2] = self.calculate_sector_xy_position(angle=to_angle, x0=x0, y0=y0)
pos = np.array([[x0, y0], [x1, y1], [x2, y2]])
adj = np.array([[0, 1], [1, 2], [2, 0]])
symbols = ['+', 'o', 'o']
lines = np.array([(255, 0, 0, 255, 2), (255, 0, 0, 0, 1), (255, 0, 0, 255, 2)],
dtype=[('red', np.ubyte), ('green', np.ubyte), ('blue', np.ubyte), ('alpha', np.ubyte),
('width', float)])
if self.parent.sector_g:
self.parent.ui.image_view.removeItem(self.parent.sector_g)
self.parent.sector_g = pg.GraphItem()
self.parent.ui.image_view.addItem(self.parent.sector_g)
self.parent.sector_g.setData(pos=pos, adj=adj, pen=lines, size=1, symbol=symbols, pxMode=False)
def update_angle_label_position(self):
x0 = np.int(str(self.parent.ui.circle_x.text()))
y0 = np.int(str(self.parent.ui.circle_y.text()))
# add angle 0, 90, 180 and 270 labels
if self.parent.angle_0 is None:
self.parent.angle_0 = pg.TextItem(text=u'0\u00b0', anchor=(0, 1))
self.parent.angle_90 = pg.TextItem(text=u'90\u00b0', anchor=(0, 1))
self.parent.angle_180 = pg.TextItem(text=u'180\u00b0', anchor=(0, 0))
self.parent.angle_270 = pg.TextItem(text=u'270\u00b0', anchor=(1, 1))
self.parent.ui.image_view.addItem(self.parent.angle_0)
self.parent.ui.image_view.addItem(self.parent.angle_90)
self.parent.ui.image_view.addItem(self.parent.angle_180)
self.parent.ui.image_view.addItem(self.parent.angle_270)
self.parent.angle_0.setPos(np.int(x0), 0)
self.parent.angle_90.setPos(self.parent.height, y0)
self.parent.angle_180.setPos(x0, self.parent.width)
self.parent.angle_270.setPos(0, y0)
def calculate_sector_xy_position(self, angle=0, x0=0, y0=0):
x = np.NaN
y = np.NaN
angle_top_right = self.parent.corners['top_right']
angle_bottom_right = self.parent.corners['bottom_right']
angle_bottom_left = self.parent.corners['bottom_left']
angle_top_left = self.parent.corners['top_left']
# print("angle_top_right: {}".format(angle_top_right))
# print("angle_bottom_right: {}".format(angle_bottom_right))
# print("angle_bottom_left: {}".format(angle_bottom_left))
# print("angle_top_left: {}".format(angle_top_left))
if (angle_top_right <= angle) and \
(angle <= angle_bottom_right):
# right
# get x
x = self.parent.height
# get y
_angle = np.abs(90 - angle)
if angle == 90:
y = 0
else:
angle_rad = np.deg2rad(_angle)
y = np.tan(angle_rad) * (self.parent.height - x0)
if angle <= 90:
y = y0 - y
else:
y = y0 + y
elif angle_bottom_right < angle < angle_bottom_left:
# bottom
# get y
y = self.parent.width
# get x
_angle = np.abs(180 - angle)
if angle == 180:
x = 0
else:
angle_rad = np.deg2rad(_angle)
x = (y - y0) * np.tan(angle_rad)
if angle <= 180:
x = x0 + x
else:
x = x0 - x
elif angle_bottom_left <= angle <= angle_top_left:
# left
# get x
x = 0
# get y
_angle = np.abs(270 - angle)
if angle == 270:
y = 0
else:
angle_rad = np.deg2rad(_angle)
y = np.tan(angle_rad) * x0
if angle <= 270:
y = y0 + y
else:
y = y0 - y
else:
# top
# get y
y = 0
# get x
b_right_part = True
if angle > angle_top_left:
angle = np.abs(360 - angle)
b_right_part = False
if angle == 0:
x = 0
else:
angle_rad = np.deg2rad(angle)
x = y0 * np.tan(angle_rad)
if b_right_part:
x = x0 + x
else:
x = x0 - x
return [y, x]
def calculate_corners_angles(self):
'''top vertical being angle 0'''
x0 = float(str(self.parent.ui.circle_x.text()))
y0 = float(str(self.parent.ui.circle_y.text()))
width = self.parent.width
height = self.parent.height
# width = self.parent.height
# height = self.parent.width
theta_tr = np.NaN # angle top right
theta_br = np.NaN # bottom right
theta_bl = np.NaN # bottom left
theta_tl = np.NaN # top left
theta_tr = np.arctan((width - x0) / y0)
theta_tr_deg = np.rad2deg(theta_tr)
theta_br = np.pi - np.arctan((width - x0) / (height - y0))
theta_br_deg = np.rad2deg(theta_br)
theta_bl = np.pi + np.arctan(x0 / (height - y0))
theta_bl_deg = np.rad2deg(theta_bl)
theta_tl = 2 * np.pi - np.arctan(x0 / y0)
theta_tl_deg = np.rad2deg(theta_tl)
self.parent.corners['top_right'] = theta_tr_deg
self.parent.corners['bottom_right'] = theta_br_deg
self.parent.corners['bottom_left'] = theta_bl_deg
self.parent.corners['top_left'] = theta_tl_deg
def sector_radio_button_changed(self):
is_full_circle = self.parent.ui.sector_full_circle.isChecked()
if is_full_circle:
_status_sector = False
self.remove_angle_label()
else:
_status_sector = True
self.update_angle_label_position()
self.parent.ui.sector_from_label.setEnabled(_status_sector)
self.parent.ui.sector_from_value.setEnabled(_status_sector)
self.parent.ui.sector_from_units.setEnabled(_status_sector)
self.parent.ui.sector_to_label.setEnabled(_status_sector)
self.parent.ui.sector_to_value.setEnabled(_status_sector)
self.parent.ui.sector_to_units.setEnabled(_status_sector)
self.parent.ui.from_angle_slider.setEnabled(_status_sector)
self.parent.ui.to_angle_slider.setEnabled(_status_sector)
self.parent.sector_changed()
def remove_angle_label(self):
if self.parent.angle_0:
self.parent.ui.image_view.removeItem(self.parent.angle_0)
if self.parent.angle_90:
self.parent.ui.image_view.removeItem(self.parent.angle_90)
if self.parent.angle_180:
self.parent.ui.image_view.removeItem(self.parent.angle_180)
if self.parent.angle_270:
self.parent.ui.image_view.removeItem(self.parent.angle_270)
self.parent.angle_0 = None
self.parent.angle_90 = None
self.parent.angle_180 = None
self.parent.angle_270 = None
def update_max_radius_item(self):
is_max_radius_selected = self.parent.ui.max_radius_radioButton.isChecked()
self.max_radius_handler(is_max_radius_selected=is_max_radius_selected)
def max_radius_handler(self, is_max_radius_selected=None):
if self.parent.max_radius_item:
self.parent.ui.image_view.removeItem(self.parent.max_radius_item)
if is_max_radius_selected:
x0 = float(str(self.parent.ui.circle_x.text()))
y0 = float(str(self.parent.ui.circle_y.text()))
max_radius = self.parent.ui.max_radius_slider.value()
_pen = QtGui.QPen()
_pen.setColor(QtGui.QColor(0, 0, 255))
_pen.setWidth(0.4)
self.parent.max_radius_item = pg.CircleROI([x0 - max_radius, y0 - max_radius],
[2*max_radius, 2*max_radius],
movable=False,
resizable=False,
pen=_pen)
handles = self.parent.max_radius_item.getHandles()
self.parent.ui.image_view.addItem(self.parent.max_radius_item)
for _handle in handles:
self.parent.max_radius_item.removeHandle(_handle)
def retrieve_max_radius_possible(self):
x0 = float(str(self.parent.ui.circle_x.text()))
y0 = float(str(self.parent.ui.circle_y.text()))
width = self.parent.width
height = self.parent.height
def lenght_is(x=0, y=0):
return np.sqrt(x**2 + y**2)
# to top left distance
x = x0
y = y0
top_left = lenght_is(x=x, y=y)
# to top right distance
x = width - x0
y = y0
top_right = lenght_is(x=x, y=y)
# to bottom left corner
x = x0
y = height - y0
bottom_left = lenght_is(x=x, y=y)
# to bottom right corner
x = width - x0
y = height - y0
bottom_right = lenght_is(x=x, y=y)
max_distance = np.max([top_left, top_right, bottom_left, bottom_right])
return max_distance
def update_max_radius_value(self):
max_radius = self.retrieve_max_radius_possible()
current_radius_value = self.parent.ui.max_radius_slider.value()
if current_radius_value > max_radius:
self.parent.ui.max_radius_slider.setValue(max_radius)
        self.parent.ui.max_radius_slider.setMaximum(max_radius)
error.rs | //! Error types
use num_derive::FromPrimitive;
use num_traits::FromPrimitive;
use solana_program::{
decode_error::DecodeError,
msg,
program_error::{PrintProgramError, ProgramError},
sanitize::SanitizeError,
};
use thiserror::Error;
/// Errors that may be returned by the Template program.
#[derive(Clone, Debug, Eq, Error, FromPrimitive, PartialEq)]
pub enum AudiusProgramError {
/// The owner of the input isn't set to the program address generated by the program.
#[error("Input account owner is not the program address")]
IncorrectOwner,
/// Signature with an already met principal
#[error("Signature with an already met principal")]
SignCollission,
/// Unexpected signer met
#[error("Unexpected signer met")]
WrongSigner,
/// Wrong sender account
#[error("Incorect sender account")]
IncorectSenderAccount,
/// Wrong manager account
#[error("Incorect account manager")]
IncorectManagerAccount,
/// Wrong reward manager key
#[error("Wrong reward manager key")]
WrongRewardManagerKey,
/// Wrong recipient Solana key
#[error("Wrong recipient Solana key")]
WrongRecipientKey,
/// Isn't enough signers keys
#[error("Isn't enough signers keys")]
NotEnoughSigners,
/// Secp256 instruction missing
#[error("Secp256 instruction missing")]
Secp256InstructionMissing,
/// Instruction load error
#[error("Instruction load error")]
InstructionLoadError,
/// Repeated senders
#[error("Repeated sender")]
RepeatedSenders,
/// Signature verification failed
#[error("Signature verification failed")]
SignatureVerificationFailed,
/// Some signers have same operators
#[error("Some signers have same operators")]
OperatorCollision,
/// Funds already sent
#[error("Funds already sent")]
AlreadySent,
/// Incorrect messages
#[error("Incorrect messages")]
IncorrectMessages,
/// Messages overflow
#[error("Messages overflow")]
MessagesOverflow,
/// Math overflow
#[error("Math overflow")]
MathOverflow,
}
impl From<AudiusProgramError> for ProgramError {
fn from(e: AudiusProgramError) -> Self {
ProgramError::Custom(e as u32)
}
}
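// Editor's note (sketch, not in the original file): the From impl above is what lets
// instruction handlers return these errors with `?`, surfacing them as custom codes:
//
//     let e: ProgramError = AudiusProgramError::MathOverflow.into();
//
// which yields ProgramError::Custom(AudiusProgramError::MathOverflow as u32).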
impl<T> DecodeError<T> for AudiusProgramError {
fn type_of() -> &'static str {
"AudiusProgramError"
}
}
impl PrintProgramError for AudiusProgramError {
fn print<E>(&self)
where
E: 'static + std::error::Error + DecodeError<E> + PrintProgramError + FromPrimitive,
    {
        msg!(&self.to_string())
    }
}
/// Convert SanitizeError to AudiusProgramError
pub fn to_audius_program_error(_e: SanitizeError) -> AudiusProgramError {
AudiusProgramError::InstructionLoadError
}
get_template.go | package infra
import (
"net/http"
"strings"
"github.com/porter-dev/porter/api/server/handlers"
"github.com/porter-dev/porter/api/server/shared"
"github.com/porter-dev/porter/api/server/shared/apierrors"
"github.com/porter-dev/porter/api/server/shared/config"
"github.com/porter-dev/porter/api/server/shared/requestutils"
"github.com/porter-dev/porter/api/types"
"github.com/porter-dev/porter/internal/templater/parser"
)
type InfraGetTemplateHandler struct {
handlers.PorterHandlerWriter
}
func NewInfraGetTemplateHandler(
config *config.Config,
writer shared.ResultWriter,
) *InfraGetTemplateHandler {
return &InfraGetTemplateHandler{
PorterHandlerWriter: handlers.NewDefaultPorterHandler(config, nil, writer),
}
}
func (c *InfraGetTemplateHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
_, reqErr := requestutils.GetURLParamString(r, types.URLParamTemplateVersion)
if reqErr != nil {
c.HandleAPIError(w, r, reqErr)
return
}
name, reqErr := requestutils.GetURLParamString(r, types.URLParamTemplateName)
if reqErr != nil {
c.HandleAPIError(w, r, reqErr)
return
}
nameLower := strings.ToLower(name)
formYAML, err := parser.FormYAMLFromBytes(&parser.ClientConfigDefault{}, getFormBytesFromKind(name), "declared", "infra")
if err != nil {
c.HandleAPIError(w, r, apierrors.NewErrInternal(err))
return
}
res := &types.InfraTemplate{
InfraTemplateMeta: templateMap[nameLower],
Form: formYAML,
	}
	c.WriteResult(w, r, res)
}
func getFormBytesFromKind(kind string) []byte {
formBytes := []byte(testForm)
switch strings.ToLower(kind) {
case "ecr":
formBytes = []byte(ecrForm)
case "rds":
formBytes = []byte(rdsForm)
case "eks":
formBytes = []byte(eksForm)
case "gcr":
formBytes = []byte(gcrForm)
case "gke":
formBytes = []byte(gkeForm)
case "docr":
formBytes = []byte(docrForm)
case "doks":
formBytes = []byte(doksForm)
}
return formBytes
}
user.go | package entity
import (
"html"
"strings"
"time"
"github.com/arjun/SampleAPI/infrastructure/security"
"github.com/badoux/checkmail"
)
type User struct {
ID uint64 `gorm:"primary_key;auto_increment" json:"id"`
	FirstName string     `gorm:"size:100;not null;" json:"first_name"`
	LastName  string     `gorm:"size:100;not null;" json:"last_name"`
	Email     string     `gorm:"size:100;not null;unique" json:"email"`
	Password  string     `gorm:"size:100;not null;" json:"password"`
	CreatedAt time.Time  `gorm:"default:CURRENT_TIMESTAMP" json:"created_at"`
	UpdatedAt time.Time  `gorm:"default:CURRENT_TIMESTAMP" json:"updated_at"`
DeletedAt *time.Time `json:"deleted_at,omitempty"`
}
type PublicUser struct {
ID uint64 `gorm:"primary_key;auto_increment" json:"id"`
FirstName string `gorm:"size:100;not null;" json:"first_name"`
LastName string `gorm:"size:100;not null;" json:"last_name"`
}
func (u *User) BeforeSave() error {
hashPassword, err := security.Hash(u.Password)
if err != nil {
return err
}
u.Password = string(hashPassword)
return nil
}
type Users []User
func (users Users) PublicUsers() []interface{} {
result := make([]interface{}, len(users))
for index, user := range users {
result[index] = user.PublicUser()
}
return result
}
func (u *User) PublicUser() interface{} {
return &PublicUser{
ID: u.ID,
FirstName: u.FirstName,
LastName: u.LastName,
}
}
func (u *User) Prepare() {
u.FirstName = html.EscapeString(strings.TrimSpace(u.FirstName))
u.LastName = html.EscapeString(strings.TrimSpace(u.LastName))
u.Email = html.EscapeString(strings.TrimSpace(u.Email))
u.CreatedAt = time.Now()
u.UpdatedAt = time.Now()
}
func (u *User) Validate(action string) map[string]string {
var errorMessages = make(map[string]string)
var err error
switch strings.ToLower(action) {
case "update":
if u.Email == "" {
errorMessages["email_required"] = "email required"
}
if u.Email != "" {
if err = checkmail.ValidateFormat(u.Email); err != nil {
				errorMessages["invalid_email"] = "please provide a valid email"
}
}
case "login":
if u.Password == "" {
errorMessages["password_required"] = "password is required"
}
if u.Email == "" {
errorMessages["email_required"] = "email is required"
}
if u.Email != "" {
if err = checkmail.ValidateFormat(u.Email); err != nil {
errorMessages["invalid_email"] = "please provide a valid email"
}
}
case "forgotpassword":
if u.Email == "" {
errorMessages["email_required"] = "email required"
}
if u.Email != "" {
if err = checkmail.ValidateFormat(u.Email); err != nil {
errorMessages["invalid_email"] = "please provide a valid email"
}
}
default:
if u.FirstName == "" {
errorMessages["firstname_required"] = "first name is required"
}
if u.LastName == "" {
errorMessages["lastname_required"] = "last name is required"
}
if u.Password == "" {
errorMessages["password_required"] = "password is required"
}
if u.Password != "" && len(u.Password) < 6 {
errorMessages["invalid_password"] = "password should be at least 6 characters"
}
if u.Email == "" {
errorMessages["email_required"] = "email is required"
}
if u.Email != "" {
if err = checkmail.ValidateFormat(u.Email); err != nil {
errorMessages["invalid_email"] = "please provide a valid email"
}
}
}
return errorMessages
}
localization_suite_test.go | package localization_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestLocalization(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Localization Suite")
}
utils.py | """Utility functions shared across tasks."""
import numpy as np
import matplotlib as mpl
# For headless environments
mpl.use('Agg') # NOQA
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from rastervision.common.utils import plot_img_row
def predict_x(x, model):
batch_x = np.expand_dims(x, axis=0)
batch_y = model.predict(batch_x)
y = np.squeeze(batch_y, axis=0)
return y
def make_prediction_img(x, target_size, predict):
"""Generate a prediction image one window at a time.
Generate a prediction image consisting of a prediction for each pixel. The
format of that prediction depends on the output of the predict function.
Passing a very large image as input to a model might
not be possible due to memory limitations. Instead, we slide a window over
the image and get the predictions for each window. The individual
predictions can be combined to create a large prediction image. By
overlapping the windows, we can discard inaccurate predictions along window
boundaries.
# Arguments
x: the full sized image to get a prediction for
(nb_rows, nb_cols, nb_channels)
target_size: the window size which needs to be the same as what the
model expects as input
predict: a function that takes a window image of size
target_size and returns the prediction for each pixel
# Returns
The prediction image
"""
quarter_target_size = target_size // 4
half_target_size = target_size // 2
sample_prediction = predict(x[0:target_size, 0:target_size, :])
nb_channels = sample_prediction.shape[2]
dtype = sample_prediction.dtype
pad_width = (
(quarter_target_size, target_size),
(quarter_target_size, target_size),
(0, 0))
pad_x = np.pad(x, pad_width, 'edge')
pad_y = np.zeros(
(pad_x.shape[0], pad_x.shape[1], nb_channels),
dtype=dtype)
def update_prediction_center(row_begin, row_end, col_begin, col_end):
"""Just update the center half of the window."""
x_window = pad_x[row_begin:row_end, col_begin:col_end, :]
y_window = predict(x_window)
y_window_center = y_window[
quarter_target_size:target_size - quarter_target_size,
quarter_target_size:target_size - quarter_target_size,
:]
pad_y[
row_begin + quarter_target_size:row_end - quarter_target_size,
col_begin + quarter_target_size:col_end - quarter_target_size,
:] = y_window_center
for row_begin in range(0, pad_x.shape[0], half_target_size):
for col_begin in range(0, pad_x.shape[1], half_target_size):
row_end = row_begin + target_size
col_end = col_begin + target_size
if row_end <= pad_x.shape[0] and col_end <= pad_x.shape[1]:
update_prediction_center(
row_begin, row_end, col_begin, col_end)
y = pad_y[quarter_target_size:quarter_target_size+x.shape[0],
quarter_target_size:quarter_target_size+x.shape[1],
:]
return y
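# Editor's sketch (not in the original module): a typical call, assuming a Keras-style
# `model` whose input window size is 256, using predict_x defined above.
#
#     pred_img = make_prediction_img(
#         full_image, target_size=256,
#         predict=lambda window: predict_x(window, model))
#
# Only the central half of each window is kept, so predictions near window borders
# (which tend to be less accurate) are discarded.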
def make_legend(label_keys, label_names):
patches = []
for label_key, label_name in zip(label_keys, label_names):
color = tuple(np.array(label_key) / 255.)
patch = mpatches.Patch(
facecolor=color, edgecolor='black', linewidth=0.5,
label=label_name)
patches.append(patch)
plt.legend(handles=patches, loc='upper left',
bbox_to_anchor=(1, 1), fontsize=4)
def plot_prediction(generator, all_x, y, pred,
file_path, is_debug=False):
dataset = generator.dataset
fig = plt.figure()
nb_subplot_cols = 3
if is_debug:
nb_subplot_cols += len(generator.active_input_inds)
grid_spec = mpl.gridspec.GridSpec(1, nb_subplot_cols)
all_x = generator.calibrate_image(all_x)
rgb_input_im = all_x[:, :, dataset.rgb_inds]
imgs = [rgb_input_im]
titles = ['RGB']
if is_debug:
ir_im = all_x[:, :, dataset.ir_ind]
imgs.append(ir_im)
titles.append('IR')
depth_im = all_x[:, :, dataset.depth_ind]
imgs.append(depth_im)
titles.append('Depth')
ndvi_im = all_x[:, :, dataset.ndvi_ind]
imgs.append(ndvi_im)
titles.append('NDVI')
imgs.append(y)
titles.append('Ground Truth')
imgs.append(pred)
titles.append('Prediction')
plot_img_row(fig, grid_spec, 0, imgs, titles)
make_legend(dataset.label_keys, dataset.label_names)
plt.savefig(file_path, bbox_inches='tight', format='png', dpi=300)
plt.close(fig)
train_extractive.py | #!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.model_builder import ExtSummarizer
from models.trainer_ext import build_trainer
from others.logging import logger, init_logger
import pdb
model_flags = ['hidden_size', 'ff_size', 'heads', 'inter_layers', 'encoder', 'ff_actv', 'use_interval', 'rnn_size']
def train_multi_ext(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join()
def run(args, device_id, error_queue):
""" run process """
setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
try:
gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
print('gpu_rank %d' % gpu_rank)
if gpu_rank != args.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
train_single_ext(args, device_id)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def validate_ext(args, device_id):
timestep = 0
FILE_PATH = 'model_step_*.pt'
#FILE_PATH = 'bertext_cnndm_transformer*.pt'
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, FILE_PATH)))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:3]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test_ext(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, FILE_PATH)))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
print("will sleep 60", os.path.getsize(cp))
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = 0
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test_ext(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, FILE_PATH)))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
return
else:
print("will sleep 300", cp_files)
time.sleep(300)
def validate(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = ExtSummarizer(args, device, checkpoint)
model.eval()
valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
args.batch_size, device,
shuffle=False, is_test=False)
trainer = build_trainer(args, device_id, model, None)
stats = trainer.validate(valid_iter, step)
return stats.xent()
def test_ext(args, device_id, pt, step):
device = "cpu" if args.visible_gpus == '-1' else "cuda"
if (pt != ''):
test_from = pt
else:
test_from = args.test_from
logger.info('Loading checkpoint from %s' % test_from)
checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
print(args)
model = ExtSummarizer(args, device, checkpoint)
model.eval()
test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
args.test_batch_size, device,
shuffle=False, is_test=True)
trainer = build_trainer(args, device_id, model, None)
trainer.test(test_iter, step)
def train_ext(args, device_id):
if (args.world_size > 1):
train_multi_ext(args)
else:
train_single_ext(args, device_id)
def train_single_ext(args, device_id):
init_logger(args.log_file)
device = "cpu" if args.visible_gpus == '-1' else "cuda"
logger.info('Device ID %d' % device_id)
logger.info('Device %s' % device)
torch.manual_seed(args.seed)
random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
        torch.cuda.set_device(device_id)
        torch.cuda.manual_seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed)
torch.backends.cudnn.deterministic = True
if args.train_from != '':
logger.info('Loading checkpoint from %s' % args.train_from)
checkpoint = torch.load(args.train_from,
map_location=lambda storage, loc: storage)
opt = vars(checkpoint['opt'])
for k in opt.keys():
if (k in model_flags):
setattr(args, k, opt[k])
else:
checkpoint = None
def train_iter_fct():
return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
shuffle=True, is_test=False)
model = ExtSummarizer(args, device, checkpoint)
optim = model_builder.build_optim(args, model, checkpoint)
logger.info(model)
trainer = build_trainer(args, device_id, model, optim)
trainer.train(train_iter_fct, args.train_steps) | torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed) |
blockedunlockpickup.py | from gym_minigrid.minigrid import *
from gym_minigrid.roomgrid import RoomGrid
from gym_minigrid.register import register
class BlockedUnlockPickup(RoomGrid):
""" | in another room
"""
def __init__(self, seed=None):
room_size = 6
super().__init__(
num_rows=1,
num_cols=2,
room_size=room_size,
max_steps=16*room_size**2,
seed=seed
)
def _gen_grid(self, width, height):
super()._gen_grid(width, height)
# Add a box to the room on the right
obj, _ = self.add_object(1, 0, kind="box")
# Make sure the two rooms are directly connected by a locked door
door, pos = self.add_door(0, 0, 0, locked=True)
# Block the door with a ball
color = self._rand_color()
self.grid.set(pos[0]-1, pos[1], Ball(color))
# Add a key to unlock the door
self.add_object(0, 0, 'key', door.color)
self.place_agent(0, 0)
self.obj = obj
self.mission = "pick up the %s %s" % (obj.color, obj.type)
def step(self, action):
obs, reward, done, info = super().step(action)
if action == self.actions.pickup:
if self.agents[DEFAULT_AGENT_ID].carrying and self.agents[DEFAULT_AGENT_ID].carrying == self.obj:
reward = self._reward()
done = True
return obs, reward, done, info
register(
id='MiniGrid-BlockedUnlockPickup-v0',
entry_point='gym_minigrid.envs:BlockedUnlockPickup'
) | Unlock a door blocked by a ball, then pick up a box |
ios-thermometer.js | (function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() :
typeof define === 'function' && define.amd ? define(factory) :
(global.vueIonicons = factory());
}(this, (function () { 'use strict';
var A_ROTATE = 'rotate';
var A_BEAT = 'beat';
var A_SHAKE = 'shake';
var IoniconsMixin = {
computed: {
ionClass: function ionClass() {
var addClass = '';
| addClass = 'ion-rotate ';
} else if (this.animate === A_BEAT) {
addClass = 'ion-beat ';
} else if (this.animate === A_SHAKE) {
addClass = 'ion-shake ';
}
return "".concat(this.rootClass, " ").concat(addClass);
}
},
props: {
title: {
type: String,
"default": ''
},
rootClass: {
type: String,
"default": ''
},
w: {
type: String,
"default": '1em'
},
h: {
type: String,
"default": '1em'
},
animate: {
type: String,
"default": ''
}
}
};
//
var script = {
name: "ios-thermometer-icon",
mixins: [IoniconsMixin],
data: function data() {
var iconTitle = this.title ? this.title : null;
return {
iconTitle: iconTitle
};
}
};
/* script */
const __vue_script__ = script;
/* template */
var __vue_render__ = function () {var _vm=this;var _h=_vm.$createElement;var _c=_vm._self._c||_h;return (_vm.iconTitle)?_c('div',{staticClass:"ion",class:_vm.ionClass,attrs:{"data-title":_vm.iconTitle,"data-name":"ios-thermometer-icon"}},[_c('svg',{staticClass:"ion__svg",attrs:{"width":_vm.w,"height":_vm.h,"viewBox":"0 0 512 512"}},[_c('path',{attrs:{"d":"M309.8 304.6c-4.3-3-6.9-7.9-6.9-13.1v-213c0-25.7-21-46.5-47-46.5s-47 20.8-47 46.5v213c0 5.2-2.6 10.2-6.9 13.1-25.2 17.3-42 46.4-42 79.3 0 53 43 96 96 96s96-43 96-96c0-32.9-17-62.1-42.2-79.3zM256.1 445c-32 0-58.1-26.3-58.1-58.8 0-25.4 15.4-47.1 37.9-55.3 3.2-1.2 5.4-4.1 5.4-7.5V180.2c0-8 6.5-14.5 14.5-14.5s14.5 6.5 14.5 14.5v143.2c0 3.4 2.1 6.3 5.3 7.5 21.9 8.2 38.4 29.9 38.4 55.2 0 32.5-25.8 58.9-57.9 58.9z"}})])]):_c('div',{staticClass:"ion",class:_vm.ionClass,attrs:{"name":"ios-thermometer-icon"}},[_c('svg',{staticClass:"ion__svg",attrs:{"width":_vm.w,"height":_vm.h,"viewBox":"0 0 512 512"}},[_c('path',{attrs:{"d":"M309.8 304.6c-4.3-3-6.9-7.9-6.9-13.1v-213c0-25.7-21-46.5-47-46.5s-47 20.8-47 46.5v213c0 5.2-2.6 10.2-6.9 13.1-25.2 17.3-42 46.4-42 79.3 0 53 43 96 96 96s96-43 96-96c0-32.9-17-62.1-42.2-79.3zM256.1 445c-32 0-58.1-26.3-58.1-58.8 0-25.4 15.4-47.1 37.9-55.3 3.2-1.2 5.4-4.1 5.4-7.5V180.2c0-8 6.5-14.5 14.5-14.5s14.5 6.5 14.5 14.5v143.2c0 3.4 2.1 6.3 5.3 7.5 21.9 8.2 38.4 29.9 38.4 55.2 0 32.5-25.8 58.9-57.9 58.9z"}})])])};
var __vue_staticRenderFns__ = [];
/* style */
const __vue_inject_styles__ = undefined;
/* scoped */
const __vue_scope_id__ = undefined;
/* module identifier */
const __vue_module_identifier__ = undefined;
/* functional template */
const __vue_is_functional_template__ = false;
/* component normalizer */
function __vue_normalize__(
template, style, script$$1,
scope, functional, moduleIdentifier,
createInjector, createInjectorSSR
) {
const component = (typeof script$$1 === 'function' ? script$$1.options : script$$1) || {};
// For security concerns, we use only base name in production mode.
component.__file = "ios-thermometer.vue";
if (!component.render) {
component.render = template.render;
component.staticRenderFns = template.staticRenderFns;
component._compiled = true;
if (functional) component.functional = true;
}
component._scopeId = scope;
return component
}
/* style inject */
/* style inject SSR */
var iosThermometer = __vue_normalize__(
{ render: __vue_render__, staticRenderFns: __vue_staticRenderFns__ },
__vue_inject_styles__,
__vue_script__,
__vue_scope_id__,
__vue_is_functional_template__,
__vue_module_identifier__,
undefined,
undefined
);
return iosThermometer;
}))); | if (this.animate === A_ROTATE) { |
include_boost_script.py | import operator
s1 = "#include <boost/"
lines1 = {}
with open('./boost_includes_1') as f:
lines=f.readlines()
for line in lines: | line1 = line[line.find('#'):line.find('\n')]
if lines1.has_key(line1):
lines1[line1] = lines1[line1] + 1
else:
lines1[line1]=1;
sorted_x = sorted(lines1.items(), key=operator.itemgetter(1))
for line in sorted_x:
print line
f.close()
'''
sed scripts to remove some boost deps:
make_shared:
find . -type f -not -path '*/\.*' -exec sed -i 's/boost::make_shared/std::make_shared/g' {} +
find . -type f -not -path '*/\.*' -exec sed -i 's/#include <boost\/make_shared.hpp>/#include <memory>/g' {} +
shared_ptr:
find . -type f -not -path '*/\.*' -exec sed -i 's/boost::shared_ptr/std::shared_ptr/g' {} +
find . -type f -not -path '*/\.*' -exec sed -i 's/#include <boost\/shared_ptr.hpp>/#include <memory>/g' {} +
bind:
find . -type f -not -path '*/\.*' -exec sed -i 's/boost::bind/std::bind/g' {} +
find . -type f -not -path '*/\.*' -exec sed -i 's/#include <boost\/bind.hpp>/#include <functional>/g' {} +
function:
find . -type f -not -path '*/\.*' -exec sed -i 's/boost::function/std::function/g' {} +
find . -type f -not -path '*/\.*' -exec sed -i 's/#include <boost\/function.hpp>/#include <functional>/g' {} +
scoped_ptr
find . -type f -not -path '*/\.*' -exec sed -i 's/boost::scoped_ptr/std::unique_ptr/g' {} +
find . -type f -not -path '*/\.*' -exec sed -i 's/#include <boost\/scoped_ptr.hpp>/#include <memory>/g' {} +
To remove:
<boost/math/special_functions/fpclassify.hpp>: marketmodel.cpp, matrices.cpp, simulatedannealing.cpp
change to <cmath>
change boost::math::isnan to std::isnan and boost::math::isinf to std::isinf
script:
find . -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/boost::math::isnan/std::isnan/g' {} +
find . -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/boost::math::isinf/std::isinf/g' {} +
find */marketmodel.cpp */matrices.cpp */*/*/simulatedannealing.hpp -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/#include <boost\/math\/special_functions\/fpclassify.hpp>/#include <cmath>/g' {} +
Other math/special_functions also similar:
gamma.hpp = not replaceable - only gamma_q and gamma_q_inv in noarbsabr.cpp
atanh.hpp:
find . -type f -not -path '*/\.*' -exec sed -i 's/boost::math::atanh/std::atanh/g' {} +
find blackformula.cpp -type f -not -path '*/\.*' -exec sed -i 's/#include <boost\/math\/special_functions\/atanh.hpp>/#include <cmath>/g' {} +
erf.hpp
find */*/*/*/gaussian1dmodel.cpp -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/boost::math::erf/std::erf/g' {} +
find */*/*/*/gaussian1dmodel.hpp -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/#include <boost\/math\/special_functions\/erf.hpp>/#include <cmath>/g' {} +
---
boost/atomic:
find observable.hpp observable.cpp singleton.hpp -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/boost::atomic/std::atomic/g' {} +
find observable.hpp observable.cpp singleton.hpp -type f -not -path '*/\.*' -not -path '*/extra' -exec sed -i 's/#include <boost\/atomic.hpp>/#include <atomic>/g' {} +
boost/random
not doing for lambda as non-polymorphic only maybe
not doing for thread at present
''' | if s1 in line: |
resnet.py | import torch.nn as nn
import torch
from torch.autograd import Variable
import math
import torch.utils.model_zoo as model_zoo
from commons.siam_mask.models.features import Features
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(Features):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
# padding = (2 - stride) + (dilation // 2 - 1)
padding = 2 - stride
        assert stride==1 or dilation==1, "at least one of stride and dilation must be 1"
if dilation > 1:
padding = dilation
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=padding, bias=False, dilation=dilation)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
if out.size() != residual.size():
print(out.size(), residual.size())
out += residual
out = self.relu(out)
return out
class | (nn.Module):
def __init__(self, block, layers, layer4=False, layer3=False):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=0, # 3
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2) # 31x31, 15x15
self.feature_size = 128 * block.expansion
if layer3:
self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) # 15x15, 7x7
self.feature_size = (256 + 128) * block.expansion
else:
self.layer3 = lambda x:x # identity
if layer4:
self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4) # 7x7, 3x3
self.feature_size = 512 * block.expansion
else:
self.layer4 = lambda x:x # identity
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
dd = dilation
if stride != 1 or self.inplanes != planes * block.expansion:
if stride == 1 and dilation == 1:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
else:
if dilation > 1:
dd = dilation // 2
padding = dd
else:
dd = 1
padding = 0
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=3, stride=stride, bias=False,
padding=padding, dilation=dd),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
# layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation))
layers.append(block(self.inplanes, planes, stride, downsample, dilation=dd))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
p0 = self.relu(x)
x = self.maxpool(p0)
p1 = self.layer1(x)
p2 = self.layer2(p1)
p3 = self.layer3(p2)
return p0, p1, p2, p3
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
if __name__ == '__main__':
net = resnet50()
print(net)
net = net.cuda()
var = torch.FloatTensor(1,3,127,127).cuda()
var = Variable(var)
net(var)
print('*************')
var = torch.FloatTensor(1,3,255,255).cuda()
var = Variable(var)
net(var)
| ResNet |
blog.py | # ******************* BLOG MODULE ****************************** #
# ** Created by Yossep
# ** github: https://github.com/j2B237/
# ** Project : Joblogueur
# ** Description:
#
# Within this module we have many functions designed to help display posts
# Methods such as :
# display all posts
# display posts per category
# display individual post
# register email user for the newsletter
# ************************************************************************ #
# Third party import
from flask import Blueprint, render_template, flash, request, redirect, url_for
from flask_mail import Message
# Local import
from FlaskApp.models import Post, Category, Moderator, Comment
from FlaskApp.forms import CommentForm
from . import db, ext, mail
bp = Blueprint('blog', __name__)
# Fake data to seed the website view
fake_Category = [
{
'id': 1,
'category_name': "10 bonnes raisons",
'color': 'primary'
},
{
'id': 2,
'category_name': "Comment réussir ?",
'color': 'success',
},
{
'id': 3,
'category_name': "Offres et formations",
'color': 'warning'
}
]
fake_moderators = [
{ 'id': 1,
'username': 'admin',
'email': '[email protected]',
'password': 'admin237',
'address1': 'address1',
'address2': 'address2',
'city': 'city',
'state': 'state',
'country': 'country',
'zipcode': 'zipcode',
'is_admin': True,
'image_file': 'default.jpg',
'created_on': '21/02/2021',
'posts': []
}
]
fake_posts = [
{
'id': 1,
'title': 'Comment réussir à gagner de l\'argent sur internet',
'introduction': 'Qu\’ils soient aujourd\’hui milliardaires ou non, reconnus à l\’international ou en France.',
'p_intro': 'Ils ont tous commencer simplement. Pour toi modeste citoyen qui voudrait gagner de l\'argent pour arrondir tes fins du mois, nous avons sélectionner une liste de sites et bonnes astuces à essayer',
'h1': "",
'p_h1': "",
'h2': "",
'p_h2': "",
'h3': "",
'p_h3': "",
'h4': "",
'p_h4': "",
'h5': "",
'p_h5': "",
'conclusion': "",
'p_conclusion': "",
'date_posted': '10/02/2021',
'display_or_not': True,
'moderator_id': 1,
'category_id': 1,
'comments': [],
}
]
fake_comments = [
{
'id': 1,
'author_name': 'admin',
'email_author': '[email protected]',
'content': 'C\'est bon tout ca.',
'date_posted': '12/02/2021',
'approved_or_not': True,
'post_id': 1
}
]
# Create a sitemap
@ext.register_generator
def index():
yield 'index', {}
# Home blog view
@bp.route('/')
def index():
global fake_moderators, fake_comments, fake_posts, fake_Category
categories = Category.query.all()
moderators = Moderator.query.all()
posts_to_display = Post.query.all()
post_banner = Post.query.join(Category).filter(Category.category_name == "BUSINESS").\
order_by(Post.date_posted.desc()).first()
last_post = Post.query.join(Category).filter(Category.category_name == "TUTORIELS").order_by(
Post.date_posted.desc()).first()
posts_for_cards = Post.query.filter_by(display_or_not=True).order_by(Post.date_posted.desc())[:4]
post_business = Post.query.join(Category).filter(Category.category_name == "BUSINESS").\
order_by(Post.date_posted.desc()).first()
post_formation = Post.query.join(Category).filter(Category.category_name == "FORMATIONS"). \
order_by(Post.date_posted.desc()).first()
post_tutoriel = Post.query.join(Category).filter(Category.category_name == "TUTORIELS"). \
order_by(Post.date_posted.desc()).first()
post_ressource = Post.query.join(Category).filter(Category.category_name == "RESSOURCES"). \
order_by(Post.date_posted.desc()).first()
image_posts = []
for post in posts_for_cards:
image = post.img_title
image_posts.append(image)
return render_template('blog/blog.html', title="Accueil - Joblogueur",
categories=categories, last_post=last_post,moderators=moderators,
images=image_posts, posts_to_display=posts_to_display,
post_banner=post_banner, post_business=post_business,
post_formation=post_formation, post_tutoriel=post_tutoriel, post_ressource=post_ressource)
# Display individual post
@bp.route('/publication/<post_title>', methods=['POST', 'GET'])
def post(post_ti | :
form = CommentForm()
titre = post_title.replace('-', ' ')
    # Look up the post by its title
post = Post.query.filter_by(title=titre).first()
moderators = Moderator.query.all()
    # Fetch all comments linked to this post
comments_to_display = Comment.query.join(Post).filter(Comment.post_id == post.id).\
order_by(Comment.date_posted.desc()).all()
    # List all categories
categories = Category.query.all()
nbr_comments = 0
    # Count the number of approved comments for this post
for comment in post.comments:
if comment.approved_or_not:
nbr_comments += 1
if form.validate_on_submit():
search_comments = Comment.query.filter_by(email_author=form.author_email.data).all()
ids = []
for comment in search_comments:
ids.append(comment.post_id)
if post.id in ids:
flash("Vous avez deja commenté cet article", "info")
        # Create a new comment
else:
new_comment = Comment(name_author=form.author.data, email_author=form.author_email.data,
content=form.content.data, post_id=post.id, approved_or_not=False)
db.session.add(new_comment)
db.session.commit()
form.author.data = ""
form.author_email.data = ""
form.content.data = ""
flash("Votre commentaire est en cours de validation", "success")
return render_template('blog/blog_post.html', title=titre + " | Joblogueur", post=post, form=form,
nbr_comments=int(nbr_comments), categories=categories, comments=comments_to_display,
titre=post_title)
form.author.data = ""
form.author_email.data = ""
form.content.data = ""
image_file = url_for('static', filename='upload/'+str(post.img_title))
return render_template("blog/blog_post.html", title=titre + " | Joblogueur", post=post, form=form,
nbr_comments=int(nbr_comments), categories=categories,
comments=comments_to_display, image=image_file, moderators=moderators,
titre=post_title)
# Display post per category
@bp.route('/publications/<category_name>')
def post_per_category(category_name):
page = request.args.get('page', 1, type=int)
search_category = category_name.replace('-', ' ')
categories = Category.query.all()
posts = Post.query.join(Category).filter(Category.category_name == search_category).\
order_by(Post.date_posted.desc()).paginate(per_page=7, page=page)
image_posts = []
for post in posts.items:
image = post.img_title
image_posts.append(image)
return render_template("blog/posts_per_category.html", title=search_category + " | Joblogueur", posts=posts,
categories=categories, search_category=search_category, images=image_posts)
# Register user for daily news
@bp.route('/newsletter-invitation', methods=['POST','GET'])
def newsletter_invitation():
categories = Category.query.all()
posts_per_category = []
for category in categories:
last_post = Post.query.join(Category).filter(Post.category_id == category.id).first()
posts_per_category.append(last_post)
if request.method == 'POST':
usermail = request.form['usermail']
content = """
Salut très cher(e),
Comment vas-tu ?
Il y'a du nouveau sur ton blog préféré www.digitalschools.sn/blog
Ci-dessous une liste des publications que tu as surement manqués:
1- https://3df5e7df0cdb.ngrok.io/blog/publication/10-raisons-pourquoi-toute-entreprise-doit-cr%C3%A9er-ou-avoir-un-site-Web
2- https://3df5e7df0cdb.ngrok.io/blog/publication/10-bonnes-raisons-d%27apprendre-%C3%A0-son-enfant-%C3%A0-coder
3- https://3df5e7df0cdb.ngrok.io/blog/publication/FLASK-1.0.0
Merci pour ton temps et ta perséverance dans la lecture quotidienne.
Youssouf BINYOUM (digitalschools.sn)
"""
msg = Message("Nouvelle publication sur digitalschools.sn/blog", recipients=[usermail],
sender='[email protected]')
msg.body = content
mail.send(msg)
print(request.args)
return redirect(url_for('blog.index'))
| tle) |
state.d.ts | export type Data = {
cpu: number,
temperature: number | } |
|
set.go | package twilio
import (
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/grokify/gophonenumbers/common"
"github.com/grokify/mogo/io/ioutilmore"
"github.com/grokify/mogo/os/osutil"
"github.com/grokify/mogo/time/timeutil"
"github.com/grokify/mogo/type/stringsutil"
"github.com/rs/zerolog/log"
)
var MultiLimit = 0 // test limit to gracefully exit process early.
// MultiResults is designed to handle large volumes
// of requests.
type MultiResults struct {
CountsByStatusCode map[string]int
Responses map[string]*NumberInfo
}
func NewMultiResults() MultiResults {
return MultiResults{
CountsByStatusCode: map[string]int{},
Responses: map[string]*NumberInfo{}}
}
func (mr *MultiResults) Inflate() {
counts := map[string]int{}
for _, resp := range mr.Responses {
scStr := strconv.Itoa(resp.APIResponseInfo.StatusCode)
if _, ok := counts[scStr]; !ok {
counts[scStr] = 0
}
counts[scStr]++
}
counts["all"] = len(mr.Responses)
mr.CountsByStatusCode = counts
}
func (mr *MultiResults) AddResponses(resps map[string]*NumberInfo) {
for k, v := range resps {
existing, ok := mr.Responses[k]
if !ok ||
(existing.APIResponseInfo.StatusCode >= 300 && v.APIResponseInfo.StatusCode < 300) {
mr.Responses[k] = v
}
}
}
func (mr *MultiResults) GetNumberInfo(e164Number string) (*NumberInfo, error) {
e164Number = strings.TrimSpace(e164Number)
if ni, ok := mr.Responses[e164Number]; ok {
return ni, nil
}
return nil, fmt.Errorf("number [%s] not found", e164Number)
}
func (mr *MultiResults) NumbersSuccess() []string {
numbers := []string{}
for _, resp := range mr.Responses {
pn := strings.TrimSpace(resp.PhoneNumber)
if len(pn) > 0 {
numbers = append(numbers, pn)
}
}
return numbers
}
func (mr *MultiResults) Write(filename string) error {
mr.Inflate()
bytes, err := json.MarshalIndent(mr, "", " ")
if err != nil {
return err
}
return ioutil.WriteFile(filename, bytes, 0600)
}
func | (client *Client, requestNumbers, skipNumbers []string, filenameBase string, logAt, fileAt uint) MultiResults {
uniquesRequests := stringsutil.SliceCondenseSpace(requestNumbers, true, true)
uniqueSkips := stringsutil.SliceCondenseSpace(skipNumbers, true, true)
uniqueSkipsMap := map[string]int{}
for _, pnSkip := range uniqueSkips {
uniqueSkipsMap[pnSkip] = 1
}
resps := NewMultiResults()
count := len(uniquesRequests)
i := 0
for _, e164Number := range uniquesRequests {
i++
if _, ok := uniqueSkipsMap[e164Number]; ok {
continue
}
validate, _ := client.Validate(
e164Number, &Params{Type: "carrier"})
resps.Responses[e164Number] = &validate
if logAt > 0 && i%int(logAt) == 0 {
/*apiStatus := "S"
if validate.ApiResponseInfo.StatusCode >= 300 {
apiStatus = "F"
}*/
log.Info().
Int("num", i).
Int("count", count).
Str("e164number", e164Number).
Int("httpStatus", validate.APIResponseInfo.StatusCode).
Msg("logAt")
}
if fileAt > 0 && i%int(fileAt) == 0 && len(resps.Responses) > 0 {
err := resps.Write(common.BuildFilename(filenameBase, i, count))
if err != nil {
log.Error().Err(err)
}
resps = NewMultiResults()
}
if MultiLimit > 0 && i > MultiLimit {
break
}
}
if len(resps.Responses) > 0 {
err := resps.Write(common.BuildFilename(filenameBase, i, count))
if err != nil {
log.Error().Err(err)
}
}
return resps
}
func NewMultiResultsFiles(dir string, rxPattern string) (MultiResults, error) {
dir = strings.TrimSpace(dir)
if len(dir) == 0 {
dir = "."
}
all := NewMultiResults()
rx, err := regexp.Compile(rxPattern)
if err != nil {
return all, err
}
files, err := osutil.ReadDirMore(dir, rx, false, true, false)
if err != nil {
return all, err
}
for _, entry := range files {
mResults := NewMultiResults()
err := ioutilmore.ReadFileJSON(
filepath.Join(dir, entry.Name()), &mResults)
if err != nil {
return all, err
}
fi, err := entry.Info()
if err != nil {
return all, err
}
fileModTime := fi.ModTime()
for key, ni := range mResults.Responses {
if timeutil.TimeIsZeroAny(ni.APIResponseInfo.Time) {
ni.APIResponseInfo.Time = fileModTime
mResults.Responses[key] = ni
}
}
all.AddResponses(mResults.Responses)
}
return all, nil
}
| GetWriteValidationMulti |
memory.go | package storage
import (
"github.com/xwb1989/shortener/storage/encoder"
)
type memMap struct {
table map[uint64]string
encoder encoder.Encoder
}
func (m *memMap) Read(key string) (string, error) {
k, err := m.encoder.StringToKey(key)
if err != nil {
return "", err
}
if val, ok := m.table[k]; ok {
return val, nil
} else {
return val, InvalidKeyError(key)
}
}
func (m *memMap) Write(url string) (string, error) {
key := m.encoder.Encode(url)
m.table[key] = url
return m.encoder.KeyToString(key), nil
}
func NewMemMap(encoder encoder.Encoder) Storage | {
return &memMap{table: map[uint64]string{}, encoder: encoder}
} |
|
console_test.go | package console_test
import (
"fmt"
"isso0424/dise/handler/console"
"testing"
)
const shouldErrorOccur = "should error occur with %s"
func TestConsoleSend(t *testing.T) {
session := console.New()
err := session.Send("hoge", "fuga")
if err != nil {
t.Fatal(err.Error())
}
}
func | (t *testing.T) {
session := console.New()
err := session.Send("", "message")
if err == nil {
t.Fatal(fmt.Sprintf(shouldErrorOccur, "empty channel ID"))
}
err = session.Send("channelID", "")
if err == nil {
t.Fatal(fmt.Sprintf(shouldErrorOccur, "empty message"))
}
err = session.Send("channelID", createTooLongMessage())
if err == nil {
t.Fatal(fmt.Sprintf(shouldErrorOccur, "too long message"))
}
}
func createTooLongMessage() (message string) {
template := "hoge"
for i := 0; i < 510; i++ {
message += template
}
return
}
| TestConsoleSendFail |
test_util.py | import io
import subprocess
import sys
import traceback
global cl
class Colorizer(object):
RED = "\033[31;1m"
GREEN = "\033[32;1m"
YELLOW = "\033[33;1m"
CYAN = "\033[36;1m"
RESET = "\033[0m"
NEWLINE = "\n"
@classmethod
def _colorize(cls, string, color):
|
@classmethod
def red(cls, string):
return cls._colorize(string, "RED")
@classmethod
def green(cls, string):
return cls._colorize(string, "GREEN")
@classmethod
def yellow(cls, string):
return cls._colorize(string, "YELLOW")
@classmethod
def cyan(cls, string):
return cls._colorize(string, "CYAN")
class FakeStdout(io.StringIO):
"""Fake class to mimic stdout. We can't just use io.StringIO because we need
to fake the ability to write binary files to sys.stdout.buffer (thus this
class has a "buffer" attribute that behaves the same way).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.buffer = io.BytesIO()
def getvalue(self):
"""
If self.buffer has a non-unicode value, return that value.
Otherwise, decode the self.buffer value and append it
to self.getvalue().
This is because this function is mimicking the behavior of `sys.stdout`.
`sys.stdout` can be read as either a string or bytes.
When a string is written to `sys.stdout`, it returns a string when doing `getvalue()`.
When bytes are written to `sys.stdout` (by writing to `sys.stdout.buffer`),
it returns bytes when doing `getvalue()`. The reason we need to account for this
case is that there are tests in which a binary file is uploaded, then it is
printed out (by writing to `sys.stdout.buffer`), and then the test reads what's
printed out and makes sure it matches the original file.
"""
try:
buffer_value = self.buffer.getvalue().decode()
except UnicodeDecodeError:
return self.buffer.getvalue()
return super().getvalue() + buffer_value
def run_command(
args,
expected_exit_code=0,
max_output_chars=1024,
env=None,
include_stderr=False,
binary=False,
force_subprocess=False,
cwd=None,
):
# We import the following imports here because codalab_service.py imports TestModule from
# this file. If we kept the imports at the top, then anyone who ran codalab_service.py
# would also have to install all the dependencies that BundleCLI and CodaLabManager use.
from codalab.lib.bundle_cli import BundleCLI
from codalab.lib.codalab_manager import CodaLabManager
def sanitize(string, max_chars=256):
# Sanitize and truncate output so it can be printed on the command line.
# Don't print out binary.
if isinstance(string, bytes):
string = '<binary>'
if len(string) > max_chars:
string = string[:max_chars] + ' (...more...)'
return string
# If we don't care about the exit code, set `expected_exit_code` to None.
print(">>", *map(str, args), sep=" ")
sys.stdout.flush()
try:
kwargs = dict(env=env)
if not binary:
kwargs = dict(kwargs, encoding="utf-8")
if include_stderr:
kwargs = dict(kwargs, stderr=subprocess.STDOUT)
if cwd:
kwargs = dict(kwargs, cwd=cwd)
if not force_subprocess:
# In this case, run the Codalab CLI directly, which is much faster
# than opening a new subprocess to do so.
stderr = io.StringIO() # Not used; we just don't want to redirect cli.stderr to stdout.
stdout = FakeStdout()
cli = BundleCLI(CodaLabManager(), stdout=stdout, stderr=stderr)
try:
cli.do_command(args[1:])
exitcode = 0
except SystemExit as e:
exitcode = e.code
output = stdout.getvalue()
else:
output = subprocess.check_output([a.encode() for a in args], **kwargs)
exitcode = 0
except subprocess.CalledProcessError as e:
output = e.output
exitcode = e.returncode
except Exception:
output = traceback.format_exc()
exitcode = 1
if expected_exit_code is not None and exitcode != expected_exit_code:
colorize = Colorizer.red
extra = ' BAD'
else:
colorize = Colorizer.cyan
extra = ''
print(colorize(" (exit code %s, expected %s%s)" % (exitcode, expected_exit_code, extra)))
sys.stdout.flush()
print(sanitize(output, max_output_chars))
sys.stdout.flush()
assert (
expected_exit_code == exitcode
), f'Exit codes don\'t match: got {exitcode}, expected {expected_exit_code}'
return output.rstrip()
def cleanup(cl, tag, should_wait=True):
'''
Removes all bundles and worksheets with the specified tag.
:param cl: str
Path to CodaLab command line.
:param tag: str
Specific tag use to search for bundles and worksheets to delete.
:param should_wait: boolean
Whether to wait for a bundle to finish running before deleting (default is true).
:return:
'''
print('Cleaning up bundles and worksheets tagged with {}...'.format(tag))
# Clean up tagged bundles
bundles_removed = 0
while True:
# Query 1000 bundles at a time for removal
query_result = run_command([cl, 'search', 'tags=%s' % tag, '.limit=1000', '--uuid-only'])
if len(query_result) == 0:
break
for uuid in query_result.split('\n'):
if should_wait:
# Wait until the bundle finishes and then delete it
run_command([cl, 'wait', uuid])
run_command([cl, 'rm', uuid, '--force'])
bundles_removed += 1
# Clean up tagged worksheets
worksheets_removed = 0
while True:
query_result = run_command([cl, 'wsearch', 'tag=%s' % tag, '.limit=1000', '--uuid-only'])
if len(query_result) == 0:
break
for uuid in query_result.split('\n'):
run_command([cl, 'wrm', uuid, '--force'])
worksheets_removed += 1
print('Removed {} bundles and {} worksheets.'.format(bundles_removed, worksheets_removed))
| return getattr(cls, color) + string + cls.RESET + cls.NEWLINE |
notifications_test.go | package notifications
import (
"bytes"
"context"
"testing"
"time"
blocks "github.com/ipsn/go-ipfs/gxlibs/github.com/ipfs/go-block-format"
cid "github.com/ipsn/go-ipfs/gxlibs/github.com/ipfs/go-cid"
blocksutil "github.com/ipsn/go-ipfs/gxlibs/github.com/ipfs/go-ipfs-blocksutil"
)
func TestDuplicates(t *testing.T) {
b1 := blocks.NewBlock([]byte("1"))
b2 := blocks.NewBlock([]byte("2"))
n := New()
defer n.Shutdown()
ch := n.Subscribe(context.Background(), b1.Cid(), b2.Cid())
n.Publish(b1)
blockRecvd, ok := <-ch
if !ok {
t.Fail()
}
assertBlocksEqual(t, b1, blockRecvd)
n.Publish(b1) // ignored duplicate
n.Publish(b2)
blockRecvd, ok = <-ch
if !ok {
t.Fail()
}
assertBlocksEqual(t, b2, blockRecvd)
}
func TestPublishSubscribe(t *testing.T) {
blockSent := blocks.NewBlock([]byte("Greetings from The Interval"))
n := New()
defer n.Shutdown()
ch := n.Subscribe(context.Background(), blockSent.Cid())
n.Publish(blockSent)
blockRecvd, ok := <-ch
if !ok {
t.Fail()
}
assertBlocksEqual(t, blockRecvd, blockSent)
}
func TestSubscribeMany(t *testing.T) {
e1 := blocks.NewBlock([]byte("1"))
e2 := blocks.NewBlock([]byte("2"))
n := New()
defer n.Shutdown()
ch := n.Subscribe(context.Background(), e1.Cid(), e2.Cid())
n.Publish(e1)
r1, ok := <-ch
if !ok {
t.Fatal("didn't receive first expected block")
}
assertBlocksEqual(t, e1, r1)
n.Publish(e2)
r2, ok := <-ch
if !ok {
t.Fatal("didn't receive second expected block")
}
assertBlocksEqual(t, e2, r2)
}
// TestDuplicateSubscribe tests a scenario where a given block
// would be requested twice at the same time.
func TestDuplicateSubscribe(t *testing.T) {
e1 := blocks.NewBlock([]byte("1"))
n := New()
defer n.Shutdown()
ch1 := n.Subscribe(context.Background(), e1.Cid())
ch2 := n.Subscribe(context.Background(), e1.Cid())
n.Publish(e1)
r1, ok := <-ch1
if !ok {
t.Fatal("didn't receive first expected block")
}
assertBlocksEqual(t, e1, r1)
r2, ok := <-ch2
if !ok {
t.Fatal("didn't receive second expected block")
}
assertBlocksEqual(t, e1, r2)
}
func TestShutdownBeforeUnsubscribe(t *testing.T) {
e1 := blocks.NewBlock([]byte("1"))
n := New()
ctx, cancel := context.WithCancel(context.Background())
ch := n.Subscribe(ctx, e1.Cid()) // no keys provided
n.Shutdown()
cancel()
select {
case _, ok := <-ch:
if ok {
t.Fatal("channel should have been closed")
}
default:
t.Fatal("channel should have been closed")
}
}
func TestSubscribeIsANoopWhenCalledWithNoKeys(t *testing.T) {
n := New()
defer n.Shutdown()
ch := n.Subscribe(context.Background()) // no keys provided
if _, ok := <-ch; ok {
t.Fatal("should be closed if no keys provided")
}
}
func TestCarryOnWhenDeadlineExpires(t *testing.T) {
impossibleDeadline := time.Nanosecond
fastExpiringCtx, cancel := context.WithTimeout(context.Background(), impossibleDeadline)
defer cancel()
n := New()
defer n.Shutdown()
block := blocks.NewBlock([]byte("A Missed Connection"))
blockChannel := n.Subscribe(fastExpiringCtx, block.Cid())
assertBlockChannelNil(t, blockChannel)
}
func TestDoesNotDeadLockIfContextCancelledBeforePublish(t *testing.T) {
g := blocksutil.NewBlockGenerator()
ctx, cancel := context.WithCancel(context.Background())
n := New()
defer n.Shutdown()
t.Log("generate a large number of blocks. exceed default buffer")
bs := g.Blocks(1000)
ks := func() []cid.Cid {
var keys []cid.Cid
for _, b := range bs {
keys = append(keys, b.Cid())
}
return keys
}()
_ = n.Subscribe(ctx, ks...) // ignore received channel
t.Log("cancel context before any blocks published")
cancel()
for _, b := range bs {
n.Publish(b)
}
t.Log("publishing the large number of blocks to the ignored channel must not deadlock")
} | if ok {
t.Fail()
}
}
func assertBlocksEqual(t *testing.T, a, b blocks.Block) {
if !bytes.Equal(a.RawData(), b.RawData()) {
t.Fatal("blocks aren't equal")
}
if a.Cid() != b.Cid() {
t.Fatal("block keys aren't equal")
}
} |
func assertBlockChannelNil(t *testing.T, blockChannel <-chan blocks.Block) {
_, ok := <-blockChannel |
18-extending_bound_23.py | from typing import Tuple, FrozenSet
from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
# r' = r
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
# i >= l -> i' = 0 & l' = l + 1 & !inc_i'
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
# (G F inc_i) -> ! G F r > i
G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
r_gt_i = msat_make_gt(menv, r, i)
n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
def | (env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
loc0 = Location(env, mgr.GE(r, n0), mgr.GE(i, n0),
stutterT=mgr.Equals(x_r, mgr.Plus(r, i)))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r3", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc = Location(env, mgr.Not(inc_i))
loc.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l2", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc3", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
loc2 = Location(env, mgr.GE(r, n0))
loc2.set_progress(0, mgr.Equals(x_r, r))
h_r = Hint("h_r4", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1, loc2])
res.append(h_r)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.LE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Minus(i, n1)))
h_i = Hint("h_i1", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0),
stutterT=mgr.Equals(x_i, mgr.Plus(i, l)))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i3", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.GE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1)))
h_i = Hint("h_i0", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc = Location(env, inc_i)
loc.set_progress(0, x_inc_i)
h_inc = Hint("h_inc0", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(2, mgr.Not(x_inc_i))
loc2 = Location(env, mgr.Not(inc_i))
loc2.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc4", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1, loc2])
res.append(h_inc)
loc = Location(env, mgr.LE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Minus(l, n1)))
h_l = Hint("h_l1", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc0 = Location(env, mgr.GE(l, n0), mgr.GE(r, n0),
stutterT=mgr.Equals(x_l, mgr.Plus(l, r)))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l3", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(2, mgr.Equals(x_l, l))
loc2 = Location(env, mgr.GE(l, n0))
loc2.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l4", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1, loc2])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc2", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i2", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc = Location(env, mgr.GE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r0", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc = Location(env, mgr.GE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Plus(l, n1)))
h_l = Hint("h_l0", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(2, mgr.Equals(x_i, i))
loc2 = Location(env, mgr.GE(i, n0))
loc2.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i4", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1, loc2])
res.append(h_i)
return frozenset(res)
| hints |
statevector_simulator.py | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Contains a (slow) python statevector simulator.
It simulates the statevector through a quantum circuit. It is exponential in
the number of qubits.
We advise using the c++ simulator or online simulator for larger size systems.
The input is a qobj dictionary and the output is a Result object.
The input qobj to this simulator has no shots, no measures, no reset, no noise.
"""
import logging
from math import log2
from qiskit.util import local_hardware_info
from qiskit.providers.basicaer.exceptions import BasicAerError
from qiskit.providers.models import QasmBackendConfiguration
from .qasm_simulator import QasmSimulatorPy
logger = logging.getLogger(__name__)
class StatevectorSimulatorPy(QasmSimulatorPy):
| """Python statevector simulator."""
MAX_QUBITS_MEMORY = int(log2(local_hardware_info()['memory'] * (1024 ** 3) / 16))
DEFAULT_CONFIGURATION = {
'backend_name': 'statevector_simulator',
'backend_version': '1.0.0',
'n_qubits': min(24, MAX_QUBITS_MEMORY),
'url': 'https://github.com/Qiskit/qiskit-terra',
'simulator': True,
'local': True,
'conditional': True,
'open_pulse': False,
'memory': True,
'max_shots': 65536,
'coupling_map': None,
'description': 'A Python statevector simulator for qobj files',
'basis_gates': ['u1', 'u2', 'u3', 'cx', 'id', 'snapshot'],
'gates': [
{
'name': 'u1',
'parameters': ['lambda'],
'qasm_def': 'gate u1(lambda) q { U(0,0,lambda) q; }'
},
{
'name': 'u2',
'parameters': ['phi', 'lambda'],
'qasm_def': 'gate u2(phi,lambda) q { U(pi/2,phi,lambda) q; }'
},
{
'name': 'u3',
'parameters': ['theta', 'phi', 'lambda'],
'qasm_def': 'gate u3(theta,phi,lambda) q { U(theta,phi,lambda) q; }'
},
{
'name': 'cx',
'parameters': ['c', 't'],
'qasm_def': 'gate cx c,t { CX c,t; }'
},
{
'name': 'id',
'parameters': ['a'],
'qasm_def': 'gate id a { U(0,0,0) a; }'
},
{
'name': 'snapshot',
'parameters': ['slot'],
'qasm_def': 'gate snapshot(slot) q { TODO }'
}
]
}
# Override base class value to return the final state vector
SHOW_FINAL_STATE = True
def __init__(self, configuration=None, provider=None):
super().__init__(configuration=(
configuration or QasmBackendConfiguration.from_dict(self.DEFAULT_CONFIGURATION)),
provider=provider)
def run(self, qobj, backend_options=None):
"""Run qobj asynchronously.
Args:
qobj (Qobj): payload of the experiment
backend_options (dict): backend options
Returns:
BasicAerJob: derived from BaseJob
Additional Information::
backend_options: Is a dict of options for the backend. It may contain
* "initial_statevector": vector_like
* "chop_threshold": double
The "initial_statevector" option specifies a custom initial
initial statevector for the simulator to be used instead of the all
zero state. This size of this vector must be correct for the number
of qubits in all experiments in the qobj.
The "chop_threshold" option specifies a truncation value for
setting small values to zero in the output statevector. The default
value is 1e-15.
Example::
backend_options = {
"initial_statevector": np.array([1, 0, 0, 1j]) / np.sqrt(2),
"chop_threshold": 1e-15
}
"""
return super().run(qobj, backend_options=backend_options)
def _validate(self, qobj):
"""Semantic validations of the qobj which cannot be done via schemas.
Some of these may later move to backend schemas.
1. No shots
2. No measurements in the middle
"""
n_qubits = qobj.config.n_qubits
max_qubits = self.configuration().n_qubits
if n_qubits > max_qubits:
raise BasicAerError('Number of qubits {} '.format(n_qubits) +
'is greater than maximum ({}) '.format(max_qubits) +
'for "{}".'.format(self.name()))
if qobj.config.shots != 1:
logger.info('"%s" only supports 1 shot. Setting shots=1.',
self.name())
qobj.config.shots = 1
for experiment in qobj.experiments:
name = experiment.header.name
if getattr(experiment.config, 'shots', 1) != 1:
logger.info('"%s" only supports 1 shot. '
'Setting shots=1 for circuit "%s".',
self.name(), name)
experiment.config.shots = 1 |
|
setcsrfriendlyname.ts | id: string;
new_friendly_name: string;
}
import { execute } from './index';
export default (param: setCsrFriendlyNameParameters) => {
return execute({
function: 'set_csr_friendly_name',
parameters: param,
});
}; | // https://documentation.cpanel.net/display/DD/UAPI+Functions+-+SSL%3A%3Aset_csr_friendly_name
export interface setCsrFriendlyNameParameters {
friendly_name: string; |
|
store_test.go | // Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package intervalstore
import (
"io/ioutil"
"os"
"testing"
"github.com/ethersphere/bee/pkg/statestore/leveldb"
"github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/storage"
)
// TestInmemoryStore tests basic functionality of InmemoryStore.
func TestInmemoryStore(t *testing.T) {
testStore(t, mock.NewStateStore())
}
// TestDBStore tests basic functionality of DBStore.
func TestDBStore(t *testing.T) {
dir, err := ioutil.TempDir("", "intervals_test_db_store")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
store, err := leveldb.NewStateStore(dir, nil)
if err != nil {
t.Fatal(err)
}
defer store.Close()
testStore(t, store)
}
// testStore is a helper function to test various Store implementations.
func | (t *testing.T, s storage.StateStorer) {
key1 := "key1"
i1 := NewIntervals(0)
i1.Add(10, 20)
if err := s.Put(key1, i1); err != nil {
t.Fatal(err)
}
i := &Intervals{}
err := s.Get(key1, i)
if err != nil {
t.Fatal(err)
}
if i.String() != i1.String() {
t.Errorf("expected interval %s, got %s", i1, i)
}
key2 := "key2"
i2 := NewIntervals(0)
i2.Add(10, 20)
if err := s.Put(key2, i2); err != nil {
t.Fatal(err)
}
err = s.Get(key2, i)
if err != nil {
t.Fatal(err)
}
if i.String() != i2.String() {
t.Errorf("expected interval %s, got %s", i2, i)
}
if err := s.Delete(key1); err != nil {
t.Fatal(err)
}
if err := s.Get(key1, i); err != storage.ErrNotFound {
t.Errorf("expected error %v, got %s", storage.ErrNotFound, err)
}
if err := s.Get(key2, i); err != nil {
t.Errorf("expected error %v, got %s", nil, err)
}
if err := s.Delete(key2); err != nil {
t.Fatal(err)
}
if err := s.Get(key2, i); err != storage.ErrNotFound {
t.Errorf("expected error %v, got %s", storage.ErrNotFound, err)
}
}
| testStore |
a1.rs | #[doc = "Register `A1` reader"]
pub struct R(crate::R<A1_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<A1_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<A1_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<A1_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `A1` writer"]
pub struct W(crate::W<A1_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<A1_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<A1_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<A1_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `A1` reader - Slope of second piecewise linear function"]
pub struct A1_R(crate::FieldReader<u16, u16>);
impl A1_R {
#[inline(always)]
pub(crate) fn new(bits: u16) -> Self {
A1_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for A1_R {
type Target = crate::FieldReader<u16, u16>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `A1` writer - Slope of second piecewise linear function"]
pub struct A1_W<'a> {
w: &'a mut W,
}
impl<'a> A1_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0x0fff) | (value as u32 & 0x0fff);
self.w
}
}
impl R {
#[doc = "Bits 0:11 - Slope of second piecewise linear function"]
#[inline(always)]
pub fn a1(&self) -> A1_R {
A1_R::new((self.bits & 0x0fff) as u16)
}
}
impl W {
#[doc = "Bits 0:11 - Slope of second piecewise linear function"]
#[inline(always)]
pub fn a1(&mut self) -> A1_W {
A1_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Slope of second piecewise linear function\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [a1](index.html) module"]
pub struct | ;
impl crate::RegisterSpec for A1_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [a1::R](R) reader structure"]
impl crate::Readable for A1_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [a1::W](W) writer structure"]
impl crate::Writable for A1_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets A1 to value 0x0348"]
impl crate::Resettable for A1_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0x0348
}
}
| A1_SPEC |
NavBar.test.js | import React from 'react'; | import {NavBar} from "../components/NavBar";
import '@testing-library/jest-dom/extend-expect';
describe('NavBar', function () {
it("Renders Successfully",() => {
render(<NavBar/>);
})
it('Contains Logo', function () {
render(<NavBar/>);
expect(screen.getByText("OrthoFreeD")).toBeInTheDocument();
});
it('Contains Help Link', function () {
render(<NavBar/>);
expect(screen.getByText("Help")).toBeInTheDocument();
});
}); | import { render, screen} from '@testing-library/react'; |
api.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Defines interface for DB access.
The underlying driver is loaded as a :class:`LazyPluggable`.
Functions in this module are imported into the nova.db namespace. Call these
functions from nova.db namespace, not the nova.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:db_backend: string to lookup in the list of LazyPluggable backends.
`sqlalchemy` is the only supported backend right now.
:sql_connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/nova/nova.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova import utils
db_opts = [
cfg.StrOpt('db_backend',
default='sqlalchemy',
help='The backend to use for db'),
cfg.BoolOpt('enable_new_services',
default=True,
help='Services to be added to the available pool on create'),
cfg.StrOpt('instance_name_template',
default='instance-%08x',
help='Template string to be used to generate instance names'),
cfg.StrOpt('volume_name_template',
default='volume-%s',
help='Template string to be used to generate instance names'),
cfg.StrOpt('snapshot_name_template',
default='snapshot-%s',
help='Template string to be used to generate snapshot names'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(db_opts)
IMPL = utils.LazyPluggable('db_backend',
sqlalchemy='nova.db.sqlalchemy.api')
class NoMoreNetworks(exception.NovaException):
"""No more available networks."""
pass
class NoMoreTargets(exception.NovaException):
"""No more available targets"""
pass
###################
def constraint(**conditions):
"""Return a constraint object suitable for use with some updates."""
return IMPL.constraint(**conditions)
def equal_any(*values):
"""Return an equality condition object suitable for use in a constraint.
Equal_any conditions require that a model object's attribute equal any
one of the given values.
"""
return IMPL.equal_any(*values)
def not_equal(*values):
"""Return an inequality condition object suitable for use in a constraint.
Not_equal conditions require that a model object's attribute differs from
all of the given values.
"""
return IMPL.not_equal(*values)
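# Illustrative sketch (not in the original source): the helpers above are
# combined into a constraint and handed to an update/destroy call that
# accepts a `constraint` keyword, for example:
#   cons = constraint(vm_state=equal_any('stopped', 'error'),
#                     task_state=not_equal('migrating'))
#   instance_destroy(context, instance_uuid, constraint=cons)
# The attribute names and state values here are placeholders.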
###################
def service_destroy(context, service_id):
    """Destroy the service or raise if it does not exist."""
    return IMPL.service_destroy(context, service_id)
def service_get(context, service_id):
"""Get a service or raise if it does not exist."""
return IMPL.service_get(context, service_id)
def service_get_by_host_and_topic(context, host, topic):
"""Get a service by host it's on and topic it listens to."""
return IMPL.service_get_by_host_and_topic(context, host, topic)
def service_get_all(context, disabled=None):
"""Get all services."""
return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
"""Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
def service_get_all_by_host(context, host):
"""Get all services for a given host."""
return IMPL.service_get_all_by_host(context, host)
def service_get_all_compute_by_host(context, host):
"""Get all compute services for a given host."""
return IMPL.service_get_all_compute_by_host(context, host)
def service_get_all_compute_sorted(context):
"""Get all compute services sorted by instance count.
:returns: a list of (Service, instance_count) tuples.
"""
return IMPL.service_get_all_compute_sorted(context)
def service_get_all_volume_sorted(context):
"""Get all volume services sorted by volume count.
:returns: a list of (Service, volume_count) tuples.
"""
return IMPL.service_get_all_volume_sorted(context)
def service_get_by_args(context, host, binary):
"""Get the state of a service by node name and binary."""
return IMPL.service_get_by_args(context, host, binary)
def service_create(context, values):
"""Create a service from the values dictionary."""
return IMPL.service_create(context, values)
def service_update(context, service_id, values):
"""Set the given properties on a service and update it.
Raises NotFound if service does not exist.
"""
return IMPL.service_update(context, service_id, values)
###################
def compute_node_get(context, compute_id):
"""Get a computeNode."""
return IMPL.compute_node_get(context, compute_id)
def compute_node_get_all(context):
"""Get all computeNodes."""
return IMPL.compute_node_get_all(context)
def compute_node_search_by_hypervisor(context, hypervisor_match):
"""Get computeNodes given a hypervisor hostname match string."""
return IMPL.compute_node_search_by_hypervisor(context, hypervisor_match)
def compute_node_create(context, values):
"""Create a computeNode from the values dictionary."""
return IMPL.compute_node_create(context, values)
def compute_node_update(context, compute_id, values, prune_stats=False):
"""Set the given properties on a computeNode and update it.
Raises NotFound if computeNode does not exist.
"""
return IMPL.compute_node_update(context, compute_id, values, prune_stats)
def compute_node_get_by_host(context, host):
    """Get a computeNode for the given host."""
    return IMPL.compute_node_get_by_host(context, host)
def compute_node_statistics(context):
    """Get computeNode statistics aggregated over all compute nodes."""
    return IMPL.compute_node_statistics(context)
###################
def certificate_create(context, values):
"""Create a certificate from the values dictionary."""
return IMPL.certificate_create(context, values)
def certificate_get_all_by_project(context, project_id):
"""Get all certificates for a project."""
return IMPL.certificate_get_all_by_project(context, project_id)
def certificate_get_all_by_user(context, user_id):
"""Get all certificates for a user."""
return IMPL.certificate_get_all_by_user(context, user_id)
def certificate_get_all_by_user_and_project(context, user_id, project_id):
"""Get all certificates for a user and project."""
return IMPL.certificate_get_all_by_user_and_project(context,
user_id,
project_id)
###################
def floating_ip_get(context, id):
return IMPL.floating_ip_get(context, id)
def floating_ip_get_pools(context):
"""Returns a list of floating ip pools"""
return IMPL.floating_ip_get_pools(context)
def floating_ip_allocate_address(context, project_id, pool):
"""Allocate free floating ip from specified pool and return the address.
Raises if one is not available.
"""
return IMPL.floating_ip_allocate_address(context, project_id, pool)
def floating_ip_bulk_create(context, ips):
"""Create a lot of floating ips from the values dictionary."""
return IMPL.floating_ip_bulk_create(context, ips)
def floating_ip_create(context, values):
"""Create a floating ip from the values dictionary."""
return IMPL.floating_ip_create(context, values)
def floating_ip_count_by_project(context, project_id, session=None):
"""Count floating ips used by project."""
return IMPL.floating_ip_count_by_project(context, project_id,
session=session)
def floating_ip_deallocate(context, address):
"""Deallocate a floating ip by address."""
return IMPL.floating_ip_deallocate(context, address)
def floating_ip_destroy(context, address):
"""Destroy the floating_ip or raise if it does not exist."""
return IMPL.floating_ip_destroy(context, address)
def floating_ip_disassociate(context, address):
"""Disassociate a floating ip from a fixed ip by address.
:returns: the address of the existing fixed ip.
"""
return IMPL.floating_ip_disassociate(context, address)
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
"""Associate a floating ip to a fixed_ip by address."""
return IMPL.floating_ip_fixed_ip_associate(context,
floating_address,
fixed_address,
host)
def floating_ip_get_all(context):
"""Get all floating ips."""
return IMPL.floating_ip_get_all(context)
def floating_ip_get_all_by_host(context, host):
"""Get all floating ips by host."""
return IMPL.floating_ip_get_all_by_host(context, host)
def floating_ip_get_all_by_project(context, project_id):
"""Get all floating ips by project."""
return IMPL.floating_ip_get_all_by_project(context, project_id)
def floating_ip_get_by_address(context, address):
"""Get a floating ip by address or raise if it doesn't exist."""
return IMPL.floating_ip_get_by_address(context, address)
def floating_ip_get_by_fixed_address(context, fixed_address):
"""Get a floating ips by fixed address"""
return IMPL.floating_ip_get_by_fixed_address(context, fixed_address)
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
"""Get a floating ips by fixed address"""
return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id)
def floating_ip_update(context, address, values):
"""Update a floating ip by address or raise if it doesn't exist."""
return IMPL.floating_ip_update(context, address, values)
def floating_ip_set_auto_assigned(context, address):
"""Set auto_assigned flag to floating ip"""
return IMPL.floating_ip_set_auto_assigned(context, address)
def dnsdomain_list(context):
"""Get a list of all zones in our database, public and private."""
return IMPL.dnsdomain_list(context)
def dnsdomain_register_for_zone(context, fqdomain, zone):
"""Associated a DNS domain with an availability zone"""
return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone)
def dnsdomain_register_for_project(context, fqdomain, project):
"""Associated a DNS domain with a project id"""
return IMPL.dnsdomain_register_for_project(context, fqdomain, project)
def dnsdomain_unregister(context, fqdomain):
"""Purge associations for the specified DNS zone"""
return IMPL.dnsdomain_unregister(context, fqdomain)
def dnsdomain_get(context, fqdomain):
"""Get the db record for the specified domain."""
return IMPL.dnsdomain_get(context, fqdomain)
####################
def migration_update(context, id, values):
"""Update a migration instance."""
return IMPL.migration_update(context, id, values)
def migration_create(context, values):
"""Create a migration record."""
return IMPL.migration_create(context, values)
def migration_get(context, migration_id):
"""Finds a migration by the id."""
return IMPL.migration_get(context, migration_id)
def migration_get_by_instance_and_status(context, instance_uuid, status):
"""Finds a migration by the instance uuid its migrating."""
return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
status)
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute):
"""
Finds all unconfirmed migrations within the confirmation window for
a specific destination compute host.
"""
return IMPL.migration_get_unconfirmed_by_dest_compute(context,
confirm_window, dest_compute)
####################
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False):
"""Associate fixed ip to instance.
Raises if fixed ip is not available.
"""
return IMPL.fixed_ip_associate(context, address, instance_uuid, network_id,
reserved)
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None):
"""Find free ip in network and associate it to instance or host.
Raises if one is not available.
"""
return IMPL.fixed_ip_associate_pool(context, network_id,
instance_uuid, host)
def fixed_ip_create(context, values):
"""Create a fixed ip from the values dictionary."""
return IMPL.fixed_ip_create(context, values)
def fixed_ip_bulk_create(context, ips):
"""Create a lot of fixed ips from the values dictionary."""
return IMPL.fixed_ip_bulk_create(context, ips)
def fixed_ip_disassociate(context, address):
"""Disassociate a fixed ip from an instance by address."""
return IMPL.fixed_ip_disassociate(context, address)
def fixed_ip_disassociate_all_by_timeout(context, host, time):
"""Disassociate old fixed ips from host."""
return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
def fixed_ip_get(context, id):
"""Get fixed ip by id or raise if it does not exist."""
return IMPL.fixed_ip_get(context, id)
def fixed_ip_get_all(context):
"""Get all defined fixed ips."""
return IMPL.fixed_ip_get_all(context)
def fixed_ip_get_by_address(context, address):
"""Get a fixed ip by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address(context, address)
def fixed_ip_get_by_instance(context, instance_uuid):
"""Get fixed ips by instance or raise if none exist."""
return IMPL.fixed_ip_get_by_instance(context, instance_uuid)
def fixed_ip_get_by_network_host(context, network_uuid, host):
"""Get fixed ip for a host in a network."""
return IMPL.fixed_ip_get_by_network_host(context, network_uuid, host)
def fixed_ips_by_virtual_interface(context, vif_id):
"""Get fixed ips by virtual interface or raise if none exist."""
return IMPL.fixed_ips_by_virtual_interface(context, vif_id)
def fixed_ip_get_network(context, address):
"""Get a network for a fixed ip by address."""
return IMPL.fixed_ip_get_network(context, address)
def fixed_ip_update(context, address, values):
"""Create a fixed ip from the values dictionary."""
return IMPL.fixed_ip_update(context, address, values)
####################
def virtual_interface_create(context, values):
"""Create a virtual interface record in the database."""
return IMPL.virtual_interface_create(context, values)
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table,"""
return IMPL.virtual_interface_get(context, vif_id)
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table filtering on address."""
return IMPL.virtual_interface_get_by_address(context, address)
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table filtering on vif uuid."""
return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)
def virtual_interface_get_by_instance(context, instance_id):
"""Gets all virtual_interfaces for instance."""
return IMPL.virtual_interface_get_by_instance(context, instance_id)
def virtual_interface_get_by_instance_and_network(context, instance_id,
network_id):
"""Gets all virtual interfaces for instance."""
return IMPL.virtual_interface_get_by_instance_and_network(context,
instance_id,
network_id)
def virtual_interface_delete(context, vif_id):
"""Delete virtual interface record from the database."""
return IMPL.virtual_interface_delete(context, vif_id)
def virtual_interface_delete_by_instance(context, instance_id):
"""Delete virtual interface records associated with instance."""
return IMPL.virtual_interface_delete_by_instance(context, instance_id)
def virtual_interface_get_all(context):
"""Gets all virtual interfaces from the table"""
return IMPL.virtual_interface_get_all(context)
####################
def instance_create(context, values):
"""Create an instance from the values dictionary."""
return IMPL.instance_create(context, values)
def instance_data_get_for_project(context, project_id, session=None):
"""Get (instance_count, total_cores, total_ram) for project."""
return IMPL.instance_data_get_for_project(context, project_id,
session=session)
def instance_destroy(context, instance_uuid, constraint=None):
"""Destroy the instance or raise if it does not exist."""
return IMPL.instance_destroy(context, instance_uuid, constraint)
def instance_get_by_uuid(context, uuid):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get_by_uuid(context, uuid)
def instance_get(context, instance_id):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get(context, instance_id)
def instance_get_all(context, columns_to_join=None):
"""Get all instances."""
return IMPL.instance_get_all(context, columns_to_join=columns_to_join)
def instance_get_all_by_filters(context, filters, sort_key='created_at',
sort_dir='desc'):
"""Get all instances that match all filters."""
return IMPL.instance_get_all_by_filters(context, filters, sort_key,
sort_dir)
def instance_get_active_by_window(context, begin, end=None, project_id=None,
host=None):
"""Get instances active during a certain time window.
Specifying a project_id will filter for a certain project.
Specifying a host will filter for instances on a given compute host.
"""
return IMPL.instance_get_active_by_window(context, begin, end,
project_id, host)
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None):
"""Get instances and joins active during a certain time window.
Specifying a project_id will filter for a certain project.
Specifying a host will filter for instances on a given compute host.
"""
return IMPL.instance_get_active_by_window_joined(context, begin, end,
project_id, host)
def instance_get_all_by_project(context, project_id):
"""Get all instances belonging to a project."""
return IMPL.instance_get_all_by_project(context, project_id)
def instance_get_all_by_host(context, host):
"""Get all instances belonging to a host."""
return IMPL.instance_get_all_by_host(context, host)
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
"""Get all instances belonging to a host with a different type_id."""
return IMPL.instance_get_all_by_host_and_not_type(context, host, type_id)
def instance_get_all_by_reservation(context, reservation_id):
"""Get all instances belonging to a reservation."""
return IMPL.instance_get_all_by_reservation(context, reservation_id)
def instance_get_floating_address(context, instance_id):
"""Get the first floating ip address of an instance."""
return IMPL.instance_get_floating_address(context, instance_id)
def instance_get_all_hung_in_rebooting(context, reboot_window):
"""Get all instances stuck in a rebooting state."""
return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)
def instance_test_and_set(context, instance_uuid, attr, ok_states,
new_state):
"""Atomically check if an instance is in a valid state, and if it is, set
the instance into a new state.
"""
return IMPL.instance_test_and_set(context, instance_uuid, attr,
ok_states, new_state)
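# Illustrative call (placeholder attribute and state values): atomically move
# an instance from an "idle" task_state (None) into 'rebooting':
#   instance_test_and_set(context, instance_uuid, 'task_state',
#                         [None], 'rebooting')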
def instance_update(context, instance_uuid, values):
"""Set the given properties on an instance and update it.
Raises NotFound if instance does not exist.
"""
return IMPL.instance_update(context, instance_uuid, values)
def instance_update_and_get_original(context, instance_uuid, values):
"""Set the given properties on an instance and update it. Return
a shallow copy of the original instance reference, as well as the
updated one.
:param context: = request context object
    :param instance_uuid: = instance uuid
:param values: = dict containing column values
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
Raises NotFound if instance does not exist.
"""
return IMPL.instance_update_and_get_original(context, instance_uuid,
values)
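# Illustrative usage (placeholder values): the tuple return lets callers
# compare the record before and after the update:
#   old_ref, new_ref = instance_update_and_get_original(
#       context, instance_uuid, {'vm_state': 'active'})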
def instance_add_security_group(context, instance_id, security_group_id):
"""Associate the given security group with the given instance."""
return IMPL.instance_add_security_group(context, instance_id,
security_group_id)
def instance_remove_security_group(context, instance_id, security_group_id):
"""Disassociate the given security group from the given instance."""
return IMPL.instance_remove_security_group(context, instance_id,
security_group_id)
###################
def instance_info_cache_create(context, values):
"""Create a new instance cache record in the table.
:param context: = request context object
:param values: = dict containing column values
"""
return IMPL.instance_info_cache_create(context, values)
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
"""
return IMPL.instance_info_cache_get(context, instance_uuid)
def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
"""
return IMPL.instance_info_cache_update(context, instance_uuid, values)
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
"""
return IMPL.instance_info_cache_delete(context, instance_uuid)
###################
def key_pair_create(context, values):
"""Create a key_pair from the values dictionary."""
return IMPL.key_pair_create(context, values)
def key_pair_destroy(context, user_id, name):
"""Destroy the key_pair or raise if it does not exist."""
return IMPL.key_pair_destroy(context, user_id, name)
def key_pair_destroy_all_by_user(context, user_id):
"""Destroy all key_pairs by user."""
return IMPL.key_pair_destroy_all_by_user(context, user_id)
def key_pair_get(context, user_id, name):
"""Get a key_pair or raise if it does not exist."""
return IMPL.key_pair_get(context, user_id, name)
def key_pair_get_all_by_user(context, user_id):
"""Get all key_pairs by user."""
return IMPL.key_pair_get_all_by_user(context, user_id)
def key_pair_count_by_user(context, user_id):
"""Count number of key pairs for the given user ID."""
return IMPL.key_pair_count_by_user(context, user_id)
####################
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a free network to a project."""
return IMPL.network_associate(context, project_id, network_id, force)
def network_count(context):
"""Return the number of networks."""
return IMPL.network_count(context)
def network_count_reserved_ips(context, network_id):
"""Return the number of reserved ips in the network."""
return IMPL.network_count_reserved_ips(context, network_id)
def network_create_safe(context, values):
"""Create a network from the values dict.
The network is only returned if the create succeeds. If the create violates
constraints because the network already exists, no exception is raised.
"""
return IMPL.network_create_safe(context, values)
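# Illustrative usage (assumption: a falsy return signals that the network
# already existed, per the docstring's "only returned if the create
# succeeds"):
#   net = network_create_safe(context, {'label': 'net1',
#                                       'cidr': '10.0.0.0/24'})
#   if not net:
#       pass  # duplicate network; nothing was created, no exception raised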
def network_delete_safe(context, network_id):
"""Delete network with key network_id.
This method assumes that the network is not associated with any project
"""
return IMPL.network_delete_safe(context, network_id)
def network_create_fixed_ips(context, network_id, num_vpn_clients):
"""Create the ips for the network, reserving sepecified ips."""
return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)
def network_disassociate(context, network_id):
"""Disassociate the network from project or raise if it does not exist."""
return IMPL.network_disassociate(context, network_id)
def network_get(context, network_id):
"""Get a network or raise if it does not exist."""
return IMPL.network_get(context, network_id)
def network_get_all(context):
"""Return all defined networks."""
return IMPL.network_get_all(context)
def network_get_all_by_uuids(context, network_uuids, project_id=None):
"""Return networks by ids."""
return IMPL.network_get_all_by_uuids(context, network_uuids, project_id)
# pylint: disable=C0103
def network_get_associated_fixed_ips(context, network_id, host=None):
"""Get all network's ips that have been associated."""
return IMPL.network_get_associated_fixed_ips(context, network_id, host)
def network_get_by_bridge(context, bridge):
"""Get a network by bridge or raise if it does not exist."""
return IMPL.network_get_by_bridge(context, bridge)
def network_get_by_uuid(context, uuid):
"""Get a network by uuid or raise if it does not exist."""
return IMPL.network_get_by_uuid(context, uuid)
def network_get_by_cidr(context, cidr):
"""Get a network by cidr or raise if it does not exist"""
return IMPL.network_get_by_cidr(context, cidr)
def network_get_by_instance(context, instance_id):
"""Get a network by instance id or raise if it does not exist."""
return IMPL.network_get_by_instance(context, instance_id)
def network_get_all_by_instance(context, instance_id):
"""Get all networks by instance id or raise if none exist."""
return IMPL.network_get_all_by_instance(context, instance_id)
def network_get_all_by_host(context, host):
"""All networks for which the given host is the network host."""
return IMPL.network_get_all_by_host(context, host)
def network_get_index(context, network_id):
"""Get non-conflicting index for network."""
return IMPL.network_get_index(context, network_id)
def network_set_cidr(context, network_id, cidr):
"""Set the Classless Inner Domain Routing for the network."""
return IMPL.network_set_cidr(context, network_id, cidr)
def network_set_host(context, network_id, host_id):
"""Safely set the host for network."""
return IMPL.network_set_host(context, network_id, host_id)
def network_update(context, network_id, values):
"""Set the given properties on a network and update it.
Raises NotFound if network does not exist.
"""
return IMPL.network_update(context, network_id, values)
###################
def iscsi_target_count_by_host(context, host):
"""Return count of export devices."""
return IMPL.iscsi_target_count_by_host(context, host)
def iscsi_target_create_safe(context, values):
"""Create an iscsi_target from the values dictionary.
The device is not returned. If the create violates the unique
constraints because the iscsi_target and host already exist,
no exception is raised.
"""
return IMPL.iscsi_target_create_safe(context, values)
###############
def quota_create(context, project_id, resource, limit):
"""Create a quota for the given project and resource."""
return IMPL.quota_create(context, project_id, resource, limit)
def quota_get(context, project_id, resource):
"""Retrieve a quota or raise if it does not exist."""
return IMPL.quota_get(context, project_id, resource)
def quota_get_all_by_project(context, project_id):
"""Retrieve all quotas associated with a given project."""
return IMPL.quota_get_all_by_project(context, project_id)
def quota_update(context, project_id, resource, limit):
"""Update a quota or raise if it does not exist."""
return IMPL.quota_update(context, project_id, resource, limit)
def quota_destroy(context, project_id, resource):
"""Destroy the quota or raise if it does not exist."""
return IMPL.quota_destroy(context, project_id, resource)
###################
def quota_class_create(context, class_name, resource, limit):
"""Create a quota class for the given name and resource."""
return IMPL.quota_class_create(context, class_name, resource, limit)
def quota_class_get(context, class_name, resource):
"""Retrieve a quota class or raise if it does not exist."""
return IMPL.quota_class_get(context, class_name, resource)
def quota_class_get_all_by_name(context, class_name):
"""Retrieve all quotas associated with a given quota class."""
return IMPL.quota_class_get_all_by_name(context, class_name)
def quota_class_update(context, class_name, resource, limit):
"""Update a quota class or raise if it does not exist."""
return IMPL.quota_class_update(context, class_name, resource, limit)
def quota_class_destroy(context, class_name, resource):
"""Destroy the quota class or raise if it does not exist."""
return IMPL.quota_class_destroy(context, class_name, resource)
def quota_class_destroy_all_by_name(context, class_name):
"""Destroy all quotas associated with a given quota class."""
return IMPL.quota_class_destroy_all_by_name(context, class_name)
###################
def quota_usage_create(context, project_id, resource, in_use, reserved,
until_refresh):
"""Create a quota usage for the given project and resource."""
return IMPL.quota_usage_create(context, project_id, resource,
in_use, reserved, until_refresh)
def quota_usage_get(context, project_id, resource):
"""Retrieve a quota usage or raise if it does not exist."""
return IMPL.quota_usage_get(context, project_id, resource)
def quota_usage_get_all_by_project(context, project_id):
"""Retrieve all usage associated with a given resource."""
return IMPL.quota_usage_get_all_by_project(context, project_id)
def quota_usage_update(context, project_id, resource, in_use, reserved,
until_refresh):
"""Update a quota usage or raise if it does not exist."""
return IMPL.quota_usage_update(context, project_id, resource,
in_use, reserved, until_refresh)
def quota_usage_destroy(context, project_id, resource):
"""Destroy the quota usage or raise if it does not exist."""
return IMPL.quota_usage_destroy(context, project_id, resource)
###################
def reservation_create(context, uuid, usage, project_id, resource, delta,
expire):
"""Create a reservation for the given project and resource."""
return IMPL.reservation_create(context, uuid, usage, project_id,
resource, delta, expire)
def reservation_get(context, uuid):
"""Retrieve a reservation or raise if it does not exist."""
return IMPL.reservation_get(context, uuid)
def reservation_get_all_by_project(context, project_id):
"""Retrieve all reservations associated with a given project."""
return IMPL.reservation_get_all_by_project(context, project_id)
def reservation_destroy(context, uuid):
"""Destroy the reservation or raise if it does not exist."""
return IMPL.reservation_destroy(context, uuid)
###################
def quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age):
"""Check quotas and create appropriate reservations."""
return IMPL.quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age)
def reservation_commit(context, reservations):
"""Commit quota reservations."""
return IMPL.reservation_commit(context, reservations)
def reservation_rollback(context, reservations):
"""Roll back quota reservations."""
return IMPL.reservation_rollback(context, reservations)
def quota_destroy_all_by_project(context, project_id):
"""Destroy all quotas associated with a given project."""
return IMPL.quota_destroy_all_by_project(context, project_id)
def reservation_expire(context):
"""Roll back any expired reservations."""
return IMPL.reservation_expire(context)
###################
def volume_allocate_iscsi_target(context, volume_id, host):
"""Atomically allocate a free iscsi_target from the pool."""
return IMPL.volume_allocate_iscsi_target(context, volume_id, host)
def volume_attached(context, volume_id, instance_id, mountpoint):
"""Ensure that a volume is set as attached."""
return IMPL.volume_attached(context, volume_id, instance_id, mountpoint)
def volume_create(context, values):
"""Create a volume from the values dictionary."""
return IMPL.volume_create(context, values)
def volume_data_get_for_project(context, project_id, session=None):
"""Get (volume_count, gigabytes) for project."""
return IMPL.volume_data_get_for_project(context, project_id,
session=session)
def volume_destroy(context, volume_id):
"""Destroy the volume or raise if it does not exist."""
return IMPL.volume_destroy(context, volume_id)
def volume_detached(context, volume_id):
"""Ensure that a volume is set as detached."""
return IMPL.volume_detached(context, volume_id)
def volume_get(context, volume_id):
"""Get a volume or raise if it does not exist."""
return IMPL.volume_get(context, volume_id)
def volume_get_all(context):
"""Get all volumes."""
return IMPL.volume_get_all(context)
def volume_get_all_by_host(context, host):
"""Get all volumes belonging to a host."""
return IMPL.volume_get_all_by_host(context, host)
def volume_get_all_by_instance_uuid(context, instance_uuid):
"""Get all volumes belonging to an instance."""
return IMPL.volume_get_all_by_instance_uuid(context, instance_uuid)
def volume_get_all_by_project(context, project_id):
"""Get all volumes belonging to a project."""
return IMPL.volume_get_all_by_project(context, project_id)
def volume_get_by_ec2_id(context, ec2_id):
"""Get a volume by ec2 id."""
return IMPL.volume_get_by_ec2_id(context, ec2_id)
def volume_get_iscsi_target_num(context, volume_id):
"""Get the target num (tid) allocated to the volume."""
return IMPL.volume_get_iscsi_target_num(context, volume_id)
def volume_update(context, volume_id, values):
"""Set the given properties on a volume and update it.
Raises NotFound if volume does not exist.
"""
return IMPL.volume_update(context, volume_id, values)
def get_ec2_volume_id_by_uuid(context, volume_id):
    """Get the ec2 volume id for the given volume uuid."""
    return IMPL.get_ec2_volume_id_by_uuid(context, volume_id)
def get_volume_uuid_by_ec2_id(context, ec2_id):
    """Get the volume uuid for the given ec2 volume id."""
    return IMPL.get_volume_uuid_by_ec2_id(context, ec2_id)
def ec2_volume_create(context, volume_id, forced_id=None):
    """Create the ec2 id to volume uuid mapping on demand."""
    return IMPL.ec2_volume_create(context, volume_id, forced_id)
def get_snapshot_uuid_by_ec2_id(context, ec2_id):
    """Get the snapshot uuid for the given ec2 snapshot id."""
    return IMPL.get_snapshot_uuid_by_ec2_id(context, ec2_id)
def get_ec2_snapshot_id_by_uuid(context, snapshot_id):
    """Get the ec2 snapshot id for the given snapshot uuid."""
    return IMPL.get_ec2_snapshot_id_by_uuid(context, snapshot_id)
def ec2_snapshot_create(context, snapshot_id, forced_id=None):
    """Create the ec2 id to snapshot uuid mapping on demand."""
    return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id)
####################
def snapshot_create(context, values):
"""Create a snapshot from the values dictionary."""
return IMPL.snapshot_create(context, values)
def snapshot_destroy(context, snapshot_id):
"""Destroy the snapshot or raise if it does not exist."""
return IMPL.snapshot_destroy(context, snapshot_id)
def snapshot_get(context, snapshot_id):
"""Get a snapshot or raise if it does not exist."""
return IMPL.snapshot_get(context, snapshot_id)
def snapshot_get_all(context):
"""Get all snapshots."""
return IMPL.snapshot_get_all(context)
def snapshot_get_all_by_project(context, project_id):
"""Get all snapshots belonging to a project."""
return IMPL.snapshot_get_all_by_project(context, project_id)
def snapshot_get_all_for_volume(context, volume_id):
"""Get all snapshots for a volume."""
return IMPL.snapshot_get_all_for_volume(context, volume_id)
def snapshot_update(context, snapshot_id, values):
"""Set the given properties on a snapshot and update it.
Raises NotFound if snapshot does not exist.
"""
return IMPL.snapshot_update(context, snapshot_id, values)
####################
def block_device_mapping_create(context, values):
"""Create an entry of block device mapping"""
return IMPL.block_device_mapping_create(context, values)
def block_device_mapping_update(context, bdm_id, values):
"""Update an entry of block device mapping"""
return IMPL.block_device_mapping_update(context, bdm_id, values)
def block_device_mapping_update_or_create(context, values):
"""Update an entry of block device mapping.
If not existed, create a new entry"""
return IMPL.block_device_mapping_update_or_create(context, values)
def block_device_mapping_get_all_by_instance(context, instance_uuid):
"""Get all block device mapping belonging to an instance"""
return IMPL.block_device_mapping_get_all_by_instance(context,
instance_uuid)
def block_device_mapping_destroy(context, bdm_id):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy(context, bdm_id)
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy_by_instance_and_device(
context, instance_uuid, device_name)
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
"""Destroy the block device mapping."""
return IMPL.block_device_mapping_destroy_by_instance_and_volume(
context, instance_uuid, volume_id)
####################
def security_group_get_all(context):
"""Get all security groups."""
return IMPL.security_group_get_all(context)
def security_group_get(context, security_group_id):
"""Get security group by its id."""
return IMPL.security_group_get(context, security_group_id)
def security_group_get_by_name(context, project_id, group_name):
"""Returns a security group with the specified name from a project."""
return IMPL.security_group_get_by_name(context, project_id, group_name)
def security_group_get_by_project(context, project_id):
"""Get all security groups belonging to a project."""
return IMPL.security_group_get_by_project(context, project_id)
def security_group_get_by_instance(context, instance_id):
"""Get security groups to which the instance is assigned."""
return IMPL.security_group_get_by_instance(context, instance_id)
def security_group_exists(context, project_id, group_name):
"""Indicates if a group name exists in a project."""
return IMPL.security_group_exists(context, project_id, group_name)
def security_group_in_use(context, group_id):
"""Indicates if a security group is currently in use."""
return IMPL.security_group_in_use(context, group_id)
def security_group_create(context, values):
"""Create a new security group."""
return IMPL.security_group_create(context, values)
def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id."""
return IMPL.security_group_ensure_default(context)
def security_group_destroy(context, security_group_id):
"""Deletes a security group."""
return IMPL.security_group_destroy(context, security_group_id)
def security_group_count_by_project(context, project_id, session=None):
"""Count number of security groups in a project."""
return IMPL.security_group_count_by_project(context, project_id,
session=session)
####################
def security_group_rule_create(context, values):
"""Create a new security group."""
return IMPL.security_group_rule_create(context, values)
def security_group_rule_get_by_security_group(context, security_group_id):
"""Get all rules for a given security group."""
return IMPL.security_group_rule_get_by_security_group(context,
security_group_id)
def security_group_rule_get_by_security_group_grantee(context,
security_group_id):
"""Get all rules that grant access to the given security group."""
return IMPL.security_group_rule_get_by_security_group_grantee(context,
security_group_id)
def security_group_rule_destroy(context, security_group_rule_id):
"""Deletes a security group rule."""
return IMPL.security_group_rule_destroy(context, security_group_rule_id)
def security_group_rule_get(context, security_group_rule_id):
"""Gets a security group rule."""
return IMPL.security_group_rule_get(context, security_group_rule_id)
def security_group_rule_count_by_group(context, security_group_id):
"""Count rules in a given security group."""
return IMPL.security_group_rule_count_by_group(context, security_group_id)
###################
def provider_fw_rule_create(context, rule):
"""Add a firewall rule at the provider level (all hosts & instances)."""
return IMPL.provider_fw_rule_create(context, rule)
def provider_fw_rule_get_all(context):
"""Get all provider-level firewall rules."""
return IMPL.provider_fw_rule_get_all(context)
def provider_fw_rule_destroy(context, rule_id):
"""Delete a provider firewall rule from the database."""
return IMPL.provider_fw_rule_destroy(context, rule_id)
###################
def project_get_networks(context, project_id, associate=True):
"""Return the network associated with the project.
If associate is true, it will attempt to associate a new
network if one is not found, otherwise it returns None.
"""
return IMPL.project_get_networks(context, project_id, associate)
###################
def console_pool_create(context, values):
"""Create console pool."""
return IMPL.console_pool_create(context, values)
def console_pool_get_by_host_type(context, compute_host, proxy_host,
console_type):
"""Fetch a console pool for a given proxy host, compute host, and type."""
return IMPL.console_pool_get_by_host_type(context,
compute_host,
proxy_host,
console_type)
def console_pool_get_all_by_host_type(context, host, console_type):
"""Fetch all pools for given proxy host and type."""
return IMPL.console_pool_get_all_by_host_type(context,
host,
console_type)
def console_create(context, values):
"""Create a console."""
return IMPL.console_create(context, values)
def console_delete(context, console_id):
"""Delete a console."""
return IMPL.console_delete(context, console_id)
def console_get_by_pool_instance(context, pool_id, instance_uuid):
"""Get console entry for a given instance and pool."""
return IMPL.console_get_by_pool_instance(context, pool_id, instance_uuid)
def console_get_all_by_instance(context, instance_uuid):
"""Get consoles for a given instance."""
return IMPL.console_get_all_by_instance(context, instance_uuid)
def console_get(context, console_id, instance_uuid=None):
"""Get a specific console (possibly on a given instance)."""
return IMPL.console_get(context, console_id, instance_uuid)
##################
def instance_type_create(context, values):
"""Create a new instance type."""
return IMPL.instance_type_create(context, values)
def instance_type_get_all(context, inactive=False, filters=None):
"""Get all instance types."""
return IMPL.instance_type_get_all(
context, inactive=inactive, filters=filters)
def instance_type_get(context, id):
"""Get instance type by id."""
return IMPL.instance_type_get(context, id)
def instance_type_get_by_name(context, name):
"""Get instance type by name."""
return IMPL.instance_type_get_by_name(context, name)
def instance_type_get_by_flavor_id(context, id):
"""Get instance type by flavor id."""
return IMPL.instance_type_get_by_flavor_id(context, id)
def instance_type_destroy(context, name):
"""Delete an instance type."""
return IMPL.instance_type_destroy(context, name)
def instance_type_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access by flavor id."""
return IMPL.instance_type_access_get_by_flavor_id(context, flavor_id)
def instance_type_access_add(context, flavor_id, project_id):
"""Add flavor access for project."""
return IMPL.instance_type_access_add(context, flavor_id, project_id)
def instance_type_access_remove(context, flavor_id, project_id):
"""Remove flavor access for project."""
return IMPL.instance_type_access_remove(context, flavor_id, project_id)
####################
def instance_metadata_get(context, instance_uuid):
"""Get all metadata for an instance."""
return IMPL.instance_metadata_get(context, instance_uuid)
def instance_metadata_delete(context, instance_uuid, key):
"""Delete the given metadata item."""
IMPL.instance_metadata_delete(context, instance_uuid, key)
def instance_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
return IMPL.instance_metadata_update(context, instance_uuid,
metadata, delete)
####################
def instance_system_metadata_get(context, instance_uuid):
"""Get all system metadata for an instance."""
return IMPL.instance_system_metadata_get(context, instance_uuid)
def instance_system_metadata_delete(context, instance_uuid, key):
"""Delete the given system metadata item."""
IMPL.instance_system_metadata_delete(context, instance_uuid, key)
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
IMPL.instance_system_metadata_update(
context, instance_uuid, metadata, delete)
####################
def agent_build_create(context, values):
"""Create a new agent build entry."""
return IMPL.agent_build_create(context, values)
def agent_build_get_by_triple(context, hypervisor, os, architecture):
"""Get agent build by hypervisor/OS/architecture triple."""
return IMPL.agent_build_get_by_triple(context, hypervisor, os,
architecture)
def agent_build_get_all(context):
"""Get all agent builds."""
return IMPL.agent_build_get_all(context)
def agent_build_destroy(context, agent_update_id):
"""Destroy agent build entry."""
IMPL.agent_build_destroy(context, agent_update_id)
def agent_build_update(context, agent_build_id, values):
"""Update agent build entry."""
IMPL.agent_build_update(context, agent_build_id, values)
####################
def bw_usage_get_by_uuids(context, uuids, start_period):
"""Return bw usages for instance(s) in a given audit period."""
return IMPL.bw_usage_get_by_uuids(context, uuids, start_period)
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_refreshed=None):
"""Update cached bandwidth usage for an instance's network based on mac
address. Creates new record if needed.
"""
return IMPL.bw_usage_update(context, uuid, mac, start_period, bw_in,
bw_out, last_refreshed=last_refreshed)
####################
def instance_type_extra_specs_get(context, flavor_id):
"""Get all extra specs for an instance type."""
return IMPL.instance_type_extra_specs_get(context, flavor_id)
def instance_type_extra_specs_delete(context, flavor_id, key):
"""Delete the given extra specs item."""
IMPL.instance_type_extra_specs_delete(context, flavor_id, key)
def instance_type_extra_specs_update_or_create(context, flavor_id,
extra_specs):
"""Create or update instance type extra specs. This adds or modifies the
key/value pairs specified in the extra specs dict argument"""
IMPL.instance_type_extra_specs_update_or_create(context, flavor_id,
extra_specs)
##################
def volume_metadata_get(context, volume_id):
"""Get all metadata for a volume."""
return IMPL.volume_metadata_get(context, volume_id)
def volume_metadata_delete(context, volume_id, key):
"""Delete the given metadata item."""
IMPL.volume_metadata_delete(context, volume_id, key)
def volume_metadata_update(context, volume_id, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
IMPL.volume_metadata_update(context, volume_id, metadata, delete)
##################
def volume_type_create(context, values):
"""Create a new volume type."""
return IMPL.volume_type_create(context, values)
def volume_type_get_all(context, inactive=False):
"""Get all volume types."""
return IMPL.volume_type_get_all(context, inactive)
def volume_type_get(context, id):
"""Get volume type by id."""
return IMPL.volume_type_get(context, id)
def volume_type_get_by_name(context, name):
"""Get volume type by name."""
return IMPL.volume_type_get_by_name(context, name)
def volume_type_destroy(context, name):
"""Delete a volume type."""
return IMPL.volume_type_destroy(context, name)
def volume_get_active_by_window(context, begin, end=None, project_id=None):
"""Get all the volumes inside the window.
Specifying a project_id will filter for a certain project."""
return IMPL.volume_get_active_by_window(context, begin, end, project_id)
####################
def volume_type_extra_specs_get(context, volume_type_id):
"""Get all extra specs for a volume type."""
return IMPL.volume_type_extra_specs_get(context, volume_type_id)
def volume_type_extra_specs_delete(context, volume_type_id, key):
"""Delete the given extra specs item."""
IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
def volume_type_extra_specs_update_or_create(context, volume_type_id,
extra_specs):
"""Create or update volume type extra specs. This adds or modifies the
key/value pairs specified in the extra specs dict argument"""
IMPL.volume_type_extra_specs_update_or_create(context, volume_type_id,
extra_specs)
###################
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id"""
return IMPL.s3_image_get(context, image_id)
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid"""
return IMPL.s3_image_get_by_uuid(context, image_uuid)
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid"""
return IMPL.s3_image_create(context, image_uuid)
####################
def sm_backend_conf_create(context, values):
"""Create a new SM Backend Config entry."""
return IMPL.sm_backend_conf_create(context, values)
def sm_backend_conf_update(context, sm_backend_conf_id, values):
"""Update a SM Backend Config entry."""
return IMPL.sm_backend_conf_update(context, sm_backend_conf_id, values)
def sm_backend_conf_delete(context, sm_backend_conf_id):
"""Delete a SM Backend Config."""
return IMPL.sm_backend_conf_delete(context, sm_backend_conf_id)
def sm_backend_conf_get(context, sm_backend_conf_id):
"""Get a specific SM Backend Config."""
return IMPL.sm_backend_conf_get(context, sm_backend_conf_id)
def sm_backend_conf_get_by_sr(context, sr_uuid):
"""Get a specific SM Backend Config."""
return IMPL.sm_backend_conf_get_by_sr(context, sr_uuid)
def sm_backend_conf_get_all(context):
"""Get all SM Backend Configs."""
return IMPL.sm_backend_conf_get_all(context)
####################
def sm_flavor_create(context, values):
"""Create a new SM Flavor entry."""
return IMPL.sm_flavor_create(context, values)
def sm_flavor_update(context, sm_flavor_id, values):
"""Update a SM Flavor entry."""
return IMPL.sm_flavor_update(context, sm_flavor_id, values)
def sm_flavor_delete(context, sm_flavor_id):
"""Delete a SM Flavor."""
return IMPL.sm_flavor_delete(context, sm_flavor_id)
def sm_flavor_get(context, sm_flavor_id):
"""Get a specific SM Flavor."""
return IMPL.sm_flavor_get(context, sm_flavor_id)
def sm_flavor_get_all(context):
"""Get all SM Flavors."""
return IMPL.sm_flavor_get_all(context)
def sm_flavor_get_by_label(context, sm_flavor_label):
"""Get a specific SM Flavor given label."""
return IMPL.sm_flavor_get_by_label(context, sm_flavor_label)
####################
def sm_volume_create(context, values):
    """Create a new SM volume entry."""
    return IMPL.sm_volume_create(context, values)
def sm_volume_update(context, volume_id, values):
    """Update a SM volume entry."""
    return IMPL.sm_volume_update(context, volume_id, values)
def sm_volume_delete(context, volume_id):
    """Delete a SM volume."""
    return IMPL.sm_volume_delete(context, volume_id)
def sm_volume_get(context, volume_id):
    """Get a specific SM volume."""
    return IMPL.sm_volume_get(context, volume_id)
def sm_volume_get_all(context):
    """Get all SM volumes."""
    return IMPL.sm_volume_get_all(context)
####################
def aggregate_create(context, values, metadata=None):
"""Create a new aggregate with metadata."""
return IMPL.aggregate_create(context, values, metadata)
def aggregate_get(context, aggregate_id):
"""Get a specific aggregate by id."""
return IMPL.aggregate_get(context, aggregate_id)
def aggregate_get_by_host(context, host, key=None):
"""Get a list of aggregates that host belongs to"""
return IMPL.aggregate_get_by_host(context, host, key)
def aggregate_metadata_get_by_host(context, host, key=None):
"""Get metadata for all aggregates that host belongs to.
    Returns a dictionary where each value is a set; this covers the case
    where two aggregates have different values for the same key.
    Optional key filter."""
return IMPL.aggregate_metadata_get_by_host(context, host, key)
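# Illustrative return shape (hypothetical keys/values): each metadata key
# maps to the set of values seen across the host's aggregates, e.g.
#   {'availability_zone': set(['az1', 'az2'])}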
def aggregate_update(context, aggregate_id, values):
"""Update the attributes of an aggregates. If values contains a metadata
key, it updates the aggregate metadata too."""
return IMPL.aggregate_update(context, aggregate_id, values)
def aggregate_delete(context, aggregate_id):
"""Delete an aggregate."""
return IMPL.aggregate_delete(context, aggregate_id)
def aggregate_get_all(context):
"""Get all aggregates."""
return IMPL.aggregate_get_all(context)
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False):
"""Add/update metadata. If set_delete=True, it adds only."""
IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete)
def aggregate_metadata_get(context, aggregate_id):
"""Get metadata for the specified aggregate."""
return IMPL.aggregate_metadata_get(context, aggregate_id)
def aggregate_metadata_delete(context, aggregate_id, key):
"""Delete the given metadata key."""
IMPL.aggregate_metadata_delete(context, aggregate_id, key)
def aggregate_host_add(context, aggregate_id, host):
"""Add host to the aggregate."""
IMPL.aggregate_host_add(context, aggregate_id, host)
def aggregate_host_get_all(context, aggregate_id):
"""Get hosts for the specified aggregate."""
return IMPL.aggregate_host_get_all(context, aggregate_id)
def aggregate_host_delete(context, aggregate_id, host):
"""Delete the given host from the aggregate."""
IMPL.aggregate_host_delete(context, aggregate_id, host)
####################
def instance_fault_create(context, values):
"""Create a new Instance Fault."""
return IMPL.instance_fault_create(context, values)
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids)
####################
def get_ec2_instance_id_by_uuid(context, instance_id):
"""Get ec2 id through uuid from instance_id_mappings table"""
return IMPL.get_ec2_instance_id_by_uuid(context, instance_id)
def get_instance_uuid_by_ec2_id(context, ec2_id):
"""Get uuid through ec2 id from instance_id_mappings table"""
return IMPL.get_instance_uuid_by_ec2_id(context, ec2_id)
def ec2_instance_create(context, instance_ec2_id):
"""Create the ec2 id to instance uuid mapping on demand"""
return IMPL.ec2_instance_create(context, instance_ec2_id)
####################
def task_log_end_task(context, task_name,
period_beginning,
period_ending,
host,
errors,
message=None,
session=None):
"""Mark a task as complete for a given host/time period"""
return IMPL.task_log_end_task(context, task_name,
period_beginning,
period_ending,
host, | message,
session)
def task_log_begin_task(context, task_name,
period_beginning,
period_ending,
host,
task_items=None,
message=None,
session=None):
"""Mark a task as started for a given host/time period"""
return IMPL.task_log_begin_task(context, task_name,
period_beginning,
period_ending,
host,
task_items,
message,
session)
def task_log_get_all(context, task_name, period_beginning,
period_ending, host=None, state=None, session=None):
return IMPL.task_log_get_all(context, task_name, period_beginning,
period_ending, host, state, session)
def task_log_get(context, task_name, period_beginning,
period_ending, host, state=None, session=None):
return IMPL.task_log_get(context, task_name, period_beginning,
period_ending, host, state, session) | errors, |
GlobalStyles.ts | import { createGlobalStyle } from 'styled-components';
import { theme } from '../helpers/theme';
const GlobalStyles = createGlobalStyle`
@font-face {
font-family: 'Poppins';
src: url('Poppins-Black.eot'); /* IE9 Compat Modes */
src: url('Poppins-Black.woff2') format('woff2'),
url('Poppins-Black.woff') format('woff'),
url('Poppins-Black.ttf') format('truetype');
font-weight: normal;
}
@font-face {
font-family: 'Poppins';
src: url('Poppins-SemiBold.eot'); /* IE9 Compat Modes */
src: url('Poppins-SemiBold.woff2') format('woff2'),
url('Poppins-SemiBold.woff') format('woff'),
url('Poppins-Black.ttf') format('truetype');
font-weight: 900;
}
@font-face {
font-family: 'Poppins';
src: url('Poppins-SemiBold.eot'); /* IE9 Compat Modes */
src: url('Poppins-SemiBold.woff2') format('woff2'),
url('Poppins-SemiBold.woff') format('woff'),
url('Poppins-SemiBold.ttf') format('truetype');
font-weight: 600;
}
@font-face {
font-family: 'Playfair Display';
src: url('PlayfairDisplay-VariableFont_wght.eot'); /* IE9 Compat Modes */
src: url('PlayfairDisplay-VariableFont_wght.woff2') format('woff2'),
url('PlayfairDisplay-VariableFont_wght.woff') format('woff'),
url('PlayfairDisplay-VariableFont_wght.ttf') format('truetype');
font-weight: 100 1000;
}
:root {
--font-main: 'Poppins', sans-serif;
--font-secondary: 'Playfair Display', serif;
}
*, ::before, ::after {
box-sizing: border-box;
}
html,.html {
font-size: 62.5%;
overflow-x: hidden;
scrollbar-width: thin;
scrollbar-color: ${theme.darkMode.scrollbar()} transparent;
&::before, &::after {
box-sizing: border-box;
} | /* &::-webkit-scrollbar-track {
background: transparent;
}
&::-webkit-scrollbar-thumb {
background-color: ${theme.lightMode.scrollbar()};
border-radius: .5rem;
border: 3px solid transparent;
} */
}
}
}
body {
font-family: var(--font-secondary);
font-size: 1.6rem;
min-height: 100vh;
overflow-x: hidden;
line-height: 1.5;
/* &::-webkit-scrollbar {
width: 1rem;
}
&::-webkit-scrollbar-track {
background: transparent;
}
&::-webkit-scrollbar-thumb {
background-color: ${theme.darkMode.scrollbar()};
border-radius: .5rem;
border: 3px solid transparent;
} */
}
button {
background: transparent;
border: none;
}
button + button, .button + .button {
margin-left: .5rem;
}
h1, h2, h3, h4, h5, h6 {
font-family: var(--font-main);
font-weight: 900;
text-transform: lowercase;
line-height: 1.3;
-webkit-font-smoothing: antialiased;
}
h1 {
font-size: 4rem;
margin-bottom: 0;
}
h2 {
font-size: 3rem;
}
h2, h3, h4, h5, h6 {
border-bottom: 3px dashed;
}
img {
max-width: 100%;
}
a {
cursor: pointer;
display: inline-flex;
align-content: center;
justify-content: center;
text-decoration: none;
&.icon {
font-size: 2.6rem;
color: inherit;
}
}
p {
font-size: 2.2rem;
}
input, textarea {
padding: 2rem 1rem;
border: 1px solid transparent;
&:focus {
outline: none;
}
}
`;
export default GlobalStyles; | &--light-mode {
scrollbar-color: ${theme.lightMode.scrollbar()} transparent;
body { |
as_ref_mut.rs | // AsRef and AsMut allow for cheap reference-to-reference conversions.
// Read more about them at https://doc.rust-lang.org/std/convert/trait.AsRef.html
// and https://doc.rust-lang.org/std/convert/trait.AsMut.html, respectively.
use std::convert::AsRef;
// Obtain the number of bytes (not characters) in the given argument
// Add the AsRef trait appropriately as a trait bound
fn byte_counter<T: AsRef<str>>(arg: T) -> usize {
arg.as_ref().as_bytes().len()
}
// Obtain the number of characters (not bytes) in the given argument
// Add the AsRef trait appropriately as a trait bound
fn char_counter<T: AsRef<str>>(arg: T) -> usize {
arg.as_ref().chars().count()
}
fn main() {
let s = "Café au lait";
println!("{}", char_counter(s));
println!("{}", byte_counter(s));
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn different_counts() {
let s = "Café au lait";
assert_ne!(char_counter(s), byte_counter(s));
}
#[test]
fn same_counts() {
let s = "Cafe au lait";
assert_eq!(char_counter(s), byte_counter(s));
}
#[test]
fn di | {
let s = String::from("Café au lait");
assert_ne!(char_counter(s.clone()), byte_counter(s));
}
#[test]
fn same_counts_using_string() {
let s = String::from("Cafe au lait");
assert_eq!(char_counter(s.clone()), byte_counter(s));
}
}
| fferent_counts_using_string() |
files.js | "use strict";
Object.defineProperty(exports, "__esModule", { value: true });
var files_1 = require("../controllers/files");
var FileRoutes = /** @class */ (function () {
function | () {
}
FileRoutes.prototype.routes = function (app, auth) {
app.route('/file')
.post(auth, function (req, res) {
files_1.uploadFile(req, res);
})
.get(auth, function (req, res) {
files_1.getFiles(res);
});
app.route('/file/:id').get(auth, function (req, res) {
files_1.downloadFileRoute(req.params.id, res);
}).patch(auth, function (req, res) {
files_1.updateFile(req, res);
}).delete(auth, function (req, res) {
files_1.deleteFile(req.params.id, res);
});
};
return FileRoutes;
}());
exports.FileRoutes = FileRoutes;
| FileRoutes |
env.js | cb(null, {read: readF})
} | module.exports.init = (config, cb) => {
const readF = cb => cb(null, process.env)
|
|
spider_main.py | from QiuBaiSpider.pymysqldb_manager import DbManager
from QiuBaiSpider.page_items import PageItems
from QiuBaiSpider.tools import Tools
'''
Crawl jokes from Qiushibaike (糗事百科), strip the DOM tags from the body text, and store the crawled data in a MySQL database.
Extra module:
PyMySQL
'''
class Main(object):
def __init__(self, max_page=1):
self.max_page = max_page
self.db_manager = DbManager()
def run(self):
self.db_manager.connect()
for index in range(self.max_page):
self._page_run(index)
self.db_manager.close()
def _page_run(self, page):
page_dict_items = PageItems(page).get_page_dict_items()
if page_dict_items is None:
return
for dict_item in page_dict_items:
self.db_manager.insertDict(dict_item)
pass
if __name__ == "__main__":
Tools.setup_log_mode(False)
Main(10).run() |
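The crawler above delegates persistence to DbManager from QiuBaiSpider.pymysqldb_manager, which is not shown here; the docstring only names PyMySQL as the extra dependency. A minimal sketch of what an insertDict-style helper could look like with PyMySQL (the table name, columns, and connection settings are assumptions, not taken from the project):

import pymysql

def insert_dict(connection, item, table='jokes'):
    """Insert one crawled item (a dict of column -> value) into MySQL."""
    # Column names come from the dict keys; trusted input is assumed here.
    columns = ', '.join(item.keys())
    placeholders = ', '.join(['%s'] * len(item))
    sql = 'INSERT INTO {} ({}) VALUES ({})'.format(table, columns, placeholders)
    with connection.cursor() as cursor:
        cursor.execute(sql, list(item.values()))
    connection.commit()

# Example wiring (credentials and schema are placeholders):
# conn = pymysql.connect(host='localhost', user='root', password='secret',
#                        db='qiubai', charset='utf8mb4')
# insert_dict(conn, {'author': 'someone', 'content': 'a joke'})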