file_name (string, length 3–137) | prefix (string, length 0–918k) | suffix (string, length 0–962k) | middle (string, length 0–812k)
---|---|---|---
test_metadata.py | #!/usr/bin/env python3
import unittest
from unittest.mock import MagicMock
from tests_functional import DoozerRunnerTestCase
from doozerlib import metadata
class TestMetadata(DoozerRunnerTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def setUp(self):
super().setUp()
def tearDown(self):
super().tearDown()
def | (self):
data_obj = MagicMock(key="cluster-etcd-operator", filename="cluster-etcd-operator.yml", data={"name": "cluster-etcd-operator"})
runtime = MagicMock()
runtime.group_config.urls.cgit = "http://pkgs.devel.redhat.com/cgit"
meta = metadata.Metadata("image", runtime, data_obj)
entry_list = meta.cgit_atom_feed(commit_hash='35ecfa4436139442edc19585c1c81ebfaca18550')
entry = entry_list[0]
self.assertEqual(entry.updated, '2019-07-09T18:01:53+00:00')
self.assertEqual(entry.id, '81597b027a3cb2c38865273b13ab320b361feca6')
entry_list = meta.cgit_atom_feed(branch='rhaos-4.8-rhel-8')
self.assertTrue(len(entry_list) > 1)
self.assertIsNotNone(entry_list[0].id)
if __name__ == "__main__":
unittest.main()
| test_cgit_atom |
models.py | from django.conf import settings
from django.db import models
# Create your models here.
from django.urls import reverse, NoReverseMatch
from django.utils.safestring import mark_safe
from users.models import Achievement
class Redirect(models.Model):
source = models.CharField(max_length=50)
sink = models.CharField(max_length=1024)
permanent = models.BooleanField(default=False)
usages = models.PositiveIntegerField(
default=0, help_text="The number of times that link has been used")
achievement = models.ForeignKey(
Achievement, on_delete=models.CASCADE, null=True, blank=True, default=None)
def __str__(self):
return self.source
def get_absolute_url(self):
return reverse("redirect", kwargs={"source": self.source})
@property
def url(self):
| try:
url_ = settings.DEFAULT_PROTOCOL + settings.PRIMARY_HOST + self.get_absolute_url()
return mark_safe('<a href="{}">{}</a>'.format(url_, url_))
except NoReverseMatch:
return "-" |
|
Output.tsx | import React from 'react';
import { useSelector } from 'react-redux';
import { State } from './reducers';
import SimplePane from './Output/SimplePane';
import * as selectors from './selectors';
const Output: React.FC = () => {
const somethingToShow = useSelector(selectors.getSomethingToShow);
const details = useSelector((state: State) => state.output.compile);
// Run the compiled wasm as a side effect, once per compile result, rather
// than on every render. instantiate() with a BufferSource resolves to a
// { module, instance } pair, so destructure to reach the exports.
React.useEffect(() => {
if (details.body == null) {
return;
}
WebAssembly.instantiate(details.body, { env: { alert: (v: unknown) => alert(v) } })
.then(({ instance }) => {
if (instance.exports.main) {
const main = instance.exports.main as CallableFunction;
main();
}
});
}, [details.body]);
if (!somethingToShow) {
return null;
}
return (
<div className="output">
<div className="output-body">
<SimplePane {...details} kind="execute" />
</div>
</div>
); | };
export default Output; | |
where.go | // Code generated by entc, DO NOT EDIT.
package tag
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"blog/internal/data/ent/predicate"
)
// ID filters vertices based on their ID field.
func ID(id int64) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldID), id))
})
}
// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id int64) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldID), id))
})
}
// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id int64) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldID), id))
})
}
// IDIn applies the In predicate on the ID field.
func IDIn(ids ...int64) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
// If no arguments were provided, append the FALSE constant, since
// we can't apply "IN ()". This makes the predicate always false.
if len(ids) == 0 {
s.Where(sql.False())
return
}
v := make([]interface{}, len(ids))
for i := range v {
v[i] = ids[i]
}
s.Where(sql.In(s.C(FieldID), v...))
})
}
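// Illustration: with arguments, IDIn renders as a normal IN clause, e.g.
// IDIn(1, 2) -> WHERE `id` IN (1, 2); with none, it renders WHERE FALSE
// instead of the invalid `IN ()`.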
// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...int64) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
// If no arguments were provided, append the FALSE constant, since
// we can't apply "IN ()". This makes the predicate always false.
if len(ids) == 0 {
s.Where(sql.False())
return
}
v := make([]interface{}, len(ids))
for i := range v {
v[i] = ids[i]
}
s.Where(sql.NotIn(s.C(FieldID), v...))
})
}
// IDGT applies the GT predicate on the ID field.
func IDGT(id int64) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldID), id))
})
}
// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id int64) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldID), id))
})
}
// IDLT applies the LT predicate on the ID field.
func IDLT(id int64) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldID), id))
})
}
// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id int64) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldID), id))
})
}
// Slug applies equality check predicate on the "slug" field. It's identical to SlugEQ.
func | (v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldSlug), v))
})
}
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldName), v))
})
}
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldCreatedAt), v))
})
}
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
})
}
// SlugEQ applies the EQ predicate on the "slug" field.
func SlugEQ(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldSlug), v))
})
}
// SlugNEQ applies the NEQ predicate on the "slug" field.
func SlugNEQ(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldSlug), v))
})
}
// SlugIn applies the In predicate on the "slug" field.
func SlugIn(vs ...string) predicate.Tag {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Tag(func(s *sql.Selector) {
// If no arguments were provided, append the FALSE constant, since
// we can't apply "IN ()". This makes the predicate always false.
if len(v) == 0 {
s.Where(sql.False())
return
}
s.Where(sql.In(s.C(FieldSlug), v...))
})
}
// SlugNotIn applies the NotIn predicate on the "slug" field.
func SlugNotIn(vs ...string) predicate.Tag {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Tag(func(s *sql.Selector) {
// If no arguments were provided, append the FALSE constant, since
// we can't apply "IN ()". This makes the predicate always false.
if len(v) == 0 {
s.Where(sql.False())
return
}
s.Where(sql.NotIn(s.C(FieldSlug), v...))
})
}
// SlugGT applies the GT predicate on the "slug" field.
func SlugGT(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldSlug), v))
})
}
// SlugGTE applies the GTE predicate on the "slug" field.
func SlugGTE(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldSlug), v))
})
}
// SlugLT applies the LT predicate on the "slug" field.
func SlugLT(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldSlug), v))
})
}
// SlugLTE applies the LTE predicate on the "slug" field.
func SlugLTE(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldSlug), v))
})
}
// SlugContains applies the Contains predicate on the "slug" field.
func SlugContains(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.Contains(s.C(FieldSlug), v))
})
}
// SlugHasPrefix applies the HasPrefix predicate on the "slug" field.
func SlugHasPrefix(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.HasPrefix(s.C(FieldSlug), v))
})
}
// SlugHasSuffix applies the HasSuffix predicate on the "slug" field.
func SlugHasSuffix(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.HasSuffix(s.C(FieldSlug), v))
})
}
// SlugEqualFold applies the EqualFold predicate on the "slug" field.
func SlugEqualFold(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.EqualFold(s.C(FieldSlug), v))
})
}
// SlugContainsFold applies the ContainsFold predicate on the "slug" field.
func SlugContainsFold(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.ContainsFold(s.C(FieldSlug), v))
})
}
// NameEQ applies the EQ predicate on the "name" field.
func NameEQ(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldName), v))
})
}
// NameNEQ applies the NEQ predicate on the "name" field.
func NameNEQ(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldName), v))
})
}
// NameIn applies the In predicate on the "name" field.
func NameIn(vs ...string) predicate.Tag {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Tag(func(s *sql.Selector) {
// If no arguments were provided, append the FALSE constant, since
// we can't apply "IN ()". This makes the predicate always false.
if len(v) == 0 {
s.Where(sql.False())
return
}
s.Where(sql.In(s.C(FieldName), v...))
})
}
// NameNotIn applies the NotIn predicate on the "name" field.
func NameNotIn(vs ...string) predicate.Tag {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Tag(func(s *sql.Selector) {
// If no arguments were provided, append the FALSE constant, since
// we can't apply "IN ()". This makes the predicate always false.
if len(v) == 0 {
s.Where(sql.False())
return
}
s.Where(sql.NotIn(s.C(FieldName), v...))
})
}
// NameGT applies the GT predicate on the "name" field.
func NameGT(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldName), v))
})
}
// NameGTE applies the GTE predicate on the "name" field.
func NameGTE(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldName), v))
})
}
// NameLT applies the LT predicate on the "name" field.
func NameLT(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldName), v))
})
}
// NameLTE applies the LTE predicate on the "name" field.
func NameLTE(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldName), v))
})
}
// NameContains applies the Contains predicate on the "name" field.
func NameContains(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.Contains(s.C(FieldName), v))
})
}
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
func NameHasPrefix(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.HasPrefix(s.C(FieldName), v))
})
}
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
func NameHasSuffix(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.HasSuffix(s.C(FieldName), v))
})
}
// NameEqualFold applies the EqualFold predicate on the "name" field.
func NameEqualFold(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.EqualFold(s.C(FieldName), v))
})
}
// NameContainsFold applies the ContainsFold predicate on the "name" field.
func NameContainsFold(v string) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.ContainsFold(s.C(FieldName), v))
})
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldCreatedAt), v))
})
}
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldCreatedAt), v))
})
}
// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.Tag {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Tag(func(s *sql.Selector) {
// If no arguments were provided, append the FALSE constant, since
// we can't apply "IN ()". This makes the predicate always false.
if len(v) == 0 {
s.Where(sql.False())
return
}
s.Where(sql.In(s.C(FieldCreatedAt), v...))
})
}
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.Tag {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Tag(func(s *sql.Selector) {
// If no arguments were provided, append the FALSE constant, since
// we can't apply "IN ()". This makes the predicate always false.
if len(v) == 0 {
s.Where(sql.False())
return
}
s.Where(sql.NotIn(s.C(FieldCreatedAt), v...))
})
}
// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldCreatedAt), v))
})
}
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldCreatedAt), v))
})
}
// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldCreatedAt), v))
})
}
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldCreatedAt), v))
})
}
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
func UpdatedAtEQ(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.EQ(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
func UpdatedAtNEQ(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.NEQ(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtIn applies the In predicate on the "updated_at" field.
func UpdatedAtIn(vs ...time.Time) predicate.Tag {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Tag(func(s *sql.Selector) {
// If no arguments were provided, append the FALSE constant, since
// we can't apply "IN ()". This makes the predicate always false.
if len(v) == 0 {
s.Where(sql.False())
return
}
s.Where(sql.In(s.C(FieldUpdatedAt), v...))
})
}
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
func UpdatedAtNotIn(vs ...time.Time) predicate.Tag {
v := make([]interface{}, len(vs))
for i := range v {
v[i] = vs[i]
}
return predicate.Tag(func(s *sql.Selector) {
// If no arguments were provided, append the FALSE constant, since
// we can't apply "IN ()". This makes the predicate always false.
if len(v) == 0 {
s.Where(sql.False())
return
}
s.Where(sql.NotIn(s.C(FieldUpdatedAt), v...))
})
}
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
func UpdatedAtGT(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.GT(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
func UpdatedAtGTE(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.GTE(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
func UpdatedAtLT(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.LT(s.C(FieldUpdatedAt), v))
})
}
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
func UpdatedAtLTE(v time.Time) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s.Where(sql.LTE(s.C(FieldUpdatedAt), v))
})
}
// HasPosts applies the HasEdge predicate on the "posts" edge.
func HasPosts() predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(PostsTable, FieldID),
sqlgraph.Edge(sqlgraph.M2M, false, PostsTable, PostsPrimaryKey...),
)
sqlgraph.HasNeighbors(s, step)
})
}
// HasPostsWith applies the HasEdge predicate on the "posts" edge with a given conditions (other predicates).
func HasPostsWith(preds ...predicate.Article) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(PostsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2M, false, PostsTable, PostsPrimaryKey...),
)
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Tag) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for _, p := range predicates {
p(s1)
}
s.Where(s1.P())
})
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Tag) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for i, p := range predicates {
if i > 0 {
s1.Or()
}
p(s1)
}
s.Where(s1.P())
})
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Tag) predicate.Tag {
return predicate.Tag(func(s *sql.Selector) {
p(s.Not())
})
}
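// Usage sketch (assumes the generated ent client and a context from the
// surrounding application): predicates compose before being passed to Where.
//
//	client.Tag.Query().
//		Where(Or(
//			NameHasPrefix("go"),
//			And(SlugContains("rust"), Not(NameEQ("draft"))),
//		)).
//		All(ctx)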
| Slug |
xmesh.py | import time
import math
import bpy
import bmesh
import bgl
from typing import List, Callable
from mathutils import Vector, Matrix, Color, kdtree
from mathutils.bvhtree import BVHTree
from mathutils.geometry import intersect_point_line, intersect_line_plane, intersect_point_tri
from bpy_extras import view3d_utils
from .maths import Point, Normal, XForm, Ray, Vector, Point2D
class XMesh:
def __init__(self, obj, triangulate=True):
self.obj = obj
self.xform = XForm(self.obj.matrix_world)
eme = self.obj.to_mesh(scene=bpy.context.scene, apply_modifiers=True, settings='PREVIEW')
eme.update()
self.bme = bmesh.new()
self.bme.from_mesh(eme)
if triangulate: self.triangulate()
self.dirty()
def dirty(self):
self._dirty = True
def clean(self):
if not self._dirty: return
self.bme.verts.ensure_lookup_table()
self.bme.edges.ensure_lookup_table()
self.bme.faces.ensure_lookup_table()
self._bvh = BVHTree.FromBMesh(self.bme)
self._dirty = False
###################################################################################
# properties
###################################################################################
@property
def bvh(self):
|
###################################################################################
# simple manipulations
###################################################################################
def triangulate(self):
faces = [face for face in self.bme.faces if len(face.verts) != 3]
#print('%d non-triangles' % len(faces))
bmesh.ops.triangulate(self.bme, faces=faces)
self.dirty()
###################################################################################
# ray casting functions
###################################################################################
def raycast(self, ray:Ray):
ray_local = self.xform.w2l_ray(ray)
p,n,i,d = self.bvh.ray_cast(ray_local.o, ray_local.d, ray_local.max)
if p is None: return (None,None,None,None)
if not self.get_bbox().Point_within(p, margin=1):
return (None,None,None,None)
p_w,n_w = self.xform.l2w_point(p), self.xform.l2w_normal(n)
d_w = (ray.o - p_w).length
return (p_w,n_w,i,d_w)
def raycast_all(self, ray:Ray):
l2w_point,l2w_normal = self.xform.l2w_point,self.xform.l2w_normal
ray_local = self.xform.w2l_ray(ray)
hits = []
origin,direction,maxdist = ray_local.o,ray_local.d,ray_local.max
dist = 0
while True:
p,n,i,d = self.bvh.ray_cast(origin, direction, maxdist)
if not p: break
p,n = l2w_point(p),l2w_normal(n)
d = (origin - p).length
dist += d
hits += [(p,n,i,dist)]
origin += direction * (d + 0.00001)
maxdist -= d
return hits
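# Usage sketch (hypothetical caller): collect every hit along a world-space
# ray. The 0.00001 nudge above moves the origin just past each hit so the
# same face is not reported twice.
#
#   xm = XMesh(bpy.context.active_object)
#   for pt, normal, face_index, dist in xm.raycast_all(ray):
#       print(face_index, dist)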
def raycast_hit(self, ray:Ray):
ray_local = self.xform.w2l_ray(ray)
p,n,i,d = self.bvh.ray_cast(ray_local.o, ray_local.d, ray_local.max)
return p is not None
###################################################################################
# nearest functions
###################################################################################
def nearest(self, point:Point, max_dist=float('inf')): #sys.float_info.max):
point_local = self.xform.w2l_point(point)
p,n,i,_ = self.bvh.find_nearest(point_local, max_dist)
if p is None: return (None,None,None,None)
p,n = self.xform.l2w_point(p), self.xform.l2w_normal(n)
d = (point - p).length
return (p,n,i,d)
def nearest_bmvert_Point(self, point:Point, verts=None):
if verts is None:
verts = self.bme.verts
else:
verts = [self._unwrap(bmv) for bmv in verts]
point_local = self.xform.w2l_point(point)
bv,bd = None,None
for bmv in verts:
d3d = (bmv.co - point_local).length
if bv is None or d3d < bd: bv,bd = bmv,d3d
bmv_world = self.xform.l2w_point(bv.co)
return (self._wrap_bmvert(bv),(point-bmv_world).length)
def nearest_bmverts_Point(self, point:Point, dist3d:float):
nearest = []
for bmv in self.bme.verts:
bmv_world = self.xform.l2w_point(bmv.co)
d3d = (bmv_world - point).length
if d3d > dist3d: continue
nearest += [(self._wrap_bmvert(bmv), d3d)]
return nearest
def nearest_bmedge_Point(self, point:Point, edges=None):
if edges is None:
edges = self.bme.edges
else:
edges = [self._unwrap(bme) for bme in edges]
l2w_point = self.xform.l2w_point
be,bd,bpp = None,None,None
for bme in edges:
bmv0,bmv1 = l2w_point(bme.verts[0].co), l2w_point(bme.verts[1].co)
diff = bmv1 - bmv0
l = diff.length
d = diff / l
pp = bmv0 + d * max(0, min(l, (point - bmv0).dot(d)))
dist = (point - pp).length
if be is None or dist < bd: be,bd,bpp = bme,dist,pp
if be is None: return (None,None)
return (self._wrap_bmedge(be), (point-self.xform.l2w_point(bpp)).length)
def nearest_bmedges_Point(self, point:Point, dist3d:float):
l2w_point = self.xform.l2w_point
nearest = []
for bme in self.bme.edges:
bmv0,bmv1 = l2w_point(bme.verts[0].co), l2w_point(bme.verts[1].co)
diff = bmv1 - bmv0
l = diff.length
d = diff / l
pp = bmv0 + d * max(0, min(l, (point - bmv0).dot(d)))
dist = (point - pp).length
if dist > dist3d: continue
nearest += [(self._wrap_bmedge(bme), dist)]
return nearest
def nearest2D_bmverts_Point2D(self, xy:Point2D, dist2D:float, Point_to_Point2D, verts=None):
# TODO: compute distance from camera to point
# TODO: sort points based on 3d distance
if verts is None:
verts = self.bme.verts
else:
verts = [self._unwrap(bmv) for bmv in verts]
nearest = []
for bmv in verts:
p2d = Point_to_Point2D(self.xform.l2w_point(bmv.co))
if p2d is None: continue
if (p2d - xy).length > dist2D: continue
d3d = 0
nearest += [(self._wrap_bmvert(bmv), d3d)]
return nearest
def nearest2D_bmvert_Point2D(self, xy:Point2D, Point_to_Point2D, verts=None, max_dist=None):
if not max_dist or max_dist < 0: max_dist = float('inf')
# TODO: compute distance from camera to point
# TODO: sort points based on 3d distance
if verts is None:
verts = self.bme.verts
else:
verts = [self._unwrap(bmv) for bmv in verts]
l2w_point = self.xform.l2w_point
bv,bd = None,None
for bmv in verts:
p2d = Point_to_Point2D(l2w_point(bmv.co))
if p2d is None: continue
d2d = (xy - p2d).length
if d2d > max_dist: continue
if bv is None or d2d < bd: bv,bd = bmv,d2d
if bv is None: return (None,None)
return (self._wrap_bmvert(bv),bd)
def nearest2D_bmedges_Point2D(self, xy:Point2D, dist2D:float, Point_to_Point2D, edges=None, shorten=0.01):
# TODO: compute distance from camera to point
# TODO: sort points based on 3d distance
edges = self.bme.edges if edges is None else [self._unwrap(bme) for bme in edges]
l2w_point = self.xform.l2w_point
nearest = []
dist2D2 = dist2D**2
s0,s1 = shorten/2,1-shorten/2
proj = lambda bmv: Point_to_Point2D(l2w_point(bmv.co))
for bme in edges:
v0,v1 = proj(bme.verts[0]),proj(bme.verts[1])
l = v0.distance_to(v1)
if l == 0:
pp = v0
else:
d = (v1 - v0) / l
pp = v0 + d * max(l*s0, min(l*s1, d.dot(xy-v0)))
dist2 = pp.distance_squared_to(xy)
if dist2 > dist2D2: continue
nearest.append((self._wrap_bmedge(bme), math.sqrt(dist2)))
return nearest
def nearest2D_bmedge_Point2D(self, xy:Point2D, Point_to_Point2D, edges=None, shorten=0.01, max_dist=None):
if not max_dist or max_dist < 0: max_dist = float('inf')
if edges is None:
edges = self.bme.edges
else:
edges = [self._unwrap(bme) for bme in edges]
l2w_point = self.xform.l2w_point
be,bd,bpp = None,None,None
for bme in edges:
bmv0 = Point_to_Point2D(l2w_point(bme.verts[0].co))
bmv1 = Point_to_Point2D(l2w_point(bme.verts[1].co))
diff = bmv1 - bmv0
l = diff.length
if l == 0:
dist = (xy - bmv0).length
else:
d = diff / l
margin = l * shorten / 2
pp = bmv0 + d * max(margin, min(l-margin, (xy - bmv0).dot(d)))
dist = (xy - pp).length
if dist > max_dist: continue
if be is None or dist < bd: be,bd,bpp = bme,dist,pp
if be is None: return (None,None)
return (self._wrap_bmedge(be), (xy-bpp).length)
def nearest2D_bmfaces_Point2D(self, xy:Point2D, Point_to_Point2D, faces=None):
# TODO: compute distance from camera to point
# TODO: sort points based on 3d distance
if faces is None:
faces = self.bme.faces
else:
faces = [self._unwrap(bmf) for bmf in faces]
nearest = []
for bmf in faces:
pts = [Point_to_Point2D(self.xform.l2w_point(bmv.co)) for bmv in bmf.verts]
pts = [pt for pt in pts if pt]
pt0 = pts[0]
# TODO: compute the actual 2D distance; 0.0 is a placeholder
for pt1,pt2 in zip(pts[1:-1],pts[2:]):
if intersect_point_tri(xy, pt0, pt1, pt2):
nearest += [(self._wrap_bmface(bmf), 0.0)]
#p2d = Point_to_Point2D(self.xform.l2w_point(bmv.co))
#d2d = (xy - p2d).length
#if p2d is None: continue
#if bv is None or d2d < bd: bv,bd = bmv,d2d
#if bv is None: return (None,None)
#return (self._wrap_bmvert(bv),bd)
return nearest
def nearest2D_bmface_Point2D(self, xy:Point2D, Point_to_Point2D, faces=None):
# TODO: compute distance from camera to point
# TODO: sort points based on 3d distance
if faces is None:
faces = self.bme.faces
else:
faces = [self._unwrap(bmf) for bmf in faces]
bv,bd = None,None
for bmf in faces:
pts = [Point_to_Point2D(self.xform.l2w_point(bmv.co)) for bmv in bmf.verts]
pts = [pt for pt in pts if pt]
pt0 = pts[0]
for pt1,pt2 in zip(pts[1:-1],pts[2:]):
if intersect_point_tri(xy, pt0, pt1, pt2):
return self._wrap_bmface(bmf)
#p2d = Point_to_Point2D(self.xform.l2w_point(bmv.co))
#d2d = (xy - p2d).length
#if p2d is None: continue
#if bv is None or d2d < bd: bv,bd = bmv,d2d
#if bv is None: return (None,None)
#return (self._wrap_bmvert(bv),bd)
return None
##########################################################
def _visible_verts(self, is_visible:Callable[[Point,Normal], bool]):
l2w_point, l2w_normal = self.xform.l2w_point, self.xform.l2w_normal
#is_vis = lambda bmv: is_visible(l2w_point(bmv.co), l2w_normal(bmv.normal))
is_vis = lambda bmv: is_visible(l2w_point(bmv.co), None)
return { bmv for bmv in self.bme.verts if is_vis(bmv) }
def _visible_edges(self, is_visible, bmvs=None):
if bmvs is None: bmvs = self._visible_verts(is_visible)
return { bme for bme in self.bme.edges if all(bmv in bmvs for bmv in bme.verts) }
def _visible_faces(self, is_visible, bmvs=None):
if bmvs is None: bmvs = self._visible_verts(is_visible)
return { bmf for bmf in self.bme.faces if all(bmv in bmvs for bmv in bmf.verts) }
def visible_verts(self, is_visible):
return { self._wrap_bmvert(bmv) for bmv in self._visible_verts(is_visible) }
def visible_edges(self, is_visible, verts=None):
bmvs = None if verts is None else { self._unwrap(bmv) for bmv in verts }
return { self._wrap_bmedge(bme) for bme in self._visible_edges(is_visible, bmvs=bmvs) }
def visible_faces(self, is_visible, verts=None):
bmvs = None if verts is None else { self._unwrap(bmv) for bmv in verts }
bmfs = { self._wrap_bmface(bmf) for bmf in self._visible_faces(is_visible, bmvs=bmvs) }
#print('seeing %d / %d faces' % (len(bmfs), len(self.bme.faces)))
return bmfs
| self.clean()
return self._bvh |
mod.rs | use serde::Serialize;
/// Serializes `value` to JSON.
/// `pretty_print: false` = inline (single line)
/// `pretty_print: true` = pretty-printed / multiline
pub fn jsonify<T>(value: T, pretty_print: bool) -> String
where
T: Serialize,
{
let fj = if pretty_print {
serde_json::to_string_pretty
} else {
serde_json::to_string
};
match fj(&value) {
Ok(json) => json,
Err(_) => r#"{"error": "encountered error serializing value"}"#.to_owned(),
}
}
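// Usage sketch: any `Serialize` value works, not just structs.
//
//     let inline = jsonify(vec![1, 2, 3], false);  // "[1,2,3]"
//     let pretty = jsonify(vec![1, 2, 3], true);   // spans multiple lines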
#[cfg(test)]
mod tests {
use super::*;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, Eq, Serialize)]
struct SerMock {
foo: String,
bar: u32,
}
impl PartialEq for SerMock {
fn eq(&self, other: &SerMock) -> bool {
self.foo.eq(&other.foo) && self.bar == other.bar
}
}
#[test]
fn | () {
let sermock = SerMock {
foo: "foo".to_string(),
bar: 1,
};
let json = jsonify(sermock, false);
assert_eq!(json, r#"{"foo":"foo","bar":1}"#, "json expected to match");
}
#[test]
fn should_ser_to_pretty_json() {
let sermock = SerMock {
foo: "foo".to_string(),
bar: 1,
};
let json = jsonify(sermock, true);
assert!(json.contains('\n'), "json expected to be multiline");
}
#[test]
fn should_deser_from_json() {
let sermock = SerMock {
foo: "foo".to_string(),
bar: 1,
};
let json = jsonify(&sermock, false);
let sermock_clone: SerMock = serde_json::from_str(&json).expect("should deser");
assert!(
sermock.eq(&sermock_clone),
"instances should contain the same data"
);
}
}
| should_ser_to_json |
response.py | import io
import json as _json
import logging
import zlib
from contextlib import contextmanager
from http.client import HTTPMessage as _HttplibHTTPMessage
from http.client import HTTPResponse as _HttplibHTTPResponse
from socket import timeout as SocketTimeout
from typing import (
TYPE_CHECKING,
Any,
Generator,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
try:
try:
import brotlicffi as brotli # type: ignore[import]
except ImportError:
import brotli # type: ignore[import]
except ImportError:
brotli = None
from ._collections import HTTPHeaderDict
from .connection import _TYPE_BODY, BaseSSLError, HTTPConnection, HTTPException
from .exceptions import (
BodyNotHttplibCompatible,
DecodeError,
HTTPError,
IncompleteRead,
InvalidChunkLength,
InvalidHeader,
ProtocolError,
ReadTimeoutError,
ResponseNotChunked,
SSLError,
)
from .util.response import is_fp_closed, is_response_to_head
from .util.retry import Retry
if TYPE_CHECKING:
from typing_extensions import Literal
from .connectionpool import HTTPConnectionPool
log = logging.getLogger(__name__)
class ContentDecoder:
def decompress(self, data: bytes) -> bytes:
raise NotImplementedError()
def flush(self) -> bytes:
raise NotImplementedError()
class DeflateDecoder(ContentDecoder):
def __init__(self) -> None:
self._first_try = True
self._data = b""
self._obj = zlib.decompressobj()
def decompress(self, data: bytes) -> bytes:
if not data:
return data
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
decompressed = self._obj.decompress(data)
if decompressed:
self._first_try = False
self._data = None # type: ignore[assignment]
return decompressed
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None # type: ignore[assignment]
def flush(self) -> bytes:
return self._obj.flush()
class GzipDecoderState:
FIRST_MEMBER = 0
OTHER_MEMBERS = 1
SWALLOW_DATA = 2
class GzipDecoder(ContentDecoder):
def __init__(self) -> None:
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
self._state = GzipDecoderState.FIRST_MEMBER
def decompress(self, data: bytes) -> bytes:
ret = bytearray()
if self._state == GzipDecoderState.SWALLOW_DATA or not data:
return bytes(ret)
while True:
try:
ret += self._obj.decompress(data)
except zlib.error:
previous_state = self._state
# Ignore data after the first error
self._state = GzipDecoderState.SWALLOW_DATA
if previous_state == GzipDecoderState.OTHER_MEMBERS:
# Allow trailing garbage acceptable in other gzip clients
return bytes(ret)
raise
data = self._obj.unused_data
if not data:
return bytes(ret)
self._state = GzipDecoderState.OTHER_MEMBERS
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def flush(self) -> bytes:
return self._obj.flush()
if brotli is not None:
class BrotliDecoder(ContentDecoder):
# Supports both 'brotlipy' and 'Brotli' packages
# since they share an import name. The top branches
# are for 'brotlipy' and bottom branches for 'Brotli'
def __init__(self) -> None:
self._obj = brotli.Decompressor()
if hasattr(self._obj, "decompress"):
setattr(self, "decompress", self._obj.decompress)
else:
setattr(self, "decompress", self._obj.process)
def flush(self) -> bytes:
if hasattr(self._obj, "flush"):
return self._obj.flush() # type: ignore[no-any-return]
return b""
class MultiDecoder(ContentDecoder):
"""
From RFC7231:
If one or more encodings have been applied to a representation, the
sender that applied the encodings MUST generate a Content-Encoding
header field that lists the content codings in the order in which
they were applied.
"""
def __init__(self, modes: str) -> None:
self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
def flush(self) -> bytes:
return self._decoders[0].flush()
def decompress(self, data: bytes) -> bytes:
for d in reversed(self._decoders):
data = d.decompress(data)
return data
def _get_decoder(mode: str) -> ContentDecoder:
if "," in mode:
return MultiDecoder(mode)
if mode == "gzip":
return GzipDecoder()
if brotli is not None and mode == "br":
return BrotliDecoder()
return DeflateDecoder()
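# Sketch: a stacked header like "gzip, br" yields a MultiDecoder that undoes
# the codings in reverse order of application (br first, then gzip), per the
# RFC 7231 rule quoted above. (`payload` is a made-up variable.)
#
#   decoder = _get_decoder("gzip, br")
#   plaintext = decoder.decompress(payload) + decoder.flush()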
class BaseHTTPResponse(io.IOBase):
CONTENT_DECODERS = ["gzip", "deflate"]
if brotli is not None:
CONTENT_DECODERS += ["br"]
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
DECODER_ERROR_CLASSES: Tuple[Type[Exception], ...] = (IOError, zlib.error)
if brotli is not None:
DECODER_ERROR_CLASSES += (brotli.error,)
def __init__(
self,
*,
headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,
status: int,
version: int,
reason: Optional[str],
decode_content: bool,
request_url: Optional[str],
retries: Optional[Retry] = None,
) -> None:
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]
self.status = status
self.version = version
self.reason = reason
self.decode_content = decode_content
self.request_url: Optional[str]
self.retries = retries
self.chunked = False
tr_enc = self.headers.get("transfer-encoding", "").lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
self._decoder: Optional[ContentDecoder] = None
def get_redirect_location(self) -> Union[Optional[str], "Literal[False]"]:
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get("location")
return False
@property
def data(self) -> bytes:
raise NotImplementedError()
def json(self) -> Any:
"""
Parses the body of the HTTP response as JSON.
To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder.
This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`.
Read more :ref:`here <json>`.
"""
data = self.data.decode("utf-8")
return _json.loads(data) |
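# As the docstring suggests, a custom JSON decoder can be pointed at
# ``HTTPResponse.data`` directly (sketch; ``resp`` is an assumed response):
#
#   import json
#   payload = json.loads(resp.data.decode("utf-8"), parse_float=str)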
@property
def closed(self) -> bool:
raise NotImplementedError()
@property
def connection(self) -> Optional[HTTPConnection]:
raise NotImplementedError()
def stream(
self, amt: Optional[int] = 2 ** 16, decode_content: Optional[bool] = None
) -> Iterator[bytes]:
raise NotImplementedError()
def read(
self,
amt: Optional[int] = None,
decode_content: Optional[bool] = None,
cache_content: bool = False,
) -> bytes:
raise NotImplementedError()
def read_chunked(
self,
amt: Optional[int] = None,
decode_content: Optional[bool] = None,
) -> Iterator[bytes]:
raise NotImplementedError()
def release_conn(self) -> None:
raise NotImplementedError()
def drain_conn(self) -> None:
raise NotImplementedError()
def close(self) -> None:
raise NotImplementedError()
def _init_decoder(self) -> None:
"""
Set-up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get("content-encoding", "").lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
elif "," in content_encoding:
encodings = [
e.strip()
for e in content_encoding.split(",")
if e.strip() in self.CONTENT_DECODERS
]
if encodings:
self._decoder = _get_decoder(content_encoding)
def _decode(
self, data: bytes, decode_content: Optional[bool], flush_decoder: bool
) -> bytes:
"""
Decode the data passed in and potentially flush the decoder.
"""
if not decode_content:
return data
try:
if self._decoder:
data = self._decoder.decompress(data)
except self.DECODER_ERROR_CLASSES as e:
content_encoding = self.headers.get("content-encoding", "").lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding,
e,
) from e
if flush_decoder:
data += self._flush_decoder()
return data
def _flush_decoder(self) -> bytes:
"""
Flushes the decoder. Should only be called if the decoder is actually
being used.
"""
if self._decoder:
return self._decoder.decompress(b"") + self._decoder.flush()
return b""
# Compatibility methods for `io` module
def readable(self) -> bool:
return True
def readinto(self, b: bytearray) -> int:
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[: len(temp)] = temp
return len(temp)
# Compatibility methods for http.client.HTTPResponse
def getheaders(self) -> List[Tuple[str, str]]:
return list(self.headers.items())
def getheader(self, name: str, default: Optional[str] = None) -> Optional[str]:
return self.headers.get(name, default)
# Compatibility method for http.cookiejar
def info(self) -> HTTPHeaderDict:
return self.headers
def geturl(self) -> Optional[Union[str, "Literal[False]"]]:
return self.url
class HTTPResponse(BaseHTTPResponse):
"""
HTTP Response container.
Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param original_response:
When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse`
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
:param retries:
The retries contains the last :class:`~urllib3.util.retry.Retry` that
was used during the request.
:param enforce_content_length:
Enforce content length checking. Body returned by server must match
value of Content-Length header, if present. Otherwise, raise error.
"""
def __init__(
self,
body: _TYPE_BODY = "",
headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,
status: int = 0,
version: int = 0,
reason: Optional[str] = None,
preload_content: bool = True,
decode_content: bool = True,
original_response: Optional[_HttplibHTTPResponse] = None,
pool: Optional["HTTPConnectionPool"] = None,
connection: Optional[HTTPConnection] = None,
msg: Optional[_HttplibHTTPMessage] = None,
retries: Optional[Retry] = None,
enforce_content_length: bool = False,
request_method: Optional[str] = None,
request_url: Optional[str] = None,
auto_close: bool = True,
) -> None:
super().__init__(
headers=headers,
status=status,
version=version,
reason=reason,
decode_content=decode_content,
request_url=request_url,
retries=retries,
)
self.enforce_content_length = enforce_content_length
self.auto_close = auto_close
self._body = None
self._fp: Optional[_HttplibHTTPResponse] = None
self._original_response = original_response
self._fp_bytes_read = 0
self.msg = msg
if self.retries is not None and self.retries.history:
self._request_url = self.retries.history[-1].redirect_location
else:
self._request_url = request_url
if body and isinstance(body, (str, bytes)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, "read"):
self._fp = body # type: ignore[assignment]
# Are we using the chunked-style of transfer encoding?
self.chunk_left: Optional[int] = None
# Determine length of response
self.length_remaining = self._init_length(request_method)
# If requested, preload the body.
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def release_conn(self) -> None:
if not self._pool or not self._connection:
return None
self._pool._put_conn(self._connection)
self._connection = None
def drain_conn(self) -> None:
"""
Read and discard any remaining HTTP response data in the response connection.
Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
"""
try:
self.read()
except (HTTPError, OSError, BaseSSLError, HTTPException):
pass
@property
def data(self) -> bytes:
# For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body # type: ignore[return-value]
if self._fp:
return self.read(cache_content=True)
return None # type: ignore[return-value]
@property
def connection(self) -> Optional[HTTPConnection]:
return self._connection
def isclosed(self) -> bool:
return is_fp_closed(self._fp)
def tell(self) -> int:
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:`urllib3.response.HTTPResponse.read`
if bytes are encoded on the wire (e.g, compressed).
"""
return self._fp_bytes_read
def _init_length(self, request_method: Optional[str]) -> Optional[int]:
"""
Set initial length value for Response content if available.
"""
length: Optional[int]
content_length: Optional[str] = self.headers.get("content-length")
if content_length is not None:
if self.chunked:
# This Response will fail with an IncompleteRead if it can't be
# received as chunked. This method falls back to attempt reading
# the response before raising an exception.
log.warning(
"Received response with both Content-Length and "
"Transfer-Encoding set. This is expressly forbidden "
"by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
"attempting to process response as Transfer-Encoding: "
"chunked."
)
return None
try:
# RFC 7230 section 3.3.2 specifies multiple content lengths can
# be sent in a single Content-Length header
# (e.g. Content-Length: 42, 42). This line ensures the values
# are all valid ints and that as long as the `set` length is 1,
# all values are the same. Otherwise, the header is invalid.
lengths = {int(val) for val in content_length.split(",")}
if len(lengths) > 1:
raise InvalidHeader(
"Content-Length contained multiple "
"unmatching values (%s)" % content_length
)
length = lengths.pop()
except ValueError:
length = None
else:
if length < 0:
length = None
else: # if content_length is None
length = None
# Convert status to int for comparison
# In some cases, httplib returns a status of "_UNKNOWN"
try:
status = int(self.status)
except ValueError:
status = 0
# Check for responses that shouldn't include a body
if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD":
length = 0
return length
@contextmanager
def _error_catcher(self) -> Generator[None, None, None]:
"""
Catch low-level python exceptions, instead re-raising urllib3
variants, so that low-level exceptions are not leaked in the
high-level api.
On exit, release the connection back to the pool.
"""
clean_exit = False
try:
try:
yield
except SocketTimeout as e:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type]
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if "read operation timed out" not in str(e):
# SSL errors related to framing/MAC get wrapped and reraised here
raise SSLError(e) from e
raise ReadTimeoutError(self._pool, None, "Read timed out.") from e # type: ignore[arg-type]
except (HTTPException, OSError) as e:
# This includes IncompleteRead.
raise ProtocolError(f"Connection broken: {e!r}", e) from e
# If no exception is thrown, we should avoid cleaning up
# unnecessarily.
clean_exit = True
finally:
# If we didn't terminate cleanly, we need to throw away our
# connection.
if not clean_exit:
# The response may not be closed but we're not going to use it
# anymore so close it now to ensure that the connection is
# released back to the pool.
if self._original_response:
self._original_response.close()
# Closing the response may not actually be sufficient to close
# everything, so if we have a hold of the connection close that
# too.
if self._connection:
self._connection.close()
# If we hold the original response but it's closed now, we should
# return the connection back to the pool.
if self._original_response and self._original_response.isclosed():
self.release_conn()
def read(
self,
amt: Optional[int] = None,
decode_content: Optional[bool] = None,
cache_content: bool = False,
) -> bytes:
"""
Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return None # type: ignore[return-value]
flush_decoder = False
fp_closed = getattr(self._fp, "closed", False)
with self._error_catcher():
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read() if not fp_closed else b""
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt) if not fp_closed else b""
if (
amt != 0 and not data
): # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
if (
self.enforce_content_length
and self.length_remaining is not None
and self.length_remaining != 0
):
# This is an edge case that httplib failed to cover due
# to concerns of backward compatibility. We're
# addressing it here to make sure IncompleteRead is
# raised during streaming, so all calls with incorrect
# Content-Length are caught.
raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
if data:
self._fp_bytes_read += len(data)
if self.length_remaining is not None:
self.length_remaining -= len(data)
data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
return data
def stream(
self, amt: Optional[int] = 2 ** 16, decode_content: Optional[bool] = None
) -> Generator[bytes, None, None]:
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
``amt`` bytes of data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
if self.chunked and self.supports_chunked_reads():
yield from self.read_chunked(amt, decode_content=decode_content)
else:
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
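# Streaming usage sketch (assumes a response created with
# ``preload_content=False``): read fixed-size chunks without buffering the
# whole body, then release the connection back to the pool.
#
#   with open("download.bin", "wb") as fh:
#       for chunk in resp.stream(2 ** 14):
#           fh.write(chunk)
#   resp.release_conn()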
@classmethod
def from_httplib(
ResponseCls: Type["HTTPResponse"], r: _HttplibHTTPResponse, **response_kw: Any
) -> "HTTPResponse":
"""
Given an :class:`http.client.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = r.msg
if not isinstance(headers, HTTPHeaderDict):
headers = HTTPHeaderDict(headers.items()) # type: ignore[assignment]
resp = ResponseCls(
body=r,
headers=headers, # type: ignore[arg-type]
status=r.status,
version=r.version,
reason=r.reason,
original_response=r,
**response_kw,
)
return resp
# Overrides from io.IOBase
def close(self) -> None:
if not self.closed and self._fp:
self._fp.close()
if self._connection:
self._connection.close()
if not self.auto_close:
io.IOBase.close(self)
@property
def closed(self) -> bool:
if not self.auto_close:
return io.IOBase.closed.__get__(self) # type: ignore[no-any-return, attr-defined]
elif self._fp is None:
return True
elif hasattr(self._fp, "isclosed"):
return self._fp.isclosed()
elif hasattr(self._fp, "closed"):
return self._fp.closed
else:
return True
def fileno(self) -> int:
if self._fp is None:
raise OSError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise OSError(
"The file-like object this HTTPResponse is wrapped "
"around has no file descriptor"
)
def flush(self) -> None:
if (
self._fp is not None
and hasattr(self._fp, "flush")
and not getattr(self._fp, "closed", False)
):
return self._fp.flush()
def supports_chunked_reads(self) -> bool:
"""
Checks if the underlying file-like object looks like a
:class:`http.client.HTTPResponse` object. We do this by testing for
the fp attribute. If it is present we assume it returns raw chunks as
processed by read_chunked().
"""
return hasattr(self._fp, "fp")
def _update_chunk_length(self) -> None:
# First, we'll figure out length of a chunk and then
# we'll try to read it from socket.
if self.chunk_left is not None:
return None
line = self._fp.fp.readline() # type: ignore[union-attr]
line = line.split(b";", 1)[0]
try:
self.chunk_left = int(line, 16)
except ValueError:
# Invalid chunked protocol response, abort.
self.close()
raise InvalidChunkLength(self, line) from None
def _handle_chunk(self, amt: Optional[int]) -> bytes:
returned_chunk = None
if amt is None:
chunk = self._fp._safe_read(self.chunk_left) # type: ignore[union-attr]
returned_chunk = chunk
self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
self.chunk_left = None
elif self.chunk_left is not None and amt < self.chunk_left:
value = self._fp._safe_read(amt) # type: ignore[union-attr]
self.chunk_left = self.chunk_left - amt
returned_chunk = value
elif amt == self.chunk_left:
value = self._fp._safe_read(amt) # type: ignore[union-attr]
self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
self.chunk_left = None
returned_chunk = value
else: # amt > self.chunk_left
returned_chunk = self._fp._safe_read(self.chunk_left) # type: ignore[union-attr]
self._fp._safe_read(2) # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
self.chunk_left = None
return returned_chunk # type: ignore[no-any-return]
def read_chunked(
self, amt: Optional[int] = None, decode_content: Optional[bool] = None
) -> Generator[bytes, None, None]:
"""
Similar to :meth:`HTTPResponse.read`, but with an additional
parameter: ``decode_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
self._init_decoder()
# FIXME: Rewrite this method and make it a class with a better structured logic.
if not self.chunked:
raise ResponseNotChunked(
"Response is not chunked. "
"Header 'transfer-encoding: chunked' is missing."
)
if not self.supports_chunked_reads():
raise BodyNotHttplibCompatible(
"Body should be http.client.HTTPResponse like. "
"It should have have an fp attribute which returns raw chunks."
)
with self._error_catcher():
# Don't bother reading the body of a HEAD request.
if self._original_response and is_response_to_head(self._original_response):
self._original_response.close()
return None
# If a response is already read and closed
# then return immediately.
if self._fp.fp is None: # type: ignore[union-attr]
return None
while True:
self._update_chunk_length()
if self.chunk_left == 0:
break
chunk = self._handle_chunk(amt)
decoded = self._decode(
chunk, decode_content=decode_content, flush_decoder=False
)
if decoded:
yield decoded
if decode_content:
# On CPython and PyPy, we should never need to flush the
# decoder. However, on Jython we *might* need to, so
# lets defensively do it anyway.
decoded = self._flush_decoder()
if decoded: # Platform-specific: Jython.
yield decoded
# Chunk content ends with \r\n: discard it.
while self._fp is not None:
line = self._fp.fp.readline()
if not line:
# Some sites may not end with '\r\n'.
break
if line == b"\r\n":
break
# We read everything; close the "file".
if self._original_response:
self._original_response.close()
@property
def url(self) -> Optional[str]:
"""
Returns the URL that was the source of this response.
If the request that generated this response redirected, this method
will return the final redirect location.
"""
return self._request_url
@url.setter
def url(self, url: str) -> None:
self._request_url = url
def __iter__(self) -> Iterator[bytes]:
buffer: List[bytes] = []
for chunk in self.stream(decode_content=True):
if b"\n" in chunk:
chunks = chunk.split(b"\n")
yield b"".join(buffer) + chunks[0] + b"\n"
for x in chunks[1:-1]:
yield x + b"\n"
if chunks[-1]:
buffer = [chunks[-1]]
else:
buffer = []
else:
buffer.append(chunk)
if buffer:
yield b"".join(buffer) |
@property
def url(self) -> Optional[str]:
raise NotImplementedError() |
jsonrpc.go | package odoo
import (
"bytes"
"encoding/json"
"fmt"
"io"
"github.com/google/uuid"
)
// JSONRPCRequest represents a generic json-rpc request
type JSONRPCRequest struct {
// ID should be a randomly generated value, either as a string or int.
// The server will return this value in the response.
ID string `json:"id,omitempty"`
// JSONRPC is always set to "2.0"
JSONRPC string `json:"jsonrpc,omitempty"`
// Method to call, usually just "call"
Method string `json:"method,omitempty"`
// Params includes the actual request payload.
Params interface{} `json:"params,omitempty"`
}
var uuidGenerator = uuid.NewString
// NewJSONRPCRequest returns a JSON RPC request with its protocol fields populated:
//
// * "id" will be set to a random UUID
// * "jsonrpc" will be set to "2.0"
// * "method" will be set to "call"
// * "params" will be set to whatever was passed in
func NewJSONRPCRequest(params interface{}) *JSONRPCRequest {
return &JSONRPCRequest{
ID: uuidGenerator(),
JSONRPC: "2.0",
Method: "call",
Params: params,
}
}
// Encode encodes the request as JSON in a buffer and returns the buffer.
func (r *JSONRPCRequest) Encode() (io.Reader, error) {
buf := new(bytes.Buffer)
if err := json.NewEncoder(buf).Encode(r); err != nil {
return nil, err
}
return buf, nil
}
// JSONRPCResponse holds the JSONRPC response.
type JSONRPCResponse struct {
// ID that was sent with the request
ID string `json:"id,omitempty"`
// JSONRPC is always set to "2.0"
JSONRPC string `json:"jsonrpc,omitempty"`
// Result payload
Result *json.RawMessage `json:"result,omitempty"`
// Optional error field
Error *JSONRPCError `json:"error,omitempty"`
}
// JSONRPCError holds error information.
type JSONRPCError struct {
Message string `json:"message,omitempty"`
Code int `json:"code,omitempty"`
Data map[string]interface{} `json:"data,omitempty"`
}
// DecodeResult takes a buffer, decodes the intermediate JSONRPCResponse and then the contained "result" field into "result".
func DecodeResult(buf io.Reader, result interface{}) error {
// Decode intermediate
var res JSONRPCResponse
if err := json.NewDecoder(buf).Decode(&res); err != nil {
return fmt.Errorf("decode intermediate: %w", err)
}
if res.Error != nil {
return fmt.Errorf("%s: %s", res.Error.Message, res.Error.Data["message"])
}
return json.Unmarshal(*res.Result, result)
}
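// Illustrative usage sketch (the endpoint URL, HTTP call, and result type
// below are assumptions for the example, not part of this package):
//
//	req := NewJSONRPCRequest(params)
//	body, _ := req.Encode()
//	resp, _ := http.Post("https://odoo.example.com/jsonrpc", "application/json", body)
//	var result MyResult
//	err := DecodeResult(resp.Body, &result)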
func newEncodingRequestError(err error) error {
return fmt.Errorf("encoding request: %w", err)
}
func newCreatingRequestError(err error) error | {
return fmt.Errorf("creating request: %w", err)
} |
|
iommu.rs | // Copyright © 2019 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use super::Error as DeviceError;
use super::{
ActivateError, ActivateResult, DescriptorChain, EpollHelper, EpollHelperError,
EpollHelperHandler, Queue, VirtioCommon, VirtioDevice, VirtioDeviceType,
EPOLL_HELPER_EVENT_LAST, VIRTIO_F_VERSION_1,
};
use crate::seccomp_filters::{get_seccomp_filter, Thread};
use crate::GuestMemoryMmap;
use crate::{DmaRemapping, VirtioInterrupt, VirtioInterruptType};
use seccompiler::{apply_filter, SeccompAction};
use std::collections::BTreeMap;
use std::fmt::{self, Display};
use std::io;
use std::mem::size_of;
use std::ops::Bound::Included;
use std::os::unix::io::AsRawFd;
use std::result;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Barrier, RwLock};
use std::thread;
use versionize::{VersionMap, Versionize, VersionizeResult};
use versionize_derive::Versionize;
use vm_device::dma_mapping::ExternalDmaMapping;
use vm_memory::{
Address, ByteValued, Bytes, GuestAddress, GuestAddressSpace, GuestMemoryAtomic,
GuestMemoryError,
};
use vm_migration::VersionMapped;
use vm_migration::{Migratable, MigratableError, Pausable, Snapshot, Snapshottable, Transportable};
use vmm_sys_util::eventfd::EventFd;
/// Queues sizes
const QUEUE_SIZE: u16 = 256;
const NUM_QUEUES: usize = 2;
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE; NUM_QUEUES];
/// New descriptors are pending on the request queue.
/// "requestq" is meant to be used anytime an action is required to be
/// performed on behalf of the guest driver.
const REQUEST_Q_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 1;
/// New descriptors are pending on the event queue.
/// "eventq" lets the device report any fault or other asynchronous event to
/// the guest driver.
const EVENT_Q_EVENT: u16 = EPOLL_HELPER_EVENT_LAST + 2;
/// PROBE properties size.
/// This is the minimal size to provide at least one RESV_MEM property.
/// Because virtio-iommu expects one MSI reserved region, we must provide it,
/// otherwise the driver in the guest will define a predefined one between
/// 0x8000000 and 0x80FFFFF, which is only relevant for ARM architecture, but
/// will conflict with x86.
const PROBE_PROP_SIZE: u32 =
(size_of::<VirtioIommuProbeProperty>() + size_of::<VirtioIommuProbeResvMem>()) as u32;
const MSI_IOVA_START: u64 = 0xfee0_0000;
const MSI_IOVA_END: u64 = 0xfeef_ffff;
/// Virtio IOMMU features
#[allow(unused)]
const VIRTIO_IOMMU_F_INPUT_RANGE: u32 = 0;
#[allow(unused)]
const VIRTIO_IOMMU_F_DOMAIN_RANGE: u32 = 1;
#[allow(unused)]
const VIRTIO_IOMMU_F_MAP_UNMAP: u32 = 2;
#[allow(unused)]
const VIRTIO_IOMMU_F_BYPASS: u32 = 3;
const VIRTIO_IOMMU_F_PROBE: u32 = 4;
#[allow(unused)]
const VIRTIO_IOMMU_F_MMIO: u32 = 5;
#[allow(unused)]
const VIRTIO_IOMMU_F_BYPASS_CONFIG: u32 = 6;
// Support 2MiB and 4KiB page sizes.
const VIRTIO_IOMMU_PAGE_SIZE_MASK: u64 = (2 << 20) | (4 << 10);
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
struct VirtioIommuRange32 {
start: u32,
end: u32,
}
unsafe impl ByteValued for VirtioIommuRange32 {}
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
struct VirtioIommuRange64 {
start: u64,
end: u64,
}
unsafe impl ByteValued for VirtioIommuRange64 {}
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
struct VirtioIommuConfig {
page_size_mask: u64,
input_range: VirtioIommuRange64,
domain_range: VirtioIommuRange32,
probe_size: u32,
bypass: u8,
reserved: [u8; 7],
}
unsafe impl ByteValued for VirtioIommuConfig {}
/// Virtio IOMMU request type
const VIRTIO_IOMMU_T_ATTACH: u8 = 1;
const VIRTIO_IOMMU_T_DETACH: u8 = 2;
const VIRTIO_IOMMU_T_MAP: u8 = 3;
const VIRTIO_IOMMU_T_UNMAP: u8 = 4;
const VIRTIO_IOMMU_T_PROBE: u8 = 5;
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
struct VirtioIommuReqHead {
type_: u8,
reserved: [u8; 3],
}
unsafe impl ByteValued for VirtioIommuReqHead {}
/// Virtio IOMMU request status
const VIRTIO_IOMMU_S_OK: u8 = 0;
#[allow(unused)]
const VIRTIO_IOMMU_S_IOERR: u8 = 1;
#[allow(unused)]
const VIRTIO_IOMMU_S_UNSUPP: u8 = 2;
#[allow(unused)]
const VIRTIO_IOMMU_S_DEVERR: u8 = 3;
#[allow(unused)]
const VIRTIO_IOMMU_S_INVAL: u8 = 4;
#[allow(unused)]
const VIRTIO_IOMMU_S_RANGE: u8 = 5;
#[allow(unused)]
const VIRTIO_IOMMU_S_NOENT: u8 = 6;
#[allow(unused)]
const VIRTIO_IOMMU_S_FAULT: u8 = 7;
#[allow(unused)]
const VIRTIO_IOMMU_S_NOMEM: u8 = 8;
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
struct VirtioIommuReqTail {
status: u8,
reserved: [u8; 3],
}
unsafe impl ByteValued for VirtioIommuReqTail {}
/// ATTACH request
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
struct VirtioIommuReqAttach {
domain: u32,
endpoint: u32,
reserved: [u8; 8],
}
unsafe impl ByteValued for VirtioIommuReqAttach {}
/// DETACH request
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
struct VirtioIommuReqDetach {
domain: u32,
endpoint: u32,
reserved: [u8; 8],
}
unsafe impl ByteValued for VirtioIommuReqDetach {}
/// Virtio IOMMU request MAP flags
#[allow(unused)]
const VIRTIO_IOMMU_MAP_F_READ: u32 = 1;
#[allow(unused)]
const VIRTIO_IOMMU_MAP_F_WRITE: u32 = 1 << 1;
#[allow(unused)]
const VIRTIO_IOMMU_MAP_F_MMIO: u32 = 1 << 2;
#[allow(unused)]
const VIRTIO_IOMMU_MAP_F_MASK: u32 =
VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE | VIRTIO_IOMMU_MAP_F_MMIO;
/// MAP request
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
struct VirtioIommuReqMap {
domain: u32,
virt_start: u64,
virt_end: u64,
phys_start: u64,
flags: u32,
}
unsafe impl ByteValued for VirtioIommuReqMap {}
/// UNMAP request
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
struct VirtioIommuReqUnmap {
domain: u32,
virt_start: u64,
virt_end: u64,
reserved: [u8; 4],
}
unsafe impl ByteValued for VirtioIommuReqUnmap {}
/// Virtio IOMMU request PROBE types
#[allow(unused)]
const VIRTIO_IOMMU_PROBE_T_NONE: u16 = 0;
const VIRTIO_IOMMU_PROBE_T_RESV_MEM: u16 = 1;
#[allow(unused)]
const VIRTIO_IOMMU_PROBE_T_MASK: u16 = 0xfff;
/// PROBE request
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
struct VirtioIommuReqProbe {
endpoint: u32,
reserved: [u64; 8],
}
unsafe impl ByteValued for VirtioIommuReqProbe {}
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
struct VirtioIommuProbeProperty {
type_: u16,
length: u16,
}
unsafe impl ByteValued for VirtioIommuProbeProperty {}
/// Virtio IOMMU request PROBE property RESV_MEM subtypes
#[allow(unused)]
const VIRTIO_IOMMU_RESV_MEM_T_RESERVED: u8 = 0;
const VIRTIO_IOMMU_RESV_MEM_T_MSI: u8 = 1;
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
struct VirtioIommuProbeResvMem {
subtype: u8,
reserved: [u8; 3],
start: u64,
end: u64,
}
unsafe impl ByteValued for VirtioIommuProbeResvMem {}
/// Virtio IOMMU fault flags
#[allow(unused)]
const VIRTIO_IOMMU_FAULT_F_READ: u32 = 1;
#[allow(unused)]
const VIRTIO_IOMMU_FAULT_F_WRITE: u32 = 1 << 1;
#[allow(unused)]
const VIRTIO_IOMMU_FAULT_F_EXEC: u32 = 1 << 2;
#[allow(unused)]
const VIRTIO_IOMMU_FAULT_F_ADDRESS: u32 = 1 << 8;
/// Virtio IOMMU fault reasons
#[allow(unused)]
const VIRTIO_IOMMU_FAULT_R_UNKNOWN: u32 = 0;
#[allow(unused)]
const VIRTIO_IOMMU_FAULT_R_DOMAIN: u32 = 1;
#[allow(unused)]
const VIRTIO_IOMMU_FAULT_R_MAPPING: u32 = 2;
/// Fault reporting through eventq
#[allow(unused)]
#[derive(Copy, Clone, Debug, Default)]
#[repr(packed)]
struct VirtioIommuFault {
reason: u8,
reserved: [u8; 3],
flags: u32,
endpoint: u32,
reserved2: [u8; 4],
address: u64,
}
unsafe impl ByteValued for VirtioIommuFault {}
#[derive(Debug)]
enum Error {
/// Guest gave us bad memory addresses.
GuestMemory(GuestMemoryError),
    /// Guest gave us a write-only descriptor that the protocol says to read from.
    UnexpectedWriteOnlyDescriptor,
    /// Guest gave us a read-only descriptor that the protocol says to write to.
    UnexpectedReadOnlyDescriptor,
    /// Guest gave us too few descriptors in a descriptor chain.
    DescriptorChainTooShort,
    /// Guest gave us a buffer that was too short to use.
    BufferLengthTooSmall,
    /// Guest sent us an invalid request.
    InvalidRequest,
    /// Guest sent us an invalid ATTACH request.
    InvalidAttachRequest,
    /// Guest sent us an invalid DETACH request.
    InvalidDetachRequest,
    /// Guest sent us an invalid MAP request.
    InvalidMapRequest,
    /// Guest sent us an invalid UNMAP request.
    InvalidUnmapRequest,
    /// Guest sent us an invalid PROBE request.
    InvalidProbeRequest,
    /// Failed performing an external mapping.
    ExternalMapping(io::Error),
    /// Failed performing an external unmapping.
    ExternalUnmapping(io::Error),
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match self {
BufferLengthTooSmall => write!(f, "buffer length too small"),
DescriptorChainTooShort => write!(f, "descriptor chain too short"),
GuestMemory(e) => write!(f, "bad guest memory address: {}", e),
InvalidRequest => write!(f, "invalid request"),
InvalidAttachRequest => write!(f, "invalid attach request"),
InvalidDetachRequest => write!(f, "invalid detach request"),
InvalidMapRequest => write!(f, "invalid map request"),
InvalidUnmapRequest => write!(f, "invalid unmap request"),
InvalidProbeRequest => write!(f, "invalid probe request"),
UnexpectedReadOnlyDescriptor => write!(f, "unexpected read-only descriptor"),
UnexpectedWriteOnlyDescriptor => write!(f, "unexpected write-only descriptor"),
ExternalMapping(e) => write!(f, "failed performing external mapping: {}", e),
ExternalUnmapping(e) => write!(f, "failed performing external unmapping: {}", e),
}
}
}
struct Request {}
impl Request {
// Parse the available vring buffer. Based on the hashmap table of external
// mappings required from various devices such as VFIO or vhost-user ones,
// this function might update the hashmap table of external mappings per
// domain.
// Basically, the VMM knows about the device_id <=> mapping relationship
// before running the VM, but at runtime, a new domain <=> mapping hashmap
// is created based on the information provided from the guest driver for
// virtio-iommu (giving the link device_id <=> domain).
fn parse(
avail_desc: &DescriptorChain,
mem: &GuestMemoryMmap,
mapping: &Arc<IommuMapping>,
ext_mapping: &BTreeMap<u32, Arc<dyn ExternalDmaMapping>>,
ext_domain_mapping: &mut BTreeMap<u32, Arc<dyn ExternalDmaMapping>>,
) -> result::Result<usize, Error> {
// The head contains the request type which MUST be readable.
if avail_desc.is_write_only() {
return Err(Error::UnexpectedWriteOnlyDescriptor);
}
if (avail_desc.len as usize) < size_of::<VirtioIommuReqHead>() {
return Err(Error::InvalidRequest);
}
let req_head: VirtioIommuReqHead =
mem.read_obj(avail_desc.addr).map_err(Error::GuestMemory)?;
let req_offset = size_of::<VirtioIommuReqHead>();
let desc_size_left = (avail_desc.len as usize) - req_offset;
let req_addr = if let Some(addr) = avail_desc.addr.checked_add(req_offset as u64) {
addr
} else {
return Err(Error::InvalidRequest);
};
// Create the reply
let mut reply: Vec<u8> = Vec::new();
let hdr_len = match req_head.type_ {
VIRTIO_IOMMU_T_ATTACH => {
if desc_size_left != size_of::<VirtioIommuReqAttach>() {
return Err(Error::InvalidAttachRequest);
}
let req: VirtioIommuReqAttach = mem
.read_obj(req_addr as GuestAddress)
.map_err(Error::GuestMemory)?;
debug!("Attach request {:?}", req);
// Copy the value to use it as a proper reference.
let domain = req.domain;
let endpoint = req.endpoint;
// Add endpoint associated with specific domain
mapping.endpoints.write().unwrap().insert(endpoint, domain);
// If the endpoint is part of the list of devices with an
// external mapping, insert a new entry for the corresponding
// domain, with the same reference to the trait.
if let Some(map) = ext_mapping.get(&endpoint) {
ext_domain_mapping.insert(domain, map.clone());
}
// Add new domain with no mapping if the entry didn't exist yet
let mut mappings = mapping.mappings.write().unwrap();
mappings.entry(domain).or_insert_with(BTreeMap::new);
0
}
VIRTIO_IOMMU_T_DETACH => {
if desc_size_left != size_of::<VirtioIommuReqDetach>() {
return Err(Error::InvalidDetachRequest);
}
let req: VirtioIommuReqDetach = mem
.read_obj(req_addr as GuestAddress)
.map_err(Error::GuestMemory)?;
debug!("Detach request {:?}", req);
// Copy the value to use it as a proper reference.
let domain = req.domain;
let endpoint = req.endpoint;
// If the endpoint is part of the list of devices with an
// external mapping, remove the entry for the corresponding
// domain.
if ext_mapping.contains_key(&endpoint) {
ext_domain_mapping.remove(&domain);
}
// Remove endpoint associated with specific domain
mapping.endpoints.write().unwrap().remove(&endpoint);
0
}
VIRTIO_IOMMU_T_MAP => {
if desc_size_left != size_of::<VirtioIommuReqMap>() {
return Err(Error::InvalidMapRequest);
}
let req: VirtioIommuReqMap = mem
.read_obj(req_addr as GuestAddress)
.map_err(Error::GuestMemory)?;
debug!("Map request {:?}", req);
// Copy the value to use it as a proper reference.
let domain = req.domain;
// Trigger external mapping if necessary.
if let Some(ext_map) = ext_domain_mapping.get(&domain) {
let size = req.virt_end - req.virt_start + 1;
ext_map
.map(req.virt_start, req.phys_start, size)
.map_err(Error::ExternalMapping)?;
}
// Add new mapping associated with the domain
if let Some(entry) = mapping.mappings.write().unwrap().get_mut(&domain) {
entry.insert(
req.virt_start,
Mapping {
gpa: req.phys_start,
size: req.virt_end - req.virt_start + 1,
},
);
} else {
return Err(Error::InvalidMapRequest);
}
0
}
VIRTIO_IOMMU_T_UNMAP => {
if desc_size_left != size_of::<VirtioIommuReqUnmap>() {
return Err(Error::InvalidUnmapRequest);
}
let req: VirtioIommuReqUnmap = mem
.read_obj(req_addr as GuestAddress)
.map_err(Error::GuestMemory)?;
debug!("Unmap request {:?}", req);
// Copy the value to use it as a proper reference.
let domain = req.domain;
let virt_start = req.virt_start;
// Trigger external unmapping if necessary.
if let Some(ext_map) = ext_domain_mapping.get(&domain) {
let size = req.virt_end - virt_start + 1;
ext_map
.unmap(virt_start, size)
.map_err(Error::ExternalUnmapping)?;
}
                // Remove the mapping associated with the domain
if let Some(entry) = mapping.mappings.write().unwrap().get_mut(&domain) {
entry.remove(&virt_start);
}
0
}
VIRTIO_IOMMU_T_PROBE => {
if desc_size_left != size_of::<VirtioIommuReqProbe>() {
return Err(Error::InvalidProbeRequest);
}
let req: VirtioIommuReqProbe = mem
.read_obj(req_addr as GuestAddress)
.map_err(Error::GuestMemory)?;
debug!("Probe request {:?}", req);
let probe_prop = VirtioIommuProbeProperty {
type_: VIRTIO_IOMMU_PROBE_T_RESV_MEM,
length: size_of::<VirtioIommuProbeResvMem>() as u16,
};
reply.extend_from_slice(probe_prop.as_slice());
let resv_mem = VirtioIommuProbeResvMem {
subtype: VIRTIO_IOMMU_RESV_MEM_T_MSI,
start: MSI_IOVA_START,
end: MSI_IOVA_END,
..Default::default()
};
reply.extend_from_slice(resv_mem.as_slice());
PROBE_PROP_SIZE
}
_ => return Err(Error::InvalidRequest),
};
let status_desc = avail_desc
.next_descriptor()
.ok_or(Error::DescriptorChainTooShort)?;
// The status MUST always be writable
if !status_desc.is_write_only() {
return Err(Error::UnexpectedReadOnlyDescriptor);
}
if status_desc.len < hdr_len + size_of::<VirtioIommuReqTail>() as u32 {
return Err(Error::BufferLengthTooSmall);
}
let tail = VirtioIommuReqTail {
status: VIRTIO_IOMMU_S_OK,
..Default::default()
};
reply.extend_from_slice(tail.as_slice());
mem.write_slice(reply.as_slice(), status_desc.addr)
.map_err(Error::GuestMemory)?;
Ok((hdr_len as usize) + size_of::<VirtioIommuReqTail>())
}
}
struct IommuEpollHandler {
queues: Vec<Queue>,
mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>,
queue_evts: Vec<EventFd>,
kill_evt: EventFd,
pause_evt: EventFd,
mapping: Arc<IommuMapping>,
ext_mapping: BTreeMap<u32, Arc<dyn ExternalDmaMapping>>,
ext_domain_mapping: BTreeMap<u32, Arc<dyn ExternalDmaMapping>>,
}
impl IommuEpollHandler {
fn request_queue(&mut self) -> bool {
let mut used_desc_heads = [(0, 0); QUEUE_SIZE as usize];
let mut used_count = 0;
let mem = self.mem.memory();
for avail_desc in self.queues[0].iter(&mem) {
let len = match Request::parse(
&avail_desc,
&mem,
&self.mapping,
&self.ext_mapping,
&mut self.ext_domain_mapping,
) {
Ok(len) => len as u32,
Err(e) => {
error!("failed parsing descriptor: {}", e);
0
}
};
used_desc_heads[used_count] = (avail_desc.index, len);
used_count += 1;
}
for &(desc_index, len) in &used_desc_heads[..used_count] {
self.queues[0].add_used(&mem, desc_index, len);
}
used_count > 0
}
fn event_queue(&mut self) -> bool {
false
}
fn signal_used_queue(&self, queue: &Queue) -> result::Result<(), DeviceError> {
self.interrupt_cb
.trigger(&VirtioInterruptType::Queue, Some(queue))
.map_err(|e| {
error!("Failed to signal used queue: {:?}", e);
DeviceError::FailedSignalingUsedQueue(e)
})
}
fn run(
&mut self,
paused: Arc<AtomicBool>,
paused_sync: Arc<Barrier>,
) -> result::Result<(), EpollHelperError> {
let mut helper = EpollHelper::new(&self.kill_evt, &self.pause_evt)?;
helper.add_event(self.queue_evts[0].as_raw_fd(), REQUEST_Q_EVENT)?;
helper.add_event(self.queue_evts[1].as_raw_fd(), EVENT_Q_EVENT)?;
helper.run(paused, paused_sync, self)?;
Ok(())
}
}
impl EpollHelperHandler for IommuEpollHandler {
fn handle_event(&mut self, _helper: &mut EpollHelper, event: &epoll::Event) -> bool {
let ev_type = event.data as u16;
match ev_type {
REQUEST_Q_EVENT => {
if let Err(e) = self.queue_evts[0].read() {
error!("Failed to get queue event: {:?}", e);
return true;
} else if self.request_queue() {
if let Err(e) = self.signal_used_queue(&self.queues[0]) {
error!("Failed to signal used queue: {:?}", e);
return true;
}
}
}
EVENT_Q_EVENT => {
if let Err(e) = self.queue_evts[1].read() {
error!("Failed to get queue event: {:?}", e);
return true;
} else if self.event_queue() {
if let Err(e) = self.signal_used_queue(&self.queues[1]) {
error!("Failed to signal used queue: {:?}", e);
return true;
}
}
}
_ => {
error!("Unexpected event: {}", ev_type);
return true;
}
}
false
}
}
#[derive(Clone, Copy, Versionize)]
struct Mapping {
gpa: u64,
size: u64,
}
pub struct IommuMapping {
// Domain related to an endpoint.
endpoints: Arc<RwLock<BTreeMap<u32, u32>>>,
// List of mappings per domain.
mappings: Arc<RwLock<BTreeMap<u32, BTreeMap<u64, Mapping>>>>,
}
impl DmaRemapping for IommuMapping {
fn translate(&self, id: u32, addr: u64) -> std::result::Result<u64, std::io::Error> {
debug!("Translate addr 0x{:x}", addr);
if let Some(domain) = self.endpoints.read().unwrap().get(&id) {
if let Some(mapping) = self.mappings.read().unwrap().get(domain) {
let range_start = if VIRTIO_IOMMU_PAGE_SIZE_MASK > addr {
0
} else {
addr - VIRTIO_IOMMU_PAGE_SIZE_MASK
};
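                // Scan mappings whose IOVA start lies within the page-size
                // mask below `addr`, and translate when `addr` falls inside one.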
for (&key, &value) in mapping.range((Included(&range_start), Included(&addr))) {
if addr >= key && addr < key + value.size {
let new_addr = addr - key + value.gpa;
debug!("Into new_addr 0x{:x}", new_addr);
return Ok(new_addr);
}
}
}
}
debug!("Into same addr...");
Ok(addr)
}
}
pub struct Iommu {
common: VirtioCommon,
id: String,
config: VirtioIommuConfig,
mapping: Arc<IommuMapping>,
ext_mapping: BTreeMap<u32, Arc<dyn ExternalDmaMapping>>,
seccomp_action: SeccompAction,
}
#[derive(Versionize)]
struct IommuState {
avail_features: u64,
acked_features: u64,
endpoints: Vec<(u32, u32)>,
mappings: Vec<(u32, Vec<(u64, Mapping)>)>,
}
impl VersionMapped for IommuState {}
impl Iommu {
pub fn new(id: String, seccomp_action: SeccompAction) -> io::Result<(Self, Arc<IommuMapping>)> {
let config = VirtioIommuConfig {
page_size_mask: VIRTIO_IOMMU_PAGE_SIZE_MASK,
probe_size: PROBE_PROP_SIZE,
..Default::default()
};
let mapping = Arc::new(IommuMapping {
endpoints: Arc::new(RwLock::new(BTreeMap::new())),
mappings: Arc::new(RwLock::new(BTreeMap::new())),
});
Ok((
Iommu {
id,
common: VirtioCommon {
device_type: VirtioDeviceType::Iommu as u32,
queue_sizes: QUEUE_SIZES.to_vec(),
avail_features: 1u64 << VIRTIO_F_VERSION_1
| 1u64 << VIRTIO_IOMMU_F_MAP_UNMAP
| 1u64 << VIRTIO_IOMMU_F_PROBE,
paused_sync: Some(Arc::new(Barrier::new(2))),
..Default::default()
},
config,
mapping: mapping.clone(),
ext_mapping: BTreeMap::new(),
seccomp_action,
},
mapping,
))
}
fn state(&self) -> IommuState {
IommuState {
avail_features: self.common.avail_features,
acked_features: self.common.acked_features,
endpoints: self
.mapping
.endpoints
.read()
.unwrap()
.clone()
.into_iter()
.collect(),
mappings: self
.mapping
.mappings
.read()
.unwrap()
.clone()
.into_iter()
.map(|(k, v)| (k, v.into_iter().collect()))
.collect(),
}
}
fn set_state(&mut self, state: &IommuState) {
self.common.avail_features = state.avail_features;
self.common.acked_features = state.acked_features;
*(self.mapping.endpoints.write().unwrap()) = state.endpoints.clone().into_iter().collect();
*(self.mapping.mappings.write().unwrap()) = state
.mappings
.clone()
.into_iter()
.map(|(k, v)| (k, v.into_iter().collect()))
.collect();
}
pub fn add_external_mapping(&mut self, device_id: u32, mapping: Arc<dyn ExternalDmaMapping>) {
self.ext_mapping.insert(device_id, mapping);
}
}
impl Drop for Iommu {
fn d | &mut self) {
if let Some(kill_evt) = self.common.kill_evt.take() {
// Ignore the result because there is nothing we can do about it.
let _ = kill_evt.write(1);
}
}
}
impl VirtioDevice for Iommu {
fn device_type(&self) -> u32 {
self.common.device_type
}
fn queue_max_sizes(&self) -> &[u16] {
&self.common.queue_sizes
}
fn features(&self) -> u64 {
self.common.avail_features
}
fn ack_features(&mut self, value: u64) {
self.common.ack_features(value)
}
fn read_config(&self, offset: u64, data: &mut [u8]) {
self.read_config_from_slice(self.config.as_slice(), offset, data);
}
fn activate(
&mut self,
mem: GuestMemoryAtomic<GuestMemoryMmap>,
interrupt_cb: Arc<dyn VirtioInterrupt>,
queues: Vec<Queue>,
queue_evts: Vec<EventFd>,
) -> ActivateResult {
self.common.activate(&queues, &queue_evts, &interrupt_cb)?;
let (kill_evt, pause_evt) = self.common.dup_eventfds();
let mut handler = IommuEpollHandler {
queues,
mem,
interrupt_cb,
queue_evts,
kill_evt,
pause_evt,
mapping: self.mapping.clone(),
ext_mapping: self.ext_mapping.clone(),
ext_domain_mapping: BTreeMap::new(),
};
let paused = self.common.paused.clone();
let paused_sync = self.common.paused_sync.clone();
let mut epoll_threads = Vec::new();
// Retrieve seccomp filter for virtio_iommu thread
let virtio_iommu_seccomp_filter =
get_seccomp_filter(&self.seccomp_action, Thread::VirtioIommu)
.map_err(ActivateError::CreateSeccompFilter)?;
thread::Builder::new()
.name(self.id.clone())
.spawn(move || {
if !virtio_iommu_seccomp_filter.is_empty() {
if let Err(e) = apply_filter(&virtio_iommu_seccomp_filter) {
error!("Error applying seccomp filter: {:?}", e);
return;
}
}
if let Err(e) = handler.run(paused, paused_sync.unwrap()) {
error!("Error running worker: {:?}", e);
}
})
.map(|thread| epoll_threads.push(thread))
.map_err(|e| {
error!("failed to clone the virtio-iommu epoll thread: {}", e);
ActivateError::BadActivate
})?;
self.common.epoll_threads = Some(epoll_threads);
event!("virtio-device", "activated", "id", &self.id);
Ok(())
}
fn reset(&mut self) -> Option<Arc<dyn VirtioInterrupt>> {
let result = self.common.reset();
event!("virtio-device", "reset", "id", &self.id);
result
}
}
impl Pausable for Iommu {
fn pause(&mut self) -> result::Result<(), MigratableError> {
self.common.pause()
}
fn resume(&mut self) -> result::Result<(), MigratableError> {
self.common.resume()
}
}
impl Snapshottable for Iommu {
fn id(&self) -> String {
self.id.clone()
}
fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
Snapshot::new_from_versioned_state(&self.id, &self.state())
}
fn restore(&mut self, snapshot: Snapshot) -> std::result::Result<(), MigratableError> {
self.set_state(&snapshot.to_versioned_state(&self.id)?);
Ok(())
}
}
impl Transportable for Iommu {}
impl Migratable for Iommu {}
| rop( |
main.rs | use std::io::{self, prelude::*};
use std::{
collections::{HashMap, VecDeque},
error::Error,
writeln,
};
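// Counts how many individual bags a single "shiny gold" bag must contain,
// given luggage rules on stdin (an Advent of Code 2020 day 7, part 2 style
// puzzle; the intent is inferred from the parsing below).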
fn main() -> Result<(), Box<dyn Error>> {
let stdin = io::stdin();
let stdout = io::stdout();
let mut out = io::BufWriter::new(stdout.lock());
let reader = io::BufReader::new(stdin.lock());
let input = reader.lines().filter_map(Result::ok).map(|line| {
line.replace("no other bags", "")
.replace("bags", "")
.replace("bag", "")
.replace("bag", "")
.replace("contain", "")
.replace(",", "")
.replace(".", "")
});
let shiny = "shinygold".to_string();
| //light red bags contain 1 bright white bag, 2 muted yellow bags.
//light red bags contain 1 bright white bag, 2 muted yellow bags. #no other bags
//light red contain 1 bright white bag, 2 muted yellow . #bags
//light red contain 1 bright white , 2 muted yellow . #bag
//light red 1 bright white , 2 muted yellow . #contain
//light red 1 bright white 2 muted yellow . #,
//light red 1 bright white 2 muted yellow #.
//light red 1 bright white 2 muted yellow #
let mut rules_map: HashMap<String, HashMap<String, usize>> = HashMap::new();
for line in input {
let mut line_iter = line.split_whitespace();
let mut rule: HashMap<String, usize> = HashMap::new();
let mut buffer = 0;
let mut key = line_iter.next().unwrap().to_string();
key.push_str(line_iter.next().unwrap());
loop {
            // populate this line's rule entries (count + bag name pairs)
let mut next = match line_iter.next() {
Some(k) => k.to_string(),
None => break,
};
if let Ok(k) = next.parse::<usize>() {
buffer = k;
} else {
next.push_str(line_iter.next().unwrap());
rule.insert(next, buffer);
}
}
rules_map.insert(key, rule);
}
let mut result: usize = 0;
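    // Breadth-first walk of the containment rules starting from "shinygold",
    // scaling each nested bag count by its parent's multiplicity.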
let mut queue: VecDeque<(&String, usize)> =
rules_map[&shiny].iter().map(|(k, &v)| (k, v)).collect();
    while !queue.is_empty() {
let (name, quantity) = queue.pop_front().unwrap();
result += quantity;
queue.extend(
rules_map[name]
.iter()
.map(|(key, value)| (key, value * quantity)),
);
}
writeln!(&mut out, "{:?}", result)?;
Ok(())
} | |
ProgressBarStyle.py | class ProgressBarStyle(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies the style that a System.Windows.Forms.ProgressBar uses to indicate the progress of an operation.
enum ProgressBarStyle,values: Blocks (0),Continuous (1),Marquee (2)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
| Blocks=None
Continuous=None
Marquee=None
value__=None | pass
|
slots.rs | /*
Copyright 2021 Integritee AG and Supercomputing Systems AG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//! Utility stream for yielding slots in a loop.
//!
//! This is used instead of `futures_timer::Interval` because it was unreliable.
use itp_sgx_io::SealedIO;
use its_consensus_common::Error as ConsensusError;
use its_primitives::traits::{Block, SignedBlock as SignedSidechainBlock};
use sp_runtime::traits::Block as ParentchainBlock;
use std::time::{Duration, SystemTime};
pub use sp_consensus_slots::Slot;
#[cfg(all(not(feature = "std"), feature = "sgx"))]
use std::untrusted::time::SystemTimeEx;
/// Returns current duration since unix epoch.
pub fn duration_now() -> Duration {
let now = SystemTime::now();
now.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_else(|e| {
panic!("Current time {:?} is before unix epoch. Something is wrong: {:?}", now, e)
})
}
/// Returns the duration until the next slot from now.
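/// Illustrative example: with a 1000 ms slot duration and `now` at 12_345 ms
/// since the epoch, the next boundary is 13_000 ms, so this returns 655 ms.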
pub fn time_until_next_slot(slot_duration: Duration) -> Duration {
let now = duration_now().as_millis();
if slot_duration.as_millis() == Default::default() {
log::warn!("[Slots]: slot_duration.as_millis() is 0");
return Default::default()
}
let next_slot = (now + slot_duration.as_millis()) / slot_duration.as_millis();
let remaining_millis = next_slot * slot_duration.as_millis() - now;
Duration::from_millis(remaining_millis as u64)
}
/// Calculates the remaining time until the given instant `until`.
pub fn remaining_time(until: Duration) -> Option<Duration> {
until.checked_sub(duration_now())
}
/// Information about a slot.
#[derive(Debug)]
pub struct SlotInfo<B: ParentchainBlock> {
/// The slot number as found in the inherent data.
pub slot: Slot,
/// Current timestamp as found in the inherent data.
pub timestamp: Duration,
/// Slot duration.
pub duration: Duration,
/// The time at which the slot ends.
pub ends_at: Duration,
/// Parentchain header this slot is based on.
pub parentchain_head: B::Header,
}
impl<B: ParentchainBlock> SlotInfo<B> {
/// Create a new [`SlotInfo`].
///
/// `ends_at` is calculated using `timestamp` and `duration`.
pub fn new(
slot: Slot,
timestamp: Duration,
duration: Duration,
parentchain_head: B::Header,
) -> Self {
Self {
slot,
timestamp,
duration,
ends_at: timestamp + time_until_next_slot(duration),
parentchain_head,
}
}
}
pub(crate) fn timestamp_within_slot<B: ParentchainBlock, SB: SignedSidechainBlock>(
slot: &SlotInfo<B>,
proposal: &SB,
) -> bool {
let proposal_stamp = proposal.block().timestamp();
slot.timestamp.as_millis() as u64 <= proposal_stamp
&& slot.ends_at.as_millis() as u64 >= proposal_stamp
}
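/// Computes the slot index by integer division, e.g. a timestamp of 12_345 ms
/// with 1000 ms slots falls into slot 12.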
pub fn slot_from_time_stamp_and_duration(timestamp: Duration, duration: Duration) -> Slot {
((timestamp.as_millis() / duration.as_millis()) as u64).into()
}
pub fn yield_next_slot<SG, B>(
timestamp: Duration,
duration: Duration,
header: B::Header,
last_slot_getter: &mut SG,
) -> Result<Option<SlotInfo<B>>, ConsensusError>
where
SG: GetLastSlot,
B: ParentchainBlock,
{
if duration == Default::default() {
return Err(ConsensusError::Other("Tried to yield next slot with 0 duration".into()))
}
let last_slot = last_slot_getter.get_last_slot()?;
let slot = slot_from_time_stamp_and_duration(timestamp, duration);
if slot <= last_slot {
return Ok(None)
}
last_slot_getter.set_last_slot(slot)?;
Ok(Some(SlotInfo::new(slot, timestamp, duration, header)))
}
pub trait GetLastSlot {
fn get_last_slot(&self) -> Result<Slot, ConsensusError>;
fn set_last_slot(&mut self, slot: Slot) -> Result<(), ConsensusError>;
}
impl<T: SealedIO<Unsealed = Slot, Error = ConsensusError>> GetLastSlot for T {
fn get_last_slot(&self) -> Result<Slot, ConsensusError> {
Self::unseal()
}
fn set_last_slot(&mut self, slot: Slot) -> Result<(), ConsensusError> {
Self::seal(slot)
}
}
#[cfg(all(not(feature = "std"), feature = "sgx"))]
pub mod sgx {
use super::*;
use codec::{Decode, Encode};
use itp_sgx_io::{seal, unseal, SealedIO};
use lazy_static::lazy_static;
use std::sync::SgxRwLock;
pub struct LastSlotSeal;
lazy_static! {
static ref FILE_LOCK: SgxRwLock<()> = Default::default();
}
    const LAST_SLOT_BIN: &str = "last_slot.bin";
impl SealedIO for LastSlotSeal {
type Error = ConsensusError;
type Unsealed = Slot;
fn unseal() -> Result<Self::Unsealed, Self::Error> {
let _ = FILE_LOCK.read().map_err(|e| Self::Error::Other(format!("{:?}", e).into()))?;
match unseal(LAST_SLOT_BIN) {
Ok(slot) => Ok(Decode::decode(&mut slot.as_slice())?),
Err(_) => {
log::info!("Could not open {:?} file, returning first slot", LAST_SLOT_BIN);
Ok(Default::default())
},
}
}
fn seal(unsealed: Self::Unsealed) -> Result<(), Self::Error> {
let _ = FILE_LOCK.write().map_err(|e| Self::Error::Other(format!("{:?}", e).into()))?;
Ok(unsealed.using_encoded(|bytes| seal(bytes, LAST_SLOT_BIN))?)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use itp_sgx_io::SealedIO;
use itp_types::{Block as ParentchainBlock, Header as ParentChainHeader};
use its_primitives::{
traits::{Block as BlockT, SignBlock},
types::block::{Block, SignedBlock},
};
use sp_keyring::ed25519::Keyring;
use sp_runtime::{testing::H256, traits::Header as HeaderT};
use std::fmt::Debug;
const SLOT_DURATION: Duration = Duration::from_millis(1000);
struct LastSlotSealMock;
impl SealedIO for LastSlotSealMock {
type Error = ConsensusError;
type Unsealed = Slot;
fn unseal() -> Result<Self::Unsealed, Self::Error> {
Ok(slot_from_time_stamp_and_duration(duration_now(), SLOT_DURATION))
}
| }
}
fn test_block_with_time_stamp(timestamp: u64) -> SignedBlock {
Block::new(
Default::default(),
0,
H256::random(),
H256::random(),
H256::random(),
Default::default(),
Default::default(),
timestamp,
)
.sign_block(&Keyring::Alice.pair())
}
fn slot(slot: u64) -> SlotInfo<ParentchainBlock> {
SlotInfo {
slot: slot.into(),
timestamp: duration_now(),
duration: SLOT_DURATION,
ends_at: duration_now() + SLOT_DURATION,
parentchain_head: ParentChainHeader {
parent_hash: Default::default(),
number: 1,
state_root: Default::default(),
extrinsics_root: Default::default(),
digest: Default::default(),
},
}
}
pub fn default_header() -> ParentChainHeader {
ParentChainHeader::new(
Default::default(),
Default::default(),
Default::default(),
Default::default(),
Default::default(),
)
}
fn timestamp_in_the_future(later: Duration) -> u64 {
let moment = SystemTime::now() + later;
let dur = moment.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_else(|e| {
panic!("Current time {:?} is before unix epoch. Something is wrong: {:?}", moment, e)
});
dur.as_millis() as u64
}
fn timestamp_in_the_past(earlier: Duration) -> u64 {
let moment = SystemTime::now() - earlier;
let dur = moment.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_else(|e| {
panic!("Current time {:?} is before unix epoch. Something is wrong: {:?}", moment, e)
});
dur.as_millis() as u64
}
fn assert_consensus_other_err<T: Debug>(result: Result<T, ConsensusError>, msg: &str) {
        assert_matches!(result.unwrap_err(), ConsensusError::Other(m) if &m.to_string() == msg)
}
#[test]
fn time_until_next_slot_returns_default_on_nano_duration() {
// prevent panic: https://github.com/integritee-network/worker/issues/439
assert_eq!(time_until_next_slot(Duration::from_nanos(999)), Default::default())
}
#[test]
fn timestamp_within_slot_returns_true_for_correct_timestamp() {
let slot = slot(1);
let time_stamp_in_slot = timestamp_in_the_future(SLOT_DURATION / 2);
let block = test_block_with_time_stamp(time_stamp_in_slot);
assert!(timestamp_within_slot(&slot, &block));
}
#[test]
fn timestamp_within_slot_returns_false_if_timestamp_after_slot() {
let slot = slot(1);
let time_stamp_after_slot =
timestamp_in_the_future(SLOT_DURATION + Duration::from_millis(1));
let block_too_late = test_block_with_time_stamp(time_stamp_after_slot);
assert!(!timestamp_within_slot(&slot, &block_too_late));
}
#[test]
fn timestamp_within_slot_returns_false_if_timestamp_before_slot() {
let slot = slot(1);
let time_stamp_before_slot = timestamp_in_the_past(Duration::from_millis(1));
let block_too_early = test_block_with_time_stamp(time_stamp_before_slot);
assert!(!timestamp_within_slot(&slot, &block_too_early));
}
#[test]
fn yield_next_slot_returns_none_when_slot_equals_last_slot() {
assert!(yield_next_slot::<_, ParentchainBlock>(
duration_now(),
SLOT_DURATION,
default_header(),
&mut LastSlotSealMock,
)
.unwrap()
.is_none())
}
#[test]
fn yield_next_slot_returns_next_slot() {
assert!(yield_next_slot::<_, ParentchainBlock>(
duration_now() + SLOT_DURATION,
SLOT_DURATION,
default_header(),
&mut LastSlotSealMock
)
.unwrap()
.is_some())
}
#[test]
fn yield_next_slot_returns_err_on_0_duration() {
assert_consensus_other_err(
yield_next_slot::<_, ParentchainBlock>(
duration_now(),
Default::default(),
default_header(),
&mut LastSlotSealMock,
),
"Tried to yield next slot with 0 duration",
)
}
} | fn seal(_unsealed: Self::Unsealed) -> Result<(), Self::Error> {
println!("Seal method stub called.");
Ok(()) |
0012_auto_20180603_2214.py | # Generated by Django 2.0.3 on 2018-06-03 14:14
from django.db import migrations, models
import django.db.models.deletion
class | (migrations.Migration):
dependencies = [
('mymodel', '0011_auto_20180603_2208'),
]
operations = [
migrations.AlterField(
model_name='room',
name='owner',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='owner_rooms', to='mymodel.User'),
),
]
| Migration |
app.js | // Global state
var $upNext = null;
var $document;
var $body;
var $section;
var $slides;
var $arrows;
var $nextArrow;
var $previousArrow;
var $startCardButton;
var isTouch = Modernizr.touch;
var $likeStory;
var $likeStoryButtons;
var $facebook;
var $facebookBtn;
var $support;
var $supportBtn;
var $didNotLike;
var $dislikeEmail;
var mobileSuffix;
var w;
var h;
var startTouch;
var lastSlideExitEvent;
var callToActionTest;
var ASSETS_PATH = APP_CONFIG.DEPLOYMENT_TARGET ? APP_CONFIG.S3_BASE_URL + '/posts/' + APP_CONFIG.DEPLOY_SLUG + '/assets/' : 'http://assets.apps.npr.org.s3.amazonaws.com/lookatthis/' + APP_CONFIG.DEPLOY_SLUG + '/';
var NO_AUDIO = (window.location.search.indexOf('noaudio') >= 0);
var completion = 0;
var swipeTolerance = 40;
var touchFactor = 1;
var resize = function() {
/*
* Resize the content
*/
w = $(window).width();
h = $(window).height();
if ($section.height() < h) {
$section.height(h);
}
$slides.width(w);
};
var onPageLoad = function() {
/*
* Set up page on load.
*/
lazyLoad(0);
$('.section').css({
'opacity': 1,
'visibility': 'visible',
});
$('.slide.deck-current').find('.imgLiquid.second').css('opacity', 1);
showNavigation(0);
};
var trackCompletion = function(index) {
/*
* Track completion based on slide index.
*/
    var how_far = (index + 1) / ($slides.length - APP_CONFIG.NUM_SLIDES_AFTER_CONTENT);
if (how_far >= completion + 0.25) {
completion = how_far - (how_far % 0.25);
if (completion === 0.25) {
ANALYTICS.completeTwentyFivePercent();
}
else if (completion === 0.5) {
ANALYTICS.completeFiftyPercent();
}
else if (completion === 0.75) {
ANALYTICS.completeSeventyFivePercent();
}
else if (completion === 1) {
ANALYTICS.completeOneHundredPercent();
}
}
}
var lazyLoad = function(slideIndex) {
/*
* Lazy-load images in current and future slides.
*/
var slides = [
$slides.eq(slideIndex),
$slides.eq(slideIndex + 1),
$slides.eq(slideIndex + 2)
];
// Mobile suffix should be blank by default.
mobileSuffix = '';
if (w < 769) {
// mobileSuffix = '-sq';
}
for (var i = 0; i < slides.length; i++) {
loadImages(slides[i]);
if (APP_CONFIG.FILMSTRIP) {
FILMSTRIP.initFilmstrip(slides[i])
}
};
}
var loadImages = function($slide) {
/*
* Sets the background image on a div for our fancy slides.
*/
var $container = $slide.find('.imgLiquid');
mobileSuffix = '';
if (w < 769 && !$slide.hasClass('no-crop')) {
mobileSuffix = '-sq';
}
$container.each(function(key, value) {
var bgimg = $(value).children('img');
if (bgimg.data('bgimage')) {
var image_filename = bgimg.data('bgimage').split('.')[0];
var image_extension = '.' + bgimg.data('bgimage').split('.')[1];
var image_path = 'assets/' + image_filename + mobileSuffix + image_extension;
bgimg.attr('src', image_path);
}
if (bgimg.attr('src')) {
$(value).imgLiquid({
fill: true,
horizontalAlign: "center",
verticalAlign: "center",
});
}
});
var $images = $slide.find('img.lazy-load');
if ($images.length > 0) {
for (var i = 0; i < $images.length; i++) {
var image = $images.eq(i).data('src');
$images.eq(i).attr('src', 'assets/' + image);
}
}
};
var showNavigation = function(index) {
/*
* Hide and show arrows based on slide index
*/
if (index === 0) {
$arrows.hide();
$previousArrow.css('left', 0);
$nextArrow.css('right', 0);
} else if ($slides.last().index() === index) {
$arrows.show();
$nextArrow.hide().css('right', 0);
} else {
$arrows.show();
}
if (isTouch) {
resetArrows();
}
}
var checkOverflow = function(index) {
var $thisSlide = $slides.eq(index);
var slideHeight = $thisSlide.height();
var blockHeight = $thisSlide.find('.full-block').height();
if (blockHeight > slideHeight) {
$thisSlide.parents('.section').height(blockHeight);
} else {
$thisSlide.parents('.section').height(h);
}
}
var onSlideChange = function(e, fromIndex, toIndex) {
/*
* Called transitioning between slides.
*/
lazyLoad(toIndex);
showNavigation(toIndex);
trackCompletion(toIndex);
checkOverflow(toIndex);
document.activeElement.blur();
if (APP_CONFIG.AUDIO) {
AUDIO.checkForAudio(toIndex);
}
if (APP_CONFIG.VIDEO) {
VIDEO.checkForVideo(toIndex);
}
if (APP_CONFIG.FILMSTRIP) {
FILMSTRIP.clearFilmstrip(fromIndex);
FILMSTRIP.animateFilmstrip(toIndex);
}
if (APP_CONFIG.PROGRESS_BAR) {
PROGRESS_BAR.animateProgress(toIndex);
}
if (fromIndex === 0 && toIndex === 1) {
$('.slide.start').find('.imgLiquid').addClass('no-transition');
$('.slide.start').find('.first').css('opacity', 1);
$('.slide.start').find('.second').css('opacity', 0);
$('.slide.start').find('.imgLiquid').removeClass('no-transition');
}
if (toIndex === 0) {
$('.slide.start').find('.imgLiquid.second').css('opacity', 1);
}
ANALYTICS.exitSlide(fromIndex.toString());
ANALYTICS.trackEvent(lastSlideExitEvent, fromIndex.toString());
if (toIndex === $slides.length - 1) {
ANALYTICS.trackEvent('tests-run', callToActionTest);
}
}
var onStartCardButtonClick = function() {
/*
* Called when clicking the "go" button.
*/
lastSlideExitEvent = 'exit-start-card-button-click';
$.deck('next');
}
var onDocumentKeyDown = function(e) {
/*
* Called when key is pressed
*/
var keyOptions = $.deck('getOptions').keys;
var keys = keyOptions.next.concat(keyOptions.previous);
if (keys.indexOf(e.which) > -1) {
lastSlideExitEvent = 'exit-keyboard';
ANALYTICS.useKeyboardNavigation();
}
return true;
}
var onSlideClick = function(e) {
/*
* Advance on slide tap on touch devices
*/
if (isTouch && !$(e.target).is('button')) {
lastSlideExitEvent = 'exit-tap';
$.deck('next');
}
}
var onNextPostClick = function(e) {
/*
* Click next post
*/
e.preventDefault();
ANALYTICS.trackEvent('next-post');
window.top.location = NEXT_POST_URL;
return true;
}
var fakeMobileHover = function() {
/*
* Fake hover when tapping buttons
*/
$(this).css({
'background-color': '#fff',
'color': '#000',
'opacity': .9
});
}
var rmFakeMobileHover = function() {
/*
* Remove fake hover when tapping buttons
*/
$(this).css({
'background-color': 'rgba(0, 0, 0, 0.2)',
'color': '#fff',
'opacity': .3
});
}
var onNextArrowClick = function() {
/*
* Next arrow click
*/
lastSlideExitEvent = 'exit-next-button-click';
$.deck('next');
}
var onPreviousArrowClick = function() {
/*
* Previous arrow click
*/
lastSlideExitEvent = 'exit-previous-button-click';
$.deck('prev');
}
var onTouchStart = function(e) {
/*
* Capture start position when swipe initiated
*/
if (!startTouch) {
startTouch = $.extend({}, e.originalEvent.targetTouches[0]);
}
}
var onTouchMove = function(e) {
/*
* Track finger swipe
*/
$.each(e.originalEvent.changedTouches, function(i, touch) {
if (!startTouch || touch.identifier !== startTouch.identifier) {
return true;
}
var yDistance = touch.screenY - startTouch.screenY;
var xDistance = touch.screenX - startTouch.screenX;
var direction = (xDistance > 0) ? 'right' : 'left';
if (Math.abs(yDistance) < Math.abs(xDistance)) {
e.preventDefault();
}
if (direction == 'right' && xDistance > swipeTolerance) {
lastSlideExitEvent = 'exit-swipe-right';
} else if (direction == 'right' && xDistance < swipeTolerance) {
$previousArrow.filter(':visible').css({
'left': (xDistance * touchFactor) + 'px'
}); |
if (direction == 'left' && Math.abs(xDistance) > swipeTolerance) {
lastSlideExitEvent = 'exit-swipe-left';
} else if (direction == 'left' && Math.abs(xDistance) < swipeTolerance) {
$nextArrow.filter(':visible').css({
'right': (Math.abs(xDistance) * touchFactor) + 'px'
});
}
});
}
var onTouchEnd = function(e) {
/*
* Clear swipe start position when swipe ends
*/
$.each(e.originalEvent.changedTouches, function(i, touch) {
if (startTouch && touch.identifier === startTouch.identifier) {
startTouch = undefined;
}
});
}
var resetArrows = function() {
/*
* Reset arrows when advancing slides
*/
$nextArrow.animate({
'right': 0
});
$previousArrow.animate({
'left': 0
});
}
var determineTests = function() {
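    /*
     * Randomly choose which call-to-action test variant this visitor sees.
     */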
var possibleCallToActionTests = ['facebook', 'support'];
callToActionTest = possibleCallToActionTests[getRandomInt(0, possibleCallToActionTests.length)];
}
var getRandomInt = function(min, max) {
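    /*
     * Return a random integer in the range [min, max).
     */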
return Math.floor(Math.random() * (max - min)) + min;
}
var onLikeStoryButtonsClick = function(e) {
e.preventDefault();
$likeStory.hide();
if ($(this).hasClass('yes')) {
ANALYTICS.trackEvent('like-story-yes', callToActionTest);
if (callToActionTest === 'facebook') {
$facebook.show();
} else {
$support.show();
}
} else {
ANALYTICS.trackEvent('like-story-no');
$didNotLike.show();
}
}
var onFacebookBtnClick = function(e) {
e.preventDefault();
var $this = $(this);
var link = $this.attr('href');
ANALYTICS.trackEvent('facebook-share');
window.top.location = link
return true;
}
var onSupportBtnClick = function(e) {
e.preventDefault();
var $this = $(this);
var link = $this.attr('href');
ANALYTICS.trackEvent('support-btn-click');
window.top.location = link
return true;
}
var onDislikeEmailClick = function() {
ANALYTICS.trackEvent('email-btn-click');
}
$(document).ready(function() {
$document = $(document);
$body = $('body');
$section = $('.section');
$slides = $('.slide');
$navButton = $('.primary-navigation-btn');
$startCardButton = $('.btn-go');
$arrows = $('.controlArrow');
$previousArrow = $arrows.filter('.prev');
$nextArrow = $arrows.filter('.next');
$upNext = $('.up-next');
$likeStory = $('.like-story');
$likeStoryButtons = $('.btn-like-story');
$facebook = $('.facebook');
$facebookBtn = $('.btn-facebook');
$support = $('.support');
$supportBtn = $('.btn-support');
$didNotLike = $('.did-not-like');
$dislikeEmail = $('.dislike-email');
$startCardButton.on('click', onStartCardButtonClick);
$slides.on('click', onSlideClick);
$likeStoryButtons.on('click', onLikeStoryButtonsClick);
$facebookBtn.on('click', onFacebookBtnClick);
$supportBtn.on('click', onSupportBtnClick);
$dislikeEmail.on('click', onDislikeEmailClick);
$upNext.on('click', onNextPostClick);
$document.on('deck.change', onSlideChange);
$previousArrow.on('click', onPreviousArrowClick);
$nextArrow.on('click', onNextArrowClick);
if (isTouch) {
$arrows.on('touchstart', fakeMobileHover);
$arrows.on('touchend', rmFakeMobileHover);
$body.on('touchstart', onTouchStart);
$body.on('touchmove', onTouchMove);
$body.on('touchend', onTouchEnd);
}
// Turn off Modernizr history when deploying
if (APP_CONFIG.DEPLOYMENT_TARGET) {
Modernizr.history = null;
}
$.deck($slides, {
touch: { swipeTolerance: swipeTolerance }
});
onPageLoad();
resize();
determineTests();
// Redraw slides if the window resizes
$(window).on("orientationchange", resize);
$(window).resize(resize);
$document.keydown(onDocumentKeyDown);
}); | } |
puco.component.ts | import { Component, OnInit, Input, OnDestroy, Output, EventEmitter } from '@angular/core';
import { ISubscription } from 'rxjs/Subscription';
import { ObraSocialService } from './../../services/obraSocial.service';
import { ProfeService } from './../../services/profe.service';
import { SugerenciasService } from '../../services/sendmailsugerencias.service';
import { IProfe } from '../../interfaces/IProfe';
import { forkJoin as observableForkJoin } from 'rxjs';
import { DocumentosService } from '../../services/documentos.service';
import { Auth } from '@andes/auth';
import { saveAs } from 'file-saver';
import { Slug } from 'ng2-slugify';
@Component({
selector: 'puco',
templateUrl: 'puco.html',
styleUrls: ['puco.scss']
})
export class PucoComponent implements OnInit, OnDestroy {
public loading = false;
    public errorSearchTerm = false; // true if alphabetic characters were entered
public periodos = []; // select
public periodoSelect;
    public listaPeriodosPuco = ''; // sidebar only
public ultimaActualizacionPuco: Date;
    public listaPeriodosProfe = ''; // sidebar only
public ultimaActualizacionProfe: Date;
    public cantidadPeriodos = 6; // number of padrón versions fetched from the DB
    public periodoMasAntiguo; // the oldest padrón version available for searching
public usuarios = [];
public showPrintForm = false;
public usuarioSelected = null;
private resPuco = [];
private resProfe: IProfe;
private timeoutHandle: number;
    private slug = new Slug('default'); // for the PDF document
@Input() autofocus: Boolean = true;
    // search term to look up
public searchTerm: String = '';
    // last request, stored via the subscribe
private lastRequest: ISubscription;
constructor(
private obraSocialService: ObraSocialService,
private profeService: ProfeService,
private sugerenciasService: SugerenciasService,
private auth: Auth,
private documentosService: DocumentosService) { }
    /* clean up whatever request was executed */
ngOnDestroy() {
if (this.lastRequest) {
this.lastRequest.unsubscribe();
}
}
ngOnInit() {
observableForkJoin([
this.obraSocialService.getPadrones({}),
this.profeService.getPadrones({})]
).subscribe(padrones => {
            let periodoMasActual = new Date(); // current date, used to configure the select below
            // build the select options for however many months back can be queried
for (let i = 0; i < this.cantidadPeriodos; i++) {
let periodoAux = moment(periodoMasActual).subtract(i, 'month');
                this.periodos[i] = { id: i, nombre: moment(periodoAux).format('MMMM [de] YYYY'), version: periodoAux }; // e.g.: {1, "mayo de 2018", "2018/05/05"}
}
            this.setPeriodo(this.periodos[0]); // default the period to the current month
            this.periodoMasAntiguo = this.periodos[this.cantidadPeriodos - 1]; // oldest month the select will show
            // (For the sidebar) Set the variables listing which PUCO padrones are available.
if (padrones[0].length) {
for (let i = 0; i < padrones[0].length; i++) {
if (i === padrones[0].length - 1) {
this.listaPeriodosPuco += moment(padrones[0][i].version).utc().format('MMMM [de] YYYY');
} else {
this.listaPeriodosPuco += moment(padrones[0][i].version).utc().format('MMMM [de] YYYY') + ', ';
}
}
this.ultimaActualizacionPuco = moment(padrones[0][0].version).utc();
}
            // (For the sidebar) Set the variables listing which INCLUIR SALUD padrones are available.
if (padrones[1].length) {
for (let i = 0; i < padrones[1].length; i++) {
if (i === padrones[1].length - 1) {
this.listaPeriodosProfe += moment(padrones[1][i].version).format('MMMM [de] YYYY');
} else {
this.listaPeriodosProfe += moment(padrones[1][i].version).format('MMMM [de] YYYY') + ', ';
}
}
this.ultimaActualizacionProfe = padrones[1][0].version;
}
});
}
    // Simple checks when the select value changes
public setPeriodo(periodo) {
if (periodo === null) {
            this.usuarios = []; // if the period is cleared from the select, clear the results too
this.searchTerm = '';
} else {
this.periodoSelect = periodo;
if (this.searchTerm) {
this.buscar();
}
}
}
    /* Before running a search, checks that it is not made against a period/month that has
     * not been updated yet (i.e. with no matching padrón).
     * If so, it returns the most recent padrón so the search runs against that one.
     */
verificarPeriodo(periodo1, periodo2) {
periodo1 = new Date(periodo1);
periodo2 = new Date(periodo2);
let p1 = moment(periodo1).startOf('month').format('YYYY-MM-DD');
let p2 = moment(periodo2).startOf('month').format('YYYY-MM-DD');
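        // e.g. p1 = '2018-06-01', p2 = '2018-05-01' -> returns '2018-05-01' (the older padrón)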
if (moment(p1).diff(p2) > 0) {
return p2;
} else {
return p1;
}
}
buscar(): void {
        // Cancel the previous search
if (this.timeoutHandle) {
window.clearTimeout(this.timeoutHandle);
this.loading = false;
}
        // Clear the previous search results
this.usuarios = [];
if (this.searchTerm && /^([0-9])*$/.test(this.searchTerm.toString())) {
this.loading = true;
this.errorSearchTerm = false;
let search = this.searchTerm.trim();
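            // debounce: wait 400 ms before firing the padrón lookups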
this.timeoutHandle = window.setTimeout(() => {
this.timeoutHandle = null;
if (this.periodoSelect) {
                    // check that the selected period corresponds to an existing padrón. | this.obraSocialService.get({ dni: search, periodo: periodoPuco }),
this.profeService.get({ dni: search, periodo: periodoProfe })]).subscribe(t => {
this.loading = false;
this.resPuco = t[0];
this.resProfe = (t[1] as any);
if (this.resPuco) {
this.usuarios = <any>this.resPuco;
}
if (this.resProfe) {
if (this.resPuco) {
this.usuarios = this.resPuco.concat(this.resProfe);
} else {
this.usuarios = <any>this.resProfe;
}
}
});
                } else { // when a DNI is searched without entering a period
this.loading = false;
}
}, 400);
} else {
if (this.searchTerm) {
this.errorSearchTerm = true;
// this.searchTerm = this.searchTerm.substr(0, this.searchTerm.length - 1);
}
}
}
    // Error report / suggestions button
sugerencias() {
this.sugerenciasService.post();
}
checkLog() {
return this.auth.loggedIn();
}
imprimirConstatacion(usuario: any) {
let dto = {
dni: usuario.dni,
nombre: usuario.nombre,
codigoFinanciador: usuario.codigoFinanciador,
financiador: usuario.financiador
};
this.documentosService.descargarConstanciaPuco(dto).subscribe((data: any) => {
if (data) {
data.nombre = dto.nombre;
                // Generate the download as a PDF
this.descargarConstancia(data, { type: 'application/pdf' });
} else {
                // Fall back to normal printing from the browser
window.print();
}
});
}
private descargarConstancia(data: any, headers: any): void {
let blob = new Blob([data], headers);
saveAs(blob, this.slug.slugify(data.nombre + ' ' + moment().format('DD-MM-YYYY-hmmss')) + '.pdf');
}
} | let periodoPuco = this.verificarPeriodo(this.periodoSelect.version, this.ultimaActualizacionPuco);
let periodoProfe = this.verificarPeriodo(this.periodoSelect.version, this.ultimaActualizacionProfe);
observableForkJoin([ |
subSchema.go | // Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at | // http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// author xeipuuv
// author-github https://github.com/xeipuuv
// author-mail [email protected]
//
// repository-name gojsonschema
// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
//
// description Defines the structure of a sub-subSchema.
// A sub-subSchema can contain other sub-schemas.
//
// created 27-02-2013
package gojsonschema
import (
"errors"
"math/big"
"regexp"
"strings"
"github.com/Snap-AV/gojsonschema/gojsonreference"
)
const (
KEY_SCHEMA = "$schema"
KEY_ID = "id"
KEY_ID_NEW = "$id"
KEY_REF = "$ref"
KEY_TITLE = "title"
KEY_DESCRIPTION = "description"
KEY_TYPE = "type"
KEY_ITEMS = "items"
KEY_ADDITIONAL_ITEMS = "additionalItems"
KEY_PROPERTIES = "properties"
KEY_PATTERN_PROPERTIES = "patternProperties"
KEY_ADDITIONAL_PROPERTIES = "additionalProperties"
KEY_PROPERTY_NAMES = "propertyNames"
KEY_DEFINITIONS = "definitions"
KEY_MULTIPLE_OF = "multipleOf"
KEY_MINIMUM = "minimum"
KEY_MAXIMUM = "maximum"
KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum"
KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum"
KEY_MIN_LENGTH = "minLength"
KEY_MAX_LENGTH = "maxLength"
KEY_PATTERN = "pattern"
KEY_FORMAT = "format"
KEY_MIN_PROPERTIES = "minProperties"
KEY_MAX_PROPERTIES = "maxProperties"
KEY_DEPENDENCIES = "dependencies"
KEY_REQUIRED = "required"
KEY_MIN_ITEMS = "minItems"
KEY_MAX_ITEMS = "maxItems"
KEY_UNIQUE_ITEMS = "uniqueItems"
KEY_CONTAINS = "contains"
KEY_CONST = "const"
KEY_ENUM = "enum"
KEY_ONE_OF = "oneOf"
KEY_ANY_OF = "anyOf"
KEY_ALL_OF = "allOf"
KEY_NOT = "not"
KEY_IF = "if"
KEY_THEN = "then"
KEY_ELSE = "else"
)
type subSchema struct {
draft *Draft
// basic subSchema meta properties
id *gojsonreference.JsonReference
title *string
description *string
property string
// Types associated with the subSchema
types jsonSchemaType
// Reference url
ref *gojsonreference.JsonReference
// Schema referenced
refSchema *subSchema
// hierarchy
parent *subSchema
itemsChildren []*subSchema
itemsChildrenIsSingleSchema bool
propertiesChildren []*subSchema
// validation : number / integer
multipleOf *big.Rat
maximum *big.Rat
exclusiveMaximum *big.Rat
minimum *big.Rat
exclusiveMinimum *big.Rat
// validation : string
minLength *int
maxLength *int
pattern *regexp.Regexp
format string
// validation : object
minProperties *int
maxProperties *int
required []string
dependencies map[string]interface{}
additionalProperties interface{}
patternProperties map[string]*subSchema
propertyNames *subSchema
// validation : array
minItems *int
maxItems *int
uniqueItems bool
contains *subSchema
additionalItems interface{}
// validation : all
_const *string //const is a golang keyword
enum []string
// validation : subSchema
oneOf []*subSchema
anyOf []*subSchema
allOf []*subSchema
not *subSchema
_if *subSchema // if/else are golang keywords
_then *subSchema
_else *subSchema
}
func (s *subSchema) AddConst(i interface{}) error {
is, err := marshalWithoutNumber(i)
if err != nil {
return err
}
s._const = is
return nil
}
func (s *subSchema) AddEnum(i interface{}) error {
is, err := marshalWithoutNumber(i)
if err != nil {
return err
}
if isStringInSlice(s.enum, *is) {
return errors.New(formatErrorDescription(
Locale.KeyItemsMustBeUnique(),
ErrorDetails{"key": KEY_ENUM},
))
}
s.enum = append(s.enum, *is)
return nil
}
func (s *subSchema) ContainsEnum(i interface{}) (bool, error) {
is, err := marshalWithoutNumber(i)
if err != nil {
return false, err
}
return isStringInSlice(s.enum, *is), nil
}
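// Editor's sketch (not part of the original file): AddEnum stores each value
// in its marshalled string form, so a duplicate entry is rejected when the
// schema is compiled, e.g.
//   s := &subSchema{}
//   _ = s.AddEnum(1)    // ok
//   err := s.AddEnum(1) // KeyItemsMustBeUnique error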
func (s *subSchema) AddOneOf(subSchema *subSchema) {
s.oneOf = append(s.oneOf, subSchema)
}
func (s *subSchema) AddAllOf(subSchema *subSchema) {
s.allOf = append(s.allOf, subSchema)
}
func (s *subSchema) AddAnyOf(subSchema *subSchema) {
s.anyOf = append(s.anyOf, subSchema)
}
func (s *subSchema) SetNot(subSchema *subSchema) {
s.not = subSchema
}
func (s *subSchema) SetIf(subSchema *subSchema) {
s._if = subSchema
}
func (s *subSchema) SetThen(subSchema *subSchema) {
s._then = subSchema
}
func (s *subSchema) SetElse(subSchema *subSchema) {
s._else = subSchema
}
func (s *subSchema) AddRequired(value string) error {
if isStringInSlice(s.required, value) {
return errors.New(formatErrorDescription(
Locale.KeyItemsMustBeUnique(),
ErrorDetails{"key": KEY_REQUIRED},
))
}
s.required = append(s.required, value)
return nil
}
func (s *subSchema) AddItemsChild(child *subSchema) {
s.itemsChildren = append(s.itemsChildren, child)
}
func (s *subSchema) AddPropertiesChild(child *subSchema) {
s.propertiesChildren = append(s.propertiesChildren, child)
}
func (s *subSchema) PatternPropertiesString() string {
if s.patternProperties == nil || len(s.patternProperties) == 0 {
return STRING_UNDEFINED // should never happen
}
patternPropertiesKeySlice := []string{}
for pk := range s.patternProperties {
patternPropertiesKeySlice = append(patternPropertiesKeySlice, `"`+pk+`"`)
}
if len(patternPropertiesKeySlice) == 1 {
return patternPropertiesKeySlice[0]
}
return "[" + strings.Join(patternPropertiesKeySlice, ",") + "]"
} | // |
__init__.py | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class graceful_restart(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/global/graceful-restart. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration and operational state parameters for OSPFv2
graceful restart
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "graceful-restart"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"global",
"graceful-restart",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/config (container)
YANG Description: Configuration parameters relating to OSPFv2 graceful
restart
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to OSPFv2 graceful
restart
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/state (container)
YANG Description: Operational state parameters relating to OSPFv2 graceful
restart
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state parameters relating to OSPFv2 graceful
restart
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
class graceful_restart(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/global/graceful-restart. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration and operational state parameters for OSPFv2
graceful restart
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "graceful-restart"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"global",
"graceful-restart",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/config (container)
YANG Description: Configuration parameters relating to OSPFv2 graceful
restart
"""
return self.__config
def | (self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to OSPFv2 graceful
restart
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/state (container)
YANG Description: Operational state parameters relating to OSPFv2 graceful
restart
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state parameters relating to OSPFv2 graceful
restart
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
| _set_config |
clear.d.ts | import Painter from './painter';
interface IClearPainterParams {
color?: string;
}
export default class ClearPainter extends Painter {
private readonly params;
get red(): number;
set red(v: number);
get green(): number;
set green(v: number);
get blue(): number;
set blue(v: number);
get alpha(): number; | private _green;
private _blue;
private _alpha;
constructor(params: IClearPainterParams);
render(): void;
protected initialize(): void;
protected destroy(): void;
}
export {}; | set alpha(v: number);
get color(): string;
set color(cssColor: string);
private _red; |
transformers_test.go | package util | "errors"
"testing"
adminErrors "github.com/lyft/flyteadmin/pkg/errors"
mockScope "github.com/lyft/flytestdlib/promutils"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var testRequestMetrics = NewRequestMetrics(mockScope.NewTestScope(), "foo")
func TestTransformError_FlyteAdminError(t *testing.T) {
invalidArgError := adminErrors.NewFlyteAdminError(codes.InvalidArgument, "invalid arg")
transformedError := TransformAndRecordError(invalidArgError, &testRequestMetrics)
transformerStatus, ok := status.FromError(transformedError)
assert.True(t, ok)
assert.Equal(t, codes.InvalidArgument, transformerStatus.Code())
}
func TestTransformError_FlyteAdminErrorWithDetails(t *testing.T) {
terminalStateError := adminErrors.NewAlreadyInTerminalStateError(context.Background(), "terminal state", "curPhase")
transformedError := TransformAndRecordError(terminalStateError, &testRequestMetrics)
transformerStatus, ok := status.FromError(transformedError)
assert.True(t, ok)
assert.Equal(t, codes.FailedPrecondition, transformerStatus.Code())
assert.Equal(t, 1, len(transformerStatus.Details()))
}
func TestTransformError_BasicError(t *testing.T) {
err := errors.New("some error")
transformedError := TransformAndRecordError(err, &testRequestMetrics)
transformerStatus, ok := status.FromError(transformedError)
assert.True(t, ok)
assert.Equal(t, codes.Internal, transformerStatus.Code())
} |
import (
"context" |
bot.py | import asyncio
import logging
import socket
import warnings
from collections import defaultdict
from typing import Dict, Optional
import aiohttp
import discord
from async_rediscache import RedisSession
from discord.ext import commands
from sentry_sdk import push_scope
from bot import api, constants
from bot.async_stats import AsyncStatsClient
log = logging.getLogger('bot')
LOCALHOST = "127.0.0.1"
class Bot(commands.Bot):
"""A subclass of `discord.ext.commands.Bot` with an aiohttp session and an API client."""
def __init__(self, *args, redis_session: RedisSession, **kwargs):
if "connector" in kwargs:
warnings.warn(
"If login() is called (or the bot is started), the connector will be overwritten "
"with an internal one"
)
super().__init__(*args, **kwargs)
self.http_session: Optional[aiohttp.ClientSession] = None
self.redis_session = redis_session
self.api_client = api.APIClient(loop=self.loop)
self.filter_list_cache = defaultdict(dict)
self._connector = None
self._resolver = None
self._statsd_timerhandle: asyncio.TimerHandle = None
self._guild_available = asyncio.Event()
statsd_url = constants.Stats.statsd_host
if constants.DEBUG_MODE:
# Since statsd is UDP, there are no errors for sending to a down port.
# For this reason, setting the statsd host to 127.0.0.1 for development
# will effectively disable stats.
statsd_url = LOCALHOST
self.stats = AsyncStatsClient(self.loop, LOCALHOST)
self._connect_statsd(statsd_url)
def _connect_statsd(self, statsd_url: str, retry_after: int = 2, attempt: int = 1) -> None:
"""Callback used to retry a connection to statsd if it should fail."""
if attempt >= 8:
log.error("Reached 8 attempts trying to reconnect AsyncStatsClient. Aborting")
return
try:
self.stats = AsyncStatsClient(self.loop, statsd_url, 8125, prefix="bot")
except socket.gaierror:
log.warning(f"Statsd client failed to connect (Attempt(s): {attempt})")
# Use a fallback strategy for retrying, up to 8 times.
self._statsd_timerhandle = self.loop.call_later(
retry_after,
self._connect_statsd,
statsd_url,
retry_after * 2,
attempt + 1
)
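# Editor's note (sketch, not in the original): retry_after doubles on each
# call, so reconnect attempts are spaced 2s, 4s, 8s, ... apart; attempt 8
# aborts, giving up after roughly four minutes in total.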
async def cache_filter_list_data(self) -> None:
"""Cache all the data in the FilterList on the site."""
full_cache = await self.api_client.get('bot/filter-lists')
for item in full_cache:
self.insert_item_into_filter_list_cache(item)
def _recreate(self) -> None:
"""Re-create the connector, aiohttp session, the APIClient and the Redis session."""
# Use asyncio for DNS resolution instead of threads so threads aren't spammed.
# Doesn't seem to have any state with regards to being closed, so no need to worry?
self._resolver = aiohttp.AsyncResolver()
# Its __del__ does send a warning but it doesn't always show up for some reason.
if self._connector and not self._connector._closed:
log.warning(
"The previous connector was not closed; it will remain open and be overwritten"
)
if self.redis_session.closed:
# If the RedisSession was somehow closed, we try to reconnect it
# here. Normally, this shouldn't happen.
self.loop.create_task(self.redis_session.connect())
# Use AF_INET as its socket family to prevent HTTPS related problems both locally
# and in production.
self._connector = aiohttp.TCPConnector(
resolver=self._resolver,
family=socket.AF_INET,
)
# Client.login() will call HTTPClient.static_login() which will create a session using
# this connector attribute.
self.http.connector = self._connector
# Its __del__ does send a warning but it doesn't always show up for some reason.
if self.http_session and not self.http_session.closed:
log.warning(
"The previous session was not closed; it will remain open and be overwritten"
)
self.http_session = aiohttp.ClientSession(connector=self._connector)
self.api_client.recreate(force=True, connector=self._connector)
# Build the FilterList cache
self.loop.create_task(self.cache_filter_list_data())
@classmethod
def create(cls) -> "Bot":
"""Create and return an instance of a Bot."""
loop = asyncio.get_event_loop()
allowed_roles = [discord.Object(id_) for id_ in constants.MODERATION_ROLES]
intents = discord.Intents().all()
intents.presences = False
intents.dm_typing = False
intents.dm_reactions = False
intents.invites = False
intents.webhooks = False
intents.integrations = False
return cls(
redis_session=_create_redis_session(loop),
loop=loop,
command_prefix=commands.when_mentioned_or(constants.Bot.prefix),
activity=discord.Game(name=f"Commands: {constants.Bot.prefix}help"),
case_insensitive=True,
max_messages=10_000,
allowed_mentions=discord.AllowedMentions(everyone=False, roles=allowed_roles),
intents=intents,
)
def load_extensions(self) -> None:
"""Load all enabled extensions."""
# Must be done here to avoid a circular import.
from bot.utils.extensions import EXTENSIONS
| extensions = set(EXTENSIONS) # Create a mutable copy.
if not constants.HelpChannels.enable:
extensions.remove("bot.exts.help_channels")
for extension in extensions:
self.load_extension(extension)
def add_cog(self, cog: commands.Cog) -> None:
"""Adds a "cog" to the bot and logs the operation."""
super().add_cog(cog)
log.info(f"Cog loaded: {cog.qualified_name}")
def add_command(self, command: commands.Command) -> None:
"""Add `command` as normal and then add its root aliases to the bot."""
super().add_command(command)
self._add_root_aliases(command)
def remove_command(self, name: str) -> Optional[commands.Command]:
"""
Remove a command/alias as normal and then remove its root aliases from the bot.
Individual root aliases cannot be removed by this function.
To remove them, either remove the entire command or manually edit `bot.all_commands`.
"""
command = super().remove_command(name)
if command is None:
# Even if it's a root alias, there's no way to get the Bot instance to remove the alias.
return
self._remove_root_aliases(command)
return command
def clear(self) -> None:
"""
Clears the internal state of the bot and recreates the connector and sessions.
Will cause a DeprecationWarning if called outside a coroutine.
"""
# Because discord.py recreates the HTTPClient session, may as well follow suit and recreate
# our own stuff here too.
self._recreate()
super().clear()
async def close(self) -> None:
"""Close the Discord connection and the aiohttp session, connector, statsd client, and resolver."""
await super().close()
await self.api_client.close()
if self.http_session:
await self.http_session.close()
if self._connector:
await self._connector.close()
if self._resolver:
await self._resolver.close()
if self.stats._transport:
self.stats._transport.close()
if self.redis_session:
await self.redis_session.close()
if self._statsd_timerhandle:
self._statsd_timerhandle.cancel()
def insert_item_into_filter_list_cache(self, item: Dict[str, str]) -> None:
"""Add an item to the bots filter_list_cache."""
type_ = item["type"]
allowed = item["allowed"]
content = item["content"]
self.filter_list_cache[f"{type_}.{allowed}"][content] = {
"id": item["id"],
"comment": item["comment"],
"created_at": item["created_at"],
"updated_at": item["updated_at"],
}
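# Cache shape sketch (hypothetical entry, not from the source):
#   filter_list_cache["GUILD_INVITE.False"]["discord.gg/xyz"] ->
#       {"id": ..., "comment": ..., "created_at": ..., "updated_at": ...}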
async def login(self, *args, **kwargs) -> None:
"""Re-create the connector and set up sessions before logging into Discord."""
self._recreate()
await self.stats.create_socket()
await super().login(*args, **kwargs)
async def on_guild_available(self, guild: discord.Guild) -> None:
"""
Set the internal guild available event when constants.Guild.id becomes available.
If the cache appears to still be empty (no members, no channels, or no roles), the event
will not be set.
"""
if guild.id != constants.Guild.id:
return
if not guild.roles or not guild.members or not guild.channels:
msg = "Guild available event was dispatched but the cache appears to still be empty!"
log.warning(msg)
try:
webhook = await self.fetch_webhook(constants.Webhooks.dev_log)
except discord.HTTPException as e:
log.error(f"Failed to fetch webhook to send empty cache warning: status {e.status}")
else:
await webhook.send(f"<@&{constants.Roles.admin}> {msg}")
return
self._guild_available.set()
async def on_guild_unavailable(self, guild: discord.Guild) -> None:
"""Clear the internal guild available event when constants.Guild.id becomes unavailable."""
if guild.id != constants.Guild.id:
return
self._guild_available.clear()
async def wait_until_guild_available(self) -> None:
"""
Wait until the constants.Guild.id guild is available (and the cache is ready).
The on_ready event is inadequate because it only waits 2 seconds for a GUILD_CREATE
gateway event before giving up and thus not populating the cache for unavailable guilds.
"""
await self._guild_available.wait()
async def on_error(self, event: str, *args, **kwargs) -> None:
"""Log errors raised in event listeners rather than printing them to stderr."""
self.stats.incr(f"errors.event.{event}")
with push_scope() as scope:
scope.set_tag("event", event)
scope.set_extra("args", args)
scope.set_extra("kwargs", kwargs)
log.exception(f"Unhandled exception in {event}.")
def _add_root_aliases(self, command: commands.Command) -> None:
"""Recursively add root aliases for `command` and any of its subcommands."""
if isinstance(command, commands.Group):
for subcommand in command.commands:
self._add_root_aliases(subcommand)
for alias in getattr(command, "root_aliases", ()):
if alias in self.all_commands:
raise commands.CommandRegistrationError(alias, alias_conflict=True)
self.all_commands[alias] = command
def _remove_root_aliases(self, command: commands.Command) -> None:
"""Recursively remove root aliases for `command` and any of its subcommands."""
if isinstance(command, commands.Group):
for subcommand in command.commands:
self._remove_root_aliases(subcommand)
for alias in getattr(command, "root_aliases", ()):
self.all_commands.pop(alias, None)
def _create_redis_session(loop: asyncio.AbstractEventLoop) -> RedisSession:
"""
Create and connect to a redis session.
Ensure the connection is established before returning to prevent race conditions.
`loop` is the event loop on which to connect. The Bot should use this same event loop.
"""
redis_session = RedisSession(
address=(constants.Redis.host, constants.Redis.port),
password=constants.Redis.password,
minsize=1,
maxsize=20,
use_fakeredis=constants.Redis.use_fakeredis,
global_namespace="bot",
)
loop.run_until_complete(redis_session.connect())
return redis_session | |
mux.go | package lmhttp
import (
"fmt"
"net/http"
"sort"
) | *http.ServeMux
}
func NewClearMux() *ClearMux {
m := &ClearMux{ServeMux: http.NewServeMux()}
m.ServeMux.Handle("/", http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) {
for _, s := range m.endpoints {
fmt.Fprintf(rw, " * %s\n", s)
}
}))
return m
}
func (m *ClearMux) Handle(pattern string, handler http.Handler) {
m.endpoints = append(m.endpoints, pattern)
sort.Strings(m.endpoints) // could use a heap but meh
m.ServeMux.Handle(pattern, handler)
} |
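// Usage sketch (handler names are hypothetical, not part of the original file):
//   mux := NewClearMux()
//   mux.Handle("/healthz", healthHandler)
//   mux.Handle("/metrics", metricsHandler)
//   // a GET to "/" now lists " * /healthz" and " * /metrics"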
type ClearMux struct {
endpoints []string |
lib.rs | #![forbid(unsafe_code)]
use core::ops::{Deref, DerefMut};
pub use insta::{assert_debug_snapshot, assert_snapshot, glob, with_settings};
use parking_lot::{RwLock, RwLockReadGuard};
use std::{fs, rc::Rc};
use toyc_errors::emitter::Emitter;
use toyc_errors::{Diagnostic, Handler};
use toyc_session::Session;
pub fn snapshots(f: fn(&str) -> ()) {
glob!("toys/*.toy", |path| {
let text = fs::read_to_string(&path)
.expect("failed reading file")
// strip '\r' so snapshots match on windows
.replace('\r', "");
with_settings!({
snapshot_suffix => path.file_name().unwrap().to_str().unwrap(),
}, {
f(&text);
}); |
pub struct TestSession {
inner: Session,
diagnostics: Rc<RwLock<Vec<Diagnostic>>>,
}
impl TestSession {
pub fn new<T: ToString>(job_name: T, src: String) -> TestSession {
let diagnostics = Rc::new(RwLock::default());
TestSession {
inner: Session::from_str(
job_name.to_string(),
src,
Handler::new(Box::new(TestEmitter(diagnostics.clone()))),
),
diagnostics,
}
}
pub fn diagnostics(
this: &TestSession,
) -> impl Deref<Target = [Diagnostic]> + '_ {
RwLockReadGuard::map(this.diagnostics.read(), Vec::as_slice)
}
}
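// Usage sketch (job name and source text are hypothetical, not in the original):
//   let sess = TestSession::new("demo", "toy source".to_string());
//   // diagnostics emitted through the session's Handler are captured by
//   // TestEmitter and are visible via:
//   assert!(TestSession::diagnostics(&sess).is_empty());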
impl Deref for TestSession {
type Target = Session;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for TestSession {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
#[derive(Debug)]
struct TestEmitter(Rc<RwLock<Vec<Diagnostic>>>);
impl Emitter for TestEmitter {
fn emit_diagnostic(&mut self, diagnostic: &Diagnostic) {
self.0.write().push(diagnostic.clone());
}
} | });
} |
hashlib.py | # $Id: hashlib.py 66094 2008-08-31 16:35:01Z gregory.p.smith $
#
# Copyright (C) 2005-2007 Gregory P. Smith ([email protected])
# Licensed to PSF under a Contributor Agreement.
#
__doc__ = """hashlib module - A common interface to many hash functions.
Named constructor functions are also available; these are faster
than using new(name):
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
More algorithms may be available on your platform but the above are
guaranteed to exist.
NOTE: If you want the adler32 or crc32 hash functions they are available in
the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
Hash objects have these methods:
- update(arg): Update the hash object with the bytes in arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the bytes passed to the update() method
so far.
- hexdigest(): Like digest() except the digest is returned as a unicode
object of double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.md5()
>>> m.update(b"Nobody inspects")
>>> m.update(b" the spammish repetition")
>>> m.digest()
b'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9'
More condensed:
>>> hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
def __get_builtin_constructor(name):
if name in ('SHA1', 'sha1'):
import _sha1
return _sha1.sha1
elif name in ('MD5', 'md5'):
import _md5
return _md5.md5
elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
import _sha256
bs = name[3:]
if bs == '256':
return _sha256.sha256
elif bs == '224':
return _sha256.sha224
elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
import _sha512
bs = name[3:]
if bs == '512':
return _sha512.sha512
elif bs == '384':
return _sha512.sha384
raise ValueError("unsupported hash type")
def __py_new(name, data=b''):
"""new(name, data=b'') - Return a new hashing object using the named algorithm;
optionally initialized with data (which must be bytes).
"""
return __get_builtin_constructor(name)(data)
def __hash_new(name, data=b''):
"""new(name, data=b'') - Return a new hashing object using the named algorithm;
optionally initialized with data (which must be bytes).
"""
try:
return _hashlib.new(name, data)
except ValueError:
# If the _hashlib module (OpenSSL) doesn't support the named
# hash, try using our builtin implementations.
# This allows for SHA224/256 and SHA384/512 support even though
# the OpenSSL library prior to 0.9.8 doesn't provide them.
return __get_builtin_constructor(name)(data)
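# Editor's note (illustrative, not part of the original module): both paths
# look identical to callers, e.g. new('sha256', b'abc').hexdigest(); the
# OpenSSL-backed constructor is used when _hashlib imports successfully,
# otherwise the builtin _sha256 module is used.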
try:
import _hashlib
# use the wrapper of the C implementation
new = __hash_new
for opensslFuncName in filter(lambda n: n.startswith('openssl_'), dir(_hashlib)):
funcName = opensslFuncName[len('openssl_'):]
try:
# try them all, some may not work due to the OpenSSL
# version not supporting that algorithm.
f = getattr(_hashlib, opensslFuncName)
f()
# Use the C function directly (very fast)
exec(funcName + ' = f')
except ValueError:
try:
# Use the builtin implementation directly (fast)
exec(funcName + ' = __get_builtin_constructor(funcName)')
except ValueError:
# this one has no builtin implementation, don't define it
pass
# clean up our locals
del f
del opensslFuncName
del funcName
except ImportError:
# We don't have the _hashlib OpenSSL module?
# use the built in legacy interfaces via a wrapper function
new = __py_new
# lookup the C function to use directly for the named constructors
md5 = __get_builtin_constructor('md5')
sha1 = __get_builtin_constructor('sha1')
sha224 = __get_builtin_constructor('sha224')
sha256 = __get_builtin_constructor('sha256')
sha384 = __get_builtin_constructor('sha384')
sha512 = __get_builtin_constructor('sha512') | new(name, data=b'') - returns a new hash object implementing the
given hash function; initializing the hash
using the given binary data.
|
init.go | /**
@author:panliang
@date:2021/6/18
@note
**/
package core
import (
"time"
"im_app/pkg/config"
"im_app/pkg/model"
"im_app/pkg/mq"
"im_app/pkg/pool"
"im_app/pkg/redis"
)
func | () {
// Start the MySQL connection pool
db := model.ConnectDB()
sqlDB, _ := db.DB()
sqlDB.SetMaxOpenConns(config.GetInt("database.mysql.max_open_connections"))
// Set the maximum number of idle connections
sqlDB.SetMaxIdleConns(config.GetInt("database.mysql.max_idle_connections"))
// Set the maximum lifetime of each connection
sqlDB.SetConnMaxLifetime(time.Duration(config.GetInt("database.mysql.max_life_seconds")) * time.Second)
// Start the Redis connection pool
redis.InitClient()
// Start the goroutine pool
pool.ConnectPool()
// Start the MQ connection
mq.ConnectMQ()
}
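// Usage sketch (assumed entry point, not part of this file):
//   func main() {
//       core.SetupPool() // wire up MySQL, Redis, the goroutine pool and MQ once at startup
//   }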
| SetupPool |
test_tasks.py | import pytest
from celery.result import EagerResult
from university_dost.users.tasks import get_users_count
from university_dost.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
def | (settings):
"""A basic test to execute the get_users_count Celery task."""
UserFactory.create_batch(3)
settings.CELERY_TASK_ALWAYS_EAGER = True
task_result = get_users_count.delay()
assert isinstance(task_result, EagerResult)
assert task_result.result == 3
| test_user_count |
setup.py | from setuptools import find_packages, setup
def req_file(filename):
with open(filename) as f:
content = f.readlines()
return [x.strip() for x in content]
install_requires = req_file("requirements.txt")
setup(
name="bsmetadata",
python_requires=">=3.7.11, <3.10",
version="0.1.0",
url="https://github.com/bigscience-workshop/metadata.git",
author="Multiple Authors",
author_email="xxx",
description="Codebase for including metadata (e.g., URLs, timestamps, HTML tags) during language model pretraining.", | packages=find_packages(),
install_requires=install_requires,
) |
|
abstract.mapper.ts | export interface AbstractMapper<E, D> {
toDto(entity: E): D;
toEntity(dto: D): E; | } | |
mod.rs | //! The sendmail transport sends the email using the local `sendmail` command.
//!
//! ## Sync example
//!
//! ```rust
//! # use std::error::Error;
//! #
//! # #[cfg(all(feature = "sendmail-transport", feature = "builder"))]
//! # fn main() -> Result<(), Box<dyn Error>> {
//! use lettre::{Message, SendmailTransport, Transport};
//!
//! let email = Message::builder()
//! .from("NoBody <[email protected]>".parse()?)
//! .reply_to("Yuin <[email protected]>".parse()?)
//! .to("Hei <[email protected]>".parse()?)
//! .subject("Happy new year")
//! .body(String::from("Be happy!"))?;
//!
//! let sender = SendmailTransport::new();
//! let result = sender.send(&email);
//! assert!(result.is_ok());
//! # Ok(())
//! # }
//!
//! # #[cfg(not(all(feature = "sendmail-transport", feature = "builder")))]
//! # fn main() {}
//! ```
//!
//! ## Async tokio 1.x example
//!
//! ```rust,no_run
//! # use std::error::Error;
//! #
//! # #[cfg(all(feature = "tokio1", feature = "sendmail-transport", feature = "builder"))]
//! # async fn run() -> Result<(), Box<dyn Error>> {
//! use lettre::{
//! AsyncSendmailTransport, AsyncTransport, Message, SendmailTransport, Tokio1Executor,
//! };
//! | //! .from("NoBody <[email protected]>".parse()?)
//! .reply_to("Yuin <[email protected]>".parse()?)
//! .to("Hei <[email protected]>".parse()?)
//! .subject("Happy new year")
//! .body(String::from("Be happy!"))?;
//!
//! let sender = AsyncSendmailTransport::<Tokio1Executor>::new();
//! let result = sender.send(email).await;
//! assert!(result.is_ok());
//! # Ok(())
//! # }
//! ```
//!
//! ## Async async-std 1.x example
//!
//!```rust,no_run
//! # use std::error::Error;
//! #
//! # #[cfg(all(feature = "async-std1", feature = "sendmail-transport", feature = "builder"))]
//! # async fn run() -> Result<(), Box<dyn Error>> {
//! use lettre::{Message, AsyncTransport, AsyncStd1Executor, AsyncSendmailTransport};
//!
//! let email = Message::builder()
//! .from("NoBody <[email protected]>".parse()?)
//! .reply_to("Yuin <[email protected]>".parse()?)
//! .to("Hei <[email protected]>".parse()?)
//! .subject("Happy new year")
//! .body(String::from("Be happy!"))?;
//!
//! let sender = AsyncSendmailTransport::<AsyncStd1Executor>::new();
//! let result = sender.send(email).await;
//! assert!(result.is_ok());
//! # Ok(())
//! # }
//! ```
#[cfg(any(feature = "async-std1", feature = "tokio1"))]
use std::marker::PhantomData;
use std::{
ffi::OsString,
io::Write,
process::{Command, Stdio},
};
#[cfg(any(feature = "async-std1", feature = "tokio1"))]
use async_trait::async_trait;
pub use self::error::Error;
#[cfg(feature = "async-std1")]
use crate::AsyncStd1Executor;
#[cfg(feature = "tokio1")]
use crate::Tokio1Executor;
use crate::{address::Envelope, Transport};
#[cfg(any(feature = "async-std1", feature = "tokio1"))]
use crate::{AsyncTransport, Executor};
mod error;
const DEFAULT_SENDMAIL: &str = "sendmail";
/// Sends emails using the `sendmail` command
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(docsrs, doc(cfg(feature = "sendmail-transport")))]
pub struct SendmailTransport {
command: OsString,
}
/// Asynchronously sends emails using the `sendmail` command
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg(any(feature = "async-std1", feature = "tokio1"))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "tokio1", feature = "async-std1"))))]
pub struct AsyncSendmailTransport<E: Executor> {
inner: SendmailTransport,
marker_: PhantomData<E>,
}
impl SendmailTransport {
/// Creates a new transport with the `sendmail` command
///
/// Note: This uses the `sendmail` command in the current `PATH`. To use another command,
/// use [SendmailTransport::new_with_command].
pub fn new() -> SendmailTransport {
SendmailTransport {
command: DEFAULT_SENDMAIL.into(),
}
}
/// Creates a new transport to the given sendmail command
pub fn new_with_command<S: Into<OsString>>(command: S) -> SendmailTransport {
SendmailTransport {
command: command.into(),
}
}
fn command(&self, envelope: &Envelope) -> Command {
let mut c = Command::new(&self.command);
c.arg("-i");
if let Some(from) = envelope.from() {
c.arg("-f").arg(from);
}
c.arg("--")
.args(envelope.to())
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped());
c
}
}
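// Editor's note (illustrative, not in the original): for an envelope with
// sender a@x and recipient b@y, the spawned process is roughly
// `sendmail -i -f a@x -- b@y`, with the message piped in on stdin.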
#[cfg(any(feature = "async-std1", feature = "tokio1"))]
impl<E> AsyncSendmailTransport<E>
where
E: Executor,
{
/// Creates a new transport with the `sendmail` command
///
/// Note: This uses the `sendmail` command in the current `PATH`. To use another command,
/// use [AsyncSendmailTransport::new_with_command].
pub fn new() -> Self {
Self {
inner: SendmailTransport::new(),
marker_: PhantomData,
}
}
/// Creates a new transport to the given sendmail command
pub fn new_with_command<S: Into<OsString>>(command: S) -> Self {
Self {
inner: SendmailTransport::new_with_command(command),
marker_: PhantomData,
}
}
#[cfg(feature = "tokio1")]
fn tokio1_command(&self, envelope: &Envelope) -> tokio1_crate::process::Command {
use tokio1_crate::process::Command;
let mut c = Command::new(&self.inner.command);
c.kill_on_drop(true);
c.arg("-i");
if let Some(from) = envelope.from() {
c.arg("-f").arg(from);
}
c.arg("--")
.args(envelope.to())
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped());
c
}
#[cfg(feature = "async-std1")]
fn async_std_command(&self, envelope: &Envelope) -> async_std::process::Command {
use async_std::process::Command;
let mut c = Command::new(&self.inner.command);
// TODO: figure out why enabling this kills it earlier
// c.kill_on_drop(true);
c.arg("-i");
if let Some(from) = envelope.from() {
c.arg("-f").arg(from);
}
c.arg("--")
.args(envelope.to())
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped());
c
}
}
impl Default for SendmailTransport {
fn default() -> Self {
Self::new()
}
}
#[cfg(any(feature = "async-std1", feature = "tokio1"))]
impl<E> Default for AsyncSendmailTransport<E>
where
E: Executor,
{
fn default() -> Self {
Self::new()
}
}
impl Transport for SendmailTransport {
type Ok = ();
type Error = Error;
fn send_raw(&self, envelope: &Envelope, email: &[u8]) -> Result<Self::Ok, Self::Error> {
// Spawn the sendmail command
let mut process = self.command(envelope).spawn().map_err(error::client)?;
process
.stdin
.as_mut()
.unwrap()
.write_all(email)
.map_err(error::client)?;
let output = process.wait_with_output().map_err(error::client)?;
if output.status.success() {
Ok(())
} else {
let stderr = String::from_utf8(output.stderr).map_err(error::response)?;
Err(error::client(stderr))
}
}
}
#[cfg(feature = "async-std1")]
#[async_trait]
impl AsyncTransport for AsyncSendmailTransport<AsyncStd1Executor> {
type Ok = ();
type Error = Error;
async fn send_raw(&self, envelope: &Envelope, email: &[u8]) -> Result<Self::Ok, Self::Error> {
use async_std::io::prelude::WriteExt;
let mut command = self.async_std_command(envelope);
// Spawn the sendmail command
let mut process = command.spawn().map_err(error::client)?;
process
.stdin
.as_mut()
.unwrap()
.write_all(email)
.await
.map_err(error::client)?;
let output = process.output().await.map_err(error::client)?;
if output.status.success() {
Ok(())
} else {
let stderr = String::from_utf8(output.stderr).map_err(error::response)?;
Err(error::client(stderr))
}
}
}
#[cfg(feature = "tokio1")]
#[async_trait]
impl AsyncTransport for AsyncSendmailTransport<Tokio1Executor> {
type Ok = ();
type Error = Error;
async fn send_raw(&self, envelope: &Envelope, email: &[u8]) -> Result<Self::Ok, Self::Error> {
use tokio1_crate::io::AsyncWriteExt;
let mut command = self.tokio1_command(envelope);
// Spawn the sendmail command
let mut process = command.spawn().map_err(error::client)?;
process
.stdin
.as_mut()
.unwrap()
.write_all(email)
.await
.map_err(error::client)?;
let output = process.wait_with_output().await.map_err(error::client)?;
if output.status.success() {
Ok(())
} else {
let stderr = String::from_utf8(output.stderr).map_err(error::response)?;
Err(error::client(stderr))
}
}
} | //! let email = Message::builder() |
aws_utils.py | #!/opt/workflow/bin/python2.7
#
# Copyright 2013-2016 Edico Genome Corporation. All rights reserved.
#
# This file contains confidential and proprietary information of the Edico Genome
# Corporation and is protected under the U.S. and international copyright and other
# intellectual property laws.
#
# $Id$
# $Author$
# $Change$
# $DateTime$
#
# AWS service utilities for schedulers to use: dragen_jobd, dragen_job_execute and node_update
#
from __future__ import absolute_import
from __future__ import division
from builtins import map
from builtins import str
from past.utils import old_div
import os
from glob import glob
from multiprocessing import Pool
import boto3
from boto3.s3.transfer import S3Transfer
from boto3.s3.transfer import TransferConfig
from botocore import UNSIGNED
from botocore import exceptions
from botocore.config import Config
from . import scheduler_utils as utils
# CONSTANTS ....
DOWNLOAD_THREAD_COUNT = 4
########################################################################################
# s3_download_file - Download a file described by the given "req_info" dict. Before actually
# downloading the object, check whether it already exists locally
# req_info = {"bucket": <str>, "obj_key":<str>, "tgt_path":<str>, "region":<str>}
# Return: Downloaded file size
def s3_download_file(req_info, nosign=False):
# If region is missing fill in default
if not req_info['region']:
req_info['region'] = 'us-east-1'
# Configure the download
if nosign:
client = boto3.client('s3', req_info['region'], config=Config(signature_version=UNSIGNED))
else:
client = boto3.client('s3', req_info['region'])
# Make sure the target directory exists
tgt_dir = req_info['tgt_path'].rsplit('/', 1)[0] # get the directory part
utils.check_create_dir(tgt_dir)
# Check if the object already exists locally and get the size on disk
if os.path.exists(req_info['tgt_path']):
loc_size = os.path.getsize(req_info['tgt_path'])
# Check if the S3 object length matches the local file size
obj_info = s3_get_object_info(req_info['bucket'], req_info['obj_key'])
if obj_info['ContentLength'] == loc_size:
return loc_size
# Perform the download
transfer = S3Transfer(client)
transfer.download_file(req_info['bucket'], req_info['obj_key'], req_info['tgt_path'])
# Once download is complete, get the file info to check the size
return os.path.getsize(req_info['tgt_path'])
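# Example req_info (sketch; values are hypothetical):
#   {'bucket': 'my-bucket', 'obj_key': 'references/hg19/hash_table.bin',
#    'tgt_path': '/staging/references/hg19/hash_table.bin', 'region': 'us-east-1'}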
########################################################################################
# s3_download_dir - Download all the objects within the given "directory".
# Inputs:
# bucket - source bucket
# src_dir - the prefix for the object key (e.g. 'references/hg19')
# tgt_dir - directory to download to, ending with '/' (the prefix dir is created if missing)
# Return: Total number of bytes downloaded
def s3_download_dir(bucket, src_dir, tgt_dir, region='us-east-1', nosign=False):
# Get the list of objects specified within the "dir"
if nosign:
client = boto3.client('s3', region, config=Config(signature_version=UNSIGNED))
else:
client = boto3.client('s3', region)
response = client.list_objects(Bucket=bucket, Prefix=src_dir)
if not response.get('Contents'):  # no objects under the prefix (the key may be absent entirely)
return 0
# Filter out any results that are "dirs" by checking for ending '/'
object_list = [x for x in response['Contents'] if not x['Key'].endswith('/')]
# To avoid a race condition for parallel downloads, make sure each has a directory created
# - Create the full dir path of each object and make sure the dir exists
list([utils.check_create_dir(str(tgt_dir.rstrip('/') + '/' + x['Key']).rsplit('/', 1)[0]) for x in object_list])
# Convert the list of objects to a dict we can pass to the download function
download_dict_list = [{
'bucket': bucket,
'obj_key': x['Key'],
'tgt_path': tgt_dir.rstrip('/') + '/' + x['Key'],
'region': region
} for x in object_list]
# Create a thread pools to handle the downloads faster
pool = Pool(DOWNLOAD_THREAD_COUNT)
# Use the multiple thread pools to divvy up the downloads
results = pool.map(s3_download_file, download_dict_list)
# Close the pool and wait for the work to finish
pool.close()
pool.join()
# return the total number of bytes downloaded
return sum(results)
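# Usage sketch (bucket/prefix/paths are hypothetical):
#   nbytes = s3_download_dir('genome-refs', 'references/hg19', '/staging/',
#                            region='us-east-1', nosign=True)
#   # fetches references/hg19/* into /staging/references/hg19/... and returns
#   # the total bytes downloaded across the worker pool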
########################################################################################
# s3_get_object_info - Get information about an S3 object without downloading it
# Inputs:
# bucket - object bucket
# obj_path - The key for the object (aka the 'path')
# Return: The object's metadata (head_object response), or raise a ClientError exception
def s3_get_object_info(bucket, obj_path):
client = boto3.client('s3')
info = client.head_object(
Bucket=bucket,
Key=obj_path
)
return info
########################################################################################
# s3_delete_object - Delete the specified object from an S3 bucket
# Inputs:
# bucket - object bucket
# obj_path - The key for the object (aka the 'path')
# Return: The delete_objects response, or raise a ClientError exception
def s3_delete_object(bucket, obj_path):
client = boto3.client('s3')
resp = client.delete_objects(
Bucket=bucket,
Delete={
'Objects': [
{'Key': obj_path}
]
}
)
return resp
########################################################################################
# s3_upload - Recursively upload source file(s) residing in the given input
# location (abs_src_path) to the bucket and S3 base path (key) provided as input
def s3_upload(abs_src_path, bucket, key):
# Configure the upload
s3_client, transfer_client = _s3_initialize_client(bucket)
if os.path.isdir(abs_src_path):
up_size = _s3_upload_files_recursively(abs_src_path, bucket, key, s3_client, transfer_client)
elif os.path.isfile(abs_src_path):
up_size = _s3_upload_file(abs_src_path, bucket, key, s3_client, transfer_client)
else:
raise ValueError(
'{0} MUST be either a file or a directory'.format(abs_src_path))
return up_size
########################################################################################
# ############################# LOCAL FUNCTIONS ########################################
def _s3_upload_files_recursively(dir_path, bucket, obj_key, s3_client, transfer_client):
filenames = [fpath for dirpath in os.walk(dir_path) for fpath in
glob(os.path.join(dirpath[0], '*'))]
# upload a finite number of files for safety
filenames = filenames[:100]
tot_bytes = 0
# make sure there is a trailing '/' in obj_key to indicate it is 'root' and not actual keyname
if not obj_key.endswith('/'):
obj_key += '/'
for filename in filenames:
if os.path.isfile(filename):
size = _s3_upload_file(filename, bucket, obj_key, s3_client, transfer_client)
if size:
tot_bytes += size
return tot_bytes
def | (file_path, bucket, obj_key, s3_client, transfer_client):
# Check if the key is a 'root' instead of full key name
if obj_key.endswith('/'):
name_only = file_path.rsplit('/', 1)[1] # strip out the leading directory path
obj_key = obj_key + name_only
transfer_client.upload_file(
file_path,
bucket,
obj_key,
extra_args={'ServerSideEncryption': 'AES256'}
)
# Once Upload is complete, get the object info to check the size
response = s3_client.head_object(Bucket=bucket, Key=obj_key)
return response['ContentLength'] if response else None
def _s3_initialize_client(s3_bucket):
client = boto3.client('s3', region_name=_s3_get_bucket_location(s3_bucket))
config = boto3.s3.transfer.TransferConfig(
multipart_chunksize=256 * 1024 * 1024,
max_concurrency=10,
max_io_queue=1000,
io_chunksize=2 * 1024 * 1024)
transfer_client = boto3.s3.transfer.S3Transfer(client, config, boto3.s3.transfer.OSUtils())
return client, transfer_client
def _s3_get_bucket_location(s3_bucket):
client = boto3.client('s3')
resp = client.head_bucket(Bucket=s3_bucket)
location = resp['ResponseMetadata']['HTTPHeaders'].get('x-amz-bucket-region')
return location
| _s3_upload_file |
store.go | /*
* Copyright 2019 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ristretto
import (
"sync"
"time"
)
type storeItem struct {
key uint64
conflict uint64
value interface{}
expiration time.Time
}
// store is the interface fulfilled by all hash map implementations in this
// file. Some hash map implementations are better suited for certain data
// distributions than others, so this allows us to abstract that out for use
// in Ristretto.
//
// Every store is safe for concurrent usage.
type store interface {
// Get returns the value associated with the key parameter.
Get(uint64, uint64) (interface{}, bool)
// Expiration returns the expiration time for this key.
Expiration(uint64) time.Time
// Set adds the key-value pair to the Map or updates the value if it's
// already present. The key-value pair is passed as a pointer to an
// item object.
Set(*item)
// Del deletes the key-value pair from the Map.
Del(uint64, uint64) (uint64, interface{})
// Update attempts to update the key with a new value and returns true if
// successful.
Update(*item) bool
// Cleanup removes items that have an expired TTL.
Cleanup(policy policy, onEvict onEvictFunc)
// Clear clears all contents of the store.
Clear()
}
// newStore returns the default store implementation.
func newStore() store {
return newShardedMap()
}
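// Usage sketch (illustrative only; key/conflict hashes normally come from the
// cache's hasher, the literals below are placeholders):
//   s := newStore()
//   s.Set(&item{key: 1, conflict: 2, value: "v"})
//   if v, ok := s.Get(1, 2); ok {
//       _ = v // "v"
//   }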
const numShards uint64 = 256
type shardedMap struct {
shards []*lockedMap
expiryMap *expirationMap
}
func newShardedMap() *shardedMap {
sm := &shardedMap{
shards: make([]*lockedMap, int(numShards)),
expiryMap: newExpirationMap(),
}
for i := range sm.shards {
sm.shards[i] = newLockedMap(sm.expiryMap)
}
return sm
}
func (sm *shardedMap) Get(key, conflict uint64) (interface{}, bool) {
return sm.shards[key%numShards].get(key, conflict)
}
func (sm *shardedMap) Expiration(key uint64) time.Time {
return sm.shards[key%numShards].Expiration(key)
}
func (sm *shardedMap) Set(i *item) {
if i == nil {
// If item is nil make this Set a no-op.
return
}
sm.shards[i.key%numShards].Set(i)
}
func (sm *shardedMap) Del(key, conflict uint64) (uint64, interface{}) {
return sm.shards[key%numShards].Del(key, conflict)
}
func (sm *shardedMap) Update(newItem *item) bool {
return sm.shards[newItem.key%numShards].Update(newItem)
}
func (sm *shardedMap) Cleanup(policy policy, onEvict onEvictFunc) {
sm.expiryMap.cleanup(sm, policy, onEvict)
}
func (sm *shardedMap) Clear() {
for i := uint64(0); i < numShards; i++ {
sm.shards[i].Clear()
}
}
type lockedMap struct {
sync.RWMutex
data map[uint64]storeItem
em *expirationMap
}
func newLockedMap(em *expirationMap) *lockedMap |
func (m *lockedMap) get(key, conflict uint64) (interface{}, bool) {
m.RLock()
item, ok := m.data[key]
m.RUnlock()
if !ok {
return nil, false
}
if conflict != 0 && (conflict != item.conflict) {
return nil, false
}
// Handle expired items.
if !item.expiration.IsZero() && time.Now().After(item.expiration) {
return nil, false
}
return item.value, true
}
func (m *lockedMap) Expiration(key uint64) time.Time {
m.RLock()
defer m.RUnlock()
return m.data[key].expiration
}
func (m *lockedMap) Set(i *item) {
if i == nil {
// If the item is nil make this Set a no-op.
return
}
m.Lock()
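// Three cases follow: a brand-new key is inserted and its TTL registered;
// an existing key with a mismatched conflict hash is left untouched;
// otherwise the entry is overwritten and its TTL updated in place.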
item, ok := m.data[i.key]
if ok {
m.em.update(i.key, i.conflict, item.expiration, i.expiration)
} else {
m.em.add(i.key, i.conflict, i.expiration)
m.data[i.key] = storeItem{
key: i.key,
conflict: i.conflict,
value: i.value,
expiration: i.expiration,
}
m.Unlock()
return
}
if i.conflict != 0 && (i.conflict != item.conflict) {
m.Unlock()
return
}
m.data[i.key] = storeItem{
key: i.key,
conflict: i.conflict,
value: i.value,
expiration: i.expiration,
}
m.Unlock()
}
func (m *lockedMap) Del(key, conflict uint64) (uint64, interface{}) {
m.Lock()
item, ok := m.data[key]
if !ok {
m.Unlock()
return 0, nil
}
if conflict != 0 && (conflict != item.conflict) {
m.Unlock()
return 0, nil
}
if !item.expiration.IsZero() {
m.em.del(key, item.expiration)
}
delete(m.data, key)
m.Unlock()
return item.conflict, item.value
}
func (m *lockedMap) Update(newItem *item) bool {
m.Lock()
item, ok := m.data[newItem.key]
if !ok {
m.Unlock()
return false
}
if newItem.conflict != 0 && (newItem.conflict != item.conflict) {
m.Unlock()
return false
}
m.em.update(newItem.key, newItem.conflict, item.expiration, newItem.expiration)
m.data[newItem.key] = storeItem{
key: newItem.key,
conflict: newItem.conflict,
value: newItem.value,
expiration: newItem.expiration,
}
m.Unlock()
return true
}
func (m *lockedMap) Clear() {
m.Lock()
m.data = make(map[uint64]storeItem)
m.Unlock()
}
zap_test.go
package zap
import (
"os"
"testing"
"github.com/vine-io/vine/lib/logger"
"go.uber.org/zap/zapcore"
)
func TestName(t *testing.T) {
l, err := New()
if err != nil {
t.Fatal(err)
}
if l.String() != "zap" {
t.Errorf("name is error %s", l.String())
}
t.Logf("test logger name: %s", l.String())
}
func TestLogf(t *testing.T) {
l, err := New()
if err != nil {
t.Fatal(err)
}
logger.DefaultLogger = l
logger.Logf(logger.InfoLevel, "test logf: %s", "name")
}
func TestSetLevel(t *testing.T) {
l, err := New(WithJSONEncode())
if err != nil {
t.Fatal(err)
}
logger.DefaultLogger = l
logger.Init(logger.WithLevel(logger.DebugLevel))
l.Logf(logger.DebugLevel, "test show debug: %s", "debug msg")
logger.Init(logger.WithLevel(logger.InfoLevel))
l.Logf(logger.DebugLevel, "test non-show debug: %s", "debug msg")
}
func TestWithFileWriter(t *testing.T) {
// Filename: "/var/log/myapp/foo.log",
// MaxSize: 500, // megabytes
// MaxBackups: 3,
// MaxAge: 28, //days
// Compress: true, // disabled by default
l, err := New(WithFileWriter(FileWriter{
FileName: "test.log",
MaxSize: 1,
MaxBackups: 5,
MaxAge: 30,
Compress: false,
}), WithWriter(zapcore.AddSync(os.Stdout)))
if err != nil {
t.Fatal(err)
}
defer l.Sync()
logger.DefaultLogger = l
l.Logf(logger.InfoLevel, "test")
l.Logf(logger.ErrorLevel, "test")
}
daemon.go
/*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package osd
import (
"fmt"
"os"
"os/signal"
"path/filepath"
"regexp"
"strings"
"syscall"
"github.com/coreos/pkg/capnslog"
"github.com/pkg/errors"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/daemon/ceph/client"
oposd "github.com/rook/rook/pkg/operator/ceph/cluster/osd"
"github.com/rook/rook/pkg/util/sys"
)
const (
pvcDataTypeDevice = "data"
pvcMetadataTypeDevice = "metadata"
pvcWalTypeDevice = "wal"
lvmCommandToCheck = "lvm"
)
var (
logger = capnslog.NewPackageLogger("github.com/rook/rook", "cephosd")
)
// StartOSD starts an OSD on a device that was provisioned by ceph-volume
func StartOSD(context *clusterd.Context, osdType, osdID, osdUUID, lvPath string, pvcBackedOSD, lvBackedPV bool, cephArgs []string) error {
// ensure the config mount point exists
configDir := fmt.Sprintf("/var/lib/ceph/osd/ceph-%s", osdID)
err := os.Mkdir(configDir, 0750)
if err != nil {
logger.Errorf("failed to create config dir %q. %v", configDir, err)
}
// Update LVM config at runtime
if err := UpdateLVMConfig(context, pvcBackedOSD, lvBackedPV); err != nil {
return errors.Wrap(err, "failed to update lvm configuration file") // fail return here as validation provided by ceph-volume
}
var volumeGroupName string
if pvcBackedOSD && !lvBackedPV {
volumeGroupName = getVolumeGroupName(lvPath)
if volumeGroupName == "" {
return errors.Errorf("error fetching volume group name for OSD %q", osdID)
}
go handleTerminate(context, lvPath, volumeGroupName)
// It's fine to continue if deactivate fails since we will return error if activate fails
if op, err := context.Executor.ExecuteCommandWithCombinedOutput("vgchange", "-an", "-vv", volumeGroupName); err != nil {
logger.Errorf("failed to deactivate volume group for lv %q. output: %s. %v", lvPath, op, err)
return nil
}
if op, err := context.Executor.ExecuteCommandWithCombinedOutput("vgchange", "-ay", "-vv", volumeGroupName); err != nil {
return errors.Wrapf(err, "failed to activate volume group for lv %q. output: %s", lvPath, op)
}
}
// activate the osd with ceph-volume
storeFlag := "--" + osdType
if err := context.Executor.ExecuteCommand("stdbuf", "-oL", "ceph-volume", "lvm", "activate", "--no-systemd", storeFlag, osdID, osdUUID); err != nil {
return errors.Wrap(err, "failed to activate osd")
}
// run the ceph-osd daemon
if err := context.Executor.ExecuteCommand("ceph-osd", cephArgs...); err != nil {
// Instead of returning, we want to allow the lvm release to happen below, so we just log the err
logger.Errorf("failed to start osd or shutting down. %v", err)
}
if pvcBackedOSD && !lvBackedPV {
if err := releaseLVMDevice(context, volumeGroupName); err != nil {
// Let's just report the error and not fail as a best-effort since some drivers will force detach anyway
// Failing to release the device does not mean the detach will fail, so let's proceed
logger.Errorf("failed to release device from lvm. %v", err)
return nil
}
}
return nil
}
func handleTerminate(context *clusterd.Context, lvPath, volumeGroupName string) {
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, syscall.SIGTERM)
<-sigc
logger.Infof("shutdown signal received, exiting...")
err := killCephOSDProcess(context, lvPath)
if err != nil {
logger.Errorf("failed to kill ceph-osd process. %v", err)
}
}
func killCephOSDProcess(context *clusterd.Context, lvPath string) error {
pid, err := context.Executor.ExecuteCommandWithOutput("fuser", "-a", lvPath)
if err != nil {
return errors.Wrapf(err, "failed to retrieve process ID for %q", lvPath)
}
logger.Infof("process ID for ceph-osd: %s", pid)
// shut down the osd-ceph process so that lvm release does not show device in use error.
if pid != "" {
// The OSD needs to exit as quickly as possible in order for the IO requests
// to be redirected to other OSDs in the cluster. The OSD is designed to tolerate failures
// of any kind, including power loss or kill -9. The upstream Ceph tests have for many years
// been testing with kill -9 so this is expected to be safe. There is a fix upstream Ceph that will
// improve the shutdown time of the OSD. For cleanliness we should consider removing the -9
// once it is backported to Nautilus: https://github.com/ceph/ceph/pull/31677.
if err := context.Executor.ExecuteCommand("kill", "-9", pid); err != nil {
return errors.Wrap(err, "failed to kill ceph-osd process")
}
}
return nil
}
func configRawDevice(name string, context *clusterd.Context) (*sys.LocalDisk, error) {
rawDevice, err := clusterd.PopulateDeviceInfo(name, context.Executor)
if err != nil {
return nil, errors.Wrapf(err, "failed to get device info for %q", name)
}
// set the device type: data, block_db(metadata) or wal.
if strings.HasPrefix(name, "/mnt") {
rawDevice, err = clusterd.PopulateDeviceUdevInfo(rawDevice.KernelName, context.Executor, rawDevice)
if err != nil {
logger.Warningf("failed to get udev info for device %q. %v", name, err)
}
rawDevice.Type = pvcDataTypeDevice
} else if strings.HasPrefix(name, "/srv") {
rawDevice.Type = pvcMetadataTypeDevice
} else if strings.HasPrefix(name, "/wal") {
rawDevice.Type = pvcWalTypeDevice
}
return rawDevice, nil
}
// Provision provisions an OSD
func Provision(context *clusterd.Context, agent *OsdAgent, crushLocation, topologyAffinity string) error {
if agent.pvcBacked {
// Init KMS store, retrieve the KEK and store it as an env var for ceph-volume
err := setKEKinEnv(context, agent.clusterInfo)
if err != nil {
return errors.Wrap(err, "failed to set kek as an environment variable")
}
}
// Print dmsetup version
err := dmsetupVersion(context)
if err != nil {
return errors.Wrap(err, "failed to print device mapper version")
}
// set the initial orchestration status
status := oposd.OrchestrationStatus{Status: oposd.OrchestrationStatusOrchestrating}
oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status)
if err := client.WriteCephConfig(context, agent.clusterInfo); err != nil {
return errors.Wrap(err, "failed to generate ceph config")
}
logger.Infof("discovering hardware")
var rawDevices []*sys.LocalDisk
if agent.pvcBacked {
for i := range agent.devices {
rawDevice, err := configRawDevice(agent.devices[i].Name, context)
if err != nil {
return err
}
rawDevices = append(rawDevices, rawDevice)
}
} else {
// We still need to use 'lsblk' as the underlying way to discover devices
// Ideally, we would use the "ceph-volume inventory" command instead
// However, it suffers from some limitation such as exposing available partitions and LVs
// See: https://tracker.ceph.com/issues/43579
rawDevices, err = clusterd.DiscoverDevices(context.Executor)
if err != nil {
return errors.Wrap(err, "failed initial hardware discovery")
}
}
context.Devices = rawDevices
logger.Info("creating and starting the osds")
// determine the set of devices that can/should be used for OSDs.
devices, err := getAvailableDevices(context, agent)
if err != nil {
return errors.Wrap(err, "failed to get available devices")
}
// orchestration is about to start, update the status
status = oposd.OrchestrationStatus{Status: oposd.OrchestrationStatusOrchestrating, PvcBackedOSD: agent.pvcBacked}
oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status)
// start the desired OSDs on devices
logger.Infof("configuring osd devices: %+v", devices)
deviceOSDs, err := agent.configureCVDevices(context, devices)
if err != nil {
return errors.Wrap(err, "failed to configure devices")
}
// Let's fail if no OSDs were configured
// This likely means the filter for available devices passed (in PVC case)
// but the resulting device was already configured for another cluster (disk not wiped and leftover)
// So we need to make sure the list is filled up, otherwise fail
if len(deviceOSDs) == 0 {
logger.Warningf("skipping OSD configuration as no devices matched the storage settings for this node %q", agent.nodeName)
status = oposd.OrchestrationStatus{OSDs: deviceOSDs, Status: oposd.OrchestrationStatusCompleted, PvcBackedOSD: agent.pvcBacked}
oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status)
return nil
}
// Populate CRUSH location for each OSD on the host
for i := range deviceOSDs {
deviceOSDs[i].Location = crushLocation
deviceOSDs[i].TopologyAffinity = topologyAffinity
}
logger.Infof("devices = %+v", deviceOSDs)
// Since we are done configuring the PVC we need to release it from LVM
// If we don't do this, the device will remain hold by LVM and we won't be able to detach it
// When running on PVC, the device is:
// * attached on the prepare pod
// * osd is mkfs
// * detached from the prepare pod
// * attached to the activate pod
// * then the OSD runs
if agent.pvcBacked && !deviceOSDs[0].SkipLVRelease && !deviceOSDs[0].LVBackedPV {
// Try to discover the VG of that LV
volumeGroupName := getVolumeGroupName(deviceOSDs[0].BlockPath)
// If empty the osd is using the ceph-volume raw mode
// so it's consuming a raw block device and LVM is not used
// so there is nothing to de-activate
if volumeGroupName != "" {
if err := releaseLVMDevice(context, volumeGroupName); err != nil {
return errors.Wrap(err, "failed to release device from lvm")
}
} else {
// TODO
// don't assume this and run a bluestore check on the device to be sure?
logger.Infof("ceph-volume raw mode used by block %q, no VG to de-activate", deviceOSDs[0].BlockPath)
}
}
// orchestration is completed, update the status
status = oposd.OrchestrationStatus{OSDs: deviceOSDs, Status: oposd.OrchestrationStatusCompleted, PvcBackedOSD: agent.pvcBacked}
oposd.UpdateNodeStatus(agent.kv, agent.nodeName, status)
return nil
}
func getAvailableDevices(context *clusterd.Context, agent *OsdAgent) (*DeviceOsdMapping, error) {
desiredDevices := agent.devices
logger.Debugf("desiredDevices are %+v", desiredDevices)
logger.Debug("context.Devices are:")
for _, disk := range context.Devices {
logger.Debugf("%+v", disk)
}
available := &DeviceOsdMapping{Entries: map[string]*DeviceOsdIDEntry{}}
for _, device := range context.Devices {
// Ignore 'dm' device since they are not handled by c-v properly
// see: https://tracker.ceph.com/issues/43209
if strings.HasPrefix(device.Name, sys.DeviceMapperPrefix) && device.Type == sys.LVMType {
logger.Infof("skipping 'dm' device %q", device.Name)
continue
}
// Ignore device with filesystem signature since c-v inventory
// cannot detect that correctly
// see: https://tracker.ceph.com/issues/43585
if device.Filesystem != "" {
// Allow further inspection of that device before skipping it
if device.Filesystem == "crypto_LUKS" && agent.pvcBacked {
if isCephEncryptedBlock(context, agent.clusterInfo.FSID, device.Name) {
logger.Infof("encrypted disk %q is an OSD part of this cluster, considering it", device.Name)
}
} else {
logger.Infof("skipping device %q because it contains a filesystem %q", device.Name, device.Filesystem)
continue
}
}
// If we detect a partition we have to make sure that ceph-volume will be able to consume it
// ceph-volume version 14.2.8 has the right code to support partitions
if device.Type == sys.PartType {
if !agent.clusterInfo.CephVersion.IsAtLeast(cephVolumeRawModeMinCephVersion) {
logger.Infof("skipping device %q because it is a partition and ceph version is too old, you need at least ceph %q", device.Name, cephVolumeRawModeMinCephVersion.String())
continue
}
device, err := clusterd.PopulateDeviceUdevInfo(device.Name, context.Executor, device)
if err != nil {
logger.Errorf("failed to get udev info of partition %q. %v", device.Name, err)
continue
}
}
// Check if the desired device is available
//
// We need to use the /dev path, provided by the NAME property from "lsblk --paths",
// especially when running on PVC and/or on dm device
// When running on PVC we use the real device name instead of the Kubernetes mountpoint
// When running on dm device we use the dm device name like "/dev/mapper/foo" instead of "/dev/dm-1"
// Otherwise ceph-volume inventory will fail on the udevadm check
// udevadm does not support device path different than /dev or /sys
//
// So earlier lsblk extracted the '/dev' path, hence the device.Name property
// device.Name can be 'xvdca', later this is formatted to '/dev/xvdca'
var err error
var isAvailable bool
rejectedReason := ""
if agent.pvcBacked {
block := fmt.Sprintf("/mnt/%s", agent.nodeName)
rawOsds, err := GetCephVolumeRawOSDs(context, agent.clusterInfo, agent.clusterInfo.FSID, block, agent.metadataDevice, "", false, true)
if err != nil {
isAvailable = false
rejectedReason = fmt.Sprintf("failed to detect if there is already an osd. %v", err)
} else if len(rawOsds) > 0 {
isAvailable = false
rejectedReason = "already in use by a raw OSD, no need to reconfigure"
} else {
isAvailable = true
}
} else {
isAvailable, rejectedReason, err = sys.CheckIfDeviceAvailable(context.Executor, device.RealPath, agent.pvcBacked)
if err != nil {
isAvailable = false
rejectedReason = fmt.Sprintf("failed to check if the device %q is available. %v", device.Name, err)
}
}
if !isAvailable {
logger.Infof("skipping device %q: %s.", device.Name, rejectedReason)
continue
} else {
logger.Infof("device %q is available.", device.Name)
}
var deviceInfo *DeviceOsdIDEntry
if agent.metadataDevice != "" && agent.metadataDevice == device.Name {
// current device is desired as the metadata device
deviceInfo = &DeviceOsdIDEntry{Data: unassignedOSDID, Metadata: []int{}}
} else if len(desiredDevices) == 1 && desiredDevices[0].Name == "all" {
// user has specified all devices, use the current one for data
deviceInfo = &DeviceOsdIDEntry{Data: unassignedOSDID}
} else if len(desiredDevices) > 0 {
var matched bool
var matchedDevice DesiredDevice
for _, desiredDevice := range desiredDevices {
if desiredDevice.IsFilter {
// the desired devices is a regular expression
matched, err = regexp.Match(desiredDevice.Name, []byte(device.Name))
if err != nil {
logger.Errorf("regex failed on device %q and filter %q. %v", device.Name, desiredDevice.Name, err)
continue
}
if matched {
logger.Infof("device %q matches device filter %q", device.Name, desiredDevice.Name)
}
} else if desiredDevice.IsDevicePathFilter {
pathnames := append(strings.Fields(device.DevLinks), filepath.Join("/dev", device.Name))
for _, pathname := range pathnames {
matched, err = regexp.Match(desiredDevice.Name, []byte(pathname))
if err != nil {
logger.Errorf("regex failed on device %q and filter %q. %v", device.Name, desiredDevice.Name, err)
continue
}
if matched {
logger.Infof("device %q (aliases: %q) matches device path filter %q", device.Name, device.DevLinks, desiredDevice.Name)
break
}
}
} else if device.Name == desiredDevice.Name {
logger.Infof("%q found in the desired devices", device.Name)
matched = true
} else if strings.HasPrefix(desiredDevice.Name, "/dev/") {
devLinks := strings.Split(device.DevLinks, " ")
for _, link := range devLinks {
if link == desiredDevice.Name {
logger.Infof("%q found in the desired devices (matched by link: %q)", device.Name, link)
matched = true
break
}
}
}
matchedDevice = desiredDevice
if matchedDevice.DeviceClass == "" {
classNotSet := true
if agent.pvcBacked {
crushDeviceClass := os.Getenv(oposd.CrushDeviceClassVarName)
if crushDeviceClass != "" {
matchedDevice.DeviceClass = crushDeviceClass
classNotSet = false
}
}
if classNotSet {
matchedDevice.DeviceClass = sys.GetDiskDeviceClass(device)
}
}
if matched {
break
}
}
if err == nil && matched {
// the current device matches the user-specified filter/list, use it for data
logger.Infof("device %q is selected by the device filter/name %q", device.Name, matchedDevice.Name)
deviceInfo = &DeviceOsdIDEntry{Data: unassignedOSDID, Config: matchedDevice, PersistentDevicePaths: strings.Fields(device.DevLinks)}
// set that this is not an OSD but a metadata device
if device.Type == pvcMetadataTypeDevice {
logger.Infof("metadata device %q is selected by the device filter/name %q", device.Name, matchedDevice.Name)
deviceInfo = &DeviceOsdIDEntry{Config: matchedDevice, PersistentDevicePaths: strings.Fields(device.DevLinks), Metadata: []int{1}}
}
// set that this is not an OSD but a wal device
if device.Type == pvcWalTypeDevice {
logger.Infof("wal device %q is selected by the device filter/name %q", device.Name, matchedDevice.Name)
deviceInfo = &DeviceOsdIDEntry{Config: matchedDevice, PersistentDevicePaths: strings.Fields(device.DevLinks), Metadata: []int{2}}
}
} else {
logger.Infof("skipping device %q that does not match the device filter/list (%v). %v", device.Name, desiredDevices, err)
}
} else {
logger.Infof("skipping device %q until the admin specifies it can be used by an osd", device.Name)
}
if deviceInfo != nil {
// When running on PVC, we typically have a single device only
// So it's fine to name the first entry of the map "data" instead of the PVC name
// It is particularly useful when a metadata PVC is used because we need to identify it in the map
// So the entry must be named "metadata" so it can be accessed later
if agent.pvcBacked {
if device.Type == pvcDataTypeDevice {
available.Entries[pvcDataTypeDevice] = deviceInfo
} else if device.Type == pvcMetadataTypeDevice {
available.Entries[pvcMetadataTypeDevice] = deviceInfo
} else if device.Type == pvcWalTypeDevice {
available.Entries[pvcWalTypeDevice] = deviceInfo
}
} else {
available.Entries[device.Name] = deviceInfo
}
}
}
return available, nil
}
// releaseLVMDevice deactivates the LV to release the device.
func releaseLVMDevice(context *clusterd.Context, volumeGroupName string) error {
if op, err := context.Executor.ExecuteCommandWithCombinedOutput("lvchange", "-an", "-vv", volumeGroupName); err != nil {
return errors.Wrapf(err, "failed to deactivate LVM %s. output: %s", volumeGroupName, op)
}
logger.Info("successfully released device from lvm")
return nil
}
// getVolumeGroupName returns the Volume group name from the given Logical Volume Path
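// For example (illustrative): getVolumeGroupName("/dev/ceph-vg/osd-lv")
// returns "ceph-vg", while a raw path such as "/dev/xvdb" yields "" and a
// warning, since it does not match the /dev/<vg name>/<lv name> layout.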
func getVolumeGroupName(lvPath string) string {
vgSlice := strings.Split(lvPath, "/")
// Assert that lvpath is in correct format `/dev/<vg name>/<lv name>` before extracting the vg name
if len(vgSlice) != 4 || vgSlice[2] == "" {
logger.Warningf("invalid LV Path: %q", lvPath)
return ""
}
return vgSlice[2]
}
syslog.go
// Package syslog provides the logdriver for forwarding server logs to syslog endpoints.
package syslog
import (
"crypto/tls"
"errors"
"fmt"
"net"
"net/url"
"os"
"strconv"
"strings"
"time"
syslog "github.com/RackSec/srslog"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/daemon/logger/loggerutils"
"github.com/docker/docker/pkg/urlutil"
"github.com/docker/go-connections/tlsconfig"
)
const (
name = "syslog"
secureProto = "tcp+tls"
)
var facilities = map[string]syslog.Priority{
"kern": syslog.LOG_KERN,
"user": syslog.LOG_USER,
"mail": syslog.LOG_MAIL,
"daemon": syslog.LOG_DAEMON,
"auth": syslog.LOG_AUTH,
"syslog": syslog.LOG_SYSLOG,
"lpr": syslog.LOG_LPR,
"news": syslog.LOG_NEWS,
"uucp": syslog.LOG_UUCP,
"cron": syslog.LOG_CRON,
"authpriv": syslog.LOG_AUTHPRIV,
"ftp": syslog.LOG_FTP,
"local0": syslog.LOG_LOCAL0,
"local1": syslog.LOG_LOCAL1,
"local2": syslog.LOG_LOCAL2,
"local3": syslog.LOG_LOCAL3,
"local4": syslog.LOG_LOCAL4,
"local5": syslog.LOG_LOCAL5,
"local6": syslog.LOG_LOCAL6,
"local7": syslog.LOG_LOCAL7,
}
type syslogger struct {
writer *syslog.Writer
}
func init() {
if err := logger.RegisterLogDriver(name, New); err != nil {
logrus.Fatal(err)
}
if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil {
logrus.Fatal(err)
}
}
// rsyslog uses the appname part of the syslog message to fill in an %syslogtag%
// template attribute in rsyslog.conf. In order to stay backward compatible with
// rfc3164, the tag is also used as the appname.
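// As an illustration, a call with p = LOG_DAEMON|LOG_INFO (30), hostname
// "myhost", tag "docker/abc" and content "hello" produces a line like:
//   <30>1 2021-01-02T15:04:05Z myhost docker/abc 42 docker/abc hello
// (42 standing in for the daemon's PID).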
func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string {
timestamp := time.Now().Format(time.RFC3339)
pid := os.Getpid()
msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s",
p, 1, timestamp, hostname, tag, pid, tag, content)
return msg
}
// The timestamp field in rfc5424 is derived from rfc3339. Whereas rfc3339 makes allowances
// for multiple syntaxes, there are further restrictions in rfc5424, i.e., the maximum
// resolution is limited to "TIME-SECFRAC" which is 6 (microsecond resolution)
func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string {
timestamp := time.Now().Format("2006-01-02T15:04:05.999999Z07:00")
pid := os.Getpid()
msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s",
p, 1, timestamp, hostname, tag, pid, tag, content)
return msg
}
// New creates a syslog logger using the configuration passed in on
// the context. Supported context configuration variables are
// syslog-address, syslog-facility, syslog-format.
func New(info logger.Info) (logger.Logger, error) {
tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
if err != nil {
return nil, err
}
proto, address, err := parseAddress(info.Config["syslog-address"])
if err != nil {
return nil, err
}
facility, err := parseFacility(info.Config["syslog-facility"])
if err != nil {
return nil, err
}
syslogFormatter, syslogFramer, err := parseLogFormat(info.Config["syslog-format"], proto)
if err != nil {
return nil, err
}
var log *syslog.Writer
if proto == secureProto {
tlsConfig, tlsErr := parseTLSConfig(info.Config)
if tlsErr != nil {
return nil, tlsErr
}
log, err = syslog.DialWithTLSConfig(proto, address, facility, tag, tlsConfig)
} else {
log, err = syslog.Dial(proto, address, facility, tag)
}
if err != nil {
return nil, err
}
log.SetFormatter(syslogFormatter)
log.SetFramer(syslogFramer)
return &syslogger{
writer: log,
}, nil
}
func (s *syslogger) Log(msg *logger.Message) error {
if msg.Source == "stderr" {
return s.writer.Err(string(msg.Line))
}
return s.writer.Info(string(msg.Line))
}
func (s *syslogger) Close() error {
return s.writer.Close()
}
func (s *syslogger) Name() string {
return name
}
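// parseAddress splits a syslog-address URL into protocol and address. For
// example (illustrative): "tcp://1.2.3.4" becomes ("tcp", "1.2.3.4:514"),
// since the default syslog port 514 is appended when none is given, and
// "unix:///dev/log" becomes ("unix", "/dev/log") provided the socket exists.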
func parseAddress(address string) (string, string, error) {
if address == "" {
return "", "", nil
}
if !urlutil.IsTransportURL(address) {
return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address)
}
url, err := url.Parse(address)
if err != nil {
return "", "", err
}
// unix and unixgram socket validation
if url.Scheme == "unix" || url.Scheme == "unixgram" {
if _, err := os.Stat(url.Path); err != nil {
return "", "", err
}
return url.Scheme, url.Path, nil
}
// here we process tcp|udp
host := url.Host
if _, _, err := net.SplitHostPort(host); err != nil {
if !strings.Contains(err.Error(), "missing port in address") {
return "", "", err
}
host = host + ":514"
}
return url.Scheme, host, nil
}
// ValidateLogOpt looks for syslog specific log options
// syslog-address, syslog-facility.
func ValidateLogOpt(cfg map[string]string) error {
for key := range cfg {
switch key {
case "env":
case "labels":
case "syslog-address":
case "syslog-facility":
case "syslog-tls-ca-cert":
case "syslog-tls-cert":
case "syslog-tls-key":
case "syslog-tls-skip-verify":
case "tag":
case "syslog-format":
default:
return fmt.Errorf("unknown log opt '%s' for syslog log driver", key)
}
}
if _, _, err := parseAddress(cfg["syslog-address"]); err != nil {
return err
}
if _, err := parseFacility(cfg["syslog-facility"]); err != nil {
return err
}
if _, _, err := parseLogFormat(cfg["syslog-format"], ""); err != nil {
return err
}
return nil
}
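// parseFacility accepts either a named facility or its numeric code. For
// example (illustrative): "daemon" maps to LOG_DAEMON, and "16" yields
// syslog.Priority(16 << 3), which is the same value as LOG_LOCAL0.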
func parseFacility(facility string) (syslog.Priority, error) {
if facility == "" {
return syslog.LOG_DAEMON, nil
}
if syslogFacility, valid := facilities[facility]; valid {
return syslogFacility, nil
}
fInt, err := strconv.Atoi(facility)
if err == nil && 0 <= fInt && fInt <= 23 {
return syslog.Priority(fInt << 3), nil
}
return syslog.Priority(0), errors.New("invalid syslog facility")
}
func parseTLSConfig(cfg map[string]string) (*tls.Config, error) {
_, skipVerify := cfg["syslog-tls-skip-verify"]
opts := tlsconfig.Options{
CAFile: cfg["syslog-tls-ca-cert"],
CertFile: cfg["syslog-tls-cert"],
KeyFile: cfg["syslog-tls-key"],
InsecureSkipVerify: skipVerify,
}
return tlsconfig.Client(opts)
}
func parseLogFormat(logFormat, proto string) (syslog.Formatter, syslog.Framer, error) {
switch logFormat {
case "":
return syslog.UnixFormatter, syslog.DefaultFramer, nil
case "rfc3164":
return syslog.RFC3164Formatter, syslog.DefaultFramer, nil
case "rfc5424":
if proto == secureProto {
return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil
}
return rfc5424formatterWithAppNameAsTag, syslog.DefaultFramer, nil
case "rfc5424micro":
if proto == secureProto {
return rfc5424microformatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil
}
return rfc5424microformatterWithAppNameAsTag, syslog.DefaultFramer, nil
default:
return nil, nil, errors.New("Invalid syslog format")
}
}
cats.controller.ts
import { Controller, Get, Post } from '@nestjs/common';
import { Request } from 'express';
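// With the 'cats' prefix below, Nest maps POST /cats to create() and
// GET /cats to findAll(); each handler's return string becomes the
// response body.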
@Controller('cats')
export class CatsController {
@Post()
create(): string {
return '๋๋ ๊ณ ์์ด';
}
@Get()
findAll(): string {
return '๋๋ ์ผ์์ด!';
}
}
generated.rs
// =================================================================
//
// * WARNING *
//
// This file is generated!
//
// Changes made to this file will be overwritten. If changes are
// required to the generated code, the service_crategen project
// must be updated to generate the changes.
//
// =================================================================
use std::error::Error;
use std::fmt;
use async_trait::async_trait;
use rusoto_core::credential::ProvideAwsCredentials;
use rusoto_core::region;
use rusoto_core::request::{BufferedHttpResponse, DispatchSignedRequest};
use rusoto_core::{Client, RusotoError};
use rusoto_core::param::{Params, ServiceParams};
use rusoto_core::proto;
use rusoto_core::signature::SignedRequest;
#[allow(unused_imports)]
use serde::{Deserialize, Serialize};
use serde_json;
/// <p>A request to add outputs to the specified flow.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct AddFlowOutputsRequest {
/// <p>The flow that you want to add outputs to.</p>
#[serde(rename = "FlowArn")]
pub flow_arn: String,
/// <p>A list of outputs that you want to add.</p>
#[serde(rename = "Outputs")]
pub outputs: Vec<AddOutputRequest>,
}
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct AddFlowOutputsResponse {
/// <p>The ARN of the flow that these outputs were added to.</p>
#[serde(rename = "FlowArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub flow_arn: Option<String>,
/// <p>The details of the newly added outputs.</p>
#[serde(rename = "Outputs")]
#[serde(skip_serializing_if = "Option::is_none")]
pub outputs: Option<Vec<Output>>,
}
/// <p>The output that you want to add to this flow.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct AddOutputRequest {
/// <p>The range of IP addresses that should be allowed to initiate output requests to this flow. These IP addresses should be in the form of a Classless Inter-Domain Routing (CIDR) block; for example, 10.0.0.0/16.</p>
#[serde(rename = "CidrAllowList")]
#[serde(skip_serializing_if = "Option::is_none")]
pub cidr_allow_list: Option<Vec<String>>,
/// <p>A description of the output. This description appears only on the AWS Elemental MediaConnect console and will not be seen by the end user.</p>
#[serde(rename = "Description")]
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// <p>The IP address from which video will be sent to output destinations.</p>
#[serde(rename = "Destination")]
#[serde(skip_serializing_if = "Option::is_none")]
pub destination: Option<String>,
/// <p>The type of key used for the encryption. If no keyType is provided, the service will use the default setting (static-key).</p>
#[serde(rename = "Encryption")]
#[serde(skip_serializing_if = "Option::is_none")]
pub encryption: Option<Encryption>,
/// <p>The maximum latency in milliseconds for Zixi-based streams.</p>
#[serde(rename = "MaxLatency")]
#[serde(skip_serializing_if = "Option::is_none")]
pub max_latency: Option<i64>,
/// <p>The name of the output. This value must be unique within the current flow.</p>
#[serde(rename = "Name")]
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
/// <p>The port to use when content is distributed to this output.</p>
#[serde(rename = "Port")]
#[serde(skip_serializing_if = "Option::is_none")]
pub port: Option<i64>,
/// <p>The protocol to use for the output.</p>
#[serde(rename = "Protocol")]
pub protocol: String,
/// <p>The remote ID for the Zixi-pull output stream.</p>
#[serde(rename = "RemoteId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub remote_id: Option<String>,
/// <p>The smoothing latency in milliseconds for RIST, RTP, and RTP-FEC streams.</p>
#[serde(rename = "SmoothingLatency")]
#[serde(skip_serializing_if = "Option::is_none")]
pub smoothing_latency: Option<i64>,
/// <p>The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.</p>
#[serde(rename = "StreamId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub stream_id: Option<String>,
}
/// <p>Creates a new flow. The request must include one source. The request optionally can include outputs (up to 20) and entitlements (up to 50).</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct CreateFlowRequest {
/// <p>The Availability Zone that you want to create the flow in. These options are limited to the Availability Zones within the current AWS Region.</p>
#[serde(rename = "AvailabilityZone")]
#[serde(skip_serializing_if = "Option::is_none")]
pub availability_zone: Option<String>,
/// <p>The entitlements that you want to grant on a flow.</p>
#[serde(rename = "Entitlements")]
#[serde(skip_serializing_if = "Option::is_none")]
pub entitlements: Option<Vec<GrantEntitlementRequest>>,
/// <p>The name of the flow.</p>
#[serde(rename = "Name")]
pub name: String,
/// <p>The outputs that you want to add to this flow.</p>
#[serde(rename = "Outputs")]
#[serde(skip_serializing_if = "Option::is_none")]
pub outputs: Option<Vec<AddOutputRequest>>,
#[serde(rename = "Source")]
pub source: SetSourceRequest,
}
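// A minimal construction sketch (not part of the generated code; the flow
// and source names are made up, and `..Default::default()` relies on the
// derived Default impls in this file):
//
//     let req = CreateFlowRequest {
//         name: "my-flow".to_string(),
//         source: SetSourceRequest {
//             name: Some("my-source".to_string()),
//             protocol: Some("zixi-push".to_string()),
//             ..Default::default()
//         },
//         ..Default::default()
//     };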
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct CreateFlowResponse {
#[serde(rename = "Flow")]
#[serde(skip_serializing_if = "Option::is_none")]
pub flow: Option<Flow>,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DeleteFlowRequest {
/// <p>The ARN of the flow that you want to delete.</p>
#[serde(rename = "FlowArn")]
pub flow_arn: String,
}
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DeleteFlowResponse {
/// <p>The ARN of the flow that was deleted.</p>
#[serde(rename = "FlowArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub flow_arn: Option<String>,
/// <p>The status of the flow when the DeleteFlow process begins.</p>
#[serde(rename = "Status")]
#[serde(skip_serializing_if = "Option::is_none")]
pub status: Option<String>,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct DescribeFlowRequest {
/// <p>The ARN of the flow that you want to describe.</p>
#[serde(rename = "FlowArn")]
pub flow_arn: String,
}
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct DescribeFlowResponse {
#[serde(rename = "Flow")]
#[serde(skip_serializing_if = "Option::is_none")]
pub flow: Option<Flow>,
#[serde(rename = "Messages")]
#[serde(skip_serializing_if = "Option::is_none")]
pub messages: Option<Messages>,
}
/// <p>Information about the encryption of the flow.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Encryption {
/// <p>The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).</p>
#[serde(rename = "Algorithm")]
pub algorithm: String,
/// <p>A 128-bit, 16-byte hex value represented by a 32-character string, to be used with the key for encrypting content. This parameter is not valid for static key encryption.</p>
#[serde(rename = "ConstantInitializationVector")]
#[serde(skip_serializing_if = "Option::is_none")]
pub constant_initialization_vector: Option<String>,
/// <p>The value of one of the devices that you configured with your digital rights management (DRM) platform key provider. This parameter is required for SPEKE encryption and is not valid for static key encryption.</p>
#[serde(rename = "DeviceId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub device_id: Option<String>,
/// <p>The type of key that is used for the encryption. If no keyType is provided, the service will use the default setting (static-key).</p>
#[serde(rename = "KeyType")]
#[serde(skip_serializing_if = "Option::is_none")]
pub key_type: Option<String>,
/// <p>The AWS Region that the API Gateway proxy endpoint was created in. This parameter is required for SPEKE encryption and is not valid for static key encryption.</p>
#[serde(rename = "Region")]
#[serde(skip_serializing_if = "Option::is_none")]
pub region: Option<String>,
/// <p>An identifier for the content. The service sends this value to the key server to identify the current endpoint. The resource ID is also known as the content ID. This parameter is required for SPEKE encryption and is not valid for static key encryption.</p>
#[serde(rename = "ResourceId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub resource_id: Option<String>,
/// <p>The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).</p>
#[serde(rename = "RoleArn")]
pub role_arn: String,
/// <p>The ARN of the secret that you created in AWS Secrets Manager to store the encryption key. This parameter is required for static key encryption and is not valid for SPEKE encryption.</p>
#[serde(rename = "SecretArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub secret_arn: Option<String>,
/// <p>The URL from the API Gateway proxy that you set up to talk to your key server. This parameter is required for SPEKE encryption and is not valid for static key encryption.</p>
#[serde(rename = "Url")]
#[serde(skip_serializing_if = "Option::is_none")]
pub url: Option<String>,
}
/// <p>The settings for a flow entitlement.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Entitlement {
/// <p>Percentage from 0-100 of the data transfer cost to be billed to the subscriber.</p>
#[serde(rename = "DataTransferSubscriberFeePercent")]
#[serde(skip_serializing_if = "Option::is_none")]
pub data_transfer_subscriber_fee_percent: Option<i64>,
/// <p>A description of the entitlement.</p>
#[serde(rename = "Description")]
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// <p>The type of encryption that will be used on the output that is associated with this entitlement.</p>
#[serde(rename = "Encryption")]
#[serde(skip_serializing_if = "Option::is_none")]
pub encryption: Option<Encryption>,
/// <p>The ARN of the entitlement.</p>
#[serde(rename = "EntitlementArn")]
pub entitlement_arn: String,
/// <p>The name of the entitlement.</p>
#[serde(rename = "Name")]
pub name: String,
/// <p>The AWS account IDs that you want to share your content with. The receiving accounts (subscribers) will be allowed to create their own flow using your content as the source.</p>
#[serde(rename = "Subscribers")]
pub subscribers: Vec<String>,
}
/// <p>The settings for a flow, including its source, outputs, and entitlements.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Flow {
/// <p>The Availability Zone that you want to create the flow in. These options are limited to the Availability Zones within the current AWS Region.</p>
#[serde(rename = "AvailabilityZone")]
pub availability_zone: String,
/// <p>A description of the flow. This value is not used or seen outside of the current AWS Elemental MediaConnect account.</p>
#[serde(rename = "Description")]
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// <p>The IP address from which video will be sent to output destinations.</p>
#[serde(rename = "EgressIp")]
#[serde(skip_serializing_if = "Option::is_none")]
pub egress_ip: Option<String>,
/// <p>The entitlements in this flow.</p>
#[serde(rename = "Entitlements")]
pub entitlements: Vec<Entitlement>,
/// <p>The Amazon Resource Name (ARN), a unique identifier for any AWS resource, of the flow.</p>
#[serde(rename = "FlowArn")]
pub flow_arn: String,
/// <p>The name of the flow.</p>
#[serde(rename = "Name")]
pub name: String,
/// <p>The outputs in this flow.</p>
#[serde(rename = "Outputs")]
pub outputs: Vec<Output>,
#[serde(rename = "Source")]
pub source: Source,
/// <p>The current status of the flow.</p>
#[serde(rename = "Status")]
pub status: String,
}
/// <p>The entitlements that you want to grant on a flow.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct GrantEntitlementRequest {
/// <p>Percentage from 0-100 of the data transfer cost to be billed to the subscriber.</p>
#[serde(rename = "DataTransferSubscriberFeePercent")]
#[serde(skip_serializing_if = "Option::is_none")]
pub data_transfer_subscriber_fee_percent: Option<i64>,
/// <p>A description of the entitlement. This description appears only on the AWS Elemental MediaConnect console and will not be seen by the subscriber or end user.</p>
#[serde(rename = "Description")]
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// <p>The type of encryption that will be used on the output that is associated with this entitlement.</p>
#[serde(rename = "Encryption")]
#[serde(skip_serializing_if = "Option::is_none")]
pub encryption: Option<Encryption>,
/// <p>The name of the entitlement. This value must be unique within the current flow.</p>
#[serde(rename = "Name")]
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
/// <p>The AWS account IDs that you want to share your content with. The receiving accounts (subscribers) will be allowed to create their own flows using your content as the source.</p>
#[serde(rename = "Subscribers")]
pub subscribers: Vec<String>,
}
/// <p>A request to grant entitlements on a flow.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct GrantFlowEntitlementsRequest {
/// <p>The list of entitlements that you want to grant.</p>
#[serde(rename = "Entitlements")]
pub entitlements: Vec<GrantEntitlementRequest>,
/// <p>The flow that you want to grant entitlements on.</p>
#[serde(rename = "FlowArn")]
pub flow_arn: String,
}
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct GrantFlowEntitlementsResponse {
/// <p>The entitlements that were just granted.</p>
#[serde(rename = "Entitlements")]
#[serde(skip_serializing_if = "Option::is_none")]
pub entitlements: Option<Vec<Entitlement>>,
/// <p>The ARN of the flow that these entitlements were granted to.</p>
#[serde(rename = "FlowArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub flow_arn: Option<String>,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct ListEntitlementsRequest {
/// <p>The maximum number of results to return per API request. For example, you submit a ListEntitlements request with MaxResults set at 5. Although 20 items match your request, the service returns no more than the first 5 items. (The service also returns a NextToken value that you can use to fetch the next batch of results.) The service might return fewer results than the MaxResults value. If MaxResults is not included in the request, the service defaults to pagination with a maximum of 20 results per page.</p>
#[serde(rename = "MaxResults")]
#[serde(skip_serializing_if = "Option::is_none")]
pub max_results: Option<i64>,
/// <p>The token that identifies which batch of results that you want to see. For example, you submit a ListEntitlements request with MaxResults set at 5. The service returns the first batch of results (up to 5) and a NextToken value. To see the next batch of results, you can submit the ListEntitlements request a second time and specify the NextToken value.</p>
#[serde(rename = "NextToken")]
#[serde(skip_serializing_if = "Option::is_none")]
pub next_token: Option<String>,
}
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListEntitlementsResponse {
/// <p>A list of entitlements that have been granted to you from other AWS accounts.</p>
#[serde(rename = "Entitlements")]
#[serde(skip_serializing_if = "Option::is_none")]
pub entitlements: Option<Vec<ListedEntitlement>>,
/// <p>The token that identifies which batch of results that you want to see. For example, you submit a ListEntitlements request with MaxResults set at 5. The service returns the first batch of results (up to 5) and a NextToken value. To see the next batch of results, you can submit the ListEntitlements request a second time and specify the NextToken value.</p>
#[serde(rename = "NextToken")]
#[serde(skip_serializing_if = "Option::is_none")]
pub next_token: Option<String>,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct ListFlowsRequest {
/// <p>The maximum number of results to return per API request. For example, you submit a ListFlows request with MaxResults set at 5. Although 20 items match your request, the service returns no more than the first 5 items. (The service also returns a NextToken value that you can use to fetch the next batch of results.) The service might return fewer results than the MaxResults value. If MaxResults is not included in the request, the service defaults to pagination with a maximum of 10 results per page.</p>
#[serde(rename = "MaxResults")]
#[serde(skip_serializing_if = "Option::is_none")]
pub max_results: Option<i64>,
/// <p>The token that identifies which batch of results that you want to see. For example, you submit a ListFlows request with MaxResults set at 5. The service returns the first batch of results (up to 5) and a NextToken value. To see the next batch of results, you can submit the ListFlows request a second time and specify the NextToken value.</p>
#[serde(rename = "NextToken")]
#[serde(skip_serializing_if = "Option::is_none")]
pub next_token: Option<String>,
}
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListFlowsResponse {
/// <p>A list of flow summaries.</p>
#[serde(rename = "Flows")]
#[serde(skip_serializing_if = "Option::is_none")]
pub flows: Option<Vec<ListedFlow>>,
/// <p>The token that identifies which batch of results that you want to see. For example, you submit a ListFlows request with MaxResults set at 5. The service returns the first batch of results (up to 5) and a NextToken value. To see the next batch of results, you can submit the ListFlows request a second time and specify the NextToken value.</p>
#[serde(rename = "NextToken")]
#[serde(skip_serializing_if = "Option::is_none")]
pub next_token: Option<String>,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct ListTagsForResourceRequest {
/// <p>The Amazon Resource Name (ARN) that identifies the AWS Elemental MediaConnect resource for which to list the tags.</p>
#[serde(rename = "ResourceArn")]
pub resource_arn: String,
}
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListTagsForResourceResponse {
/// <p>A map from tag keys to values. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.</p>
#[serde(rename = "Tags")]
#[serde(skip_serializing_if = "Option::is_none")]
pub tags: Option<::std::collections::HashMap<String, String>>,
}
/// <p>An entitlement that has been granted to you from other AWS accounts.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListedEntitlement {
/// <p>Percentage from 0-100 of the data transfer cost to be billed to the subscriber.</p>
#[serde(rename = "DataTransferSubscriberFeePercent")]
#[serde(skip_serializing_if = "Option::is_none")]
pub data_transfer_subscriber_fee_percent: Option<i64>,
/// <p>The ARN of the entitlement.</p>
#[serde(rename = "EntitlementArn")]
pub entitlement_arn: String,
/// <p>The name of the entitlement.</p>
#[serde(rename = "EntitlementName")]
pub entitlement_name: String,
}
/// <p>Provides a summary of a flow, including its ARN, Availability Zone, and source type.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct ListedFlow {
/// <p>The Availability Zone that the flow was created in.</p>
#[serde(rename = "AvailabilityZone")]
pub availability_zone: String,
/// <p>A description of the flow.</p>
#[serde(rename = "Description")]
pub description: String,
/// <p>The ARN of the flow.</p>
#[serde(rename = "FlowArn")]
pub flow_arn: String,
/// <p>The name of the flow.</p>
#[serde(rename = "Name")]
pub name: String,
/// <p>The type of source. This value is either owned (originated somewhere other than an AWS Elemental MediaConnect flow owned by another AWS account) or entitled (originated at an AWS Elemental MediaConnect flow owned by another AWS account).</p>
#[serde(rename = "SourceType")]
pub source_type: String,
/// <p>The current status of the flow.</p>
#[serde(rename = "Status")]
pub status: String,
}
/// <p>Messages that provide the state of the flow.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Messages {
/// <p>A list of errors that might have been generated from processes on this flow.</p>
#[serde(rename = "Errors")]
pub errors: Vec<String>,
}
/// <p>The settings for an output.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Output {
/// <p>Percentage from 0-100 of the data transfer cost to be billed to the subscriber.</p>
#[serde(rename = "DataTransferSubscriberFeePercent")]
#[serde(skip_serializing_if = "Option::is_none")]
pub data_transfer_subscriber_fee_percent: Option<i64>,
/// <p>A description of the output.</p>
#[serde(rename = "Description")]
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// <p>The address where you want to send the output.</p>
#[serde(rename = "Destination")]
#[serde(skip_serializing_if = "Option::is_none")]
pub destination: Option<String>,
/// <p>The type of key used for the encryption. If no keyType is provided, the service will use the default setting (static-key).</p>
#[serde(rename = "Encryption")]
#[serde(skip_serializing_if = "Option::is_none")]
pub encryption: Option<Encryption>,
/// <p>The ARN of the entitlement on the originator's flow. This value is relevant only on entitled flows.</p>
#[serde(rename = "EntitlementArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub entitlement_arn: Option<String>,
/// <p>The input ARN of the AWS Elemental MediaLive channel. This parameter is relevant only for outputs that were added by creating a MediaLive input.</p>
#[serde(rename = "MediaLiveInputArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub media_live_input_arn: Option<String>,
/// <p>The name of the output. This value must be unique within the current flow.</p>
#[serde(rename = "Name")]
pub name: String,
/// <p>The ARN of the output.</p>
#[serde(rename = "OutputArn")]
pub output_arn: String,
/// <p>The port to use when content is distributed to this output.</p>
#[serde(rename = "Port")]
#[serde(skip_serializing_if = "Option::is_none")]
pub port: Option<i64>,
/// <p>Attributes related to the transport stream that are used in the output.</p>
#[serde(rename = "Transport")]
#[serde(skip_serializing_if = "Option::is_none")]
pub transport: Option<Transport>,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct RemoveFlowOutputRequest {
/// <p>The flow that you want to remove an output from.</p>
#[serde(rename = "FlowArn")]
pub flow_arn: String,
/// <p>The ARN of the output that you want to remove.</p>
#[serde(rename = "OutputArn")]
pub output_arn: String,
}
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct RemoveFlowOutputResponse {
/// <p>The ARN of the flow that is associated with the output you removed.</p>
#[serde(rename = "FlowArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub flow_arn: Option<String>,
/// <p>The ARN of the output that was removed.</p>
#[serde(rename = "OutputArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub output_arn: Option<String>,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct RevokeFlowEntitlementRequest {
/// <p>The ARN of the entitlement that you want to revoke.</p>
#[serde(rename = "EntitlementArn")]
pub entitlement_arn: String,
/// <p>The flow that you want to revoke an entitlement from.</p>
#[serde(rename = "FlowArn")]
pub flow_arn: String,
}
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct RevokeFlowEntitlementResponse {
/// <p>The ARN of the entitlement that was revoked.</p>
#[serde(rename = "EntitlementArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub entitlement_arn: Option<String>,
/// <p>The ARN of the flow that the entitlement was revoked from.</p>
#[serde(rename = "FlowArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub flow_arn: Option<String>,
}
/// <p>The settings for the source of the flow.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct SetSourceRequest {
/// <p>The type of encryption that is used on the content ingested from this source.</p>
#[serde(rename = "Decryption")]
#[serde(skip_serializing_if = "Option::is_none")]
pub decryption: Option<Encryption>,
/// <p>A description for the source. This value is not used or seen outside of the current AWS Elemental MediaConnect account.</p>
#[serde(rename = "Description")]
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// <p>The ARN of the entitlement that allows you to subscribe to this flow. The entitlement is set by the flow originator, and the ARN is generated as part of the originator's flow.</p>
#[serde(rename = "EntitlementArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub entitlement_arn: Option<String>,
/// <p>The port that the flow will be listening on for incoming content.</p>
#[serde(rename = "IngestPort")]
#[serde(skip_serializing_if = "Option::is_none")]
pub ingest_port: Option<i64>,
/// <p>The smoothing max bitrate for RIST, RTP, and RTP-FEC streams.</p>
#[serde(rename = "MaxBitrate")]
#[serde(skip_serializing_if = "Option::is_none")]
pub max_bitrate: Option<i64>,
/// <p>The maximum latency in milliseconds. This parameter applies only to RIST-based and Zixi-based streams.</p>
#[serde(rename = "MaxLatency")]
#[serde(skip_serializing_if = "Option::is_none")]
pub max_latency: Option<i64>,
/// <p>The name of the source.</p>
#[serde(rename = "Name")]
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
/// <p>The protocol that is used by the source.</p>
#[serde(rename = "Protocol")]
#[serde(skip_serializing_if = "Option::is_none")]
pub protocol: Option<String>,
/// <p>The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.</p>
#[serde(rename = "StreamId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub stream_id: Option<String>,
/// <p>The range of IP addresses that should be allowed to contribute content to your source. These IP addresses should be in the form of a Classless Inter-Domain Routing (CIDR) block; for example, 10.0.0.0/16.</p>
#[serde(rename = "WhitelistCidr")]
#[serde(skip_serializing_if = "Option::is_none")]
pub whitelist_cidr: Option<String>,
}
/// <p>The settings for the source of the flow.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Source {
/// <p>Percentage from 0-100 of the data transfer cost to be billed to the subscriber.</p>
#[serde(rename = "DataTransferSubscriberFeePercent")]
#[serde(skip_serializing_if = "Option::is_none")]
pub data_transfer_subscriber_fee_percent: Option<i64>,
/// <p>The type of encryption that is used on the content ingested from this source.</p>
#[serde(rename = "Decryption")]
#[serde(skip_serializing_if = "Option::is_none")]
pub decryption: Option<Encryption>,
/// <p>A description for the source. This value is not used or seen outside of the current AWS Elemental MediaConnect account.</p>
#[serde(rename = "Description")]
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// <p>The ARN of the entitlement that allows you to subscribe to content that comes from another AWS account. The entitlement is set by the content originator and the ARN is generated as part of the originator's flow.</p>
#[serde(rename = "EntitlementArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub entitlement_arn: Option<String>,
/// <p>The IP address that the flow will be listening on for incoming content.</p>
#[serde(rename = "IngestIp")]
#[serde(skip_serializing_if = "Option::is_none")]
pub ingest_ip: Option<String>,
/// <p>The port that the flow will be listening on for incoming content.</p>
#[serde(rename = "IngestPort")]
#[serde(skip_serializing_if = "Option::is_none")]
pub ingest_port: Option<i64>,
/// <p>The name of the source.</p>
#[serde(rename = "Name")]
pub name: String,
/// <p>The ARN of the source.</p>
#[serde(rename = "SourceArn")]
pub source_arn: String,
/// <p>Attributes related to the transport stream that are used in the source.</p>
#[serde(rename = "Transport")]
#[serde(skip_serializing_if = "Option::is_none")]
pub transport: Option<Transport>,
/// <p>The range of IP addresses that should be allowed to contribute content to your source. These IP addresses should be in the form of a Classless Inter-Domain Routing (CIDR) block; for example, 10.0.0.0/16.</p>
#[serde(rename = "WhitelistCidr")]
#[serde(skip_serializing_if = "Option::is_none")]
pub whitelist_cidr: Option<String>,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct StartFlowRequest {
/// <p>The ARN of the flow that you want to start.</p>
#[serde(rename = "FlowArn")]
pub flow_arn: String,
}
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct StartFlowResponse {
/// <p>The ARN of the flow that you started.</p>
#[serde(rename = "FlowArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub flow_arn: Option<String>,
/// <p>The status of the flow when the StartFlow process begins.</p>
#[serde(rename = "Status")]
#[serde(skip_serializing_if = "Option::is_none")]
pub status: Option<String>,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct StopFlowRequest {
/// <p>The ARN of the flow that you want to stop.</p>
#[serde(rename = "FlowArn")]
pub flow_arn: String,
}
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct StopFlowResponse {
/// <p>The ARN of the flow that you stopped.</p>
#[serde(rename = "FlowArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub flow_arn: Option<String>,
/// <p>The status of the flow when the StopFlow process begins.</p>
#[serde(rename = "Status")]
#[serde(skip_serializing_if = "Option::is_none")]
pub status: Option<String>,
}
/// <p>The tags to add to the resource. A tag is an array of key-value pairs. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct TagResourceRequest {
/// <p>The Amazon Resource Name (ARN) that identifies the AWS Elemental MediaConnect resource to which to add tags.</p>
#[serde(rename = "ResourceArn")]
pub resource_arn: String,
/// <p>A map from tag keys to values. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.</p>
#[serde(rename = "Tags")]
pub tags: ::std::collections::HashMap<String, String>,
}
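// Hedged usage sketch: building a TagResourceRequest. The ARN is a
// placeholder, not a real resource; per the field docs above, tag keys are
// limited to 128 characters and values to 256.
#[allow(dead_code)]
fn example_tag_resource_request() -> TagResourceRequest {
    let mut tags = ::std::collections::HashMap::new();
    tags.insert("environment".to_string(), "test".to_string());
    TagResourceRequest {
        resource_arn: "arn:aws:mediaconnect:us-east-1:111122223333:flow:placeholder".to_string(),
        tags,
    }
}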
/// <p>Attributes related to the transport stream that are used in a source or output.</p>
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct Transport {
/// <p>The range of IP addresses that should be allowed to initiate output requests to this flow. These IP addresses should be in the form of a Classless Inter-Domain Routing (CIDR) block; for example, 10.0.0.0/16.</p>
#[serde(rename = "CidrAllowList")]
#[serde(skip_serializing_if = "Option::is_none")]
pub cidr_allow_list: Option<Vec<String>>,
/// <p>The smoothing max bitrate for RIST, RTP, and RTP-FEC streams.</p>
#[serde(rename = "MaxBitrate")]
#[serde(skip_serializing_if = "Option::is_none")]
pub max_bitrate: Option<i64>,
/// <p>The maximum latency in milliseconds. This parameter applies only to RIST-based and Zixi-based streams.</p>
#[serde(rename = "MaxLatency")]
#[serde(skip_serializing_if = "Option::is_none")]
pub max_latency: Option<i64>,
/// <p>The protocol that is used by the source or output.</p>
#[serde(rename = "Protocol")]
pub protocol: String,
/// <p>The remote ID for the Zixi-pull stream.</p>
#[serde(rename = "RemoteId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub remote_id: Option<String>,
/// <p>The smoothing latency in milliseconds for RIST, RTP, and RTP-FEC streams.</p>
#[serde(rename = "SmoothingLatency")]
#[serde(skip_serializing_if = "Option::is_none")]
pub smoothing_latency: Option<i64>,
/// <p>The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.</p>
#[serde(rename = "StreamId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub stream_id: Option<String>,
}
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct UntagResourceRequest {
/// <p>The Amazon Resource Name (ARN) that identifies the AWS Elemental MediaConnect resource from which to delete tags.</p>
#[serde(rename = "ResourceArn")]
pub resource_arn: String,
/// <p>The keys of the tags to be removed.</p>
#[serde(rename = "TagKeys")]
pub tag_keys: Vec<String>,
}
/// <p>Information about the encryption of the flow.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct UpdateEncryption {
/// <p>The type of algorithm that is used for the encryption (such as aes128, aes192, or aes256).</p>
#[serde(rename = "Algorithm")]
#[serde(skip_serializing_if = "Option::is_none")]
pub algorithm: Option<String>,
/// <p>A 128-bit, 16-byte hex value represented by a 32-character string, to be used with the key for encrypting content. This parameter is not valid for static key encryption.</p>
#[serde(rename = "ConstantInitializationVector")]
#[serde(skip_serializing_if = "Option::is_none")]
pub constant_initialization_vector: Option<String>,
/// <p>The value of one of the devices that you configured with your digital rights management (DRM) platform key provider. This parameter is required for SPEKE encryption and is not valid for static key encryption.</p>
#[serde(rename = "DeviceId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub device_id: Option<String>,
/// <p>The type of key that is used for the encryption. If no keyType is provided, the service will use the default setting (static-key).</p>
#[serde(rename = "KeyType")]
#[serde(skip_serializing_if = "Option::is_none")]
pub key_type: Option<String>,
/// <p>The AWS Region that the API Gateway proxy endpoint was created in. This parameter is required for SPEKE encryption and is not valid for static key encryption.</p>
#[serde(rename = "Region")]
#[serde(skip_serializing_if = "Option::is_none")]
pub region: Option<String>,
/// <p>An identifier for the content. The service sends this value to the key server to identify the current endpoint. The resource ID is also known as the content ID. This parameter is required for SPEKE encryption and is not valid for static key encryption.</p>
#[serde(rename = "ResourceId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub resource_id: Option<String>,
/// <p>The ARN of the role that you created during setup (when you set up AWS Elemental MediaConnect as a trusted entity).</p>
#[serde(rename = "RoleArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub role_arn: Option<String>,
/// <p>The ARN of the secret that you created in AWS Secrets Manager to store the encryption key. This parameter is required for static key encryption and is not valid for SPEKE encryption.</p>
#[serde(rename = "SecretArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub secret_arn: Option<String>,
/// <p>The URL from the API Gateway proxy that you set up to talk to your key server. This parameter is required for SPEKE encryption and is not valid for static key encryption.</p>
#[serde(rename = "Url")]
#[serde(skip_serializing_if = "Option::is_none")]
pub url: Option<String>,
}
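// Hedged sketch: an UpdateEncryption payload for static-key encryption, per
// the field docs above (SecretArn applies to static-key; the SPEKE-only
// fields are omitted). Both ARNs are placeholders.
#[allow(dead_code)]
fn example_static_key_encryption() -> UpdateEncryption {
    UpdateEncryption {
        algorithm: Some("aes128".to_string()),
        key_type: Some("static-key".to_string()),
        role_arn: Some("arn:aws:iam::111122223333:role/MediaConnectAccess".to_string()),
        secret_arn: Some(
            "arn:aws:secretsmanager:us-east-1:111122223333:secret:placeholder".to_string(),
        ),
        ..Default::default()
    }
}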
/// <p>The entitlement fields that you want to update.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct UpdateFlowEntitlementRequest {
/// <p>A description of the entitlement. This description appears only on the AWS Elemental MediaConnect console and will not be seen by the subscriber or end user.</p>
#[serde(rename = "Description")]
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// <p>The type of encryption that will be used on the output associated with this entitlement.</p>
#[serde(rename = "Encryption")]
#[serde(skip_serializing_if = "Option::is_none")]
pub encryption: Option<UpdateEncryption>,
/// <p>The ARN of the entitlement that you want to update.</p>
#[serde(rename = "EntitlementArn")]
pub entitlement_arn: String,
/// <p>The flow that is associated with the entitlement that you want to update.</p>
#[serde(rename = "FlowArn")]
pub flow_arn: String,
/// <p>The AWS account IDs that you want to share your content with. The receiving accounts (subscribers) will be allowed to create their own flow using your content as the source.</p>
#[serde(rename = "Subscribers")]
#[serde(skip_serializing_if = "Option::is_none")]
pub subscribers: Option<Vec<String>>,
}
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateFlowEntitlementResponse {
#[serde(rename = "Entitlement")]
#[serde(skip_serializing_if = "Option::is_none")]
pub entitlement: Option<Entitlement>,
/// <p>The ARN of the flow that this entitlement was granted on.</p>
#[serde(rename = "FlowArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub flow_arn: Option<String>,
}
/// <p>The fields that you want to update in the output.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct UpdateFlowOutputRequest {
/// <p>The range of IP addresses that should be allowed to initiate output requests to this flow. These IP addresses should be in the form of a Classless Inter-Domain Routing (CIDR) block; for example, 10.0.0.0/16.</p>
#[serde(rename = "CidrAllowList")]
#[serde(skip_serializing_if = "Option::is_none")]
pub cidr_allow_list: Option<Vec<String>>,
/// <p>A description of the output. This description appears only on the AWS Elemental MediaConnect console and will not be seen by the end user.</p>
#[serde(rename = "Description")]
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// <p>The IP address where you want to send the output.</p>
#[serde(rename = "Destination")]
#[serde(skip_serializing_if = "Option::is_none")]
pub destination: Option<String>,
/// <p>The type of key used for the encryption. If no keyType is provided, the service will use the default setting (static-key).</p>
#[serde(rename = "Encryption")]
#[serde(skip_serializing_if = "Option::is_none")]
pub encryption: Option<UpdateEncryption>,
/// <p>The flow that is associated with the output that you want to update.</p>
#[serde(rename = "FlowArn")]
pub flow_arn: String,
/// <p>The maximum latency in milliseconds for Zixi-based streams.</p>
#[serde(rename = "MaxLatency")]
#[serde(skip_serializing_if = "Option::is_none")]
pub max_latency: Option<i64>,
/// <p>The ARN of the output that you want to update.</p>
#[serde(rename = "OutputArn")]
pub output_arn: String,
/// <p>The port to use when content is distributed to this output.</p>
#[serde(rename = "Port")]
#[serde(skip_serializing_if = "Option::is_none")]
pub port: Option<i64>,
/// <p>The protocol to use for the output.</p>
#[serde(rename = "Protocol")]
#[serde(skip_serializing_if = "Option::is_none")]
pub protocol: Option<String>,
/// <p>The remote ID for the Zixi-pull stream.</p>
#[serde(rename = "RemoteId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub remote_id: Option<String>,
/// <p>The smoothing latency in milliseconds for RIST, RTP, and RTP-FEC streams.</p>
#[serde(rename = "SmoothingLatency")]
#[serde(skip_serializing_if = "Option::is_none")]
pub smoothing_latency: Option<i64>,
/// <p>The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.</p>
#[serde(rename = "StreamId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub stream_id: Option<String>,
}
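// Hedged sketch: updating only the destination and port of an existing output.
// The ARNs are placeholders; Option fields left as None are not serialized,
// which typically leaves those attributes unchanged on the service side.
#[allow(dead_code)]
fn example_update_flow_output_request() -> UpdateFlowOutputRequest {
    UpdateFlowOutputRequest {
        flow_arn: "arn:aws:mediaconnect:us-east-1:111122223333:flow:placeholder".to_string(),
        output_arn: "arn:aws:mediaconnect:us-east-1:111122223333:output:placeholder".to_string(),
        destination: Some("198.51.100.10".to_string()),
        port: Some(5010),
        ..Default::default()
    }
}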
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateFlowOutputResponse {
/// <p>The ARN of the flow that is associated with the updated output.</p>
#[serde(rename = "FlowArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub flow_arn: Option<String>,
#[serde(rename = "Output")]
#[serde(skip_serializing_if = "Option::is_none")]
pub output: Option<Output>,
}
/// <p>A request to update the source of a flow.</p>
#[derive(Default, Debug, Clone, PartialEq, Serialize)]
#[cfg_attr(feature = "deserialize_structs", derive(Deserialize))]
pub struct UpdateFlowSourceRequest {
/// <p>The type of encryption used on the content ingested from this source.</p>
#[serde(rename = "Decryption")]
#[serde(skip_serializing_if = "Option::is_none")]
pub decryption: Option<UpdateEncryption>,
/// <p>A description for the source. This value is not used or seen outside of the current AWS Elemental MediaConnect account.</p>
#[serde(rename = "Description")]
#[serde(skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
/// <p>The ARN of the entitlement that allows you to subscribe to this flow. The entitlement is set by the flow originator, and the ARN is generated as part of the originator's flow.</p>
#[serde(rename = "EntitlementArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub entitlement_arn: Option<String>,
/// <p>The flow that is associated with the source that you want to update.</p>
#[serde(rename = "FlowArn")]
pub flow_arn: String,
/// <p>The port that the flow will be listening on for incoming content.</p>
#[serde(rename = "IngestPort")]
#[serde(skip_serializing_if = "Option::is_none")]
pub ingest_port: Option<i64>,
/// <p>The smoothing max bitrate for RIST, RTP, and RTP-FEC streams.</p>
#[serde(rename = "MaxBitrate")]
#[serde(skip_serializing_if = "Option::is_none")]
pub max_bitrate: Option<i64>,
/// <p>The maximum latency in milliseconds. This parameter applies only to RIST-based and Zixi-based streams.</p>
#[serde(rename = "MaxLatency")]
#[serde(skip_serializing_if = "Option::is_none")]
pub max_latency: Option<i64>,
/// <p>The protocol that is used by the source.</p>
#[serde(rename = "Protocol")]
#[serde(skip_serializing_if = "Option::is_none")]
pub protocol: Option<String>,
/// <p>The ARN of the source that you want to update.</p>
#[serde(rename = "SourceArn")]
pub source_arn: String,
/// <p>The stream ID that you want to use for this transport. This parameter applies only to Zixi-based streams.</p>
#[serde(rename = "StreamId")]
#[serde(skip_serializing_if = "Option::is_none")]
pub stream_id: Option<String>,
/// <p>The range of IP addresses that should be allowed to contribute content to your source. These IP addresses should be in the form of a Classless Inter-Domain Routing (CIDR) block; for example, 10.0.0.0/16.</p>
#[serde(rename = "WhitelistCidr")]
#[serde(skip_serializing_if = "Option::is_none")]
pub whitelist_cidr: Option<String>,
}
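// Hedged sketch: tightening the ingest whitelist on a source; only the fields
// being changed are set. ARNs are placeholders.
#[allow(dead_code)]
fn example_update_flow_source_request() -> UpdateFlowSourceRequest {
    UpdateFlowSourceRequest {
        flow_arn: "arn:aws:mediaconnect:us-east-1:111122223333:flow:placeholder".to_string(),
        source_arn: "arn:aws:mediaconnect:us-east-1:111122223333:source:placeholder".to_string(),
        whitelist_cidr: Some("192.0.2.0/24".to_string()),
        ..Default::default()
    }
}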
#[derive(Default, Debug, Clone, PartialEq, Deserialize)]
#[cfg_attr(any(test, feature = "serialize_structs"), derive(Serialize))]
pub struct UpdateFlowSourceResponse {
    /// <p>The ARN of the flow that you updated.</p>
#[serde(rename = "FlowArn")]
#[serde(skip_serializing_if = "Option::is_none")]
pub flow_arn: Option<String>,
/// <p>The settings for the source of the flow.</p>
#[serde(rename = "Source")]
#[serde(skip_serializing_if = "Option::is_none")]
pub source: Option<Source>,
}
/// Errors returned by AddFlowOutputs
#[derive(Debug, PartialEq)]
pub enum AddFlowOutputsError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
AddFlowOutputs420(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
Forbidden(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl AddFlowOutputsError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<AddFlowOutputsError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"AddFlowOutputs420Exception" => {
return RusotoError::Service(AddFlowOutputsError::AddFlowOutputs420(err.msg))
}
"BadRequestException" => {
return RusotoError::Service(AddFlowOutputsError::BadRequest(err.msg))
}
"ForbiddenException" => {
return RusotoError::Service(AddFlowOutputsError::Forbidden(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(AddFlowOutputsError::InternalServerError(err.msg))
}
"NotFoundException" => {
return RusotoError::Service(AddFlowOutputsError::NotFound(err.msg))
}
"ServiceUnavailableException" => {
return RusotoError::Service(AddFlowOutputsError::ServiceUnavailable(err.msg))
}
"TooManyRequestsException" => {
return RusotoError::Service(AddFlowOutputsError::TooManyRequests(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for AddFlowOutputsError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
AddFlowOutputsError::AddFlowOutputs420(ref cause) => write!(f, "{}", cause),
AddFlowOutputsError::BadRequest(ref cause) => write!(f, "{}", cause),
AddFlowOutputsError::Forbidden(ref cause) => write!(f, "{}", cause),
AddFlowOutputsError::InternalServerError(ref cause) => write!(f, "{}", cause),
AddFlowOutputsError::NotFound(ref cause) => write!(f, "{}", cause),
AddFlowOutputsError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
AddFlowOutputsError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for AddFlowOutputsError {}
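// Hedged sketch: one way a caller might branch on AddFlowOutputsError after a
// client call. The retry comment is illustrative guidance, not library
// behavior; the same pattern applies to the other error enums below.
#[allow(dead_code)]
fn example_handle_add_flow_outputs_error(err: RusotoError<AddFlowOutputsError>) {
    match err {
        RusotoError::Service(AddFlowOutputsError::TooManyRequests(msg)) => {
            // Throttled: a caller could retry with exponential backoff here.
            eprintln!("throttled: {}", msg);
        }
        RusotoError::Service(AddFlowOutputsError::NotFound(msg)) => {
            eprintln!("flow not found: {}", msg);
        }
        other => eprintln!("request failed: {:?}", other),
    }
}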
/// Errors returned by CreateFlow
#[derive(Debug, PartialEq)]
pub enum CreateFlowError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
CreateFlow420(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
Forbidden(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl CreateFlowError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<CreateFlowError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(CreateFlowError::BadRequest(err.msg))
}
"CreateFlow420Exception" => {
return RusotoError::Service(CreateFlowError::CreateFlow420(err.msg))
}
"ForbiddenException" => {
return RusotoError::Service(CreateFlowError::Forbidden(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(CreateFlowError::InternalServerError(err.msg))
}
"ServiceUnavailableException" => {
return RusotoError::Service(CreateFlowError::ServiceUnavailable(err.msg))
}
"TooManyRequestsException" => {
return RusotoError::Service(CreateFlowError::TooManyRequests(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for CreateFlowError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
CreateFlowError::BadRequest(ref cause) => write!(f, "{}", cause),
CreateFlowError::CreateFlow420(ref cause) => write!(f, "{}", cause),
CreateFlowError::Forbidden(ref cause) => write!(f, "{}", cause),
CreateFlowError::InternalServerError(ref cause) => write!(f, "{}", cause),
CreateFlowError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
CreateFlowError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for CreateFlowError {}
/// Errors returned by DeleteFlow
#[derive(Debug, PartialEq)]
pub enum DeleteFlowError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
Forbidden(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl DeleteFlowError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DeleteFlowError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(DeleteFlowError::BadRequest(err.msg))
}
"ForbiddenException" => {
return RusotoError::Service(DeleteFlowError::Forbidden(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(DeleteFlowError::InternalServerError(err.msg))
}
"NotFoundException" => {
return RusotoError::Service(DeleteFlowError::NotFound(err.msg))
}
"ServiceUnavailableException" => {
return RusotoError::Service(DeleteFlowError::ServiceUnavailable(err.msg))
}
"TooManyRequestsException" => {
return RusotoError::Service(DeleteFlowError::TooManyRequests(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for DeleteFlowError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
DeleteFlowError::BadRequest(ref cause) => write!(f, "{}", cause),
DeleteFlowError::Forbidden(ref cause) => write!(f, "{}", cause),
DeleteFlowError::InternalServerError(ref cause) => write!(f, "{}", cause),
DeleteFlowError::NotFound(ref cause) => write!(f, "{}", cause),
DeleteFlowError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
DeleteFlowError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for DeleteFlowError {}
/// Errors returned by DescribeFlow
#[derive(Debug, PartialEq)]
pub enum DescribeFlowError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
Forbidden(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl DescribeFlowError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<DescribeFlowError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(DescribeFlowError::BadRequest(err.msg))
}
"ForbiddenException" => {
return RusotoError::Service(DescribeFlowError::Forbidden(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(DescribeFlowError::InternalServerError(err.msg))
}
"NotFoundException" => {
return RusotoError::Service(DescribeFlowError::NotFound(err.msg))
}
"ServiceUnavailableException" => {
return RusotoError::Service(DescribeFlowError::ServiceUnavailable(err.msg))
}
"TooManyRequestsException" => {
return RusotoError::Service(DescribeFlowError::TooManyRequests(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for DescribeFlowError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
DescribeFlowError::BadRequest(ref cause) => write!(f, "{}", cause),
DescribeFlowError::Forbidden(ref cause) => write!(f, "{}", cause),
DescribeFlowError::InternalServerError(ref cause) => write!(f, "{}", cause),
DescribeFlowError::NotFound(ref cause) => write!(f, "{}", cause),
DescribeFlowError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
DescribeFlowError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for DescribeFlowError {}
/// Errors returned by GrantFlowEntitlements
#[derive(Debug, PartialEq)]
pub enum GrantFlowEntitlementsError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
Forbidden(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
GrantFlowEntitlements420(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl GrantFlowEntitlementsError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<GrantFlowEntitlementsError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(GrantFlowEntitlementsError::BadRequest(err.msg))
}
"ForbiddenException" => {
return RusotoError::Service(GrantFlowEntitlementsError::Forbidden(err.msg))
}
"GrantFlowEntitlements420Exception" => {
return RusotoError::Service(
GrantFlowEntitlementsError::GrantFlowEntitlements420(err.msg),
)
}
"InternalServerErrorException" => {
return RusotoError::Service(GrantFlowEntitlementsError::InternalServerError(
err.msg,
))
}
"NotFoundException" => {
return RusotoError::Service(GrantFlowEntitlementsError::NotFound(err.msg))
}
"ServiceUnavailableException" => {
return RusotoError::Service(GrantFlowEntitlementsError::ServiceUnavailable(
err.msg,
))
}
"TooManyRequestsException" => {
return RusotoError::Service(GrantFlowEntitlementsError::TooManyRequests(
err.msg,
))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for GrantFlowEntitlementsError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
GrantFlowEntitlementsError::BadRequest(ref cause) => write!(f, "{}", cause),
GrantFlowEntitlementsError::Forbidden(ref cause) => write!(f, "{}", cause),
GrantFlowEntitlementsError::GrantFlowEntitlements420(ref cause) => {
write!(f, "{}", cause)
}
GrantFlowEntitlementsError::InternalServerError(ref cause) => write!(f, "{}", cause),
GrantFlowEntitlementsError::NotFound(ref cause) => write!(f, "{}", cause),
GrantFlowEntitlementsError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
GrantFlowEntitlementsError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for GrantFlowEntitlementsError {}
/// Errors returned by ListEntitlements
#[derive(Debug, PartialEq)]
pub enum ListEntitlementsError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl ListEntitlementsError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListEntitlementsError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(ListEntitlementsError::BadRequest(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(ListEntitlementsError::InternalServerError(
err.msg,
))
}
"ServiceUnavailableException" => {
return RusotoError::Service(ListEntitlementsError::ServiceUnavailable(err.msg))
}
"TooManyRequestsException" => {
return RusotoError::Service(ListEntitlementsError::TooManyRequests(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for ListEntitlementsError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ListEntitlementsError::BadRequest(ref cause) => write!(f, "{}", cause),
ListEntitlementsError::InternalServerError(ref cause) => write!(f, "{}", cause),
ListEntitlementsError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
ListEntitlementsError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for ListEntitlementsError {}
/// Errors returned by ListFlows
#[derive(Debug, PartialEq)]
pub enum ListFlowsError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl ListFlowsError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListFlowsError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(ListFlowsError::BadRequest(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(ListFlowsError::InternalServerError(err.msg))
}
"ServiceUnavailableException" => {
return RusotoError::Service(ListFlowsError::ServiceUnavailable(err.msg))
}
"TooManyRequestsException" => {
return RusotoError::Service(ListFlowsError::TooManyRequests(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for ListFlowsError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ListFlowsError::BadRequest(ref cause) => write!(f, "{}", cause),
ListFlowsError::InternalServerError(ref cause) => write!(f, "{}", cause),
ListFlowsError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
ListFlowsError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for ListFlowsError {}
/// Errors returned by ListTagsForResource
#[derive(Debug, PartialEq)]
pub enum ListTagsForResourceError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
}
impl ListTagsForResourceError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<ListTagsForResourceError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(ListTagsForResourceError::BadRequest(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(ListTagsForResourceError::InternalServerError(
err.msg,
))
}
"NotFoundException" => {
return RusotoError::Service(ListTagsForResourceError::NotFound(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for ListTagsForResourceError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ListTagsForResourceError::BadRequest(ref cause) => write!(f, "{}", cause),
ListTagsForResourceError::InternalServerError(ref cause) => write!(f, "{}", cause),
ListTagsForResourceError::NotFound(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for ListTagsForResourceError {}
/// Errors returned by RemoveFlowOutput
#[derive(Debug, PartialEq)]
pub enum RemoveFlowOutputError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
Forbidden(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl RemoveFlowOutputError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<RemoveFlowOutputError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(RemoveFlowOutputError::BadRequest(err.msg))
}
"ForbiddenException" => {
return RusotoError::Service(RemoveFlowOutputError::Forbidden(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(RemoveFlowOutputError::InternalServerError(
err.msg,
))
}
"NotFoundException" => {
return RusotoError::Service(RemoveFlowOutputError::NotFound(err.msg))
}
"ServiceUnavailableException" => {
return RusotoError::Service(RemoveFlowOutputError::ServiceUnavailable(err.msg))
}
"TooManyRequestsException" => {
return RusotoError::Service(RemoveFlowOutputError::TooManyRequests(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for RemoveFlowOutputError {
#[allow(unused_variables)]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            RemoveFlowOutputError::BadRequest(ref cause) => write!(f, "{}", cause),
            RemoveFlowOutputError::Forbidden(ref cause) => write!(f, "{}", cause),
            RemoveFlowOutputError::InternalServerError(ref cause) => write!(f, "{}", cause),
            RemoveFlowOutputError::NotFound(ref cause) => write!(f, "{}", cause),
            RemoveFlowOutputError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
            RemoveFlowOutputError::TooManyRequests(ref cause) => write!(f, "{}", cause),
        }
    }
}
impl Error for RemoveFlowOutputError {}
/// Errors returned by RevokeFlowEntitlement
#[derive(Debug, PartialEq)]
pub enum RevokeFlowEntitlementError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
Forbidden(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl RevokeFlowEntitlementError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<RevokeFlowEntitlementError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(RevokeFlowEntitlementError::BadRequest(err.msg))
}
"ForbiddenException" => {
return RusotoError::Service(RevokeFlowEntitlementError::Forbidden(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(RevokeFlowEntitlementError::InternalServerError(
err.msg,
))
}
"NotFoundException" => {
return RusotoError::Service(RevokeFlowEntitlementError::NotFound(err.msg))
}
"ServiceUnavailableException" => {
return RusotoError::Service(RevokeFlowEntitlementError::ServiceUnavailable(
err.msg,
))
}
"TooManyRequestsException" => {
return RusotoError::Service(RevokeFlowEntitlementError::TooManyRequests(
err.msg,
))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for RevokeFlowEntitlementError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
RevokeFlowEntitlementError::BadRequest(ref cause) => write!(f, "{}", cause),
RevokeFlowEntitlementError::Forbidden(ref cause) => write!(f, "{}", cause),
RevokeFlowEntitlementError::InternalServerError(ref cause) => write!(f, "{}", cause),
RevokeFlowEntitlementError::NotFound(ref cause) => write!(f, "{}", cause),
RevokeFlowEntitlementError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
RevokeFlowEntitlementError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for RevokeFlowEntitlementError {}
/// Errors returned by StartFlow
#[derive(Debug, PartialEq)]
pub enum StartFlowError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
Forbidden(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl StartFlowError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<StartFlowError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(StartFlowError::BadRequest(err.msg))
}
"ForbiddenException" => {
return RusotoError::Service(StartFlowError::Forbidden(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(StartFlowError::InternalServerError(err.msg))
}
"NotFoundException" => {
return RusotoError::Service(StartFlowError::NotFound(err.msg))
}
"ServiceUnavailableException" => {
return RusotoError::Service(StartFlowError::ServiceUnavailable(err.msg))
}
"TooManyRequestsException" => {
return RusotoError::Service(StartFlowError::TooManyRequests(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for StartFlowError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
StartFlowError::BadRequest(ref cause) => write!(f, "{}", cause),
StartFlowError::Forbidden(ref cause) => write!(f, "{}", cause),
StartFlowError::InternalServerError(ref cause) => write!(f, "{}", cause),
StartFlowError::NotFound(ref cause) => write!(f, "{}", cause),
StartFlowError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
StartFlowError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for StartFlowError {}
/// Errors returned by StopFlow
#[derive(Debug, PartialEq)]
pub enum StopFlowError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
Forbidden(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl StopFlowError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<StopFlowError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(StopFlowError::BadRequest(err.msg))
}
"ForbiddenException" => {
return RusotoError::Service(StopFlowError::Forbidden(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(StopFlowError::InternalServerError(err.msg))
}
"NotFoundException" => {
return RusotoError::Service(StopFlowError::NotFound(err.msg))
}
"ServiceUnavailableException" => {
return RusotoError::Service(StopFlowError::ServiceUnavailable(err.msg))
}
"TooManyRequestsException" => {
return RusotoError::Service(StopFlowError::TooManyRequests(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for StopFlowError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
StopFlowError::BadRequest(ref cause) => write!(f, "{}", cause),
StopFlowError::Forbidden(ref cause) => write!(f, "{}", cause),
StopFlowError::InternalServerError(ref cause) => write!(f, "{}", cause),
StopFlowError::NotFound(ref cause) => write!(f, "{}", cause),
StopFlowError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
StopFlowError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for StopFlowError {}
/// Errors returned by TagResource
#[derive(Debug, PartialEq)]
pub enum TagResourceError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
}
impl TagResourceError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<TagResourceError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(TagResourceError::BadRequest(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(TagResourceError::InternalServerError(err.msg))
}
"NotFoundException" => {
return RusotoError::Service(TagResourceError::NotFound(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for TagResourceError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
TagResourceError::BadRequest(ref cause) => write!(f, "{}", cause),
TagResourceError::InternalServerError(ref cause) => write!(f, "{}", cause),
TagResourceError::NotFound(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for TagResourceError {}
/// Errors returned by UntagResource
#[derive(Debug, PartialEq)]
pub enum UntagResourceError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
}
impl UntagResourceError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UntagResourceError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(UntagResourceError::BadRequest(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(UntagResourceError::InternalServerError(err.msg))
}
"NotFoundException" => {
return RusotoError::Service(UntagResourceError::NotFound(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for UntagResourceError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
UntagResourceError::BadRequest(ref cause) => write!(f, "{}", cause),
UntagResourceError::InternalServerError(ref cause) => write!(f, "{}", cause),
UntagResourceError::NotFound(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for UntagResourceError {}
/// Errors returned by UpdateFlowEntitlement
#[derive(Debug, PartialEq)]
pub enum UpdateFlowEntitlementError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
Forbidden(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl UpdateFlowEntitlementError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateFlowEntitlementError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(UpdateFlowEntitlementError::BadRequest(err.msg))
}
"ForbiddenException" => {
return RusotoError::Service(UpdateFlowEntitlementError::Forbidden(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(UpdateFlowEntitlementError::InternalServerError(
err.msg,
))
}
"NotFoundException" => {
return RusotoError::Service(UpdateFlowEntitlementError::NotFound(err.msg))
}
"ServiceUnavailableException" => {
return RusotoError::Service(UpdateFlowEntitlementError::ServiceUnavailable(
err.msg,
))
}
"TooManyRequestsException" => {
return RusotoError::Service(UpdateFlowEntitlementError::TooManyRequests(
err.msg,
))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for UpdateFlowEntitlementError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
UpdateFlowEntitlementError::BadRequest(ref cause) => write!(f, "{}", cause),
UpdateFlowEntitlementError::Forbidden(ref cause) => write!(f, "{}", cause),
UpdateFlowEntitlementError::InternalServerError(ref cause) => write!(f, "{}", cause),
UpdateFlowEntitlementError::NotFound(ref cause) => write!(f, "{}", cause),
UpdateFlowEntitlementError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
UpdateFlowEntitlementError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for UpdateFlowEntitlementError {}
/// Errors returned by UpdateFlowOutput
#[derive(Debug, PartialEq)]
pub enum UpdateFlowOutputError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
Forbidden(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl UpdateFlowOutputError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateFlowOutputError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(UpdateFlowOutputError::BadRequest(err.msg))
}
"ForbiddenException" => {
return RusotoError::Service(UpdateFlowOutputError::Forbidden(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(UpdateFlowOutputError::InternalServerError(
err.msg,
))
}
"NotFoundException" => {
return RusotoError::Service(UpdateFlowOutputError::NotFound(err.msg))
}
"ServiceUnavailableException" => {
return RusotoError::Service(UpdateFlowOutputError::ServiceUnavailable(err.msg))
}
"TooManyRequestsException" => {
return RusotoError::Service(UpdateFlowOutputError::TooManyRequests(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for UpdateFlowOutputError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
UpdateFlowOutputError::BadRequest(ref cause) => write!(f, "{}", cause),
UpdateFlowOutputError::Forbidden(ref cause) => write!(f, "{}", cause),
UpdateFlowOutputError::InternalServerError(ref cause) => write!(f, "{}", cause),
UpdateFlowOutputError::NotFound(ref cause) => write!(f, "{}", cause),
UpdateFlowOutputError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
UpdateFlowOutputError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for UpdateFlowOutputError {}
/// Errors returned by UpdateFlowSource
#[derive(Debug, PartialEq)]
pub enum UpdateFlowSourceError {
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
BadRequest(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
Forbidden(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
InternalServerError(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
NotFound(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
ServiceUnavailable(String),
/// <p>Exception raised by AWS Elemental MediaConnect. See the error message and documentation for the operation for more information on the cause of this exception.</p>
TooManyRequests(String),
}
impl UpdateFlowSourceError {
pub fn from_response(res: BufferedHttpResponse) -> RusotoError<UpdateFlowSourceError> {
if let Some(err) = proto::json::Error::parse_rest(&res) {
match err.typ.as_str() {
"BadRequestException" => {
return RusotoError::Service(UpdateFlowSourceError::BadRequest(err.msg))
}
"ForbiddenException" => {
return RusotoError::Service(UpdateFlowSourceError::Forbidden(err.msg))
}
"InternalServerErrorException" => {
return RusotoError::Service(UpdateFlowSourceError::InternalServerError(
err.msg,
))
}
"NotFoundException" => {
return RusotoError::Service(UpdateFlowSourceError::NotFound(err.msg))
}
"ServiceUnavailableException" => {
return RusotoError::Service(UpdateFlowSourceError::ServiceUnavailable(err.msg))
}
"TooManyRequestsException" => {
return RusotoError::Service(UpdateFlowSourceError::TooManyRequests(err.msg))
}
"ValidationException" => return RusotoError::Validation(err.msg),
_ => {}
}
}
RusotoError::Unknown(res)
}
}
impl fmt::Display for UpdateFlowSourceError {
#[allow(unused_variables)]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
UpdateFlowSourceError::BadRequest(ref cause) => write!(f, "{}", cause),
UpdateFlowSourceError::Forbidden(ref cause) => write!(f, "{}", cause),
UpdateFlowSourceError::InternalServerError(ref cause) => write!(f, "{}", cause),
UpdateFlowSourceError::NotFound(ref cause) => write!(f, "{}", cause),
UpdateFlowSourceError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
UpdateFlowSourceError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
}
}
impl Error for UpdateFlowSourceError {}
/// Trait representing the capabilities of the AWS MediaConnect API. AWS MediaConnect clients implement this trait.
#[async_trait]
pub trait MediaConnect {
/// <p>Adds outputs to an existing flow. You can create up to 20 outputs per flow.</p>
async fn add_flow_outputs(
&self,
input: AddFlowOutputsRequest,
) -> Result<AddFlowOutputsResponse, RusotoError<AddFlowOutputsError>>;
/// <p>Creates a new flow. The request must include one source. The request optionally can include outputs (up to 20) and entitlements (up to 50).</p>
async fn create_flow(
&self,
input: CreateFlowRequest,
) -> Result<CreateFlowResponse, RusotoError<CreateFlowError>>;
/// <p>Deletes a flow. Before you can delete a flow, you must stop the flow.</p>
async fn delete_flow(
&self,
input: DeleteFlowRequest,
) -> Result<DeleteFlowResponse, RusotoError<DeleteFlowError>>;
/// <p>Displays the details of a flow. The response includes the flow ARN, name, and Availability Zone, as well as details about the source, outputs, and entitlements.</p>
async fn describe_flow(
&self,
input: DescribeFlowRequest,
) -> Result<DescribeFlowResponse, RusotoError<DescribeFlowError>>;
/// <p>Grants entitlements to an existing flow.</p>
async fn grant_flow_entitlements(
&self,
input: GrantFlowEntitlementsRequest,
) -> Result<GrantFlowEntitlementsResponse, RusotoError<GrantFlowEntitlementsError>>;
/// <p>Displays a list of all entitlements that have been granted to this account. This request returns 20 results per page.</p>
async fn list_entitlements(
&self,
input: ListEntitlementsRequest,
) -> Result<ListEntitlementsResponse, RusotoError<ListEntitlementsError>>;
/// <p>Displays a list of flows that are associated with this account. This request returns a paginated result.</p>
async fn list_flows(
&self,
input: ListFlowsRequest,
) -> Result<ListFlowsResponse, RusotoError<ListFlowsError>>;
/// <p>List all tags on an AWS Elemental MediaConnect resource</p>
async fn list_tags_for_resource(
&self,
input: ListTagsForResourceRequest,
) -> Result<ListTagsForResourceResponse, RusotoError<ListTagsForResourceError>>;
/// <p>Removes an output from an existing flow. This request can be made only on an output that does not have an entitlement associated with it. If the output has an entitlement, you must revoke the entitlement instead. When an entitlement is revoked from a flow, the service automatically removes the associated output.</p>
async fn remove_flow_output(
&self,
input: RemoveFlowOutputRequest,
) -> Result<RemoveFlowOutputResponse, RusotoError<RemoveFlowOutputError>>;
/// <p>Revokes an entitlement from a flow. Once an entitlement is revoked, the content becomes unavailable to the subscriber and the associated output is removed.</p>
async fn revoke_flow_entitlement(
&self,
input: RevokeFlowEntitlementRequest,
) -> Result<RevokeFlowEntitlementResponse, RusotoError<RevokeFlowEntitlementError>>;
/// <p>Starts a flow.</p>
async fn start_flow(
&self,
input: StartFlowRequest,
) -> Result<StartFlowResponse, RusotoError<StartFlowError>>;
/// <p>Stops a flow.</p>
async fn stop_flow(
&self,
input: StopFlowRequest,
) -> Result<StopFlowResponse, RusotoError<StopFlowError>>;
/// <p>Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well.</p>
async fn tag_resource(
&self,
input: TagResourceRequest,
) -> Result<(), RusotoError<TagResourceError>>;
/// <p>Deletes specified tags from a resource.</p>
async fn untag_resource(
&self,
input: UntagResourceRequest,
) -> Result<(), RusotoError<UntagResourceError>>;
    /// <p>You can change an entitlement's description, subscribers, and encryption. If you change the subscribers, the service will remove the outputs that are used by the subscribers that are removed.</p>
async fn update_flow_entitlement(
&self,
input: UpdateFlowEntitlementRequest,
) -> Result<UpdateFlowEntitlementResponse, RusotoError<UpdateFlowEntitlementError>>;
/// <p>Updates an existing flow output.</p>
async fn update_flow_output(
&self,
input: UpdateFlowOutputRequest,
) -> Result<UpdateFlowOutputResponse, RusotoError<UpdateFlowOutputError>>;
/// <p>Updates the source of a flow.</p>
async fn update_flow_source(
&self,
input: UpdateFlowSourceRequest,
) -> Result<UpdateFlowSourceResponse, RusotoError<UpdateFlowSourceError>>;
}
/// A client for the AWS MediaConnect API.
#[derive(Clone)]
pub struct MediaConnectClient {
client: Client,
region: region::Region,
}
impl MediaConnectClient {
/// Creates a client backed by the default tokio event loop.
///
/// The client will use the default credentials provider and tls client.
pub fn new(region: region::Region) -> MediaConnectClient {
MediaConnectClient {
client: Client::shared(),
region,
}
}
pub fn new_with<P, D>(
request_dispatcher: D,
credentials_provider: P,
region: region::Region,
) -> MediaConnectClient
where
P: ProvideAwsCredentials + Send + Sync + 'static,
D: DispatchSignedRequest + Send + Sync + 'static,
{
MediaConnectClient {
client: Client::new_with(credentials_provider, request_dispatcher),
region,
}
}
pub fn new_with_client(client: Client, region: region::Region) -> MediaConnectClient {
MediaConnectClient { client, region }
}
}
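// A minimal usage sketch (added for illustration; not part of the generated
// client). It assumes a Tokio async runtime is available and uses a default
// (empty) ListFlowsRequest:
//
//     use rusoto_core::Region;
//
//     #[tokio::main]
//     async fn main() -> Result<(), Box<dyn std::error::Error>> {
//         let client = MediaConnectClient::new(Region::UsEast1);
//         let flows = client.list_flows(ListFlowsRequest::default()).await?;
//         println!("found flows: {:?}", flows.flows);
//         Ok(())
//     }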
#[async_trait]
impl MediaConnect for MediaConnectClient {
/// <p>Adds outputs to an existing flow. You can create up to 20 outputs per flow.</p>
async fn add_flow_outputs(
&self,
input: AddFlowOutputsRequest,
) -> Result<AddFlowOutputsResponse, RusotoError<AddFlowOutputsError>> {
let request_uri = format!("/v1/flows/{flow_arn}/outputs", flow_arn = input.flow_arn);
let mut request = SignedRequest::new("POST", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let encoded = Some(serde_json::to_vec(&input).unwrap());
request.set_payload(encoded);
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 201 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<AddFlowOutputsResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(AddFlowOutputsError::from_response(response))
}
}
/// <p>Creates a new flow. The request must include one source. The request optionally can include outputs (up to 20) and entitlements (up to 50).</p>
async fn create_flow(
&self,
input: CreateFlowRequest,
) -> Result<CreateFlowResponse, RusotoError<CreateFlowError>> {
let request_uri = "/v1/flows";
let mut request = SignedRequest::new("POST", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let encoded = Some(serde_json::to_vec(&input).unwrap());
request.set_payload(encoded);
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 201 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<CreateFlowResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(CreateFlowError::from_response(response))
}
}
/// <p>Deletes a flow. Before you can delete a flow, you must stop the flow.</p>
async fn delete_flow(
&self,
input: DeleteFlowRequest,
) -> Result<DeleteFlowResponse, RusotoError<DeleteFlowError>> {
let request_uri = format!("/v1/flows/{flow_arn}", flow_arn = input.flow_arn);
let mut request = SignedRequest::new("DELETE", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 202 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<DeleteFlowResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(DeleteFlowError::from_response(response))
}
}
/// <p>Displays the details of a flow. The response includes the flow ARN, name, and Availability Zone, as well as details about the source, outputs, and entitlements.</p>
async fn describe_flow(
&self,
input: DescribeFlowRequest,
) -> Result<DescribeFlowResponse, RusotoError<DescribeFlowError>> {
let request_uri = format!("/v1/flows/{flow_arn}", flow_arn = input.flow_arn);
let mut request = SignedRequest::new("GET", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 200 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<DescribeFlowResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(DescribeFlowError::from_response(response))
}
}
/// <p>Grants entitlements to an existing flow.</p>
async fn grant_flow_entitlements(
&self,
input: GrantFlowEntitlementsRequest,
) -> Result<GrantFlowEntitlementsResponse, RusotoError<GrantFlowEntitlementsError>> {
let request_uri = format!(
"/v1/flows/{flow_arn}/entitlements",
flow_arn = input.flow_arn
);
let mut request = SignedRequest::new("POST", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let encoded = Some(serde_json::to_vec(&input).unwrap());
request.set_payload(encoded);
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 200 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<GrantFlowEntitlementsResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(GrantFlowEntitlementsError::from_response(response))
}
}
/// <p>Displays a list of all entitlements that have been granted to this account. This request returns 20 results per page.</p>
async fn list_entitlements(
&self,
input: ListEntitlementsRequest,
) -> Result<ListEntitlementsResponse, RusotoError<ListEntitlementsError>> {
let request_uri = "/v1/entitlements";
let mut request = SignedRequest::new("GET", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let mut params = Params::new();
if let Some(ref x) = input.max_results {
params.put("maxResults", x);
}
if let Some(ref x) = input.next_token {
params.put("nextToken", x);
}
request.set_params(params);
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 200 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<ListEntitlementsResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(ListEntitlementsError::from_response(response))
}
}
/// <p>Displays a list of flows that are associated with this account. This request returns a paginated result.</p>
async fn list_flows(
&self,
input: ListFlowsRequest,
) -> Result<ListFlowsResponse, RusotoError<ListFlowsError>> {
let request_uri = "/v1/flows";
let mut request = SignedRequest::new("GET", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let mut params = Params::new();
if let Some(ref x) = input.max_results {
params.put("maxResults", x);
}
if let Some(ref x) = input.next_token {
params.put("nextToken", x);
}
request.set_params(params);
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 200 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<ListFlowsResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(ListFlowsError::from_response(response))
}
}
/// <p>List all tags on an AWS Elemental MediaConnect resource</p>
async fn list_tags_for_resource(
&self,
input: ListTagsForResourceRequest,
) -> Result<ListTagsForResourceResponse, RusotoError<ListTagsForResourceError>> {
let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
let mut request = SignedRequest::new("GET", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 200 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<ListTagsForResourceResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(ListTagsForResourceError::from_response(response))
}
}
/// <p>Removes an output from an existing flow. This request can be made only on an output that does not have an entitlement associated with it. If the output has an entitlement, you must revoke the entitlement instead. When an entitlement is revoked from a flow, the service automatically removes the associated output.</p>
async fn remove_flow_output(
&self,
input: RemoveFlowOutputRequest,
) -> Result<RemoveFlowOutputResponse, RusotoError<RemoveFlowOutputError>> {
let request_uri = format!(
"/v1/flows/{flow_arn}/outputs/{output_arn}",
flow_arn = input.flow_arn,
output_arn = input.output_arn
);
let mut request = SignedRequest::new("DELETE", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 202 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<RemoveFlowOutputResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(RemoveFlowOutputError::from_response(response))
}
}
/// <p>Revokes an entitlement from a flow. Once an entitlement is revoked, the content becomes unavailable to the subscriber and the associated output is removed.</p>
async fn revoke_flow_entitlement(
&self,
input: RevokeFlowEntitlementRequest,
) -> Result<RevokeFlowEntitlementResponse, RusotoError<RevokeFlowEntitlementError>> {
let request_uri = format!(
"/v1/flows/{flow_arn}/entitlements/{entitlement_arn}",
entitlement_arn = input.entitlement_arn,
flow_arn = input.flow_arn
);
let mut request = SignedRequest::new("DELETE", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 202 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<RevokeFlowEntitlementResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(RevokeFlowEntitlementError::from_response(response))
}
}
/// <p>Starts a flow.</p>
async fn start_flow(
&self,
input: StartFlowRequest,
) -> Result<StartFlowResponse, RusotoError<StartFlowError>> {
let request_uri = format!("/v1/flows/start/{flow_arn}", flow_arn = input.flow_arn);
let mut request = SignedRequest::new("POST", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 202 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<StartFlowResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(StartFlowError::from_response(response))
}
}
/// <p>Stops a flow.</p>
async fn stop_flow(
&self,
input: StopFlowRequest,
) -> Result<StopFlowResponse, RusotoError<StopFlowError>> {
let request_uri = format!("/v1/flows/stop/{flow_arn}", flow_arn = input.flow_arn);
let mut request = SignedRequest::new("POST", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 202 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<StopFlowResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(StopFlowError::from_response(response))
}
}
/// <p>Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource are not specified in the request parameters, they are not changed. When a resource is deleted, the tags associated with that resource are deleted as well.</p>
async fn tag_resource(
&self,
input: TagResourceRequest,
) -> Result<(), RusotoError<TagResourceError>> {
let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
let mut request = SignedRequest::new("POST", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let encoded = Some(serde_json::to_vec(&input).unwrap());
request.set_payload(encoded);
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 204 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = ::std::mem::drop(response);
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(TagResourceError::from_response(response))
}
}
/// <p>Deletes specified tags from a resource.</p>
async fn untag_resource(
&self,
input: UntagResourceRequest,
) -> Result<(), RusotoError<UntagResourceError>> {
let request_uri = format!("/tags/{resource_arn}", resource_arn = input.resource_arn);
let mut request = SignedRequest::new("DELETE", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let mut params = Params::new();
for item in input.tag_keys.iter() {
params.put("tagKeys", item);
}
request.set_params(params);
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 204 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = ::std::mem::drop(response);
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(UntagResourceError::from_response(response))
}
}
    /// <p>You can change an entitlement's description, subscribers, and encryption. If you change the subscribers, the service will remove the outputs that are used by the subscribers that are removed.</p>
async fn update_flow_entitlement(
&self,
input: UpdateFlowEntitlementRequest,
) -> Result<UpdateFlowEntitlementResponse, RusotoError<UpdateFlowEntitlementError>> {
let request_uri = format!(
"/v1/flows/{flow_arn}/entitlements/{entitlement_arn}",
entitlement_arn = input.entitlement_arn,
flow_arn = input.flow_arn
);
let mut request = SignedRequest::new("PUT", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let encoded = Some(serde_json::to_vec(&input).unwrap());
request.set_payload(encoded);
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 202 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<UpdateFlowEntitlementResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(UpdateFlowEntitlementError::from_response(response))
}
}
/// <p>Updates an existing flow output.</p>
async fn update_flow_output(
&self,
input: UpdateFlowOutputRequest,
) -> Result<UpdateFlowOutputResponse, RusotoError<UpdateFlowOutputError>> {
let request_uri = format!(
"/v1/flows/{flow_arn}/outputs/{output_arn}",
flow_arn = input.flow_arn,
output_arn = input.output_arn
);
let mut request = SignedRequest::new("PUT", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let encoded = Some(serde_json::to_vec(&input).unwrap());
request.set_payload(encoded);
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 202 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<UpdateFlowOutputResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(UpdateFlowOutputError::from_response(response))
}
}
/// <p>Updates the source of a flow.</p>
async fn update_flow_source(
&self,
input: UpdateFlowSourceRequest,
) -> Result<UpdateFlowSourceResponse, RusotoError<UpdateFlowSourceError>> {
let request_uri = format!(
"/v1/flows/{flow_arn}/source/{source_arn}",
flow_arn = input.flow_arn,
source_arn = input.source_arn
);
let mut request = SignedRequest::new("PUT", "mediaconnect", &self.region, &request_uri);
request.set_content_type("application/x-amz-json-1.1".to_owned());
let encoded = Some(serde_json::to_vec(&input).unwrap());
request.set_payload(encoded);
let mut response = self
.client
.sign_and_dispatch(request)
.await
.map_err(RusotoError::from)?;
if response.status.as_u16() == 202 {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
let result = proto::json::ResponsePayload::new(&response)
.deserialize::<UpdateFlowSourceResponse, _>()?;
Ok(result)
} else {
let response = response.buffer().await.map_err(RusotoError::HttpDispatch)?;
Err(UpdateFlowSourceError::from_response(response))
}
}
}
| {
match *self {
RemoveFlowOutputError::BadRequest(ref cause) => write!(f, "{}", cause),
RemoveFlowOutputError::Forbidden(ref cause) => write!(f, "{}", cause),
RemoveFlowOutputError::InternalServerError(ref cause) => write!(f, "{}", cause),
RemoveFlowOutputError::NotFound(ref cause) => write!(f, "{}", cause),
RemoveFlowOutputError::ServiceUnavailable(ref cause) => write!(f, "{}", cause),
RemoveFlowOutputError::TooManyRequests(ref cause) => write!(f, "{}", cause),
}
} |
recv_with_dropped_sender.rs | #[cfg(feature = "std")]
fn main() |
#[cfg(not(feature = "std"))]
fn main() {
panic!("This example is only for when the \"sync\" feature is used");
}
| {
let (sender, receiver) = oneshot::channel::<u128>();
std::mem::drop(sender);
receiver.recv().unwrap_err();
} |
option_darwin.go | package option |
var DefaultClientOptions = []gatt.Option{
gatt.MacDeviceRole(gatt.CentralManager),
}
var DefaultServerOptions = []gatt.Option{
gatt.MacDeviceRole(gatt.PeripheralManager),
} |
import "github.com/Jon-Bright/gatt" |
debug.rs | use crate::utility::files::{get_default_settings_dir, get_user_settings_dir};
use amethyst::prelude::Config;
use serde::{Deserialize, Serialize};
use std::path::Path;
#[derive(Debug, Deserialize, Serialize, Default)]
#[serde(default)]
#[serde(deny_unknown_fields)]
pub struct DebugSettings {
/// An array of values that 'time_scale' can have.
/// Debug controls will allow switching between these values,
/// to slow time down or speed it up.
pub time_scale_presets: Vec<f32>,
/// How fast the clock is ticking. A value of 1.0 means time is
/// behaving normally, higher values mean time is sped up and
/// 0.0 means time is frozen.
pub time_scale: f32,
/// Number of seconds to leave between frames when rewinding time.
pub seconds_per_rewind_frame: f32,
/// Enable this when debugging, to save time when rapidly iterating.
/// It saves you from having to navigate the menu every time you start the game.
/// If true, the game will open in the editor state.
/// If false, it will open on the main menu.
pub skip_straight_to_editor: bool,
/// Whether or not to display debug frames indicating the player's discrete position.
pub display_debug_frames: bool,
}
impl DebugSettings {
/// Increase the time scale. Everything in the world will move more quickly.
/// Return a tuple containing the old scale and the new scale.
/// If the time is already operating at the fastest speed, the time scale will not change.
pub fn increase_speed(&mut self) -> (f32, f32) {
let old_time_scale = self.time_scale;
let new_time_scale = self
.time_scale_presets
.iter()
.find(|&&scale| scale > self.time_scale);
if let Some(new_time_scale) = new_time_scale {
self.time_scale = *new_time_scale;
(old_time_scale, self.time_scale)
} else {
(self.time_scale, self.time_scale)
}
}
/// Decrease the time scale. Everything in the world will move more slowly.
/// Return a tuple containing the old scale and the new scale.
/// If the time is already operating at the slowest speed, the time scale will not change.
pub fn decrease_speed(&mut self) -> (f32, f32) {
let old_time_scale = self.time_scale;
let new_time_scale = self
.time_scale_presets
.iter()
.rev()
.find(|&&scale| scale < self.time_scale);
if let Some(new_time_scale) = new_time_scale {
self.time_scale = *new_time_scale;
(old_time_scale, self.time_scale)
} else {
(self.time_scale, self.time_scale)
}
}
}
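// A small illustrative test of the preset stepping above (a sketch added
// here; the preset values are hypothetical, not taken from the game's actual
// settings files).
#[cfg(test)]
mod time_scale_tests {
    use super::DebugSettings;

    #[test]
    fn stepping_through_presets_clamps_at_the_ends() {
        let mut settings = DebugSettings {
            time_scale_presets: vec![0.5, 1.0, 2.0],
            time_scale: 1.0,
            ..DebugSettings::default()
        };
        // Speeding up walks to the next larger preset, then saturates.
        assert_eq!(settings.increase_speed(), (1.0, 2.0));
        assert_eq!(settings.increase_speed(), (2.0, 2.0));
        // Slowing down walks back through the presets, then saturates.
        assert_eq!(settings.decrease_speed(), (2.0, 1.0));
        assert_eq!(settings.decrease_speed(), (1.0, 0.5));
        assert_eq!(settings.decrease_speed(), (0.5, 0.5));
    }
}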
/// Loads the most relevant instance of `DebugSettings`.
///
/// If the user `DebugSettings` file exists, tries to load from user settings first. If that fails,
/// log an error and try to load from default settings.
///
/// If the default `DebugSettings` file fails to load, fall back to the Default trait implementation
/// as a last resort (ie: `DebugSettings::default()`).
#[must_use]
pub fn load_debug_settings() -> DebugSettings {
let user_settings_file = get_user_settings_dir().join("debug.ron");
if user_settings_file.exists() {
load_debug_user_settings(&user_settings_file)
} else {
load_debug_default_settings()
}
}
fn load_debug_user_settings(file_path: &Path) -> DebugSettings |
fn load_debug_default_settings() -> DebugSettings {
let file = get_default_settings_dir().join("debug.ron");
DebugSettings::load(&file).unwrap_or_else(
|error| {
error!(
"Failed to load the default debug settings file from {:?}! Falling back to Default implementation. Error: {:?}",
file, error
);
DebugSettings::default()
},
)
}
| {
DebugSettings::load(&file_path).unwrap_or_else(|error| {
error!(
"Failed to load the user-specific debug settings file from {:?}! Falling back to default settings file. Error: {:?}",
file_path, error
);
load_debug_default_settings()
})
} |
_rotation_forest.py | # -*- coding: utf-8 -*-
"""RotationForest vector classifier.
Rotation Forest, sktime implementation for continuous values only.
"""
__author__ = ["MatthewMiddlehurst"]
__all__ = ["RotationForest"]
import time
import numpy as np
from joblib import Parallel, delayed
from sklearn.base import BaseEstimator
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_random_state, check_X_y
from sktime.base._base import _clone_estimator
from sktime.exceptions import NotFittedError
from sktime.utils.validation import check_n_jobs
class | (BaseEstimator):
"""Rotation Forest Classifier.
Implementation of the Rotation Forest classifier described in Rodriguez et al
(2006). [1]_
Intended as a benchmark for time series data and a base classifier for
transformation-based approaches such as ShapeletTransformClassifier, this sktime
implementation only works with continuous attributes.
Parameters
----------
n_estimators : int, default=200
Number of estimators to build for the ensemble.
min_group : int, default=3
The minimum size of a group.
max_group : int, default=3
The maximum size of a group.
remove_proportion : float, default=0.5
The proportion of cases to be removed.
base_estimator : BaseEstimator or None, default=None
Base estimator for the ensemble. By default uses the sklearn
DecisionTreeClassifier using entropy as a splitting measure.
time_limit_in_minutes : int, default=0
Time contract to limit build time in minutes, overriding n_estimators.
Default of 0 means n_estimators is used.
contract_max_n_estimators : int, default=500
Max number of estimators when time_limit_in_minutes is set.
save_transformed_data : bool, default=False
Save the data transformed in fit for use in _get_train_probs.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
random_state : int or None, default=None
Seed for random number generation.
Attributes
----------
n_classes : int
The number of classes.
n_instances : int
The number of train cases.
n_atts : int
The number of attributes in each train case.
classes_ : list
The classes labels.
estimators_ : list of shape (n_estimators) of BaseEstimator
The collections of estimators trained in fit.
transformed_data : list of shape (n_estimators) of ndarray
The transformed dataset for all classifiers. Only saved when
save_transformed_data is true.
See Also
--------
ShapeletTransformClassifier
Notes
-----
For the Java version, see
`TSML <https://github.com/uea-machine-learning/tsml/blob/master/src/main/java
/weka/classifiers/meta/RotationForest.java>`_.
References
----------
.. [1] Rodriguez, Juan José, Ludmila I. Kuncheva, and Carlos J. Alonso. "Rotation
forest: A new classifier ensemble method." IEEE transactions on pattern analysis
and machine intelligence 28.10 (2006).
.. [2] Bagnall, A., et al. "Is rotation forest the best classifier for problems
with continuous features?." arXiv preprint arXiv:1809.06705 (2018).
Examples
--------
>>> from sktime.contrib.vector_classifiers._rotation_forest import RotationForest
>>> from sktime.datasets import load_unit_test
>>> from sktime.datatypes._panel._convert import from_nested_to_3d_numpy
>>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
>>> X_test, y_test = load_unit_test(split="test", return_X_y=True)
>>> X_train = from_nested_to_3d_numpy(X_train)
>>> X_test = from_nested_to_3d_numpy(X_test)
>>> clf = RotationForest(n_estimators=10)
>>> clf.fit(X_train, y_train)
RotationForest(...)
>>> y_pred = clf.predict(X_test)
"""
def __init__(
self,
n_estimators=200,
min_group=3,
max_group=3,
remove_proportion=0.5,
base_estimator=None,
time_limit_in_minutes=0.0,
contract_max_n_estimators=500,
save_transformed_data=False,
n_jobs=1,
random_state=None,
):
self.n_estimators = n_estimators
self.min_group = min_group
self.max_group = max_group
self.remove_proportion = remove_proportion
self.base_estimator = base_estimator
self.time_limit_in_minutes = time_limit_in_minutes
self.contract_max_n_estimators = contract_max_n_estimators
self.save_transformed_data = save_transformed_data
self.n_jobs = n_jobs
self.random_state = random_state
self.n_classes = 0
self.n_instances = 0
self.n_atts = 0
self.classes_ = []
self.estimators_ = []
self.transformed_data = []
self._n_estimators = n_estimators
self._base_estimator = base_estimator
self._min = 0
self._ptp = 0
self._useful_atts = []
self._pcas = []
self._groups = []
self._class_dictionary = {}
self._n_jobs = n_jobs
self._n_atts = 0
# We need to add is-fitted state when inheriting from scikit-learn
self._is_fitted = False
super(RotationForest, self).__init__()
def fit(self, X, y):
"""Fit a forest of trees on cases (X,y), where y is the target variable.
Parameters
----------
X : ndarray of shape = [n_instances,n_attributes]
The training input samples.
y : array-like, shape = [n_instances]
The class labels.
Returns
-------
self : object
"""
if isinstance(X, np.ndarray) and len(X.shape) == 3 and X.shape[1] == 1:
X = np.reshape(X, (X.shape[0], -1))
elif not isinstance(X, np.ndarray) or len(X.shape) > 2:
raise ValueError(
"RotationForest is not a time series classifier. "
"A 2d numpy array is required."
)
X, y = check_X_y(X, y)
self._n_jobs = check_n_jobs(self.n_jobs)
self.n_instances, self.n_atts = X.shape
self.classes_ = np.unique(y)
self.n_classes = self.classes_.shape[0]
for index, classVal in enumerate(self.classes_):
self._class_dictionary[classVal] = index
time_limit = self.time_limit_in_minutes * 60
start_time = time.time()
train_time = 0
if self.base_estimator is None:
self._base_estimator = DecisionTreeClassifier(criterion="entropy")
# replace missing values with 0 and remove useless attributes
X = np.nan_to_num(X, False, 0, 0, 0)
self._useful_atts = ~np.all(X[1:] == X[:-1], axis=0)
X = X[:, self._useful_atts]
self._n_atts = X.shape[1]
# normalise attributes
self._min = X.min(axis=0)
self._ptp = X.max(axis=0) - self._min
X = (X - self._min) / self._ptp
X_cls_split = [X[np.where(y == i)] for i in self.classes_]
if time_limit > 0:
self._n_estimators = 0
self.estimators_ = []
self._pcas = []
self._groups = []
while (
train_time < time_limit
and self._n_estimators < self.contract_max_n_estimators
):
fit = Parallel(n_jobs=self._n_jobs)(
delayed(self._fit_estimator)(
X,
X_cls_split,
y,
i,
)
for i in range(self._n_jobs)
)
estimators, pcas, groups, transformed_data = zip(*fit)
self.estimators_ += estimators
self._pcas += pcas
self._groups += groups
self.transformed_data += transformed_data
self._n_estimators += self._n_jobs
train_time = time.time() - start_time
else:
fit = Parallel(n_jobs=self._n_jobs)(
delayed(self._fit_estimator)(
X,
X_cls_split,
y,
i,
)
for i in range(self._n_estimators)
)
self.estimators_, self._pcas, self._groups, self.transformed_data = zip(
*fit
)
self._is_fitted = True
return self
def predict(self, X):
"""Predict for all cases in X. Built on top of predict_proba.
Parameters
----------
X : ndarray of shape = [n_instances,n_attributes]
Returns
-------
output : array of shape = [n_test_instances]
"""
rng = check_random_state(self.random_state)
return np.array(
[
self.classes_[int(rng.choice(np.flatnonzero(prob == prob.max())))]
for prob in self.predict_proba(X)
]
)
def predict_proba(self, X):
"""Probability estimates for each class for all cases in X.
Parameters
----------
X : ndarray of shape = [n_instances,n_attributes]
Returns
-------
output : array of shape = [n_test_instances, num_classes] of
probabilities
"""
if not self._is_fitted:
raise NotFittedError(
f"This instance of {self.__class__.__name__} has not "
f"been fitted yet; please call `fit` first."
)
if isinstance(X, np.ndarray) and len(X.shape) == 3 and X.shape[1] == 1:
X = np.reshape(X, (X.shape[0], -1))
elif not isinstance(X, np.ndarray) or len(X.shape) > 2:
raise ValueError(
"RotationForest is not a time series classifier. "
"A 2d numpy array is required."
)
# replace missing values with 0 and remove useless attributes
X = np.nan_to_num(X, False, 0, 0, 0)
X = X[:, self._useful_atts]
# normalise the data.
X = (X - self._min) / self._ptp
y_probas = Parallel(n_jobs=self._n_jobs)(
delayed(self._predict_proba_for_estimator)(
X,
self.estimators_[i],
self._pcas[i],
self._groups[i],
)
for i in range(self._n_estimators)
)
output = np.sum(y_probas, axis=0) / (
np.ones(self.n_classes) * self._n_estimators
)
return output
def _get_train_probs(self, X, y):
if not self._is_fitted:
raise NotFittedError(
f"This instance of {self.__class__.__name__} has not "
f"been fitted yet; please call `fit` first."
)
if isinstance(X, np.ndarray) and len(X.shape) == 3 and X.shape[1] == 1:
X = np.reshape(X, (X.shape[0], -1))
elif not isinstance(X, np.ndarray) or len(X.shape) > 2:
raise ValueError(
"RotationForest is not a time series classifier. "
"A 2d numpy array is required."
)
n_instances, n_atts = X.shape
if n_instances != self.n_instances or n_atts != self.n_atts:
raise ValueError(
"n_instances or n_atts mismatch. X should be "
"the same as the training data used in fit for generating train "
"probabilities."
)
if not self.save_transformed_data:
raise ValueError("Currently only works with saved transform data from fit.")
p = Parallel(n_jobs=self._n_jobs)(
delayed(self._train_probas_for_estimator)(
y,
i,
)
for i in range(self._n_estimators)
)
y_probas, oobs = zip(*p)
results = np.sum(y_probas, axis=0)
divisors = np.zeros(n_instances)
for oob in oobs:
for inst in oob:
divisors[inst] += 1
for i in range(n_instances):
results[i] = (
np.ones(self.n_classes) * (1 / self.n_classes)
if divisors[i] == 0
else results[i] / (np.ones(self.n_classes) * divisors[i])
)
return results
def _fit_estimator(self, X, X_cls_split, y, idx):
rs = 255 if self.random_state == 0 else self.random_state
rs = (
None
if self.random_state is None
else (rs * 37 * (idx + 1)) % np.iinfo(np.int32).max
)
rng = check_random_state(rs)
groups = self._generate_groups(rng)
pcas = []
# construct the slices to fit the PCAs to.
for group in groups:
classes = rng.choice(
range(self.n_classes),
size=rng.randint(1, self.n_classes + 1),
replace=False,
)
# gather the cases of the randomly selected classes, restricted to this group's attributes.
X_t = np.zeros((0, len(group)))
for cls_idx in classes:
c = X_cls_split[cls_idx]
X_t = np.concatenate((X_t, c[:, group]), axis=0)
sample_ind = rng.choice(
X_t.shape[0],
int(X_t.shape[0] * self.remove_proportion),
replace=False,
)
X_t = X_t[sample_ind]
# try to fit the PCA; if it fails, add 10 random data instances and refit.
while True:
# ignore the numpy error state during PCA, since failures are handled below.
with np.errstate(divide="ignore", invalid="ignore"):
# results occasionally differ between OSes; this seems to happen when
# there are few cases in the fit.
pca = PCA(random_state=rs).fit(X_t)
if not np.isnan(pca.explained_variance_ratio_).all():
break
X_t = np.concatenate(
(X_t, rng.random_sample((10, X_t.shape[1]))), axis=0
)
pcas.append(pca)
# merge all the PCA-transformed groups into one feature matrix and build a classifier on it.
X_t = np.concatenate(
[pcas[i].transform(X[:, group]) for i, group in enumerate(groups)], axis=1
)
tree = _clone_estimator(self._base_estimator, random_state=rs)
tree.fit(X_t, y)
return tree, pcas, groups, X_t if self.save_transformed_data else None
def _predict_proba_for_estimator(self, X, clf, pcas, groups):
X_t = np.concatenate(
[pcas[i].transform(X[:, group]) for i, group in enumerate(groups)], axis=1
)
probas = clf.predict_proba(X_t)
if probas.shape[1] != self.n_classes:
new_probas = np.zeros((probas.shape[0], self.n_classes))
for i, cls in enumerate(clf.classes_):
cls_idx = self._class_dictionary[cls]
new_probas[:, cls_idx] = probas[:, i]
probas = new_probas
return probas
def _train_probas_for_estimator(self, y, idx):
rs = 255 if self.random_state == 0 else self.random_state
rs = (
None
if self.random_state is None
else (rs * 37 * (idx + 1)) % np.iinfo(np.int32).max
)
rng = check_random_state(rs)
indices = range(self.n_instances)
subsample = rng.choice(self.n_instances, size=self.n_instances)
oob = [n for n in indices if n not in subsample]
clf = _clone_estimator(self._base_estimator, rs)
clf.fit(self.transformed_data[idx][subsample], y[subsample])
probas = clf.predict_proba(self.transformed_data[idx][oob])
if probas.shape[1] != self.n_classes:
new_probas = np.zeros((probas.shape[0], self.n_classes))
for i, cls in enumerate(clf.classes_):
cls_idx = self._class_dictionary[cls]
new_probas[:, cls_idx] = probas[:, i]
probas = new_probas
results = np.zeros((self.n_instances, self.n_classes))
for n, proba in enumerate(probas):
results[oob[n]] += proba
return [results, oob]
def _generate_groups(self, rng):
permutation = rng.permutation((np.arange(0, self._n_atts)))
# select the size of each group.
group_size_count = np.zeros(self.max_group - self.min_group + 1)
n_attributes = 0
n_groups = 0
while n_attributes < self._n_atts:
n = rng.randint(group_size_count.shape[0])
group_size_count[n] += 1
n_attributes += self.min_group + n
n_groups += 1
groups = []
current_attribute = 0
current_size = 0
for i in range(0, n_groups):
while group_size_count[current_size] == 0:
current_size += 1
group_size_count[current_size] -= 1
n = self.min_group + current_size
groups.append(np.zeros(n, dtype=int))
for k in range(0, n):
if current_attribute < permutation.shape[0]:
groups[i][k] = permutation[current_attribute]
else:
groups[i][k] = permutation[rng.randint(permutation.shape[0])]
current_attribute += 1
return groups
| RotationForest |
formschema.go | package main
import (
"gopkg.in/juju/environschema.v1"
"gopkg.in/macaroon-bakery.v2/httpbakery/form"
)
var schemaResponse = form.SchemaResponse{ |
var schemaFields = environschema.Fields{
"username": environschema.Attr{
Description: "username",
Type: environschema.Tstring,
Mandatory: true,
},
"password": environschema.Attr{
Description: "password",
Type: environschema.Tstring,
Mandatory: true,
Secret: true,
},
} | Schema: schemaFields,
} |
min_max_size.rs | // Copyright 2019-2021 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
use simple_logger::SimpleLogger;
use tao::{
dpi::LogicalSize,
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop}, |
#[allow(clippy::single_match)]
fn main() {
SimpleLogger::new().init().unwrap();
let event_loop = EventLoop::new();
let window = WindowBuilder::new().build(&event_loop).unwrap();
window.set_min_inner_size(Some(LogicalSize::new(400.0, 200.0)));
window.set_max_inner_size(Some(LogicalSize::new(800.0, 400.0)));
event_loop.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Wait;
println!("{:?}", event);
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => *control_flow = ControlFlow::Exit,
_ => (),
}
});
} | window::WindowBuilder,
}; |
sarif_test.go | package sarif_test
import (
"bytes"
"regexp"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/securego/gosec/v2"
"github.com/securego/gosec/v2/report/sarif"
)
var _ = Describe("Sarif Formatter", func() {
BeforeEach(func() {
})
Context("when converting to Sarif issues", func() {
It("sarif formatted report should contain the result", func() {
buf := new(bytes.Buffer)
reportInfo := gosec.NewReportInfo([]*gosec.Issue{}, &gosec.Metrics{}, map[string][]gosec.Error{}).WithVersion("v2.7.0")
err := sarif.WriteReport(buf, reportInfo, []string{})
result := buf.String()
Expect(err).ShouldNot(HaveOccurred())
Expect(result).To(ContainSubstring("\"results\": ["))
})
It("sarif formatted report should contain the suppressed results", func() {
ruleID := "G101"
cwe := gosec.GetCweByRule(ruleID)
suppressedIssue := gosec.Issue{
File: "/home/src/project/test.go",
Line: "1",
Col: "1",
RuleID: ruleID,
What: "test",
Confidence: gosec.High,
Severity: gosec.High,
Code: "1: testcode",
Cwe: cwe,
Suppressions: []gosec.SuppressionInfo{
{
Kind: "kind",
Justification: "justification",
},
},
}
reportInfo := gosec.NewReportInfo([]*gosec.Issue{&suppressedIssue}, &gosec.Metrics{}, map[string][]gosec.Error{}).WithVersion("v2.7.0")
buf := new(bytes.Buffer)
err := sarif.WriteReport(buf, reportInfo, []string{})
result := buf.String()
Expect(err).ShouldNot(HaveOccurred())
hasResults, _ := regexp.MatchString(`"results": \[(\s*){`, result)
Expect(hasResults).To(BeTrue())
hasSuppressions, _ := regexp.MatchString(`"suppressions": \[(\s*){`, result) | It("sarif formatted report should contain the formatted one line code snippet", func() {
ruleID := "G101"
cwe := gosec.GetCweByRule(ruleID)
code := "68: \t\t}\n69: \t\tvar data = template.HTML(v.TmplFile)\n70: \t\tisTmpl := true\n"
expectedCode := "var data = template.HTML(v.TmplFile)"
issue := gosec.Issue{
File: "/home/src/project/test.go",
Line: "69",
Col: "14",
RuleID: ruleID,
What: "test",
Confidence: gosec.High,
Severity: gosec.High,
Code: code,
Cwe: cwe,
Suppressions: []gosec.SuppressionInfo{
{
Kind: "kind",
Justification: "justification",
},
},
}
reportInfo := gosec.NewReportInfo([]*gosec.Issue{&issue}, &gosec.Metrics{}, map[string][]gosec.Error{}).WithVersion("v2.7.0")
sarifReport, err := sarif.GenerateReport([]string{}, reportInfo)
Expect(err).ShouldNot(HaveOccurred())
Expect(sarifReport.Runs[0].Results[0].Locations[0].PhysicalLocation.Region.Snippet.Text).Should(Equal(expectedCode))
})
It("sarif formatted report should contain the formatted multiple line code snippet", func() {
ruleID := "G101"
cwe := gosec.GetCweByRule(ruleID)
code := "68: }\n69: var data = template.HTML(v.TmplFile)\n70: isTmpl := true\n"
expectedCode := "var data = template.HTML(v.TmplFile)\nisTmpl := true\n"
issue := gosec.Issue{
File: "/home/src/project/test.go",
Line: "69-70",
Col: "14",
RuleID: ruleID,
What: "test",
Confidence: gosec.High,
Severity: gosec.High,
Code: code,
Cwe: cwe,
Suppressions: []gosec.SuppressionInfo{
{
Kind: "kind",
Justification: "justification",
},
},
}
reportInfo := gosec.NewReportInfo([]*gosec.Issue{&issue}, &gosec.Metrics{}, map[string][]gosec.Error{}).WithVersion("v2.7.0")
sarifReport, err := sarif.GenerateReport([]string{}, reportInfo)
Expect(err).ShouldNot(HaveOccurred())
Expect(sarifReport.Runs[0].Results[0].Locations[0].PhysicalLocation.Region.Snippet.Text).Should(Equal(expectedCode))
})
})
}) | Expect(hasSuppressions).To(BeTrue())
}) |
parser.go | package wttr
import (
"errors"
"fmt"
"github.com/pksieminski/wttr.in-go/internal/regroup"
"regexp"
"strings"
)
type ParseError struct {
Err error
}
func (e *ParseError) Error() string {
return fmt.Sprintf("weather response parsing error: %s", e.Err)
}
func ParseWeather(resp string) (*Weather, error) {
lines := strings.Split(resp, "\n")
if len(lines) < 8 {
return nil, &ParseError{Err: errors.New("returned weather body is too short")}
}
temp, err := parseTemperature(lines[3])
if err != nil {
return nil, err
}
speed, err := parseWindSpeed(lines[4])
if err != nil {
return nil, err
}
return &Weather{
Location: lines[0],
Description: parseDescription(lines[2]),
Temperature: temp,
WindSpeed: speed,
}, nil
}
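// Usage sketch (added for illustration; assumes resp holds a plain-text
// wttr.in response, which varies in practice):
//
//	w, err := ParseWeather(resp)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("%s: %s, %d°C, wind %d km/h\n",
//		w.Location, w.Description, w.Temperature, w.WindSpeed)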
func parseDescription(line string) string {
return removeIndent(line)
}
func parseTemperature(line string) (int, error) {
re := regexp.MustCompile("(?P<sign>[+-])?(?P<value>\\d+)(?:\\([+-]?\\d+\\))?.+ยฐC")
groups := regroup.MatchGroups(re, line)
temp, err := groups.GetInt("value")
if err != nil {
return 0, &ParseError{Err: errors.New("temperature parse error")}
} | if sign, _ := groups.Get("sign"); sign == "-" {
temp = -temp
}
return temp, nil
}
func parseWindSpeed(line string) (int, error) {
re := regexp.MustCompile("(?P<value>\\d+).+km/h")
groups := regroup.MatchGroups(re, line)
speed, err := groups.GetInt("value")
if err != nil {
return 0, &ParseError{Err: errors.New("wind speed parse error")}
}
return speed, nil
}
func removeIndent(line string) string {
return line[16:]
} | |
find_blender.py | import platform
# print(platform.system())
operating_system = platform.system().lower()
if operating_system == 'darwin':
from .blender_utils_macos import get_installed_blender_versions
operating_system_name = 'macos'
elif operating_system == 'linux':
from .blender_utils_linux import get_installed_blender_versions
operating_system_name = 'linux'
elif operating_system == 'windows':
from .blender_utils_windows import get_installed_blender_versions
operating_system_name = 'windows'
else:
raise Exception("Unimplemented for OS {}".format(operating_system))
from .blender_utils_web import get_blender_version_download_links
def find_blender(version):
# TODO: add fuzzy version matching, ie. '>=2.80', '~2.80', '<2.80', etc.
installed_versions = get_installed_blender_versions()
if version in installed_versions:
return installed_versions[version]
else:
print("blender version '{}' not found; found {} version(s):".format(version, len(installed_versions)))
for v, path in installed_versions.items():
print(" {}: {}".format(v, path))
print("searching web archive...")
versions = get_blender_version_download_links(version, operating_system_name)
print("found {} download(s) for blender version '{}', platform '{}':".format(len(versions), version, operating_system_name))
for url in versions:
print(" {}".format(url))
if __name__ == '__main__':
for version, exec_path in get_installed_blender_versions().items():
print("found blender {version}: {path}".format(version=version,
path=exec_path)) | if blender:
print("Found blender: '{}'".format(blender))
else:
print("No matching blender version installed :(") | blender = find_blender('2.80') |
shootout-nbody.rs | // The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// Copyright (c) 2011-2014 The Rust Project Developers
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of "The Computer Language Benchmarks Game" nor
// the name of "The Computer Language Shootout Benchmarks" nor the
// names of its contributors may be used to endorse or promote
// products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
use std::num::Float;
const PI: f64 = 3.141592653589793;
const SOLAR_MASS: f64 = 4.0 * PI * PI;
const YEAR: f64 = 365.24;
const N_BODIES: uint = 5;
static BODIES: [Planet;N_BODIES] = [
// Sun
Planet {
x: 0.0, y: 0.0, z: 0.0,
vx: 0.0, vy: 0.0, vz: 0.0,
mass: SOLAR_MASS,
},
// Jupiter
Planet {
x: 4.84143144246472090e+00,
y: -1.16032004402742839e+00,
z: -1.03622044471123109e-01,
vx: 1.66007664274403694e-03 * YEAR,
vy: 7.69901118419740425e-03 * YEAR,
vz: -6.90460016972063023e-05 * YEAR,
mass: 9.54791938424326609e-04 * SOLAR_MASS,
},
// Saturn
Planet {
x: 8.34336671824457987e+00,
y: 4.12479856412430479e+00,
z: -4.03523417114321381e-01,
vx: -2.76742510726862411e-03 * YEAR,
vy: 4.99852801234917238e-03 * YEAR,
vz: 2.30417297573763929e-05 * YEAR,
mass: 2.85885980666130812e-04 * SOLAR_MASS,
},
// Uranus
Planet {
x: 1.28943695621391310e+01,
y: -1.51111514016986312e+01,
z: -2.23307578892655734e-01,
vx: 2.96460137564761618e-03 * YEAR,
vy: 2.37847173959480950e-03 * YEAR,
vz: -2.96589568540237556e-05 * YEAR,
mass: 4.36624404335156298e-05 * SOLAR_MASS,
},
// Neptune
Planet {
x: 1.53796971148509165e+01,
y: -2.59193146099879641e+01,
z: 1.79258772950371181e-01,
vx: 2.68067772490389322e-03 * YEAR,
vy: 1.62824170038242295e-03 * YEAR,
vz: -9.51592254519715870e-05 * YEAR,
mass: 5.15138902046611451e-05 * SOLAR_MASS,
},
];
struct Planet {
x: f64, y: f64, z: f64,
vx: f64, vy: f64, vz: f64,
mass: f64,
}
impl Copy for Planet {}
fn | (bodies: &mut [Planet;N_BODIES], dt: f64, steps: int) {
for _ in range(0, steps) {
let mut b_slice = bodies.as_mut_slice();
loop {
let bi = match shift_mut_ref(&mut b_slice) {
Some(bi) => bi,
None => break
};
for bj in b_slice.iter_mut() {
let dx = bi.x - bj.x;
let dy = bi.y - bj.y;
let dz = bi.z - bj.z;
let d2 = dx * dx + dy * dy + dz * dz;
let mag = dt / (d2 * d2.sqrt());
let massj_mag = bj.mass * mag;
bi.vx -= dx * massj_mag;
bi.vy -= dy * massj_mag;
bi.vz -= dz * massj_mag;
let massi_mag = bi.mass * mag;
bj.vx += dx * massi_mag;
bj.vy += dy * massi_mag;
bj.vz += dz * massi_mag;
}
bi.x += dt * bi.vx;
bi.y += dt * bi.vy;
bi.z += dt * bi.vz;
}
}
}
fn energy(bodies: &[Planet;N_BODIES]) -> f64 {
let mut e = 0.0;
let mut bodies = bodies.iter();
loop {
let bi = match bodies.next() {
Some(bi) => bi,
None => break
};
e += (bi.vx * bi.vx + bi.vy * bi.vy + bi.vz * bi.vz) * bi.mass / 2.0;
for bj in bodies.clone() {
let dx = bi.x - bj.x;
let dy = bi.y - bj.y;
let dz = bi.z - bj.z;
let dist = (dx * dx + dy * dy + dz * dz).sqrt();
e -= bi.mass * bj.mass / dist;
}
}
e
}
fn offset_momentum(bodies: &mut [Planet;N_BODIES]) {
let mut px = 0.0;
let mut py = 0.0;
let mut pz = 0.0;
for bi in bodies.iter() {
px += bi.vx * bi.mass;
py += bi.vy * bi.mass;
pz += bi.vz * bi.mass;
}
let sun = &mut bodies[0];
sun.vx = - px / SOLAR_MASS;
sun.vy = - py / SOLAR_MASS;
sun.vz = - pz / SOLAR_MASS;
}
fn main() {
let n = if std::os::getenv("RUST_BENCH").is_some() {
5000000
} else {
std::os::args().as_slice().get(1)
.and_then(|arg| arg.parse())
.unwrap_or(1000)
};
let mut bodies = BODIES;
offset_momentum(&mut bodies);
println!("{:.9}", energy(&bodies));
advance(&mut bodies, 0.01, n);
println!("{:.9}", energy(&bodies));
}
/// Pop a mutable reference off the head of a slice, mutating the slice to no
/// longer contain the mutable reference. This is a safe operation because the
/// two mutable borrows are entirely disjoint.
fn shift_mut_ref<'a, T>(r: &mut &'a mut [T]) -> Option<&'a mut T> {
use std::mem;
use std::raw::Repr;
if r.len() == 0 { return None }
unsafe {
let mut raw = r.repr();
let ret = raw.data as *mut T;
raw.data = raw.data.offset(1);
raw.len -= 1;
*r = mem::transmute(raw);
Some(&mut *ret)
}
}
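// Illustrative sketch, not part of the original benchmark: on post-1.0 Rust
// the same "pop a disjoint &mut off the front" trick can be written without
// raw-pointer manipulation, using mem::take and slice::split_at_mut. The
// function name below is ours; only `shift_mut_ref` above is from the source.
fn shift_mut_ref_safe<'a, T>(r: &mut &'a mut [T]) -> Option<&'a mut T> {
    if r.is_empty() {
        return None;
    }
    // Replace *r with an empty slice so the original slice can be moved out.
    let slice = std::mem::take(r);
    let (head, tail) = slice.split_at_mut(1);
    *r = tail;
    head.first_mut()
}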
app.module.ts | import {NgModule} from '@angular/core';
import {FormsModule, ReactiveFormsModule} from '@angular/forms';
import {HttpClientModule} from '@angular/common/http';
import {
MatButtonModule,
MatDialogModule,
MatDividerModule,
MatInputModule,
MatPaginatorModule,
MatSelectModule,
MatSortModule,
MatTableModule,
} from '@angular/material';
import {BrowserModule} from '@angular/platform-browser';
import {BrowserAnimationsModule} from '@angular/platform-browser/animations';
import {AppRoutingModule} from './app-routing.module';
import {AppComponent} from './app.component';
import {JournalBackendMockService} from './core/journal-backend-mock.service';
import {JournalComponent} from './journals/journal.component';
import {QuestionJournalDialogComponent} from './journals/question-journal/question-journal-dialog/question-journal-dialog.component';
import {QuestionJournalComponent} from './journals/question-journal/question-journal.component';
import {JournalMainService} from './core/journal-main.service';
import {QuestionJournalMainService} from './journals/question-journal/core/question-journal-main.service';
import {QuestionJournalBackendMockService} from './journals/question-journal/core/question-journal-backend-mock.service';
@NgModule({
declarations: [
AppComponent,
QuestionJournalComponent,
QuestionJournalDialogComponent,
JournalComponent,
],
imports: [
BrowserAnimationsModule,
HttpClientModule,
MatDividerModule,
MatTableModule,
MatButtonModule,
MatPaginatorModule,
MatSortModule,
MatInputModule,
MatSelectModule,
ReactiveFormsModule,
BrowserModule,
AppRoutingModule,
FormsModule,
MatDialogModule,
],
entryComponents: [QuestionJournalDialogComponent],
providers: [
JournalBackendMockService,
JournalMainService,
QuestionJournalMainService,
QuestionJournalBackendMockService,
],
bootstrap: [AppComponent],
})
export class AppModule {
}
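// Usage sketch (assumed main.ts; the standard Angular bootstrap, which is not
// shown in this file):
//
//   platformBrowserDynamic().bootstrapModule(AppModule)
//     .catch(err => console.error(err));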
utils.py | from __future__ import absolute_import, division
from tensorflow.python import debug as tf_debug
import keras.backend as K
def keras_set_tf_debug():
sess = K.get_session()
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
K.set_session(sess)
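# Usage sketch (hypothetical caller, not part of this module): install the
# debug-wrapped session before running a model so the TFDBG CLI opens and the
# registered `has_inf_or_nan` filter flags bad tensors, e.g.
#
#   from utils import keras_set_tf_debug
#   keras_set_tf_debug()
#   model.fit(x_train, y_train)  # drops into the debugger CLI on each run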
sort_test.go | package mergesort_test
import (
"github.com/droxer/mergesort"
"math/rand"
"reflect"
"testing"
"time"
)
func init() {
seed := time.Now().Unix()
rand.Seed(seed)
}
func perm(n int) (out []int) {
for _, v := range rand.Perm(n) {
out = append(out, v)
}
return
}
func TestMSort(t *testing.T) {
	expected := []int{3, 9, 10, 27, 38, 43, 82}
	orig := []int{38, 27, 43, 3, 9, 82, 10}
mergesort.Sort(orig)
if !reflect.DeepEqual(orig, expected) {
t.Fatalf("expected %v, actual is %v", expected, orig)
}
}
func BenchmarkMSort100(b *testing.B) {
benchmarkMSort(100, b)
}
func BenchmarkMSort1000(b *testing.B) {
benchmarkMSort(1000, b)
}
func BenchmarkMSort10000(b *testing.B) {
benchmarkMSort(10000, b)
}
func benchmarkMSort(i int, b *testing.B) {
for j := 0; j < b.N; j++ {
b.StopTimer()
values := perm(i)
b.StartTimer()
mergesort.Sort(values)
}
}
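// Sketch of one more property check (helper names reused from this file; the
// test itself is ours, not part of the original suite): sorting a random
// permutation of 0..n-1 must yield exactly 0, 1, ..., n-1.
func TestMSortPermutation(t *testing.T) {
	values := perm(100)
	mergesort.Sort(values)
	for i, v := range values {
		if v != i {
			t.Fatalf("index %d: expected %d, got %d", i, i, v)
		}
	}
}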
25.8b2dccd8a7e1f91a5040.js | webpackJsonp([25],{"SG+P":function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0});var r,i,o,a,s,u,c,d,f,l,g,h,p,m,v,b,k,C,_,y=function(){function e(e){var t=this;this._defaults=e,this._worker=null,this._idleCheckInterval=setInterval(function(){return t._checkIfIdle()},3e4),this._lastUsedTime=0,this._configChangeListener=this._defaults.onDidChange(function(){return t._stopWorker()})}return e.prototype._stopWorker=function(){this._worker&&(this._worker.dispose(),this._worker=null),this._client=null},e.prototype.dispose=function(){clearInterval(this._idleCheckInterval),this._configChangeListener.dispose(),this._stopWorker()},e.prototype._checkIfIdle=function(){this._worker&&(Date.now()-this._lastUsedTime>12e4&&this._stopWorker())},e.prototype._getClient=function(){return this._lastUsedTime=Date.now(),this._client||(this._worker=monaco.editor.createWebWorker({moduleId:"vs/language/json/jsonWorker",label:this._defaults.languageId,createData:{languageSettings:this._defaults.diagnosticsOptions,languageId:this._defaults.languageId,enableSchemaRequest:this._defaults.diagnosticsOptions.enableSchemaRequest}}),this._client=this._worker.getProxy()),this._client},e.prototype.getLanguageServiceWorker=function(){for(var e,t=this,n=[],r=0;r<arguments.length;r++)n[r]=arguments[r];return this._getClient().then(function(t){e=t}).then(function(e){return t._worker.withSyncedResources(n)}).then(function(t){return e})},e}();!function(e){e.create=function(e,t){return{line:e,character:t}},e.is=function(e){var t=e;return Q.objectLiteral(t)&&Q.number(t.line)&&Q.number(t.character)}}(r||(r={})),function(e){e.create=function(e,t,n,i){if(Q.number(e)&&Q.number(t)&&Q.number(n)&&Q.number(i))return{start:r.create(e,t),end:r.create(n,i)};if(r.is(e)&&r.is(t))return{start:e,end:t};throw new Error("Range#create called with invalid arguments["+e+", "+t+", "+n+", "+i+"]")},e.is=function(e){var t=e;return Q.objectLiteral(t)&&r.is(t.start)&&r.is(t.end)}}(i||(i={})),function(e){e.create=function(e,t){return{uri:e,range:t}},e.is=function(e){var t=e;return Q.defined(t)&&i.is(t.range)&&(Q.string(t.uri)||Q.undefined(t.uri))}}(o||(o={})),function(e){e.create=function(e,t,n,r){return{targetUri:e,targetRange:t,targetSelectionRange:n,originSelectionRange:r}},e.is=function(e){var t=e;return Q.defined(t)&&i.is(t.targetRange)&&Q.string(t.targetUri)&&(i.is(t.targetSelectionRange)||Q.undefined(t.targetSelectionRange))&&(i.is(t.originSelectionRange)||Q.undefined(t.originSelectionRange))}}(a||(a={})),function(e){e.create=function(e,t,n,r){return{red:e,green:t,blue:n,alpha:r}},e.is=function(e){var t=e;return Q.number(t.red)&&Q.number(t.green)&&Q.number(t.blue)&&Q.number(t.alpha)}}(s||(s={})),function(e){e.create=function(e,t){return{range:e,color:t}},e.is=function(e){var t=e;return i.is(t.range)&&s.is(t.color)}}(u||(u={})),function(e){e.create=function(e,t,n){return{label:e,textEdit:t,additionalTextEdits:n}},e.is=function(e){var t=e;return Q.string(t.label)&&(Q.undefined(t.textEdit)||m.is(t))&&(Q.undefined(t.additionalTextEdits)||Q.typedArray(t.additionalTextEdits,m.is))}}(c||(c={})),function(e){e.Comment="comment",e.Imports="imports",e.Region="region"}(d||(d={})),function(e){e.create=function(e,t,n,r,i){var o={startLine:e,endLine:t};return Q.defined(n)&&(o.startCharacter=n),Q.defined(r)&&(o.endCharacter=r),Q.defined(i)&&(o.kind=i),o},e.is=function(e){var t=e;return 
Q.number(t.startLine)&&Q.number(t.startLine)&&(Q.undefined(t.startCharacter)||Q.number(t.startCharacter))&&(Q.undefined(t.endCharacter)||Q.number(t.endCharacter))&&(Q.undefined(t.kind)||Q.string(t.kind))}}(f||(f={})),function(e){e.create=function(e,t){return{location:e,message:t}},e.is=function(e){var t=e;return Q.defined(t)&&o.is(t.location)&&Q.string(t.message)}}(l||(l={})),function(e){e.Error=1,e.Warning=2,e.Information=3,e.Hint=4}(g||(g={})),function(e){e.create=function(e,t,n,r,i,o){var a={range:e,message:t};return Q.defined(n)&&(a.severity=n),Q.defined(r)&&(a.code=r),Q.defined(i)&&(a.source=i),Q.defined(o)&&(a.relatedInformation=o),a},e.is=function(e){var t=e;return Q.defined(t)&&i.is(t.range)&&Q.string(t.message)&&(Q.number(t.severity)||Q.undefined(t.severity))&&(Q.number(t.code)||Q.string(t.code)||Q.undefined(t.code))&&(Q.string(t.source)||Q.undefined(t.source))&&(Q.undefined(t.relatedInformation)||Q.typedArray(t.relatedInformation,l.is))}}(h||(h={})),function(e){e.create=function(e,t){for(var n=[],r=2;r<arguments.length;r++)n[r-2]=arguments[r];var i={title:e,command:t};return Q.defined(n)&&n.length>0&&(i.arguments=n),i},e.is=function(e){var t=e;return Q.defined(t)&&Q.string(t.title)&&Q.string(t.command)}}(p||(p={})),function(e){e.replace=function(e,t){return{range:e,newText:t}},e.insert=function(e,t){return{range:{start:e,end:e},newText:t}},e.del=function(e){return{range:e,newText:""}},e.is=function(e){var t=e;return Q.objectLiteral(t)&&Q.string(t.newText)&&i.is(t.range)}}(m||(m={})),function(e){e.create=function(e,t){return{textDocument:e,edits:t}},e.is=function(e){var t=e;return Q.defined(t)&&E.is(t.textDocument)&&Array.isArray(t.edits)}}(v||(v={})),function(e){e.create=function(e,t){var n={kind:"create",uri:e};return void 0===t||void 0===t.overwrite&&void 0===t.ignoreIfExists||(n.options=t),n},e.is=function(e){var t=e;return t&&"create"===t.kind&&Q.string(t.uri)&&(void 0===t.options||(void 0===t.options.overwrite||Q.boolean(t.options.overwrite))&&(void 0===t.options.ignoreIfExists||Q.boolean(t.options.ignoreIfExists)))}}(b||(b={})),function(e){e.create=function(e,t,n){var r={kind:"rename",oldUri:e,newUri:t};return void 0===n||void 0===n.overwrite&&void 0===n.ignoreIfExists||(r.options=n),r},e.is=function(e){var t=e;return t&&"rename"===t.kind&&Q.string(t.oldUri)&&Q.string(t.newUri)&&(void 0===t.options||(void 0===t.options.overwrite||Q.boolean(t.options.overwrite))&&(void 0===t.options.ignoreIfExists||Q.boolean(t.options.ignoreIfExists)))}}(k||(k={})),function(e){e.create=function(e,t){var n={kind:"delete",uri:e};return void 0===t||void 0===t.recursive&&void 0===t.ignoreIfNotExists||(n.options=t),n},e.is=function(e){var t=e;return t&&"delete"===t.kind&&Q.string(t.uri)&&(void 0===t.options||(void 0===t.options.recursive||Q.boolean(t.options.recursive))&&(void 0===t.options.ignoreIfNotExists||Q.boolean(t.options.ignoreIfNotExists)))}}(C||(C={})),function(e){e.is=function(e){var t=e;return t&&(void 0!==t.changes||void 0!==t.documentChanges)&&(void 0===t.documentChanges||t.documentChanges.every(function(e){return Q.string(e.kind)?b.is(e)||k.is(e)||C.is(e):v.is(e)}))}}(_||(_={}));var w,E,x,S,I,A,T,M,P,R,F,j,D,L,O,W,N,U=function(){function e(e){this.edits=e}return e.prototype.insert=function(e,t){this.edits.push(m.insert(e,t))},e.prototype.replace=function(e,t){this.edits.push(m.replace(e,t))},e.prototype.delete=function(e){this.edits.push(m.del(e))},e.prototype.add=function(e){this.edits.push(e)},e.prototype.all=function(){return 
this.edits},e.prototype.clear=function(){this.edits.splice(0,this.edits.length)},e}();!function(){function e(e){var t=this;this._textEditChanges=Object.create(null),e&&(this._workspaceEdit=e,e.documentChanges?e.documentChanges.forEach(function(e){if(v.is(e)){var n=new U(e.edits);t._textEditChanges[e.textDocument.uri]=n}}):e.changes&&Object.keys(e.changes).forEach(function(n){var r=new U(e.changes[n]);t._textEditChanges[n]=r}))}Object.defineProperty(e.prototype,"edit",{get:function(){return this._workspaceEdit},enumerable:!0,configurable:!0}),e.prototype.getTextEditChange=function(e){if(E.is(e)){if(this._workspaceEdit||(this._workspaceEdit={documentChanges:[]}),!this._workspaceEdit.documentChanges)throw new Error("Workspace edit is not configured for document changes.");var t=e;if(!(r=this._textEditChanges[t.uri])){var n={textDocument:t,edits:i=[]};this._workspaceEdit.documentChanges.push(n),r=new U(i),this._textEditChanges[t.uri]=r}return r}if(this._workspaceEdit||(this._workspaceEdit={changes:Object.create(null)}),!this._workspaceEdit.changes)throw new Error("Workspace edit is not configured for normal text edit changes.");var r;if(!(r=this._textEditChanges[e])){var i=[];this._workspaceEdit.changes[e]=i,r=new U(i),this._textEditChanges[e]=r}return r},e.prototype.createFile=function(e,t){this.checkDocumentChanges(),this._workspaceEdit.documentChanges.push(b.create(e,t))},e.prototype.renameFile=function(e,t,n){this.checkDocumentChanges(),this._workspaceEdit.documentChanges.push(k.create(e,t,n))},e.prototype.deleteFile=function(e,t){this.checkDocumentChanges(),this._workspaceEdit.documentChanges.push(C.create(e,t))},e.prototype.checkDocumentChanges=function(){if(!this._workspaceEdit||!this._workspaceEdit.documentChanges)throw new Error("Workspace edit is not configured for document changes.")}}();!function(e){e.create=function(e){return{uri:e}},e.is=function(e){var t=e;return Q.defined(t)&&Q.string(t.uri)}}(w||(w={})),function(e){e.create=function(e,t){return{uri:e,version:t}},e.is=function(e){var t=e;return Q.defined(t)&&Q.string(t.uri)&&(null===t.version||Q.number(t.version))}}(E||(E={})),function(e){e.create=function(e,t,n,r){return{uri:e,languageId:t,version:n,text:r}},e.is=function(e){var t=e;return Q.defined(t)&&Q.string(t.uri)&&Q.string(t.languageId)&&Q.number(t.version)&&Q.string(t.text)}}(x||(x={})),function(e){e.PlainText="plaintext",e.Markdown="markdown"}(S||(S={})),function(e){e.is=function(t){var n=t;return n===e.PlainText||n===e.Markdown}}(S||(S={})),function(e){e.is=function(e){var t=e;return Q.objectLiteral(e)&&S.is(t.kind)&&Q.string(t.value)}}(I||(I={})),function(e){e.Text=1,e.Method=2,e.Function=3,e.Constructor=4,e.Field=5,e.Variable=6,e.Class=7,e.Interface=8,e.Module=9,e.Property=10,e.Unit=11,e.Value=12,e.Enum=13,e.Keyword=14,e.Snippet=15,e.Color=16,e.File=17,e.Reference=18,e.Folder=19,e.EnumMember=20,e.Constant=21,e.Struct=22,e.Event=23,e.Operator=24,e.TypeParameter=25}(A||(A={})),function(e){e.PlainText=1,e.Snippet=2}(T||(T={})),function(e){e.create=function(e){return{label:e}}}(M||(M={})),function(e){e.create=function(e,t){return{items:e||[],isIncomplete:!!t}}}(P||(P={})),function(e){e.fromPlainText=function(e){return e.replace(/[\\`*_{}[\]()#+\-.!]/g,"\\$&")},e.is=function(e){var t=e;return Q.string(t)||Q.objectLiteral(t)&&Q.string(t.language)&&Q.string(t.value)}}(R||(R={})),function(e){e.is=function(e){var t=e;return!!t&&Q.objectLiteral(t)&&(I.is(t.contents)||R.is(t.contents)||Q.typedArray(t.contents,R.is))&&(void 
0===e.range||i.is(e.range))}}(F||(F={})),function(e){e.create=function(e,t){return t?{label:e,documentation:t}:{label:e}}}(j||(j={})),function(e){e.create=function(e,t){for(var n=[],r=2;r<arguments.length;r++)n[r-2]=arguments[r];var i={label:e};return Q.defined(t)&&(i.documentation=t),Q.defined(n)?i.parameters=n:i.parameters=[],i}}(D||(D={})),function(e){e.Text=1,e.Read=2,e.Write=3}(L||(L={})),function(e){e.create=function(e,t){var n={range:e};return Q.number(t)&&(n.kind=t),n}}(O||(O={})),function(e){e.File=1,e.Module=2,e.Namespace=3,e.Package=4,e.Class=5,e.Method=6,e.Property=7,e.Field=8,e.Constructor=9,e.Enum=10,e.Interface=11,e.Function=12,e.Variable=13,e.Constant=14,e.String=15,e.Number=16,e.Boolean=17,e.Array=18,e.Object=19,e.Key=20,e.Null=21,e.EnumMember=22,e.Struct=23,e.Event=24,e.Operator=25,e.TypeParameter=26}(W||(W={})),function(e){e.create=function(e,t,n,r,i){var o={name:e,kind:t,location:{uri:r,range:n}};return i&&(o.containerName=i),o}}(N||(N={}));var V,K,z,H,q,B=function(){return function(){}}();!function(e){e.create=function(e,t,n,r,i,o){var a={name:e,detail:t,kind:n,range:r,selectionRange:i};return void 0!==o&&(a.children=o),a},e.is=function(e){var t=e;return t&&Q.string(t.name)&&Q.number(t.kind)&&i.is(t.range)&&i.is(t.selectionRange)&&(void 0===t.detail||Q.string(t.detail))&&(void 0===t.deprecated||Q.boolean(t.deprecated))&&(void 0===t.children||Array.isArray(t.children))}}(B||(B={})),function(e){e.QuickFix="quickfix",e.Refactor="refactor",e.RefactorExtract="refactor.extract",e.RefactorInline="refactor.inline",e.RefactorRewrite="refactor.rewrite",e.Source="source",e.SourceOrganizeImports="source.organizeImports"}(V||(V={})),function(e){e.create=function(e,t){var n={diagnostics:e};return void 0!==t&&null!==t&&(n.only=t),n},e.is=function(e){var t=e;return Q.defined(t)&&Q.typedArray(t.diagnostics,h.is)&&(void 0===t.only||Q.typedArray(t.only,Q.string))}}(K||(K={})),function(e){e.create=function(e,t,n){var r={title:e};return p.is(t)?r.command=t:r.edit=t,void 0!==n&&(r.kind=n),r},e.is=function(e){var t=e;return t&&Q.string(t.title)&&(void 0===t.diagnostics||Q.typedArray(t.diagnostics,h.is))&&(void 0===t.kind||Q.string(t.kind))&&(void 0!==t.edit||void 0!==t.command)&&(void 0===t.command||p.is(t.command))&&(void 0===t.edit||_.is(t.edit))}}(z||(z={})),function(e){e.create=function(e,t){var n={range:e};return Q.defined(t)&&(n.data=t),n},e.is=function(e){var t=e;return Q.defined(t)&&i.is(t.range)&&(Q.undefined(t.command)||p.is(t.command))}}(H||(H={})),function(e){e.create=function(e,t){return{tabSize:e,insertSpaces:t}},e.is=function(e){var t=e;return Q.defined(t)&&Q.number(t.tabSize)&&Q.boolean(t.insertSpaces)}}(q||(q={}));var $=function(){return function(){}}();!function(e){e.create=function(e,t,n){return{range:e,target:t,data:n}},e.is=function(e){var t=e;return Q.defined(t)&&i.is(t.range)&&(Q.undefined(t.target)||Q.string(t.target))}}($||($={}));var G,J;!function(e){e.create=function(e,t,n,r){return new X(e,t,n,r)},e.is=function(e){var t=e;return!!(Q.defined(t)&&Q.string(t.uri)&&(Q.undefined(t.languageId)||Q.string(t.languageId))&&Q.number(t.lineCount)&&Q.func(t.getText)&&Q.func(t.positionAt)&&Q.func(t.offsetAt))},e.applyEdits=function(e,t){for(var n=e.getText(),r=function e(t,n){if(t.length<=1)return t;var r=t.length/2|0,i=t.slice(0,r),o=t.slice(r);e(i,n),e(o,n);for(var a=0,s=0,u=0;a<i.length&&s<o.length;){var c=n(i[a],o[s]);t[u++]=c<=0?i[a++]:o[s++]}for(;a<i.length;)t[u++]=i[a++];for(;s<o.length;)t[u++]=o[s++];return t}(t,function(e,t){var 
n=e.range.start.line-t.range.start.line;return 0===n?e.range.start.character-t.range.start.character:n}),i=n.length,o=r.length-1;o>=0;o--){var a=r[o],s=e.offsetAt(a.range.start),u=e.offsetAt(a.range.end);if(!(u<=i))throw new Error("Overlapping edit");n=n.substring(0,s)+a.newText+n.substring(u,n.length),i=s}return n}}(G||(G={})),function(e){e.Manual=1,e.AfterDelay=2,e.FocusOut=3}(J||(J={}));var Q,X=function(){function e(e,t,n,r){this._uri=e,this._languageId=t,this._version=n,this._content=r,this._lineOffsets=null}return Object.defineProperty(e.prototype,"uri",{get:function(){return this._uri},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"languageId",{get:function(){return this._languageId},enumerable:!0,configurable:!0}),Object.defineProperty(e.prototype,"version",{get:function(){return this._version},enumerable:!0,configurable:!0}),e.prototype.getText=function(e){if(e){var t=this.offsetAt(e.start),n=this.offsetAt(e.end);return this._content.substring(t,n)}return this._content},e.prototype.update=function(e,t){this._content=e.text,this._version=t,this._lineOffsets=null},e.prototype.getLineOffsets=function(){if(null===this._lineOffsets){for(var e=[],t=this._content,n=!0,r=0;r<t.length;r++){n&&(e.push(r),n=!1);var i=t.charAt(r);n="\r"===i||"\n"===i,"\r"===i&&r+1<t.length&&"\n"===t.charAt(r+1)&&r++}n&&t.length>0&&e.push(t.length),this._lineOffsets=e}return this._lineOffsets},e.prototype.positionAt=function(e){e=Math.max(Math.min(e,this._content.length),0);var t=this.getLineOffsets(),n=0,i=t.length;if(0===i)return r.create(0,e);for(;n<i;){var o=Math.floor((n+i)/2);t[o]>e?i=o:n=o+1}var a=n-1;return r.create(a,e-t[a])},e.prototype.offsetAt=function(e){var t=this.getLineOffsets();if(e.line>=t.length)return this._content.length;if(e.line<0)return 0;var n=t[e.line],r=e.line+1<t.length?t[e.line+1]:this._content.length;return Math.max(Math.min(n+e.character,r),n)},Object.defineProperty(e.prototype,"lineCount",{get:function(){return this.getLineOffsets().length},enumerable:!0,configurable:!0}),e}();!function(e){var t=Object.prototype.toString;e.defined=function(e){return void 0!==e},e.undefined=function(e){return void 0===e},e.boolean=function(e){return!0===e||!1===e},e.string=function(e){return"[object String]"===t.call(e)},e.number=function(e){return"[object Number]"===t.call(e)},e.func=function(e){return"[object Function]"===t.call(e)},e.objectLiteral=function(e){return null!==e&&"object"==typeof e},e.typedArray=function(e,t){return Array.isArray(e)&&e.every(t)}}(Q||(Q={}));monaco.Uri;var Y=monaco.Range,Z=function(){function e(e,t,n){var r=this;this._languageId=e,this._worker=t,this._disposables=[],this._listener=Object.create(null);var i=function(e){var t,n=e.getModeId();n===r._languageId&&(r._listener[e.uri.toString()]=e.onDidChangeContent(function(){clearTimeout(t),t=setTimeout(function(){return r._doValidate(e.uri,n)},500)}),r._doValidate(e.uri,n))},o=function(e){monaco.editor.setModelMarkers(e,r._languageId,[]);var t=e.uri.toString(),n=r._listener[t];n&&(n.dispose(),delete r._listener[t])};this._disposables.push(monaco.editor.onDidCreateModel(i)),this._disposables.push(monaco.editor.onWillDisposeModel(function(e){o(e),r._resetSchema(e.uri)})),this._disposables.push(monaco.editor.onDidChangeModelLanguage(function(e){o(e.model),i(e.model),r._resetSchema(e.model.uri)})),this._disposables.push(n.onDidChange(function(e){monaco.editor.getModels().forEach(function(e){e.getModeId()===r._languageId&&(o(e),i(e))})})),this._disposables.push({dispose:function(){for(var e in 
monaco.editor.getModels().forEach(o),r._listener)r._listener[e].dispose()}}),monaco.editor.getModels().forEach(i)}return e.prototype.dispose=function(){this._disposables.forEach(function(e){return e&&e.dispose()}),this._disposables=[]},e.prototype._resetSchema=function(e){this._worker().then(function(t){t.resetSchema(e.toString())})},e.prototype._doValidate=function(e,t){this._worker(e).then(function(n){return n.doValidation(e.toString()).then(function(n){var r=n.map(function(e){return n="number"==typeof(t=e).code?String(t.code):t.code,{severity:function(e){switch(e){case g.Error:return monaco.MarkerSeverity.Error;case g.Warning:return monaco.MarkerSeverity.Warning;case g.Information:return monaco.MarkerSeverity.Info;case g.Hint:return monaco.MarkerSeverity.Hint;default:return monaco.MarkerSeverity.Info}}(t.severity),startLineNumber:t.range.start.line+1,startColumn:t.range.start.character+1,endLineNumber:t.range.end.line+1,endColumn:t.range.end.character+1,message:t.message,code:n,source:t.source};var t,n}),i=monaco.editor.getModel(e);i&&i.getModeId()===t&&monaco.editor.setModelMarkers(i,t,r)})}).then(void 0,function(e){console.error(e)})},e}();function ee(e){if(e)return{character:e.column-1,line:e.lineNumber-1}}function te(e){if(e)return{start:{line:e.startLineNumber-1,character:e.startColumn-1},end:{line:e.endLineNumber-1,character:e.endColumn-1}}}function ne(e){if(e)return new Y(e.start.line+1,e.start.character+1,e.end.line+1,e.end.character+1)}function re(e){if(e)return{range:ne(e.range),text:e.newText}}var ie=function(){function e(e){this._worker=e}return Object.defineProperty(e.prototype,"triggerCharacters",{get:function(){return[" ",":"]},enumerable:!0,configurable:!0}),e.prototype.provideCompletionItems=function(e,t,n,r){var i=e.uri;return this._worker(i).then(function(e){return e.doComplete(i.toString(),ee(t))}).then(function(n){if(n){var r=e.getWordUntilPosition(t),i=new Y(t.lineNumber,r.startColumn,t.lineNumber,r.endColumn),o=n.items.map(function(e){var t={label:e.label,insertText:e.insertText||e.label,sortText:e.sortText,filterText:e.filterText,documentation:e.documentation,detail:e.detail,range:i,kind:function(e){var t=monaco.languages.CompletionItemKind;switch(e){case A.Text:return t.Text;case A.Method:return t.Method;case A.Function:return t.Function;case A.Constructor:return t.Constructor;case A.Field:return t.Field;case A.Variable:return t.Variable;case A.Class:return t.Class;case A.Interface:return t.Interface;case A.Module:return t.Module;case A.Property:return t.Property;case A.Unit:return t.Unit;case A.Value:return t.Value;case A.Enum:return t.Enum;case A.Keyword:return t.Keyword;case A.Snippet:return t.Snippet;case A.Color:return t.Color;case A.File:return t.File;case A.Reference:return t.Reference}return t.Property}(e.kind)};return e.textEdit&&(t.range=ne(e.textEdit.range),t.insertText=e.textEdit.newText),e.additionalTextEdits&&(t.additionalTextEdits=e.additionalTextEdits.map(re)),e.insertTextFormat===T.Snippet&&(t.insertTextRules=monaco.languages.CompletionItemInsertTextRule.InsertAsSnippet),t});return{isIncomplete:n.isIncomplete,suggestions:o}}})},e}();function oe(e){return"string"==typeof e?{value:e}:(t=e)&&"object"==typeof t&&"string"==typeof t.kind?"plaintext"===e.kind?{value:e.value.replace(/[\\`*_{}[\]()#+\-.!]/g,"\\$&")}:{value:e.value}:{value:"```"+e.language+"\n"+e.value+"\n```\n"};var t}var ae=function(){function e(e){this._worker=e}return e.prototype.provideHover=function(e,t,n){var r=e.uri;return this._worker(r).then(function(e){return 
e.doHover(r.toString(),ee(t))}).then(function(e){if(e)return{range:ne(e.range),contents:function(e){if(e)return Array.isArray(e)?e.map(oe):[oe(e)]}(e.contents)}})},e}();var se=function(){function e(e){this._worker=e}return e.prototype.provideDocumentSymbols=function(e,t){var n=e.uri;return this._worker(n).then(function(e){return e.findDocumentSymbols(n.toString())}).then(function(e){if(e)return e.map(function(e){return{name:e.name,detail:"",containerName:e.containerName,kind:function(e){var t=monaco.languages.SymbolKind;switch(e){case W.File:return t.Array;case W.Module:return t.Module;case W.Namespace:return t.Namespace;case W.Package:return t.Package;case W.Class:return t.Class;case W.Method:return t.Method;case W.Property:return t.Property;case W.Field:return t.Field;case W.Constructor:return t.Constructor;case W.Enum:return t.Enum;case W.Interface:return t.Interface;case W.Function:return t.Function;case W.Variable:return t.Variable;case W.Constant:return t.Constant;case W.String:return t.String;case W.Number:return t.Number;case W.Boolean:return t.Boolean;case W.Array:return t.Array}return t.Function}(e.kind),range:ne(e.location.range),selectionRange:ne(e.location.range),tags:[]}})})},e}();function ue(e){return{tabSize:e.tabSize,insertSpaces:e.insertSpaces}}var ce,de=function(){function e(e){this._worker=e}return e.prototype.provideDocumentFormattingEdits=function(e,t,n){var r=e.uri;return this._worker(r).then(function(e){return e.format(r.toString(),null,ue(t)).then(function(e){if(e&&0!==e.length)return e.map(re)})})},e}(),fe=function(){function e(e){this._worker=e}return e.prototype.provideDocumentRangeFormattingEdits=function(e,t,n,r){var i=e.uri;return this._worker(i).then(function(e){return e.format(i.toString(),te(t),ue(n)).then(function(e){if(e&&0!==e.length)return e.map(re)})})},e}(),le=function(){function e(e){this._worker=e}return e.prototype.provideDocumentColors=function(e,t){var n=e.uri;return this._worker(n).then(function(e){return e.findDocumentColors(n.toString())}).then(function(e){if(e)return e.map(function(e){return{color:e.color,range:ne(e.range)}})})},e.prototype.provideColorPresentations=function(e,t,n){var r=e.uri;return this._worker(r).then(function(e){return e.getColorPresentations(r.toString(),t.color,te(t.range))}).then(function(e){if(e)return e.map(function(e){var t={label:e.label};return e.textEdit&&(t.textEdit=re(e.textEdit)),e.additionalTextEdits&&(t.additionalTextEdits=e.additionalTextEdits.map(re)),t})})},e}(),ge=function(){function e(e){this._worker=e}return e.prototype.provideFoldingRanges=function(e,t,n){var r=e.uri;return this._worker(r).then(function(e){return e.provideFoldingRanges(r.toString(),t)}).then(function(e){if(e)return e.map(function(e){var t={start:e.startLine+1,end:e.endLine+1};return void 0!==e.kind&&(t.kind=function(e){switch(e){case d.Comment:return monaco.languages.FoldingRangeKind.Comment;case d.Imports:return monaco.languages.FoldingRangeKind.Imports;case d.Region:return monaco.languages.FoldingRangeKind.Region}return}(e.kind)),t})})},e}();function he(e,t){void 0===t&&(t=!1);var n=0,r=e.length,i="",o=0,a=16,s=0,u=0,c=0,d=0,f=0;function | (t,r){for(var i=0,o=0;i<t||!r;){var a=e.charCodeAt(n);if(a>=48&&a<=57)o=16*o+a-48;else if(a>=65&&a<=70)o=16*o+a-65+10;else{if(!(a>=97&&a<=102))break;o=16*o+a-97+10}n++,i++}return i<t&&(o=-1),o}function g(){if(i="",f=0,o=n,u=s,d=c,n>=r)return o=r,a=17;var t=e.charCodeAt(n);if(pe(t)){do{n++,i+=String.fromCharCode(t),t=e.charCodeAt(n)}while(pe(t));return a=15}if(me(t))return 
n++,i+=String.fromCharCode(t),13===t&&10===e.charCodeAt(n)&&(n++,i+="\n"),s++,c=n,a=14;switch(t){case 123:return n++,a=1;case 125:return n++,a=2;case 91:return n++,a=3;case 93:return n++,a=4;case 58:return n++,a=6;case 44:return n++,a=5;case 34:return n++,i=function(){for(var t="",i=n;;){if(n>=r){t+=e.substring(i,n),f=2;break}var o=e.charCodeAt(n);if(34===o){t+=e.substring(i,n),n++;break}if(92!==o){if(o>=0&&o<=31){if(me(o)){t+=e.substring(i,n),f=2;break}f=6}n++}else{if(t+=e.substring(i,n),++n>=r){f=2;break}switch(o=e.charCodeAt(n++)){case 34:t+='"';break;case 92:t+="\\";break;case 47:t+="/";break;case 98:t+="\b";break;case 102:t+="\f";break;case 110:t+="\n";break;case 114:t+="\r";break;case 116:t+="\t";break;case 117:var a=l(4,!0);a>=0?t+=String.fromCharCode(a):f=4;break;default:f=5}i=n}}return t}(),a=10;case 47:var g=n-1;if(47===e.charCodeAt(n+1)){for(n+=2;n<r&&!me(e.charCodeAt(n));)n++;return i=e.substring(g,n),a=12}if(42===e.charCodeAt(n+1)){n+=2;for(var p=r-1,m=!1;n<p;){var v=e.charCodeAt(n);if(42===v&&47===e.charCodeAt(n+1)){n+=2,m=!0;break}n++,me(v)&&(13===v&&10===e.charCodeAt(n)&&n++,s++,c=n)}return m||(n++,f=1),i=e.substring(g,n),a=13}return i+=String.fromCharCode(t),n++,a=16;case 45:if(i+=String.fromCharCode(t),++n===r||!ve(e.charCodeAt(n)))return a=16;case 48:case 49:case 50:case 51:case 52:case 53:case 54:case 55:case 56:case 57:return i+=function(){var t=n;if(48===e.charCodeAt(n))n++;else for(n++;n<e.length&&ve(e.charCodeAt(n));)n++;if(n<e.length&&46===e.charCodeAt(n)){if(!(++n<e.length&&ve(e.charCodeAt(n))))return f=3,e.substring(t,n);for(n++;n<e.length&&ve(e.charCodeAt(n));)n++}var r=n;if(n<e.length&&(69===e.charCodeAt(n)||101===e.charCodeAt(n)))if((++n<e.length&&43===e.charCodeAt(n)||45===e.charCodeAt(n))&&n++,n<e.length&&ve(e.charCodeAt(n))){for(n++;n<e.length&&ve(e.charCodeAt(n));)n++;r=n}else f=3;return e.substring(t,r)}(),a=11;default:for(;n<r&&h(t);)n++,t=e.charCodeAt(n);if(o!==n){switch(i=e.substring(o,n)){case"true":return a=8;case"false":return a=9;case"null":return a=7}return a=16}return i+=String.fromCharCode(t),n++,a=16}}function h(e){if(pe(e)||me(e))return!1;switch(e){case 125:case 93:case 123:case 91:case 34:case 58:case 44:case 47:return!1}return!0}return{setPosition:function(e){n=e,i="",o=0,a=16,f=0},getPosition:function(){return n},scan:t?function(){var e;do{e=g()}while(e>=12&&e<=15);return e}:g,getToken:function(){return a},getTokenValue:function(){return i},getTokenOffset:function(){return o},getTokenLength:function(){return n-o},getTokenStartLine:function(){return u},getTokenStartCharacter:function(){return o-d},getTokenError:function(){return f}}}function pe(e){return 32===e||9===e||11===e||12===e||160===e||5760===e||e>=8192&&e<=8203||8239===e||8287===e||12288===e||65279===e}function me(e){return 10===e||13===e||8232===e||8233===e}function ve(e){return e>=48&&e<=57}!function(e){e.DEFAULT={allowTrailingComma:!1}}(ce||(ce={}));var be=he;function ke(e){return{getInitialState:function(){return new Pe(null,null,!1)},tokenize:function(t,n,r,i){return function(e,t,n,r,i){void 0===r&&(r=0);var o=0,a=!1;switch(n.scanError){case 2:t='"'+t,o=1;break;case 1:t="/*"+t,o=2}var s,u,c=be(t),d=n.lastWasColon;u={tokens:[],endState:n.clone()};for(;;){var f=r+c.getPosition(),l="";if(17===(s=c.scan()))break;if(f===r+c.getPosition())throw new Error("Scanner did not advance, next 3 characters are: "+t.substr(c.getPosition(),3));switch(a&&(f-=o),a=o>0,s){case 1:case 2:l=Ce,d=!1;break;case 3:case 4:l=_e,d=!1;break;case 6:l=ye,d=!0;break;case 5:l=we,d=!1;break;case 8:case 
9:l=Ee,d=!1;break;case 7:l=xe,d=!1;break;case 10:l=d?Se:Ae,d=!1;break;case 11:l=Ie,d=!1}if(e)switch(s){case 12:l=Me;break;case 13:l=Te}u.endState=new Pe(n.getStateData(),c.getTokenError(),d),u.tokens.push({startIndex:f,scopes:l})}return u}(e,t,n,r)}}}var Ce="delimiter.bracket.json",_e="delimiter.array.json",ye="delimiter.colon.json",we="delimiter.comma.json",Ee="keyword.json",xe="keyword.json",Se="string.value.json",Ie="number.json",Ae="string.key.json",Te="comment.block.json",Me="comment.line.json",Pe=function(){function e(e,t,n){this._state=e,this.scanError=t,this.lastWasColon=n}return e.prototype.clone=function(){return new e(this._state,this.scanError,this.lastWasColon)},e.prototype.equals=function(t){return t===this||!!(t&&t instanceof e)&&(this.scanError===t.scanError&&this.lastWasColon===t.lastWasColon)},e.prototype.getStateData=function(){return this._state},e.prototype.setStateData=function(e){this._state=e},e}();function Re(e){return{dispose:function(){return Fe(e)}}}function Fe(e){for(;e.length;)e.pop().dispose()}t.setupMode=function(e){var t=[],n=[],r=new y(e);t.push(r);var i=function(){for(var e=[],t=0;t<arguments.length;t++)e[t]=arguments[t];return r.getLanguageServiceWorker.apply(r,e)};function o(){var t=e.languageId,r=e.modeConfiguration;Fe(n),r.documentFormattingEdits&&n.push(monaco.languages.registerDocumentFormattingEditProvider(t,new de(i))),r.documentRangeFormattingEdits&&n.push(monaco.languages.registerDocumentRangeFormattingEditProvider(t,new fe(i))),r.completionItems&&n.push(monaco.languages.registerCompletionItemProvider(t,new ie(i))),r.hovers&&n.push(monaco.languages.registerHoverProvider(t,new ae(i))),r.documentSymbols&&n.push(monaco.languages.registerDocumentSymbolProvider(t,new se(i))),r.tokens&&n.push(monaco.languages.setTokensProvider(t,ke(!0))),r.colors&&n.push(monaco.languages.registerColorProvider(t,new le(i))),r.foldingRanges&&n.push(monaco.languages.registerFoldingRangeProvider(t,new ge(i))),r.diagnostics&&n.push(new Z(t,i,e))}o(),t.push(monaco.languages.setLanguageConfiguration(e.languageId,je));var a=e.modeConfiguration;return e.onDidChange(function(e){e.modeConfiguration!==a&&(a=e.modeConfiguration,o())}),t.push(Re(n)),Re(t)};var je={wordPattern:/(-?\d*\.\d\w*)|([^\[\{\]\}\:\"\,\s]+)/g,comments:{lineComment:"//",blockComment:["/*","*/"]},brackets:[["{","}"],["[","]"]],autoClosingPairs:[{open:"{",close:"}",notIn:["string"]},{open:"[",close:"]",notIn:["string"]},{open:'"',close:'"',notIn:["string"]}]}}}); | l |
routes.go | package routes
import (
"github.com/gin-gonic/gin"
"app/controllers/users"
"app/controllers/welcome"
"app/middlewares/cors"
"app/middlewares/example"
)
func InitRoutes(router gin.Engine) {
//use middleware
router.Use(cors.CORSMiddleware())
//root route
router.GET("/", welcome.Welcome)
router.GET("/users", users.FetchUsers)
//route group
v1 := router.Group("/api/v1").Use(example.Example())
{
v1.GET("/", welcome.Hello)
}
}
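// Usage sketch (assumed main package, not part of this file): the engine is
// taken by value here, so a caller would dereference the pointer returned by
// gin.Default, e.g.
//
//	r := gin.Default()
//	routes.InitRoutes(*r)
//	_ = r.Run(":8080")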
block.rs | // Copyright © 2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::cell::RefCell;
#[cfg(not(test))]
use crate::virtio::Error as VirtioError;
#[cfg(not(test))]
use crate::virtio::VirtioTransport;
#[cfg(not(test))]
const QUEUE_SIZE: usize = 16;
#[repr(C)]
#[repr(align(16))]
#[derive(Default)]
#[cfg(not(test))]
/// A virtio queue entry descriptor
struct Desc {
addr: u64,
length: u32,
flags: u16,
next: u16,
}
#[repr(C)]
#[repr(align(2))]
#[derive(Default)]
#[cfg(not(test))]
/// The virtio available ring
struct AvailRing {
flags: u16,
idx: u16,
ring: [u16; QUEUE_SIZE],
}
#[repr(C)]
#[repr(align(4))]
#[derive(Default)]
#[cfg(not(test))]
/// The virtio used ring
struct UsedRing {
flags: u16,
idx: u16,
ring: [UsedElem; QUEUE_SIZE],
}
#[repr(C)]
#[derive(Default)]
#[cfg(not(test))]
/// A single element in the used ring
struct UsedElem {
id: u32,
len: u32,
}
#[repr(C)]
#[repr(align(64))]
#[cfg(not(test))]
/// Device driver for virtio block over any transport
pub struct VirtioBlockDevice<'a> {
transport: &'a mut VirtioTransport,
state: RefCell<DriverState>,
}
#[repr(C)]
#[repr(align(64))]
#[derive(Default)]
#[cfg(not(test))]
struct DriverState {
descriptors: [Desc; QUEUE_SIZE],
avail: AvailRing,
used: UsedRing,
next_head: usize,
}
pub enum Error {
DEVICE_ERROR,
BlockError,
BlockIOError,
#[cfg(not(test))]
BlockNotSupported,
}
#[repr(C)]
#[cfg(not(test))]
/// Header used for virtio block requests
struct BlockRequestHeader {
request: u32,
reserved: u32,
sector: u64,
}
#[repr(C)]
#[cfg(not(test))]
/// Footer used for virtio block requests
struct BlockRequestFooter {
status: u8,
}
pub trait SectorRead {
    /// Read a single sector (512 bytes) from the block device. `data` must be
    /// exactly 512 bytes long.
fn read(&self, sector: u64, data: &mut [u8]) -> Result<(), Error>;
}
pub trait SectorWrite {
/// Write a single sector (512 bytes) from the block device. `data` must be
/// exactly 512 bytes long.
fn write(&self, sector: u64, data: &mut [u8]) -> Result<(), Error>;
fn flush(&self) -> Result<(), Error>;
}
#[cfg(not(test))]
#[derive(PartialEq, Copy, Clone)]
enum RequestType {
Read = 0,
Write = 1,
Flush = 4,
}
#[cfg(not(test))]
impl<'a> VirtioBlockDevice<'a> {
pub fn new(transport: &'a mut VirtioTransport) -> VirtioBlockDevice<'a> {
VirtioBlockDevice {
transport,
state: RefCell::new(DriverState::default()),
}
}
pub fn reset(&self) {
self.transport.reset()
}
pub fn init(&mut self) -> Result<(), VirtioError> {
const VIRTIO_SUBSYSTEM_BLOCK: u32 = 0x2;
const VIRTIO_F_VERSION_1: u64 = 1 << 32;
const VIRTIO_STATUS_RESET: u32 = 0;
const VIRTIO_STATUS_ACKNOWLEDGE: u32 = 1;
const VIRTIO_STATUS_DRIVER: u32 = 2;
const VIRTIO_STATUS_FEATURES_OK: u32 = 8;
const VIRTIO_STATUS_DRIVER_OK: u32 = 4;
const VIRTIO_STATUS_FAILED: u32 = 128;
// Initialise the transport
self.transport.init(VIRTIO_SUBSYSTEM_BLOCK)?;
// Reset device
self.transport.set_status(VIRTIO_STATUS_RESET);
// Acknowledge
self.transport.add_status(VIRTIO_STATUS_ACKNOWLEDGE);
// And advertise driver
self.transport.add_status(VIRTIO_STATUS_DRIVER);
// Request device features
let device_features = self.transport.get_features();
if device_features & VIRTIO_F_VERSION_1 != VIRTIO_F_VERSION_1 {
self.transport.add_status(VIRTIO_STATUS_FAILED);
return Err(VirtioError::VirtioLegacyOnly);
}
// Don't support any advanced features for now
let supported_features = VIRTIO_F_VERSION_1;
// Report driver features
self.transport
.set_features(device_features & supported_features);
self.transport.add_status(VIRTIO_STATUS_FEATURES_OK);
if self.transport.get_status() & VIRTIO_STATUS_FEATURES_OK != VIRTIO_STATUS_FEATURES_OK {
self.transport.add_status(VIRTIO_STATUS_FAILED);
return Err(VirtioError::VirtioFeatureNegotiationFailed);
}
// Program queues
self.transport.set_queue(0);
let max_queue = self.transport.get_queue_max_size();
// Hardcoded queue size to QUEUE_SIZE at the moment
if max_queue < QUEUE_SIZE as u16 {
self.transport.add_status(VIRTIO_STATUS_FAILED);
return Err(VirtioError::VirtioQueueTooSmall);
}
self.transport.set_queue_size(QUEUE_SIZE as u16);
// Update all queue parts
let state = self.state.borrow_mut();
let addr = state.descriptors.as_ptr() as u64;
self.transport.set_descriptors_address(addr);
let addr = (&state.avail as *const _) as u64;
self.transport.set_avail_ring(addr);
let addr = (&state.used as *const _) as u64;
self.transport.set_used_ring(addr);
// Confirm queue
self.transport.set_queue_enable();
// Report driver ready
self.transport.add_status(VIRTIO_STATUS_DRIVER_OK);
Ok(())
}
// Number of sectors that this device holds
pub fn get_capacity(&self) -> u64 {
u64::from(self.transport.read_device_config(0))
| u64::from(self.transport.read_device_config(4)) << 32
}
fn request(
&self,
sector: u64,
data: Option<&mut [u8]>,
request: RequestType,
) -> Result<(), Error> {
if request != RequestType::Flush {
assert_eq!(512, data.as_ref().unwrap().len());
}
const VIRTQ_DESC_F_NEXT: u16 = 1;
const VIRTQ_DESC_F_WRITE: u16 = 2;
const VIRTIO_BLK_S_OK: u8 = 0;
const VIRTIO_BLK_S_IOERR: u8 = 1;
const VIRTIO_BLK_S_UNSUPP: u8 = 2;
let header = BlockRequestHeader {
request: request as u32,
reserved: 0,
sector,
};
let footer = BlockRequestFooter { status: 0 };
let mut state = self.state.borrow_mut();
let next_head = state.next_head;
let mut d = &mut state.descriptors[next_head];
let next_desc = (next_head + 1) % QUEUE_SIZE;
d.addr = (&header as *const _) as u64;
d.length = core::mem::size_of::<BlockRequestHeader>() as u32;
d.flags = VIRTQ_DESC_F_NEXT;
d.next = next_desc as u16;
let mut d = &mut state.descriptors[next_desc];
let next_desc = (next_desc + 1) % QUEUE_SIZE;
if request != RequestType::Flush {
d.addr = data.unwrap().as_ptr() as u64;
d.length = core::mem::size_of::<[u8; 512]>() as u32;
}
d.flags = VIRTQ_DESC_F_NEXT
| if request == RequestType::Read {
VIRTQ_DESC_F_WRITE
} else {
0
};
d.next = next_desc as u16;
let mut d = &mut state.descriptors[next_desc];
d.addr = (&footer as *const _) as u64;
d.length = core::mem::size_of::<BlockRequestFooter>() as u32;
d.flags = VIRTQ_DESC_F_WRITE;
d.next = 0;
// Update ring to point to head of chain. Fence. Then update idx
let avail_index = state.avail.idx;
state.avail.ring[(avail_index % QUEUE_SIZE as u16) as usize] = state.next_head as u16;
core::sync::atomic::fence(core::sync::atomic::Ordering::Acquire);
state.avail.idx = state.avail.idx.wrapping_add(1);
// Next free descriptor to use
state.next_head = (next_desc + 1) % QUEUE_SIZE;
// Notify queue has been updated
self.transport.notify_queue(0);
// Check for the completion of the request
while unsafe { core::ptr::read_volatile(&state.used.idx) } != state.avail.idx {
core::sync::atomic::fence(core::sync::atomic::Ordering::Acquire);
}
match footer.status {
VIRTIO_BLK_S_OK => Ok(()),
VIRTIO_BLK_S_IOERR => Err(Error::BlockIOError),
VIRTIO_BLK_S_UNSUPP => Err(Error::BlockNotSupported),
_ => Err(Error::BlockNotSupported),
}
}
}
#[cfg(not(test))]
impl<'a> SectorRead for VirtioBlockDevice<'a> {
fn read(&self, sector: u64, data: &mut [u8]) -> Result<(), Error> {
self.request(sector, Some(data), RequestType::Read)
}
}
#[cfg(not(test))]
impl<'a> SectorWrite for VirtioBlockDevice<'a> {
fn write(&self, sector: u64, data: &mut [u8]) -> Result<(), Error> {
self.request(sector, Some(data), RequestType::Write)
}
fn flush(&self) -> Result<(), Error> {
self.request(0, None, RequestType::Flush)
}
}
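// Usage sketch (the transport construction is assumed; everything else is the
// API defined above): initialise the device, then read sector 0 through the
// SectorRead trait.
//
// let mut device = VirtioBlockDevice::new(&mut transport);
// device.init().expect("virtio-blk init failed");
// let mut sector = [0u8; 512];
// device.read(0, &mut sector).expect("sector read failed");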
login-fido2.component.ts | import { Component, ChangeDetectionStrategy, OnInit } from "@angular/core";
import { LoginService } from "../login.service";
@Component({
selector: "gt-login-fido2",
templateUrl: "./login-fido2.component.html",
styleUrls: ["./login-fido2.component.scss"],
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class LoginFido2Component implements OnInit {
useTOTP$ = this.loginService.useTOTP$;
error$ = this.loginService.error$;
authInProg$ = this.loginService.authInProg$;
constructor(private loginService: LoginService) {}
switchMethod() {
this.loginService.switchMethod();
}
ngOnInit() {
this.loginService.authenticateFIDO2().subscribe();
}
retryAuth() {
this.loginService.authenticateFIDO2().subscribe();
}
}
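// Usage sketch (hypothetical template, not from this repo): the exposed
// observables are intended for the async pipe, e.g.
//
//   <div *ngIf="error$ | async as error">{{ error }}</div>
//   <button (click)="retryAuth()" [disabled]="authInProg$ | async">Retry</button>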
test_ln.rs | use common::util::*;
use std::path::PathBuf;
#[test]
fn test_symlink_existing_file() {
let (at, mut ucmd) = at_and_ucmd!();
let file = "test_symlink_existing_file";
let link = "test_symlink_existing_file_link";
at.touch(file);
ucmd.args(&["-s", file, link]).succeeds().no_stderr();
assert!(at.file_exists(file));
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), file);
}
#[test]
fn test_symlink_dangling_file() {
let (at, mut ucmd) = at_and_ucmd!();
let file = "test_symlink_dangling_file";
let link = "test_symlink_dangling_file_link";
ucmd.args(&["-s", file, link]).succeeds().no_stderr();
assert!(!at.file_exists(file));
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), file);
}
#[test]
fn test_symlink_existing_directory() {
let (at, mut ucmd) = at_and_ucmd!();
let dir = "test_symlink_existing_dir";
let link = "test_symlink_existing_dir_link";
at.mkdir(dir);
ucmd.args(&["-s", dir, link]).succeeds().no_stderr();
assert!(at.dir_exists(dir));
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), dir);
}
#[test]
fn test_symlink_dangling_directory() {
let (at, mut ucmd) = at_and_ucmd!();
let dir = "test_symlink_dangling_dir";
let link = "test_symlink_dangling_dir_link";
ucmd.args(&["-s", dir, link]).succeeds().no_stderr();
assert!(!at.dir_exists(dir));
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), dir);
}
#[test]
fn test_symlink_circular() {
let (at, mut ucmd) = at_and_ucmd!();
let link = "test_symlink_circular";
ucmd.args(&["-s", link]).succeeds().no_stderr();
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), link);
}
#[test]
fn test_symlink_dont_overwrite() {
let (at, mut ucmd) = at_and_ucmd!();
let file = "test_symlink_dont_overwrite";
let link = "test_symlink_dont_overwrite_link";
at.touch(file);
at.touch(link);
ucmd.args(&["-s", file, link]).fails();
assert!(at.file_exists(file));
assert!(at.file_exists(link));
assert!(!at.is_symlink(link));
}
#[test]
fn test_symlink_overwrite_force() {
let (at, mut ucmd) = at_and_ucmd!();
let file_a = "test_symlink_overwrite_force_a";
let file_b = "test_symlink_overwrite_force_b";
let link = "test_symlink_overwrite_force_link";
// Create symlink
at.symlink_file(file_a, link);
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), file_a);
// Force overwrite of existing symlink
ucmd.args(&["--force", "-s", file_b, link]).succeeds();
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), file_b);
}
#[test]
fn test_symlink_interactive() {
let scene = TestScenario::new(util_name!());
let at = &scene.fixtures;
let file = "test_symlink_interactive_file";
let link = "test_symlink_interactive_file_link";
at.touch(file);
at.touch(link);
scene
.ucmd()
.args(&["-i", "-s", file, link])
.pipe_in("n")
.succeeds()
.no_stderr();
assert!(at.file_exists(file));
assert!(!at.is_symlink(link));
scene
.ucmd()
.args(&["-i", "-s", file, link])
.pipe_in("Yesh")
.succeeds()
.no_stderr();
assert!(at.file_exists(file));
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), file);
}
#[test]
fn test_symlink_simple_backup() {
let (at, mut ucmd) = at_and_ucmd!();
let file = "test_symlink_simple_backup";
let link = "test_symlink_simple_backup_link";
at.touch(file);
at.symlink_file(file, link);
assert!(at.file_exists(file));
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), file);
ucmd.args(&["-b", "-s", file, link]).succeeds().no_stderr();
assert!(at.file_exists(file));
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), file);
let backup = &format!("{}~", link);
assert!(at.is_symlink(backup));
assert_eq!(at.resolve_link(backup), file);
}
#[test]
fn test_symlink_custom_backup_suffix() {
let (at, mut ucmd) = at_and_ucmd!();
let file = "test_symlink_custom_backup_suffix";
let link = "test_symlink_custom_backup_suffix_link";
let suffix = "super-suffix-of-the-century";
at.touch(file);
at.symlink_file(file, link);
assert!(at.file_exists(file));
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), file);
let arg = &format!("--suffix={}", suffix);
ucmd.args(&["-b", arg, "-s", file, link])
.succeeds()
.no_stderr();
assert!(at.file_exists(file));
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), file);
let backup = &format!("{}{}", link, suffix);
assert!(at.is_symlink(backup));
assert_eq!(at.resolve_link(backup), file);
}
#[test]
fn test_symlink_backup_numbering() {
let (at, mut ucmd) = at_and_ucmd!();
let file = "test_symlink_backup_numbering";
let link = "test_symlink_backup_numbering_link";
at.touch(file);
at.symlink_file(file, link);
assert!(at.file_exists(file));
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), file);
ucmd.args(&["-s", "--backup=t", file, link])
.succeeds()
.no_stderr();
assert!(at.file_exists(file));
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), file);
let backup = &format!("{}.~1~", link);
assert!(at.is_symlink(backup));
assert_eq!(at.resolve_link(backup), file);
}
#[test]
fn test_symlink_existing_backup() {
let (at, mut ucmd) = at_and_ucmd!();
let file = "test_symlink_existing_backup";
let link = "test_symlink_existing_backup_link";
let link_backup = "test_symlink_existing_backup_link.~1~";
let resulting_backup = "test_symlink_existing_backup_link.~2~";
// Create symlink and verify
at.touch(file);
at.symlink_file(file, link);
assert!(at.file_exists(file));
assert!(at.is_symlink(link));
assert_eq!(at.resolve_link(link), file);
// Create backup symlink and verify
at.symlink_file(file, link_backup);
assert!(at.file_exists(file));
assert!(at.is_symlink(link_backup));
assert_eq!(at.resolve_link(link_backup), file);
ucmd.args(&["-s", "--backup=nil", file, link])
.succeeds()
.no_stderr();
assert!(at.file_exists(file));
assert!(at.is_symlink(link_backup));
assert_eq!(at.resolve_link(link_backup), file);
assert!(at.is_symlink(resulting_backup));
assert_eq!(at.resolve_link(resulting_backup), file);
}
#[test]
fn test_symlink_target_dir() {
let (at, mut ucmd) = at_and_ucmd!();
let dir = "test_ln_target_dir_dir";
let file_a = "test_ln_target_dir_file_a";
let file_b = "test_ln_target_dir_file_b";
at.touch(file_a);
at.touch(file_b);
at.mkdir(dir);
ucmd.args(&["-s", "-t", dir, file_a, file_b])
.succeeds()
.no_stderr();
let file_a_link = &format!("{}/{}", dir, file_a);
assert!(at.is_symlink(file_a_link));
assert_eq!(at.resolve_link(file_a_link), file_a);
let file_b_link = &format!("{}/{}", dir, file_b);
assert!(at.is_symlink(file_b_link));
assert_eq!(at.resolve_link(file_b_link), file_b);
}
#[test]
fn test_symlink_target_dir_from_dir() {
let (at, mut ucmd) = at_and_ucmd!();
let dir = "test_ln_target_dir_dir";
let from_dir = "test_ln_target_dir_from_dir";
let filename_a = "test_ln_target_dir_file_a";
let filename_b = "test_ln_target_dir_file_b";
let file_a = &format!("{}/{}", from_dir, filename_a);
let file_b = &format!("{}/{}", from_dir, filename_b);
at.mkdir(from_dir);
at.touch(file_a);
at.touch(file_b);
at.mkdir(dir);
ucmd.args(&["-s", "-t", dir, file_a, file_b])
.succeeds()
.no_stderr();
let file_a_link = &format!("{}/{}", dir, filename_a);
assert!(at.is_symlink(file_a_link));
assert_eq!(&at.resolve_link(file_a_link), file_a);
let file_b_link = &format!("{}/{}", dir, filename_b);
assert!(at.is_symlink(file_b_link));
assert_eq!(&at.resolve_link(file_b_link), file_b);
}
#[test]
fn test_symlink_overwrite_dir_fail() {
    let (at, mut ucmd) = at_and_ucmd!();
    let path_a = "test_symlink_overwrite_dir_a";
    let path_b = "test_symlink_overwrite_dir_b";
    at.touch(path_a);
    at.mkdir(path_b);
    assert!(
        ucmd.args(&["-s", "-T", path_a, path_b])
            .fails()
            .stderr
            .len()
            > 0
    );
}
#[test]
fn test_symlink_errors() {
let (at, mut ucmd) = at_and_ucmd!();
let dir = "test_symlink_errors_dir";
let file_a = "test_symlink_errors_file_a";
let file_b = "test_symlink_errors_file_b";
at.mkdir(dir);
at.touch(file_a);
at.touch(file_b);
// $ ln -T -t a b
// ln: cannot combine --target-directory (-t) and --no-target-directory (-T)
ucmd.args(&["-T", "-t", dir, file_a, file_b])
.fails()
.stderr_is(
"ln: error: cannot combine --target-directory (-t) and --no-target-directory \
(-T)\n",
);
}
#[test]
fn test_symlink_verbose() {
let scene = TestScenario::new(util_name!());
let at = &scene.fixtures;
let file_a = "test_symlink_verbose_file_a";
let file_b = "test_symlink_verbose_file_b";
at.touch(file_a);
scene
.ucmd()
.args(&["-v", file_a, file_b])
.succeeds()
.stdout_only(format!("'{}' -> '{}'\n", file_b, file_a));
at.touch(file_b);
scene
.ucmd()
.args(&["-v", "-b", file_a, file_b])
.succeeds()
.stdout_only(format!(
"'{}' -> '{}' (backup: '{}~')\n",
file_b, file_a, file_b
));
}
#[test]
fn test_symlink_target_only() {
let (at, mut ucmd) = at_and_ucmd!();
let dir = "test_symlink_target_only";
at.mkdir(dir);
assert!(ucmd.args(&["-s", "-t", dir]).fails().stderr.len() > 0);
}
#[test]
fn test_symlink_implicit_target_dir() {
let (at, mut ucmd) = at_and_ucmd!();
let dir = "test_symlink_implicit_target_dir";
// On windows, slashes aren't allowed in symlink targets, so use
// PathBuf to construct `file` instead of simple "dir/file".
let filename = "test_symlink_implicit_target_file";
let path = PathBuf::from(dir).join(filename);
let file = &path.to_string_lossy();
at.mkdir(dir);
at.touch(file);
ucmd.args(&["-s", file]).succeeds().no_stderr();
assert!(at.file_exists(filename));
assert!(at.is_symlink(filename));
assert_eq!(at.resolve_link(filename), *file);
}
#[test]
fn test_symlink_to_dir_2args() {
let (at, mut ucmd) = at_and_ucmd!();
let filename = "test_symlink_to_dir_2args_file";
let from_file = &format!("{}/{}", at.as_string(), filename);
let to_dir = "test_symlink_to_dir_2args_to_dir";
let to_file = &format!("{}/{}", to_dir, filename);
at.mkdir(to_dir);
at.touch(from_file);
ucmd.args(&["-s", from_file, to_dir]).succeeds().no_stderr();
assert!(at.file_exists(to_file));
assert!(at.is_symlink(to_file));
assert_eq!(at.resolve_link(to_file), filename);
}
#[test]
fn test_symlink_missing_destination() {
let (at, mut ucmd) = at_and_ucmd!();
let file = "test_symlink_missing_destination";
at.touch(file);
ucmd.args(&["-s", "-T", file]).fails().stderr_is(format!(
"ln: error: missing destination file operand after '{}'",
file
));
}
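// Sketch of an additional case (only helpers already used above; whether the
// implementation accepts this exact flag combination is an assumption):
// --force should also replace an existing dangling symlink with a valid one.
#[test]
fn test_symlink_overwrite_dangling_force() {
    let (at, mut ucmd) = at_and_ucmd!();
    let file = "test_symlink_overwrite_dangling_file";
    let link = "test_symlink_overwrite_dangling_link";
    at.touch(file);
    at.symlink_file("missing_target", link);
    ucmd.args(&["--force", "-s", file, link]).succeeds().no_stderr();
    assert!(at.is_symlink(link));
    assert_eq!(at.resolve_link(link), file);
}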
tuples.rs | use decent_synquote_alternative as synquote;
use proc_macro2::{Ident, Span, TokenStream};
use synquote::parser::*;
use synquote::token_builder::*;
use crate::structs_and_enums::{FieldMutator, FieldMutatorKind};
use crate::{Common, MakeMutatorSettings};
pub fn make_basic_tuple_mutator(tb: &mut TokenBuilder, nbr_elements: usize) {
make_tuple_type_structure(tb, nbr_elements);
declare_tuple_mutator(tb, nbr_elements);
declare_tuple_mutator_helper_types(tb, nbr_elements);
impl_mutator_trait(tb, nbr_elements);
impl_default_mutator_for_tuple(tb, nbr_elements);
}
#[allow(non_snake_case)]
pub fn make_tuple_type_structure(tb: &mut TokenBuilder, nbr_elements: usize) {
let cm = Common::new(nbr_elements);
let Ti = cm.Ti.as_ref();
// T0, T1, ...
let type_params = join_ts!(0..nbr_elements, i, Ti(i), separator: ",");
let type_params_static_bound = join_ts!(0..nbr_elements, i, Ti(i) ": 'static", separator: ",");
let tuple_owned = ts!("(" type_params ",)");
let tuple_ref = ts!("(" join_ts!(0..nbr_elements, i, "&'a" Ti(i) ",") ")");
let tuple_mut = ts!("(" join_ts!(0..nbr_elements, i, "&'a mut" Ti(i) ",") ")");
let PhantomData = ts!(cm.PhantomData "<(" type_params ",)>");
extend_ts!(tb,
"#[doc(hidden)]
pub struct" cm.TupleN_ident "<" type_params_static_bound "> {
_phantom: " PhantomData ",
}
impl<" type_params_static_bound "> " cm.RefTypes " for " cm.TupleN_ident "<" type_params "> {
type Owned = " tuple_owned ";
type Ref<'a> = " tuple_ref ";
type Mut<'a> = " tuple_mut ";
#[no_coverage]
fn get_ref_from_mut<'a>(v: &'a Self::Mut<'a>) -> Self::Ref<'a> {
(" join_ts!(0..nbr_elements, i, "v." i ",") ")
}
}
"
"impl<" type_params_static_bound "> " cm.TupleStructure "<" cm.TupleN_ident "<" type_params "> > for" tuple_owned "{
#[no_coverage]
fn get_ref<'a>(&'a self) -> " tuple_ref " {
(" join_ts!(0..nbr_elements, i, "&self." i ",") ")
}
#[no_coverage]
fn get_mut<'a>(&'a mut self) -> " tuple_mut " {
(" join_ts!(0..nbr_elements, i, "&mut self." i ",") ")
}
#[no_coverage]
fn new(t: " tuple_owned ") -> Self {
t
}
}"
);
}
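// Illustrative sketch (our expansion of the template above, not emitted
// verbatim by the crate): for nbr_elements = 2 the builder produces roughly
//
//   pub struct Tuple2<T0: 'static, T1: 'static> {
//       _phantom: PhantomData<(T0, T1,)>,
//   }
//   impl<T0: 'static, T1: 'static> RefTypes for Tuple2<T0, T1> {
//       type Owned = (T0, T1,);
//       type Ref<'a> = (&'a T0, &'a T1,);
//       type Mut<'a> = (&'a mut T0, &'a mut T1,);
//       ...
//   }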
#[allow(non_snake_case)]
pub(crate) fn impl_tuple_structure_trait(tb: &mut TokenBuilder, struc: &Struct) {
let nbr_elements = struc.struct_fields.len();
let cm = Common::new(nbr_elements);
let field_types = join_ts!(&struc.struct_fields, field, field.ty, separator: ",");
// let Ti = |i: usize| ident!("T" i);
let TupleKind = cm.TupleN_path.clone();
let generics_no_eq = struc.generics.removing_eq_type();
let generics_no_eq_nor_bounds = struc.generics.removing_bounds_and_eq_type();
let tuple_owned = ts!("(" join_ts!(&struc.struct_fields, field, field.ty ",") ")");
let tuple_ref = ts!("(" join_ts!(&struc.struct_fields, field, "&'a" field.ty ",") ")");
let tuple_mut = ts!("(" join_ts!(&struc.struct_fields, field, "&'a mut" field.ty ",") ")");
let mut where_clause = struc.where_clause.clone().unwrap_or_default();
where_clause.add_clause_items(join_ts!(&struc.generics.type_params, tp,
tp.type_ident ": 'static,"
));
extend_ts!(tb,
"impl" generics_no_eq cm.TupleStructure "<" TupleKind "<" field_types "> >
for" struc.ident generics_no_eq_nor_bounds where_clause "{
#[no_coverage]
fn get_ref<'a>(&'a self) -> " tuple_ref " {
(" join_ts!(&struc.struct_fields, field, "&self." field.access() ",") ")
}
#[no_coverage]
fn get_mut<'a>(&'a mut self) -> " tuple_mut " {
(" join_ts!(&struc.struct_fields, field, "&mut self." field.access() ",") ")
}
#[no_coverage]
fn new(t:" tuple_owned ") -> Self {
Self {"
join_ts!(struc.struct_fields.iter().enumerate(), (i, field),
field.access() ": t." i ","
)
"}
}
}"
);
}
pub(crate) fn impl_default_mutator_for_struct_with_0_field(tb: &mut TokenBuilder, struc: &Struct) {
assert!(struc.struct_fields.is_empty());
let cm = Common::new(0);
let generics_no_eq = struc.generics.removing_eq_type();
let generics_no_eq_nor_bounds = struc.generics.removing_bounds_and_eq_type();
// add T: DefaultMutator for each generic type parameter to the existing where clause
let mut where_clause = struc.where_clause.clone().unwrap_or_default();
where_clause.add_clause_items(join_ts!(&struc.generics.type_params, ty_param,
ty_param ":" cm.DefaultMutator ","
));
let init = struc.kind.map(|kind| ts!(kind.open() kind.close()));
extend_ts!(tb,
"impl " generics_no_eq cm.DefaultMutator "for" struc.ident generics_no_eq_nor_bounds where_clause "{
type Mutator = " cm.UnitMutator "<Self>;
#[no_coverage]
fn default_mutator() -> Self::Mutator {
Self::Mutator::new(" struc.ident init ")
}
}
");
}
#[allow(non_snake_case)]
pub(crate) fn impl_default_mutator_for_struct(tb: &mut TokenBuilder, struc: &Struct, settings: &MakeMutatorSettings) {
let nbr_elements = struc.struct_fields.len();
let cm = Common::new(nbr_elements);
let TupleNMutator = cm.TupleNMutator.as_ref()(nbr_elements);
let field_types = join_ts!(&struc.struct_fields, field, field.ty, separator: ",");
let field_mutators = vec![struc
.struct_fields
.iter()
.enumerate()
.map(|(i, field)| {
let mut mutator = None;
for attribute in field.attributes.iter() {
if let Some((m, init)) = super::read_field_default_mutator_attribute(attribute.clone()) {
mutator = Some((m, init));
}
}
if let Some(m) = mutator {
FieldMutator {
i,
j: None,
field: field.clone(),
kind: FieldMutatorKind::Prescribed(m.0.clone(), m.1),
}
} else {
FieldMutator {
i,
j: None,
field: field.clone(),
kind: FieldMutatorKind::Generic,
}
}
})
.collect::<Vec<_>>()];
let TupleKind = cm.TupleN_path.clone();
let TupleN_and_generics = ts!(TupleKind "<" field_types ">");
let TupleMutatorWrapper = ts!(
cm.TupleMutatorWrapper "<"
TupleNMutator "<"
join_ts!(field_mutators.iter().flatten(), m,
m.mutator_stream(&cm)
, separator: ",")
">,"
TupleN_and_generics
">"
);
use crate::structs_and_enums::{make_mutator_type_and_impl, CreateWrapperMutatorParams};
let params = CreateWrapperMutatorParams {
cm: &cm,
visibility: &struc.visibility,
type_ident: &struc.ident,
type_generics: &struc.generics,
type_where_clause: &struc.where_clause,
field_mutators: &field_mutators,
InnerMutator: &TupleMutatorWrapper,
new_impl: &ts!(
"
#[no_coverage]
pub fn new("
join_ts!(struc.struct_fields.iter().zip(field_mutators.iter().flatten()), (field, mutator),
ident!("mutator_" field.access()) ":" mutator.mutator_stream(&cm)
, separator: ",")
") -> Self {
Self {
mutator : " cm.TupleMutatorWrapper "::new(" TupleNMutator "::new("
join_ts!(struc.struct_fields.iter(), field,
ident!("mutator_" field.access())
, separator: ",")
"))
}
}"
),
default_impl: &ts!("
#[no_coverage]
fn default() -> Self {
Self { mutator : <_>::default() }
}
"),
settings,
};
extend_ts!(tb, make_mutator_type_and_impl(params));
}
#[allow(non_snake_case)]
fn declare_tuple_mutator(tb: &mut TokenBuilder, nbr_elements: usize) {
let cm = Common::new(nbr_elements);
let Mi = cm.Mi.as_ref();
let mutator_type_params = join_ts!(0..nbr_elements, i, ident!("M" i), separator: ",");
let type_params = ts!(mutator_type_params);
let mutator_type_params_replacing_one_by_m = |replacing: usize| -> TokenStream {
join_ts!(0..nbr_elements, i,
if i == replacing {
ident!("M")
} else {
Mi(i)
}
, separator: ",")
};
extend_ts!(tb,
"#[derive(" cm.Default ")]"
"pub struct" cm.TupleNMutator_ident "<" type_params ">"
"{"
join_ts!(0..nbr_elements, i,
"pub" ident!("mutator_" i) ":" ident!("M" i) ","
)
"rng :" cm.fastrand_Rng
"}
impl < " type_params " >" cm.TupleNMutator_ident "<" type_params "> {
#[no_coverage]
pub fn new(" join_ts!(0..nbr_elements, i, ident!("mutator_" i) ":" ident!("M" i), separator: ",") ") -> Self {
Self {"
join_ts!(0..nbr_elements, i,
ident!("mutator_" i) ","
)
"rng: <_>::default() ,"
"}
}"
join_ts!(0..nbr_elements, i,
"#[no_coverage]
pub fn" ident!("replacing_mutator_" i) " < M > ( self , mutator : M )
->" cm.TupleNMutator_ident "<" mutator_type_params_replacing_one_by_m(i) " >" "
{
" cm.TupleNMutator_ident " {"
join_ts!(0..nbr_elements, j,
ident!("mutator_" j) ":" if i == j { ts!("mutator") } else { ts!("self ." ident!("mutator_" j)) } ","
)
"rng : self.rng ,
}
}"
)
"}"
)
}
#[allow(non_snake_case)]
fn declare_tuple_mutator_helper_types(tb: &mut TokenBuilder, nbr_elements: usize) {
let cm = Common::new(nbr_elements);
let Ti = cm.Ti.as_ref();
let ti = cm.ti.as_ref();
let tuple_type_params = join_ts!(0..nbr_elements, i, ident!("T" i), separator: ",");
extend_ts!(tb,
"#[derive(" cm.Clone ", " cm.Debug ", " cm.PartialEq ")]
pub struct Cache <" tuple_type_params "> {"
join_ts!(0..nbr_elements, i,
ti(i) ":" ident!("T" i) ","
)
"cplx : f64,
vose_alias : " cm.VoseAlias "
}
#[derive(" cm.Clone ", " cm.Debug ")]
pub enum InnerMutationStep {"
join_ts!(0..nbr_elements, i,
Ti(i)
, separator: ",")
"}
#[derive(" cm.Clone ", " cm.Debug ")]
pub struct MutationStep < " tuple_type_params " > {"
join_ts!(0..nbr_elements, i,
ti(i) ":" Ti(i) ","
)
"inner : " cm.Vec " < InnerMutationStep > ,
vose_alias : Option<" cm.VoseAlias ">
}
#[derive(" cm.Clone ", " cm.Debug ")]
pub struct ArbitraryStep < " tuple_type_params " > {"
join_ts!(0..nbr_elements, i,
ti(i) ":" Ti(i)
, separator: ",")
"}
pub struct UnmutateToken < " tuple_type_params " > {"
join_ts!(0..nbr_elements, i,
"pub" ti(i) ":" cm.Option "<" Ti(i) "> ,"
)
"
}
impl < " tuple_type_params " > " cm.Default " for UnmutateToken < " tuple_type_params " > {
#[no_coverage]
fn default() -> Self {
Self {"
join_ts!(0..nbr_elements, i,
ti(i) ":" cm.None ","
)
"
}
}
}
"
)
}
#[allow(non_snake_case)]
fn impl_mutator_trait(tb: &mut TokenBuilder, nbr_elements: usize) {
let cm = Common::new(nbr_elements);
let tuple_type_params = join_ts!(0..nbr_elements, i, ident!("T" i), separator: ",");
let mutator_type_params = join_ts!(0..nbr_elements, i, ident!("M" i), separator: ",");
let type_params = ts!(tuple_type_params "," mutator_type_params);
let ti = cm.ti.as_ref();
let Ti = cm.Ti.as_ref();
let Mi = cm.Mi.as_ref();
let mutator_i = cm.mutator_i.as_ref();
let ti_value = cm.ti_value.as_ref();
// let tuple_owned = ts!("(" join_ts!(0..nbr_elements, i, Ti(i), separator: ",") ")");
let tuple_ref = ts!("(" join_ts!(0..nbr_elements, i, "&'a" Ti(i) ",") ")");
let tuple_mut = ts!("(" join_ts!(0..nbr_elements, i, "&'a mut" Ti(i) ",") ")");
let SelfAsTupleMutator = ts!("<Self as " cm.TupleMutator "<T, " cm.TupleN_ident "<" tuple_type_params "> >>");
let TupleNAsRefTypes = ts!("<" cm.TupleN_ident "<" tuple_type_params "> as " cm.RefTypes ">");
extend_ts!(tb,"
impl <T , " type_params " > " cm.TupleMutator "<T , " cm.TupleN_ident "<" tuple_type_params "> >
for " cm.TupleNMutator_ident "< " mutator_type_params " >
where
T: " cm.Clone ","
join_ts!(0..nbr_elements, i,
Ti(i) ":" cm.Clone " + 'static ,"
Mi(i) ":" cm.fuzzcheck_traits_Mutator "<" Ti(i) ">,"
) "
T: " cm.TupleStructure "<" cm.TupleN_ident "<" tuple_type_params "> >,
{
type Cache = Cache <"
join_ts!(0..nbr_elements, i,
"<" Mi(i) "as" cm.fuzzcheck_traits_Mutator "<" Ti(i) "> >::Cache "
, separator: ",")
">;
type MutationStep = MutationStep <"
join_ts!(0..nbr_elements, i,
"<" Mi(i) "as" cm.fuzzcheck_traits_Mutator "<" Ti(i) "> >::MutationStep "
, separator: ",")
">;
type ArbitraryStep = ArbitraryStep <"
join_ts!(0..nbr_elements, i,
"<" Mi(i) "as" cm.fuzzcheck_traits_Mutator "<" Ti(i) "> >::ArbitraryStep "
, separator: ",")
">;
type UnmutateToken = UnmutateToken <"
join_ts!(0..nbr_elements, i,
"<" Mi(i) "as" cm.fuzzcheck_traits_Mutator "<" Ti(i) "> >::UnmutateToken "
, separator: ",")
">;
#[no_coverage]
fn default_arbitrary_step(&self) -> Self::ArbitraryStep {
Self::ArbitraryStep {"
join_ts!(0..nbr_elements, i,
ti(i) ": self." mutator_i(i) ".default_arbitrary_step()"
, separator: ",")
"}
}
#[no_coverage]
fn max_complexity(&self) -> f64 {"
join_ts!(0..nbr_elements, i,
"self." mutator_i(i) ".max_complexity()"
, separator: "+")
"}
#[no_coverage]
fn min_complexity(&self) -> f64 {"
join_ts!(0..nbr_elements, i,
"self." mutator_i(i) ".min_complexity()"
, separator: "+")
"}
#[no_coverage]
fn complexity<'a>(&'a self, _value: " tuple_ref ", cache: &'a Self::Cache) -> f64 {
cache.cplx
}
#[no_coverage]
fn validate_value<'a>(&'a self, value: " tuple_ref ") -> " cm.Option "<(Self::Cache, Self::MutationStep)> {"
join_ts!(0..nbr_elements, i,
"let (" ident!("c" i) ", " ident!("s" i) ") = self." mutator_i(i) ".validate_value(value." i ")?;"
)
join_ts!(0..nbr_elements, i,
"let" ident!("cplx_" i) " = self." mutator_i(i) ".complexity(value." i ", &" ident!("c" i) ");"
)
"let sum_cplx = "
join_ts!(0..nbr_elements, i,
ident!("cplx_" i)
, separator: "+") ";
let mut probabilities = vec!["
join_ts!(0..nbr_elements, i,
"10. +" ident!("cplx_" i)
, separator: ",") "
];
let sum_prob = probabilities.iter().sum::<f64>();
probabilities.iter_mut().for_each(#[no_coverage] |c| *c /= sum_prob);
let vose_alias = " cm.VoseAlias "::new(probabilities);
let step = Self::MutationStep {"
join_ts!(0..nbr_elements, i, ti(i) ":" ident!("s" i) ",")
"inner: vec![" join_ts!(0..nbr_elements, i, "InnerMutationStep::" Ti(i), separator: ",") "] ,
vose_alias: Some(vose_alias.clone())
};
let cache = Self::Cache {"
join_ts!(0..nbr_elements, i, ti(i) ":" ident!("c" i) ",")
"cplx: sum_cplx,
vose_alias,
};
" cm.Some "((cache, step))
}
#[no_coverage]
fn ordered_arbitrary(
&self,
step: &mut Self::ArbitraryStep,
max_cplx: f64,
) -> " cm.Option "<(T, f64)> {
if max_cplx < <Self as" cm.TupleMutator "<T , " cm.TupleN_ident "<" tuple_type_params "> > >::min_complexity(self) {
return " cm.None "
            }
            " // TODO: actually write something that makes sense for ordered_arbitrary here
cm.Some " (self.random_arbitrary(max_cplx))
}
#[no_coverage]
fn random_arbitrary(&self, max_cplx: f64) -> (T, f64) {"
join_ts!(0..nbr_elements, i,
"let mut" ti_value(i) ":" cm.Option "<_> =" cm.None ";"
)
"let mut indices = ( 0 .." nbr_elements ").collect::<" cm.Vec "<_>>();"
"self.rng.shuffle(&mut indices);"
"let mut sum_cplx = 0.0;
for idx in indices.iter() {
match idx {"
join_ts!(0..nbr_elements, i,
i "=> {
let (value, cplx) = self." mutator_i(i) ".random_arbitrary(max_cplx - sum_cplx);
" ti_value(i) " = Some(value);
sum_cplx += cplx;
}"
)
"_ => unreachable!() ,
}
}
(
T::new(
("
join_ts!(0..nbr_elements, i,
ti_value(i) ".unwrap(),"
)
")
),
sum_cplx,
)
}
#[no_coverage]
fn ordered_mutate<'a>(
&'a self,
value: " tuple_mut ",
cache: &'a mut Self::Cache,
step: &'a mut Self::MutationStep,
max_cplx: f64,
) -> " cm.Option "<(Self::UnmutateToken, f64)> {
if max_cplx < <Self as" cm.TupleMutator "<T , " cm.TupleN_ident "<" tuple_type_params "> > >::min_complexity(self) { return " cm.None " }
if step.inner.is_empty() || step.vose_alias.is_none() {
return " cm.None ";
}
let vose_alias = step.vose_alias.as_ref().unwrap();
let step_idx = vose_alias.sample();
let current_cplx = " SelfAsTupleMutator "::complexity(self, " TupleNAsRefTypes "::get_ref_from_mut(&value), cache);
let inner_step_to_remove: usize;
match step.inner[step_idx] {"
join_ts!(0..nbr_elements, i,
"InnerMutationStep::" Ti(i) "=> {
let old_field_cplx = self." mutator_i(i) ".complexity(value." i ", &cache." ti(i) ");
let max_field_cplx = max_cplx - current_cplx + old_field_cplx;
if let " cm.Some "((token, new_field_cplx)) =
self." mutator_i(i) "
.ordered_mutate(value." i ", &mut cache." ti(i) ", &mut step." ti(i) ", max_field_cplx)
{
return " cm.Some "((Self::UnmutateToken {
" ti(i) ": " cm.Some "(token),
..Self::UnmutateToken::default()
}, current_cplx - old_field_cplx + new_field_cplx));
} else {
inner_step_to_remove = step_idx;
}
}"
)"
}
let mut prob = vose_alias.original_probabilities.clone();
prob[inner_step_to_remove] = 0.0;
let sum = prob.iter().sum::<f64>();
if sum == 0.0 {
step.vose_alias = " cm.None ";
} else {
prob.iter_mut().for_each(#[no_coverage] |c| *c /= sum );
step.vose_alias = " cm.Some "(" cm.VoseAlias "::new(prob));
}
" SelfAsTupleMutator "::ordered_mutate(self, value, cache, step, max_cplx)
}
#[no_coverage]
fn random_mutate<'a>(&'a self, value: " tuple_mut ", cache: &'a mut Self::Cache, max_cplx: f64, ) -> (Self::UnmutateToken, f64) {
let current_cplx = " SelfAsTupleMutator "::complexity(self, " TupleNAsRefTypes "::get_ref_from_mut(&value), cache);
match cache.vose_alias.sample() {"
join_ts!(0..nbr_elements, i,
i "=> {
let old_field_cplx = self." mutator_i(i) ".complexity(value." i ", &cache." ti(i) ");
let max_field_cplx = max_cplx - current_cplx + old_field_cplx;
let (token, new_field_cplx) = self." mutator_i(i) "
.random_mutate(value." i ", &mut cache." ti(i) ", max_field_cplx) ;
return (Self::UnmutateToken {
" ti(i) ": " cm.Some "(token),
..Self::UnmutateToken::default()
}, current_cplx - old_field_cplx + new_field_cplx);
}"
)
"_ => unreachable!() ,
}
}
#[no_coverage]
fn unmutate<'a>(&'a self, value: " tuple_mut ", cache: &'a mut Self::Cache, t: Self::UnmutateToken) {"
join_ts!(0..nbr_elements, i,
"if let" cm.Some "(subtoken) = t." ti(i) "{
self. " mutator_i(i) ".unmutate(value." i ", &mut cache." ti(i) ", subtoken);
}"
)
"}
}
"
)
}
#[allow(non_snake_case)]
fn impl_default_mutator_for_tuple(tb: &mut TokenBuilder, nbr_elements: usize) {
let cm = Common::new(nbr_elements);
let Ti = cm.Ti.as_ref();
let tuple_type_params = join_ts!(0..nbr_elements, i, Ti(i), separator: ",");
let TupleN = ts!(ident!("Tuple" nbr_elements) "<" tuple_type_params ">");
let TupleMutatorWrapper = ts!(
cm.TupleMutatorWrapper "<"
cm.TupleNMutator_ident "<"
join_ts!(0..nbr_elements, i,
"<" Ti(i) "as" cm.DefaultMutator "> :: Mutator"
, separator: ",")
">,"
TupleN
">"
);
extend_ts!(tb,
// "
// impl<" type_params ">" cm.Default "for" cm.TupleNMutator_ident "<" mutator_type_params ">
// where"
// join_ts!(0..nbr_elements, i, Mi(i) ":" cm.Default, separator: ",")
// "{
// fn default() -> Self {
// Self::new("
// join_ts!(0..nbr_elements, i,
// "<" Mi(i) "as" cm.Default "> :: default()"
// , separator: ",")
// ")
// }
// }
"
impl<" tuple_type_params ">" cm.DefaultMutator "for (" tuple_type_params ",)
where" join_ts!(0..nbr_elements, i, Ti(i) ":" cm.DefaultMutator "+ 'static", separator: ",")
"{
type Mutator = " TupleMutatorWrapper ";
#[no_coverage]
fn default_mutator() -> Self::Mutator {
Self::Mutator::new(" cm.TupleNMutator_ident "::new("
join_ts!(0..nbr_elements, i,
"<" Ti(i) "as" cm.DefaultMutator "> :: default_mutator()"
, separator: ",")
"))
}
}"
    )
}
relevance.py | # -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
"""
Contains a feature selection method that evaluates the importance of the different extracted features. To do so,
for every feature the influence on the target is evaluated by a univariate test and the p-value is calculated.
The methods that calculate the p-values are called feature selectors.
Afterwards, the Benjamini Hochberg procedure, a multiple testing procedure, decides which features to keep and
which to cut off (solely based on the p-values).
"""
from multiprocessing import Pool
import warnings
import numpy as np
import pandas as pd
from functools import partial, reduce
from tsfresh import defaults
from tsfresh.feature_selection.benjamini_hochberg_test import benjamini_hochberg_test
from tsfresh.feature_selection.significance_tests import target_binary_feature_real_test, \
target_real_feature_binary_test, target_real_feature_real_test, target_binary_feature_binary_test
from tsfresh.utilities.distribution import initialize_warnings_in_workers
def calculate_relevance_table(X, y, ml_task='auto', n_jobs=defaults.N_PROCESSES,
show_warnings=defaults.SHOW_WARNINGS, chunksize=defaults.CHUNKSIZE,
test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE,
test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE,
test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE,
test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE,
fdr_level=defaults.FDR_LEVEL, hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT):
"""
Calculate the relevance table for the features contained in feature matrix `X` with respect to target vector `y`.
The relevance table is calculated for the intended machine learning task `ml_task`.
To accomplish this for each feature from the input pandas.DataFrame an univariate feature significance test
is conducted. Those tests generate p values that are then evaluated by the Benjamini Hochberg procedure to
decide which features to keep and which to delete.
We are testing
:math:`H_0` = the Feature is not relevant and should not be added
against
:math:`H_1` = the Feature is relevant and should be kept
or in other words
:math:`H_0` = Target and Feature are independent / the Feature has no influence on the target
:math:`H_1` = Target and Feature are associated / dependent
When the target is binary this becomes
:math:`H_0 = \\left( F_{\\text{target}=1} = F_{\\text{target}=0} \\right)`
:math:`H_1 = \\left( F_{\\text{target}=1} \\neq F_{\\text{target}=0} \\right)`
    Where :math:`F` is the distribution of the feature.
In the same way we can state the hypothesis when the feature is binary
:math:`H_0 = \\left( T_{\\text{feature}=1} = T_{\\text{feature}=0} \\right)`
:math:`H_1 = \\left( T_{\\text{feature}=1} \\neq T_{\\text{feature}=0} \\right)`
Here :math:`T` is the distribution of the target.
TODO: And for real valued?
:param X: Feature matrix in the format mentioned before which will be reduced to only the relevant features.
It can contain both binary or real-valued features at the same time.
:type X: pandas.DataFrame
:param y: Target vector which is needed to test which features are relevant. Can be binary or real-valued.
:type y: pandas.Series or numpy.ndarray
:param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`.
Defaults to `'auto'`, meaning the intended task is inferred from `y`.
                    If `y` has a boolean, integer or object dtype, the task is assumed to be classification,
else regression.
:type ml_task: str
:param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature
(currently unused)
:type test_for_binary_target_binary_feature: str
:param test_for_binary_target_real_feature: Which test to be used for binary target, real feature
:type test_for_binary_target_real_feature: str
:param test_for_real_target_binary_feature: Which test to be used for real target, binary feature (currently unused)
:type test_for_real_target_binary_feature: str
:param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused)
:type test_for_real_target_real_feature: str
:param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant
features among all created features.
:type fdr_level: float
:param hypotheses_independent: Can the significance of the features be assumed to be independent?
Normally, this should be set to False as the features are never
independent (e.g. mean and median)
:type hypotheses_independent: bool
:param n_jobs: Number of processes to use during the p-value calculation
:type n_jobs: int
:param show_warnings: Show warnings during the p-value calculation (needed for debugging of calculators).
:type show_warnings: bool
    :param chunksize: The size of one chunk that is submitted to the worker
        process for the parallelisation. Here one chunk comprises a group of
        feature columns whose p-values are calculated in a single task. If you
        set the chunksize to 10, one task computes the p-values for 10
        features. If it is set to None, heuristics are used to find the
        optimal chunksize.
:type chunksize: None or int
:return: A pandas.DataFrame with each column of the input DataFrame X as index with information on the significance
of this particular feature. The DataFrame has the columns
"Feature",
"type" (binary, real or const),
"p_value" (the significance of this feature as a p-value, lower means more significant)
"relevant" (True if the Benjamini Hochberg procedure rejected the null hypothesis [the feature is
not relevant] for this feature)
:rtype: pandas.DataFrame
"""
if ml_task not in ['auto', 'classification', 'regression']:
raise ValueError('ml_task must be one of: \'auto\', \'classification\', \'regression\'')
elif ml_task == 'auto':
ml_task = infer_ml_task(y)
with warnings.catch_warnings():
if not show_warnings:
warnings.simplefilter("ignore")
else:
warnings.simplefilter("default")
if n_jobs == 0:
map_function = map
else:
pool = Pool(processes=n_jobs, initializer=initialize_warnings_in_workers, initargs=(show_warnings,))
map_function = partial(pool.map, chunksize=chunksize)
relevance_table = pd.DataFrame(index=pd.Series(X.columns, name='feature'))
relevance_table['feature'] = relevance_table.index
relevance_table['type'] = pd.Series(
map_function(get_feature_type, [X[feature] for feature in relevance_table.index]),
index=relevance_table.index
)
table_real = relevance_table[relevance_table.type == 'real'].copy()
table_binary = relevance_table[relevance_table.type == 'binary'].copy()
table_const = relevance_table[relevance_table.type == 'constant'].copy()
table_const['p_value'] = np.NaN
table_const['relevant'] = False
if not table_const.empty:
warnings.warn("[test_feature_significance] Constant features: {}"
.format(", ".join(table_const.feature)), RuntimeWarning)
if len(table_const) == len(relevance_table):
if n_jobs != 0:
pool.close()
pool.terminate()
pool.join()
return table_const
if ml_task == 'classification':
tables = []
for label in y.unique():
_test_real_feature = partial(target_binary_feature_real_test, y=(y == label),
test=test_for_binary_target_real_feature)
_test_binary_feature = partial(target_binary_feature_binary_test, y=(y == label))
tmp = _calculate_relevance_table_for_implicit_target(
table_real, table_binary, X, _test_real_feature, _test_binary_feature, hypotheses_independent,
fdr_level, map_function
)
tables.append(tmp)
relevance_table = combine_relevance_tables(tables)
elif ml_task == 'regression':
_test_real_feature = partial(target_real_feature_real_test, y=y)
_test_binary_feature = partial(target_real_feature_binary_test, y=y)
relevance_table = _calculate_relevance_table_for_implicit_target(
table_real, table_binary, X, _test_real_feature, _test_binary_feature, hypotheses_independent,
fdr_level, map_function
)
if n_jobs != 0:
pool.close()
pool.terminate()
pool.join()
relevance_table = pd.concat([relevance_table, table_const], axis=0)
if sum(relevance_table['relevant']) == 0:
        warnings.warn(
            "No feature was found relevant for {} for fdr level = {} (which corresponds to the maximal "
            "expected percentage of irrelevant features). Consider using a higher fdr level or adding "
            "other features.".format(ml_task, fdr_level), RuntimeWarning)
return relevance_table
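# Usage sketch (illustrative, not part of the original module; the data below
# is made up, only the signature of calculate_relevance_table above is real):
#
#     import pandas as pd
#     X = pd.DataFrame({'a': [0.1, 0.9, 0.2, 0.8], 'b': [1.0, 1.0, 1.0, 1.0]})
#     y = pd.Series([0, 1, 0, 1])
#     table = calculate_relevance_table(X, y, n_jobs=0)  # n_jobs=0: no worker pool
#     # table has one row per column of X with 'feature', 'type', 'p_value'
#     # and 'relevant' columns; 'b' is reported as a constant feature.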
def _calculate_relevance_table_for_implicit_target(table_real, table_binary, X, test_real_feature, test_binary_feature,
hypotheses_independent, fdr_level, map_function):
table_real['p_value'] = pd.Series(
map_function(test_real_feature, [X[feature] for feature in table_real.index]),
index=table_real.index
)
table_binary['p_value'] = pd.Series(
map_function(test_binary_feature, [X[feature] for feature in table_binary.index]),
index=table_binary.index
)
relevance_table = pd.concat([table_real, table_binary])
return benjamini_hochberg_test(relevance_table, hypotheses_independent, fdr_level)
def infer_ml_task(y):
"""
Infer the machine learning task to select for.
The result will be either `'regression'` or `'classification'`.
If the target vector only consists of integer typed values or objects, we assume the task is `'classification'`.
Else `'regression'`.
:param y: The target vector y.
:type y: pandas.Series
:return: 'classification' or 'regression'
:rtype: str
"""
    if y.dtype.kind in np.typecodes['AllInteger'] or y.dtype == object:
ml_task = 'classification'
else:
ml_task = 'regression'
return ml_task
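# For example (illustrative): integer-typed targets are treated as
# classification targets, float-typed ones as regression targets.
#
#     infer_ml_task(pd.Series([0, 1, 1, 0]))     # -> 'classification'
#     infer_ml_task(pd.Series([0.5, 1.2, 3.4]))  # -> 'regression'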
def combine_relevance_tables(relevance_tables):
"""
Create a combined relevance table out of a list of relevance tables,
aggregating the p-values and the relevances.
:param relevance_tables: A list of relevance tables
:type relevance_tables: List[pd.DataFrame]
:return: The combined relevance table
:rtype: pandas.DataFrame
"""
def _combine(a, b):
a.relevant |= b.relevant
a.p_value = a.p_value.combine(b.p_value, min, 1)
return a
return reduce(_combine, relevance_tables)
def get_feature_type(feature_column):
"""
For a given feature, determine if it is real, binary or constant.
Here binary means that only two unique values occur in the feature.
:param feature_column: The feature column
:type feature_column: pandas.Series
:return: 'constant', 'binary' or 'real'
"""
n_unique_values = len(set(feature_column.values))
if n_unique_values == 1:
return 'constant'
elif n_unique_values == 2:
return 'binary'
    else:
        return 'real'
mod.rs | use chrono::prelude::*;
use color_eyre::eyre::{eyre, Result, WrapErr};
use glob::glob;
use std::{cmp::Ordering, fs};
pub mod frontmatter;
#[derive(Eq, PartialEq, Debug, Clone)]
pub struct Post {
pub front_matter: frontmatter::Data,
pub link: String,
pub body: String,
pub body_html: String,
pub date: DateTime<FixedOffset>,
pub mentions: Vec<mi::WebMention>,
}
impl Into<jsonfeed::Item> for Post {
fn into(self) -> jsonfeed::Item {
let mut result = jsonfeed::Item::builder()
.title(self.front_matter.title)
.content_html(self.body_html)
.content_text(self.body)
.id(format!("https://christine.website/{}", self.link))
.url(format!("https://christine.website/{}", self.link))
.date_published(self.date.to_rfc3339())
.author(
jsonfeed::Author::new()
.name("Christine Dodrill")
.url("https://christine.website")
.avatar("https://christine.website/static/img/avatar.png"),
);
let mut tags: Vec<String> = vec![];
if let Some(series) = self.front_matter.series {
tags.push(series);
}
if let Some(mut meta_tags) = self.front_matter.tags {
tags.append(&mut meta_tags);
}
if tags.len() != 0 {
result = result.tags(tags);
}
if let Some(image_url) = self.front_matter.image {
result = result.image(image_url);
}
result.build().unwrap()
}
}
impl Ord for Post {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(&other).unwrap()
}
}
impl PartialOrd for Post {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.date.cmp(&other.date))
}
}
impl Post {
pub fn detri(&self) -> String {
self.date.format("M%m %d %Y").to_string()
}
}
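// Illustrative (not in the original source): with the format string above,
// a post dated 2021-03-02 renders as "M03 02 2021".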
pub async fn load(dir: &str, mi: Option<&mi::Client>) -> Result<Vec<Post>> {
let mut result: Vec<Post> = vec![];
for path in glob(&format!("{}/*.markdown", dir))?.filter_map(Result::ok) {
log::debug!("loading {:?}", path);
let body =
fs::read_to_string(path.clone()).wrap_err_with(|| format!("can't read {:?}", path))?;
let (fm, content_offset) = frontmatter::Data::parse(body.clone().as_str())
.wrap_err_with(|| format!("can't parse frontmatter of {:?}", path))?;
let markup = &body[content_offset..];
let date = NaiveDate::parse_from_str(&fm.clone().date, "%Y-%m-%d")
.map_err(|why| eyre!("error parsing date in {:?}: {}", path, why))?;
let link = format!("{}/{}", dir, path.file_stem().unwrap().to_str().unwrap());
let mentions: Vec<mi::WebMention> = match mi {
None => vec![],
Some(mi) => mi
.mentioners(format!("https://christine.website/{}", link))
.await
.map_err(|why| tracing::error!("error: can't load mentions for {}: {}", link, why))
.unwrap_or(vec![]),
};
result.push(Post {
front_matter: fm,
link: link,
body: markup.to_string(),
body_html: crate::app::markdown::render(&markup)
.wrap_err_with(|| format!("can't parse markdown for {:?}", path))?,
date: {
DateTime::<Utc>::from_utc(
NaiveDateTime::new(date, NaiveTime::from_hms(0, 0, 0)),
Utc,
)
.with_timezone(&Utc)
.into()
},
mentions: mentions,
})
    }
    if result.len() == 0 {
Err(eyre!("no posts loaded"))
} else {
result.sort();
result.reverse();
Ok(result)
}
}
#[cfg(test)]
mod tests {
use super::*;
use color_eyre::eyre::Result;
#[tokio::test]
async fn blog() {
let _ = pretty_env_logger::try_init();
load("blog", None).await.expect("posts to load");
}
#[tokio::test]
async fn gallery() -> Result<()> {
let _ = pretty_env_logger::try_init();
load("gallery", None).await?;
Ok(())
}
#[tokio::test]
async fn talks() -> Result<()> {
let _ = pretty_env_logger::try_init();
load("talks", None).await?;
Ok(())
}
} | |
assignments.go | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements initialization and assignment checks.
package types
import (
"fmt"
"go/ast"
"strings"
)
// assignment reports whether x can be assigned to a variable of type T,
// if necessary by attempting to convert untyped values to the appropriate
// type. context describes the context in which the assignment takes place.
// Use T == nil to indicate assignment to an untyped blank identifier.
// x.mode is set to invalid if the assignment failed.
func (check *Checker) assignment(x *operand, T Type, context string) {
check.singleValue(x)
switch x.mode {
case invalid:
return // error reported before
case constant_, variable, mapindex, value, commaok, commaerr:
// ok
default:
// we may get here because of other problems (issue #39634, crash 12)
check.errorf(x, 0, "cannot assign %s to %s in %s", x, T, context)
return
}
if isUntyped(x.typ) {
target := T
// spec: "If an untyped constant is assigned to a variable of interface
// type or the blank identifier, the constant is first converted to type
// bool, rune, int, float64, complex128 or string respectively, depending
// on whether the value is a boolean, rune, integer, floating-point,
// complex, or string constant."
if T == nil || IsInterface(T) && !isTypeParam(T) {
if T == nil && x.typ == Typ[UntypedNil] {
check.errorf(x, _UntypedNil, "use of untyped nil in %s", context)
x.mode = invalid
return
}
target = Default(x.typ)
}
newType, val, code := check.implicitTypeAndValue(x, target)
if code != 0 {
msg := check.sprintf("cannot use %s as %s value in %s", x, target, context)
switch code {
case _TruncatedFloat:
msg += " (truncated)"
case _NumericOverflow:
msg += " (overflows)"
default:
code = _IncompatibleAssign
}
check.error(x, code, msg)
x.mode = invalid
return
}
if val != nil {
x.val = val
check.updateExprVal(x.expr, val)
}
if newType != x.typ {
x.typ = newType
check.updateExprType(x.expr, newType, false)
}
}
// A generic (non-instantiated) function value cannot be assigned to a variable.
if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 {
check.errorf(x, _WrongTypeArgCount, "cannot use generic function %s without instantiation in %s", x, context)
}
// spec: "If a left-hand side is the blank identifier, any typed or
// non-constant value except for the predeclared identifier nil may
// be assigned to it."
if T == nil {
return
}
reason := ""
if ok, code := x.assignableTo(check, T, &reason); !ok {
if compilerErrorMessages {
if reason != "" {
check.errorf(x, code, "cannot use %s as type %s in %s:\n\t%s", x, T, context, reason)
} else {
check.errorf(x, code, "cannot use %s as type %s in %s", x, T, context)
}
} else {
if reason != "" {
check.errorf(x, code, "cannot use %s as %s value in %s: %s", x, T, context, reason)
} else {
check.errorf(x, code, "cannot use %s as %s value in %s", x, T, context)
}
}
x.mode = invalid
}
}
func (check *Checker) initConst(lhs *Const, x *operand) {
if x.mode == invalid || x.typ == Typ[Invalid] || lhs.typ == Typ[Invalid] {
if lhs.typ == nil {
lhs.typ = Typ[Invalid]
}
return
}
// rhs must be a constant
if x.mode != constant_ {
check.errorf(x, _InvalidConstInit, "%s is not constant", x)
if lhs.typ == nil {
lhs.typ = Typ[Invalid]
}
return
}
assert(isConstType(x.typ))
// If the lhs doesn't have a type yet, use the type of x.
if lhs.typ == nil {
lhs.typ = x.typ
}
check.assignment(x, lhs.typ, "constant declaration")
if x.mode == invalid {
return
}
lhs.val = x.val
}
func (check *Checker) initVar(lhs *Var, x *operand, context string) Type {
if x.mode == invalid || x.typ == Typ[Invalid] || lhs.typ == Typ[Invalid] {
if lhs.typ == nil {
lhs.typ = Typ[Invalid]
}
return nil
}
// If the lhs doesn't have a type yet, use the type of x.
if lhs.typ == nil {
typ := x.typ
if isUntyped(typ) {
// convert untyped types to default types
if typ == Typ[UntypedNil] {
check.errorf(x, _UntypedNil, "use of untyped nil in %s", context)
lhs.typ = Typ[Invalid]
return nil
}
typ = Default(typ)
}
lhs.typ = typ
}
check.assignment(x, lhs.typ, context)
if x.mode == invalid {
return nil
}
return x.typ
}
func (check *Checker) assignVar(lhs ast.Expr, x *operand) Type {
if x.mode == invalid || x.typ == Typ[Invalid] {
check.useLHS(lhs)
return nil
}
// Determine if the lhs is a (possibly parenthesized) identifier.
ident, _ := unparen(lhs).(*ast.Ident)
// Don't evaluate lhs if it is the blank identifier.
if ident != nil && ident.Name == "_" {
check.recordDef(ident, nil)
check.assignment(x, nil, "assignment to _ identifier")
if x.mode == invalid {
return nil
}
return x.typ
}
// If the lhs is an identifier denoting a variable v, this assignment
// is not a 'use' of v. Remember current value of v.used and restore
// after evaluating the lhs via check.expr.
var v *Var
var v_used bool
if ident != nil {
if obj := check.lookup(ident.Name); obj != nil {
// It's ok to mark non-local variables, but ignore variables
// from other packages to avoid potential race conditions with
// dot-imported variables.
if w, _ := obj.(*Var); w != nil && w.pkg == check.pkg {
v = w
v_used = v.used
}
}
}
var z operand
check.expr(&z, lhs)
if v != nil {
v.used = v_used // restore v.used
}
if z.mode == invalid || z.typ == Typ[Invalid] {
return nil
}
// spec: "Each left-hand side operand must be addressable, a map index
// expression, or the blank identifier. Operands may be parenthesized."
switch z.mode {
case invalid:
return nil
case variable, mapindex:
// ok
default:
if sel, ok := z.expr.(*ast.SelectorExpr); ok {
var op operand
check.expr(&op, sel.X)
if op.mode == mapindex {
check.errorf(&z, _UnaddressableFieldAssign, "cannot assign to struct field %s in map", ExprString(z.expr))
return nil
}
}
check.errorf(&z, _UnassignableOperand, "cannot assign to %s", &z)
return nil
}
check.assignment(x, z.typ, "assignment")
if x.mode == invalid {
return nil
}
return x.typ
}
// operandTypes returns the list of types for the given operands.
func operandTypes(list []*operand) (res []Type) {
	for _, x := range list {
		res = append(res, x.typ)
	}
	return res
}
// varTypes returns the list of types for the given variables.
func varTypes(list []*Var) (res []Type) {
for _, x := range list {
res = append(res, x.typ)
}
return res
}
// typesSummary returns a string of the form "(t1, t2, ...)" where the
// ti's are user-friendly string representations for the given types.
// If variadic is set and the last type is a slice, its string is of
// the form "...E" where E is the slice's element type.
func (check *Checker) typesSummary(list []Type, variadic bool) string {
var res []string
for i, t := range list {
var s string
switch {
case t == nil:
fallthrough // should not happen but be cautious
case t == Typ[Invalid]:
s = "<T>"
case isUntyped(t):
if isNumeric(t) {
// Do not imply a specific type requirement:
// "have number, want float64" is better than
// "have untyped int, want float64" or
// "have int, want float64".
s = "number"
} else {
// If we don't have a number, omit the "untyped" qualifier
// for compactness.
s = strings.Replace(t.(*Basic).name, "untyped ", "", -1)
}
case variadic && i == len(list)-1:
s = check.sprintf("...%s", t.(*Slice).elem)
}
if s == "" {
s = check.sprintf("%s", t)
}
res = append(res, s)
}
return "(" + strings.Join(res, ", ") + ")"
}
func (check *Checker) assignError(rhs []ast.Expr, nvars, nvals int) {
measure := func(x int, unit string) string {
s := fmt.Sprintf("%d %s", x, unit)
if x != 1 {
s += "s"
}
return s
}
vars := measure(nvars, "variable")
vals := measure(nvals, "value")
rhs0 := rhs[0]
if len(rhs) == 1 {
if call, _ := unparen(rhs0).(*ast.CallExpr); call != nil {
check.errorf(rhs0, _WrongAssignCount, "assignment mismatch: %s but %s returns %s", vars, call.Fun, vals)
return
}
}
check.errorf(rhs0, _WrongAssignCount, "assignment mismatch: %s but %s", vars, vals)
}
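// Illustrative (not in the original source): for `a, b = f()` where f returns
// three values, the message reads
// "assignment mismatch: 2 variables but f returns 3 values".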
// If returnStmt != nil, initVars is called to type-check the assignment
// of return expressions, and returnStmt is the return statement.
func (check *Checker) initVars(lhs []*Var, origRHS []ast.Expr, returnStmt ast.Stmt) {
rhs, commaOk := check.exprList(origRHS, len(lhs) == 2 && returnStmt == nil)
if len(lhs) != len(rhs) {
// invalidate lhs
for _, obj := range lhs {
obj.used = true // avoid declared but not used errors
if obj.typ == nil {
obj.typ = Typ[Invalid]
}
}
// don't report an error if we already reported one
for _, x := range rhs {
if x.mode == invalid {
return
}
}
if returnStmt != nil {
var at positioner = returnStmt
qualifier := "not enough"
if len(rhs) > len(lhs) {
at = rhs[len(lhs)].expr // report at first extra value
qualifier = "too many"
} else if len(rhs) > 0 {
at = rhs[len(rhs)-1].expr // report at last value
}
check.errorf(at, _WrongResultCount, "%s return values\n\thave %s\n\twant %s",
qualifier,
check.typesSummary(operandTypes(rhs), false),
check.typesSummary(varTypes(lhs), false),
)
return
}
if compilerErrorMessages {
check.assignError(origRHS, len(lhs), len(rhs))
} else {
check.errorf(rhs[0], _WrongAssignCount, "cannot initialize %d variables with %d values", len(lhs), len(rhs))
}
return
}
context := "assignment"
if returnStmt != nil {
context = "return statement"
}
if commaOk {
var a [2]Type
for i := range a {
a[i] = check.initVar(lhs[i], rhs[i], context)
}
check.recordCommaOkTypes(origRHS[0], a)
return
}
for i, lhs := range lhs {
check.initVar(lhs, rhs[i], context)
}
}
func (check *Checker) assignVars(lhs, origRHS []ast.Expr) {
rhs, commaOk := check.exprList(origRHS, len(lhs) == 2)
if len(lhs) != len(rhs) {
check.useLHS(lhs...)
// don't report an error if we already reported one
for _, x := range rhs {
if x.mode == invalid {
return
}
}
if compilerErrorMessages {
check.assignError(origRHS, len(lhs), len(rhs))
} else {
check.errorf(rhs[0], _WrongAssignCount, "cannot assign %d values to %d variables", len(rhs), len(lhs))
}
return
}
if commaOk {
var a [2]Type
for i := range a {
a[i] = check.assignVar(lhs[i], rhs[i])
}
check.recordCommaOkTypes(origRHS[0], a)
return
}
for i, lhs := range lhs {
check.assignVar(lhs, rhs[i])
}
}
func (check *Checker) shortVarDecl(pos positioner, lhs, rhs []ast.Expr) {
top := len(check.delayed)
scope := check.scope
// collect lhs variables
seen := make(map[string]bool, len(lhs))
lhsVars := make([]*Var, len(lhs))
newVars := make([]*Var, 0, len(lhs))
hasErr := false
for i, lhs := range lhs {
ident, _ := lhs.(*ast.Ident)
if ident == nil {
check.useLHS(lhs)
// TODO(rFindley) this is redundant with a parser error. Consider omitting?
check.errorf(lhs, _BadDecl, "non-name %s on left side of :=", lhs)
hasErr = true
continue
}
name := ident.Name
if name != "_" {
if seen[name] {
check.errorf(lhs, _RepeatedDecl, "%s repeated on left side of :=", lhs)
hasErr = true
continue
}
seen[name] = true
}
// Use the correct obj if the ident is redeclared. The
// variable's scope starts after the declaration; so we
// must use Scope.Lookup here and call Scope.Insert
// (via check.declare) later.
if alt := scope.Lookup(name); alt != nil {
check.recordUse(ident, alt)
// redeclared object must be a variable
if obj, _ := alt.(*Var); obj != nil {
lhsVars[i] = obj
} else {
check.errorf(lhs, _UnassignableOperand, "cannot assign to %s", lhs)
hasErr = true
}
continue
}
// declare new variable
obj := NewVar(ident.Pos(), check.pkg, name, nil)
lhsVars[i] = obj
if name != "_" {
newVars = append(newVars, obj)
}
check.recordDef(ident, obj)
}
// create dummy variables where the lhs is invalid
for i, obj := range lhsVars {
if obj == nil {
lhsVars[i] = NewVar(lhs[i].Pos(), check.pkg, "_", nil)
}
}
check.initVars(lhsVars, rhs, nil)
// process function literals in rhs expressions before scope changes
check.processDelayed(top)
if len(newVars) == 0 && !hasErr {
check.softErrorf(pos, _NoNewVar, "no new variables on left side of :=")
return
}
// declare new variables
// spec: "The scope of a constant or variable identifier declared inside
// a function begins at the end of the ConstSpec or VarSpec (ShortVarDecl
// for short variable declarations) and ends at the end of the innermost
// containing block."
scopePos := rhs[len(rhs)-1].End()
for _, obj := range newVars {
		check.declare(scope, nil, obj, scopePos) // id = nil: recordDef already called
	}
}
mio5.py | ''' Classes for read / write of matlab (TM) 5 files
The matfile specification last found here:
https://www.mathworks.com/access/helpdesk/help/pdf_doc/matlab/matfile_format.pdf
(as of December 5 2008)
'''
'''
=================================
Note on functions and mat files
=================================
The document above does not give any hints as to the storage of matlab
function handles, or anonymous function handles. I had, therefore, to
guess the format of matlab arrays of ``mxFUNCTION_CLASS`` and
``mxOPAQUE_CLASS`` by looking at example mat files.
``mxFUNCTION_CLASS`` stores all types of matlab functions. It seems to
contain a struct matrix with a set pattern of fields. For anonymous
functions, a sub-field of one of these fields seems to contain the
well-named ``mxOPAQUE_CLASS``. This seems to contain:
* array flags as for any matlab matrix
* 3 int8 strings
* a matrix
It seems that whenever the mat file contains a ``mxOPAQUE_CLASS``
instance, there is also an un-named matrix (name == '') at the end of
the mat file. I'll call this the ``__function_workspace__`` matrix.
When I saved two anonymous functions in a mat file, or appended another
anonymous function to the mat file, there was still only one
``__function_workspace__`` un-named matrix at the end, but larger than
that for a mat file with a single anonymous function, suggesting that
the workspaces for the two functions had been merged.
The ``__function_workspace__`` matrix appears to be of double class
(``mxCLASS_DOUBLE``), but stored as uint8, the memory for which is in
the format of a mini .mat file, without the first 124 bytes of the file
header (the description and the subsystem_offset), but with the version
U2 bytes, and the S2 endian test bytes. There follow 4 zero bytes,
presumably for 8 byte padding, and then a series of ``miMATRIX``
entries, as in a standard mat file. The ``miMATRIX`` entries appear to
be series of un-named (name == '') matrices, and may also contain arrays
of this same mini-mat format.
I guess that:
* saving an anonymous function back to a mat file will need the
associated ``__function_workspace__`` matrix saved as well for the
anonymous function to work correctly.
* appending to a mat file that has a ``__function_workspace__`` would
involve first pulling off this workspace, appending, checking whether
there were any more anonymous functions appended, and then somehow
merging the relevant workspaces, and saving at the end of the mat
file.
The mat files I was playing with are in ``tests/data``:
* sqr.mat
* parabola.mat
* some_functions.mat
See ``tests/test_mio.py:test_mio_funcs.py`` for the debugging
script I was working with.
'''
# Small fragments of current code adapted from matfile.py by Heiko
# Henkelmann; parts of the code for simplify_cells=True adapted from
# http://blog.nephics.com/2019/08/28/better-loadmat-for-scipy/.
import os
import time
import sys
import zlib
from io import BytesIO
import warnings
import numpy as np
from numpy.compat import asbytes, asstr
import scipy.sparse
from .byteordercodes import native_code, swapped_code
from .miobase import (MatFileReader, docfiller, matdims, read_dtype,
arr_to_chars, arr_dtype_number, MatWriteError,
MatReadError, MatReadWarning)
# Reader object for matlab 5 format variables
from .mio5_utils import VarReader5
# Constants and helper objects
from .mio5_params import (MatlabObject, MatlabFunction, MDTYPES, NP_TO_MTYPES,
NP_TO_MXTYPES, miCOMPRESSED, miMATRIX, miINT8,
miUTF8, miUINT32, mxCELL_CLASS, mxSTRUCT_CLASS,
mxOBJECT_CLASS, mxCHAR_CLASS, mxSPARSE_CLASS,
mxDOUBLE_CLASS, mclass_info, mat_struct)
from .streams import ZlibInputStream
def _has_struct(elem):
"""Determine if elem is an array and if first array item is a struct."""
return (isinstance(elem, np.ndarray) and (elem.size > 0) and
isinstance(elem[0], mat_struct))
def _inspect_cell_array(ndarray):
"""Construct lists from cell arrays (loaded as numpy ndarrays), recursing
into items if they contain mat_struct objects."""
elem_list = []
for sub_elem in ndarray:
if isinstance(sub_elem, mat_struct):
elem_list.append(_matstruct_to_dict(sub_elem))
elif _has_struct(sub_elem):
elem_list.append(_inspect_cell_array(sub_elem))
else:
elem_list.append(sub_elem)
return elem_list
def _matstruct_to_dict(matobj):
"""Construct nested dicts from mat_struct objects."""
d = {}
for f in matobj._fieldnames:
elem = matobj.__dict__[f]
if isinstance(elem, mat_struct):
d[f] = _matstruct_to_dict(elem)
elif _has_struct(elem):
d[f] = _inspect_cell_array(elem)
else:
d[f] = elem
return d
def _simplify_cells(d):
"""Convert mat objects in dict to nested dicts."""
for key in d:
if isinstance(d[key], mat_struct):
d[key] = _matstruct_to_dict(d[key])
elif _has_struct(d[key]):
d[key] = _inspect_cell_array(d[key])
return d
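# Illustrative sketch (not part of the original module): with
# simplify_cells=True, a loaded mat_struct such as
#     s._fieldnames == ['name', 'vals'], with s.vals a cell array of mat_struct,
# is returned as plain nested Python containers:
#     {'s': {'name': ..., 'vals': [{...}, {...}]}}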
class MatFile5Reader(MatFileReader):
''' Reader for Mat 5 mat files
Adds the following attribute to base class
uint16_codec - char codec to use for uint16 char arrays
(defaults to system default codec)
    Uses variable reader that has the following standard interface (see
    abstract class in ``miobase``)::
__init__(self, file_reader)
read_header(self)
array_from_header(self)
and added interface::
set_stream(self, stream)
read_full_tag(self)
'''
@docfiller
def __init__(self,
mat_stream,
byte_order=None,
mat_dtype=False,
squeeze_me=False,
chars_as_strings=True,
matlab_compatible=False,
struct_as_record=True,
verify_compressed_data_integrity=True,
uint16_codec=None,
simplify_cells=False):
'''Initializer for matlab 5 file format reader
%(matstream_arg)s
%(load_args)s
%(struct_arg)s
uint16_codec : {None, string}
Set codec to use for uint16 char arrays (e.g., 'utf-8').
Use system default codec if None
'''
super(MatFile5Reader, self).__init__(
mat_stream,
byte_order,
mat_dtype,
squeeze_me,
chars_as_strings,
matlab_compatible,
struct_as_record,
verify_compressed_data_integrity,
simplify_cells)
# Set uint16 codec
if not uint16_codec:
uint16_codec = sys.getdefaultencoding()
self.uint16_codec = uint16_codec
# placeholders for readers - see initialize_read method
self._file_reader = None
self._matrix_reader = None
def guess_byte_order(self):
''' Guess byte order.
Sets stream pointer to 0 '''
self.mat_stream.seek(126)
mi = self.mat_stream.read(2)
self.mat_stream.seek(0)
return mi == b'IM' and '<' or '>'
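        # Illustrative: bytes 126-127 of a mat5 file hold the endian
        # indicator; b'IM' means the file was written little-endian ('<'),
        # anything else (normally b'MI') is treated as big-endian ('>').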
def read_file_header(self):
''' Read in mat 5 file header '''
hdict = {}
hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header']
hdr = read_dtype(self.mat_stream, hdr_dtype)
hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000')
v_major = hdr['version'] >> 8
v_minor = hdr['version'] & 0xFF
hdict['__version__'] = '%d.%d' % (v_major, v_minor)
return hdict
def initialize_read(self):
''' Run when beginning read of variables
Sets up readers from parameters in `self`
'''
# reader for top level stream. We need this extra top-level
# reader because we use the matrix_reader object to contain
# compressed matrices (so they have their own stream)
self._file_reader = VarReader5(self)
# reader for matrix streams
self._matrix_reader = VarReader5(self)
def read_var_header(self):
''' Read header, return header, next position
Header has to define at least .name and .is_global
Parameters
----------
None
Returns
-------
header : object
object that can be passed to self.read_var_array, and that
has attributes .name and .is_global
next_position : int
position in stream of next variable
'''
mdtype, byte_count = self._file_reader.read_full_tag()
if not byte_count > 0:
raise ValueError("Did not read any bytes")
next_pos = self.mat_stream.tell() + byte_count
if mdtype == miCOMPRESSED:
# Make new stream from compressed data
stream = ZlibInputStream(self.mat_stream, byte_count)
self._matrix_reader.set_stream(stream)
check_stream_limit = self.verify_compressed_data_integrity
mdtype, byte_count = self._matrix_reader.read_full_tag()
else:
check_stream_limit = False
self._matrix_reader.set_stream(self.mat_stream)
if not mdtype == miMATRIX:
raise TypeError('Expecting miMATRIX type here, got %d' % mdtype)
header = self._matrix_reader.read_header(check_stream_limit)
return header, next_pos
def read_var_array(self, header, process=True):
''' Read array, given `header`
Parameters
----------
header : header object
object with fields defining variable header
process : {True, False} bool, optional
If True, apply recursive post-processing during loading of
array.
Returns
-------
arr : array
array with post-processing applied or not according to
`process`.
'''
return self._matrix_reader.array_from_header(header, process)
def get_variables(self, variable_names=None):
''' get variables from stream as dictionary
variable_names - optional list of variable names to get
If variable_names is None, then get all variables in file
'''
if isinstance(variable_names, str):
variable_names = [variable_names]
elif variable_names is not None:
variable_names = list(variable_names)
self.mat_stream.seek(0)
# Here we pass all the parameters in self to the reading objects
self.initialize_read()
mdict = self.read_file_header()
mdict['__globals__'] = []
while not self.end_of_stream():
hdr, next_position = self.read_var_header()
name = asstr(hdr.name)
if name in mdict:
warnings.warn('Duplicate variable name "%s" in stream'
' - replacing previous with new\n'
'Consider mio5.varmats_from_mat to split '
'file into single variable files' % name,
MatReadWarning, stacklevel=2)
if name == '':
# can only be a matlab 7 function workspace
name = '__function_workspace__'
# We want to keep this raw because mat_dtype processing
# will break the format (uint8 as mxDOUBLE_CLASS)
process = False
else:
process = True
if variable_names is not None and name not in variable_names:
self.mat_stream.seek(next_position)
continue
try:
res = self.read_var_array(hdr, process)
except MatReadError as err:
warnings.warn(
f'Unreadable variable "{name}", because "{err}"',
Warning, stacklevel=2)
res = f"Read error: {err}"
self.mat_stream.seek(next_position)
mdict[name] = res
if hdr.is_global:
mdict['__globals__'].append(name)
if variable_names is not None:
variable_names.remove(name)
if len(variable_names) == 0:
break
if self.simplify_cells:
return _simplify_cells(mdict)
else:
return mdict
def list_variables(self):
''' list variables from stream '''
self.mat_stream.seek(0)
# Here we pass all the parameters in self to the reading objects
self.initialize_read()
self.read_file_header()
vars = []
while not self.end_of_stream():
hdr, next_position = self.read_var_header()
name = asstr(hdr.name)
if name == '':
# can only be a matlab 7 function workspace
name = '__function_workspace__'
shape = self._matrix_reader.shape_from_header(hdr)
if hdr.is_logical:
info = 'logical'
else:
info = mclass_info.get(hdr.mclass, 'unknown')
vars.append((name, shape, info))
self.mat_stream.seek(next_position)
return vars
def varmats_from_mat(file_obj):
""" Pull variables out of mat 5 file as a sequence of mat file objects
This can be useful with a difficult mat file, containing unreadable
variables. This routine pulls the variables out in raw form and puts them,
unread, back into a file stream for saving or reading. Another use is the
pathological case where there is more than one variable of the same name in
the file; this routine returns the duplicates, whereas the standard reader
will overwrite duplicates in the returned dictionary.
The file pointer in `file_obj` will be undefined. File pointers for the
returned file-like objects are set at 0.
Parameters
----------
file_obj : file-like
file object containing mat file
Returns
-------
named_mats : list
list contains tuples of (name, BytesIO) where BytesIO is a file-like
object containing mat file contents as for a single variable. The
BytesIO contains a string with the original header and a single var. If
``var_file_obj`` is an individual BytesIO instance, then save as a mat
file with something like ``open('test.mat',
'wb').write(var_file_obj.read())``
Examples
--------
    >>> import numpy as np
    >>> import scipy.io
    >>> from io import BytesIO
>>> mat_fileobj = BytesIO()
>>> scipy.io.savemat(mat_fileobj, {'b': np.arange(10), 'a': 'a string'})
>>> varmats = varmats_from_mat(mat_fileobj)
>>> sorted([name for name, str_obj in varmats])
['a', 'b']
"""
rdr = MatFile5Reader(file_obj)
file_obj.seek(0)
# Raw read of top-level file header
hdr_len = MDTYPES[native_code]['dtypes']['file_header'].itemsize
raw_hdr = file_obj.read(hdr_len)
# Initialize variable reading
file_obj.seek(0)
rdr.initialize_read()
rdr.read_file_header()
next_position = file_obj.tell()
named_mats = []
while not rdr.end_of_stream():
start_position = next_position
hdr, next_position = rdr.read_var_header()
name = asstr(hdr.name)
# Read raw variable string
file_obj.seek(start_position)
        byte_count = next_position - start_position
        var_str = file_obj.read(byte_count)
        # write to stringio object
        out_obj = BytesIO()
        out_obj.write(raw_hdr)
        out_obj.write(var_str)
out_obj.seek(0)
named_mats.append((name, out_obj))
return named_mats
class EmptyStructMarker(object):
""" Class to indicate presence of empty matlab struct on output """
def to_writeable(source):
''' Convert input object ``source`` to something we can write
Parameters
----------
source : object
Returns
-------
arr : None or ndarray or EmptyStructMarker
If `source` cannot be converted to something we can write to a matfile,
return None. If `source` is equivalent to an empty dictionary, return
``EmptyStructMarker``. Otherwise return `source` converted to an
ndarray with contents for writing to matfile.
'''
if isinstance(source, np.ndarray):
return source
if source is None:
return None
# Objects that implement mappings
is_mapping = (hasattr(source, 'keys') and hasattr(source, 'values') and
hasattr(source, 'items'))
# Objects that don't implement mappings, but do have dicts
if isinstance(source, np.generic):
# NumPy scalars are never mappings (PyPy issue workaround)
pass
elif not is_mapping and hasattr(source, '__dict__'):
source = dict((key, value) for key, value in source.__dict__.items()
if not key.startswith('_'))
is_mapping = True
if is_mapping:
dtype = []
values = []
for field, value in source.items():
if (isinstance(field, str) and
field[0] not in '_0123456789'):
dtype.append((str(field), object))
values.append(value)
if dtype:
return np.array([tuple(values)], dtype)
else:
return EmptyStructMarker
# Next try and convert to an array
narr = np.asanyarray(source)
if narr.dtype.type in (object, np.object_) and \
narr.shape == () and narr == source:
# No interesting conversion possible
return None
return narr
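# Illustrative behaviour (examples made up, logic taken from above):
#   to_writeable({'a': 1})  -> 1-element structured ndarray with field 'a'
#   to_writeable({})        -> EmptyStructMarker
#   to_writeable(None)      -> None (nothing to write)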
# Native byte ordered dtypes for convenience for writers
NDT_FILE_HDR = MDTYPES[native_code]['dtypes']['file_header']
NDT_TAG_FULL = MDTYPES[native_code]['dtypes']['tag_full']
NDT_TAG_SMALL = MDTYPES[native_code]['dtypes']['tag_smalldata']
NDT_ARRAY_FLAGS = MDTYPES[native_code]['dtypes']['array_flags']
class VarWriter5(object):
''' Generic matlab matrix writing class '''
mat_tag = np.zeros((), NDT_TAG_FULL)
mat_tag['mdtype'] = miMATRIX
def __init__(self, file_writer):
self.file_stream = file_writer.file_stream
self.unicode_strings = file_writer.unicode_strings
self.long_field_names = file_writer.long_field_names
self.oned_as = file_writer.oned_as
# These are used for top level writes, and unset after
self._var_name = None
self._var_is_global = False
def write_bytes(self, arr):
self.file_stream.write(arr.tobytes(order='F'))
def write_string(self, s):
self.file_stream.write(s)
def write_element(self, arr, mdtype=None):
''' write tag and data '''
if mdtype is None:
mdtype = NP_TO_MTYPES[arr.dtype.str[1:]]
# Array needs to be in native byte order
if arr.dtype.byteorder == swapped_code:
arr = arr.byteswap().newbyteorder()
byte_count = arr.size*arr.itemsize
if byte_count <= 4:
self.write_smalldata_element(arr, mdtype, byte_count)
else:
self.write_regular_element(arr, mdtype, byte_count)
def write_smalldata_element(self, arr, mdtype, byte_count):
# write tag with embedded data
tag = np.zeros((), NDT_TAG_SMALL)
tag['byte_count_mdtype'] = (byte_count << 16) + mdtype
# if arr.tobytes() yields fewer than 4 bytes, the 'data' field is zero-padded as needed.
tag['data'] = arr.tobytes(order='F')
self.write_bytes(tag)
def write_regular_element(self, arr, mdtype, byte_count):
# write tag, data
tag = np.zeros((), NDT_TAG_FULL)
tag['mdtype'] = mdtype
tag['byte_count'] = byte_count
self.write_bytes(tag)
self.write_bytes(arr)
# pad to next 64-bit boundary
bc_mod_8 = byte_count % 8
if bc_mod_8:
self.file_stream.write(b'\x00' * (8-bc_mod_8))
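# Worked example of the padding rule above (illustrative only): a miINT8
# element with byte_count == 10 is written as an 8-byte tag plus 10 data
# bytes; 10 % 8 == 2, so 6 zero bytes follow to reach the 64-bit boundary.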
def write_header(self,
shape,
mclass,
is_complex=False,
is_logical=False,
nzmax=0):
''' Write header for given data options
shape : sequence
array shape
mclass : int
mat5 matrix class
is_complex : bool, optional
True if matrix is complex
is_logical : bool, optional
True if matrix is logical
nzmax : int, optional
max non zero elements for sparse arrays
We get the name and the global flag from the object, and reset
them to defaults after we've used them
'''
# get name and is_global from one-shot object store
name = self._var_name
is_global = self._var_is_global
# initialize the top-level matrix tag, store position
self._mat_tag_pos = self.file_stream.tell()
self.write_bytes(self.mat_tag)
# write array flags (complex, global, logical, class, nzmax)
af = np.zeros((), NDT_ARRAY_FLAGS)
af['data_type'] = miUINT32
af['byte_count'] = 8
flags = is_complex << 3 | is_global << 2 | is_logical << 1
af['flags_class'] = mclass | flags << 8
af['nzmax'] = nzmax
self.write_bytes(af)
# shape
self.write_element(np.array(shape, dtype='i4'))
# write name
name = np.asarray(name)
if name == '': # empty string zero-terminated
self.write_smalldata_element(name, miINT8, 0)
else:
self.write_element(name, miINT8)
# reset the one-shot store to defaults
self._var_name = ''
self._var_is_global = False
def update_matrix_tag(self, start_pos):
curr_pos = self.file_stream.tell()
self.file_stream.seek(start_pos)
byte_count = curr_pos - start_pos - 8
if byte_count >= 2**32:
raise MatWriteError("Matrix too large to save with Matlab "
"5 format")
self.mat_tag['byte_count'] = byte_count
self.write_bytes(self.mat_tag)
self.file_stream.seek(curr_pos)
def write_top(self, arr, name, is_global):
""" Write variable at top level of mat file
Parameters
----------
arr : array_like
array-like object to create writer for
name : str, optional
name as it will appear in matlab workspace
default is empty string
is_global : {False, True}, optional
whether variable will be global on load into matlab
"""
# these are set before the top-level header write, and unset at
# the end of the same write, because they do not apply for lower levels
self._var_is_global = is_global
self._var_name = name
# write the header and data
self.write(arr)
def write(self, arr):
''' Write `arr` to stream at top and sub levels
Parameters
----------
arr : array_like
array-like object to create writer for
'''
# store position, so we can update the matrix tag
mat_tag_pos = self.file_stream.tell()
# First check if these are sparse
if scipy.sparse.issparse(arr):
self.write_sparse(arr)
self.update_matrix_tag(mat_tag_pos)
return
# Try to convert things that aren't arrays
narr = to_writeable(arr)
if narr is None:
raise TypeError('Could not convert %s (type %s) to array'
% (arr, type(arr)))
if isinstance(narr, MatlabObject):
self.write_object(narr)
elif isinstance(narr, MatlabFunction):
raise MatWriteError('Cannot write matlab functions')
elif narr is EmptyStructMarker: # empty struct array
self.write_empty_struct()
elif narr.dtype.fields: # struct array
self.write_struct(narr)
elif narr.dtype.hasobject: # cell array
self.write_cells(narr)
elif narr.dtype.kind in ('U', 'S'):
if self.unicode_strings:
codec = 'UTF8'
else:
codec = 'ascii'
self.write_char(narr, codec)
else:
self.write_numeric(narr)
self.update_matrix_tag(mat_tag_pos)
def write_numeric(self, arr):
imagf = arr.dtype.kind == 'c'
logif = arr.dtype.kind == 'b'
try:
mclass = NP_TO_MXTYPES[arr.dtype.str[1:]]
except KeyError:
# No matching matlab type, probably complex256 / float128 / float96
# Cast data to complex128 / float64.
if imagf:
arr = arr.astype('c128')
elif logif:
arr = arr.astype('i1') # Should only contain 0/1
else:
arr = arr.astype('f8')
mclass = mxDOUBLE_CLASS
self.write_header(matdims(arr, self.oned_as),
mclass,
is_complex=imagf,
is_logical=logif)
if imagf:
self.write_element(arr.real)
self.write_element(arr.imag)
else:
self.write_element(arr)
def write_char(self, arr, codec='ascii'):
''' Write string array `arr` with given `codec`
'''
if arr.size == 0 or np.all(arr == ''):
# This is an empty string array or a string array containing
# only empty strings. Matlab cannot distinguish between a
# string array that is empty, and a string array containing
# only empty strings, because it stores strings as arrays of
# char. There is no way of having an array of char that is
# not empty, but contains an empty string. We have to
# special-case the array-with-empty-strings because even
# empty strings have zero padding, which would otherwise
# appear in matlab as a string with a space.
shape = (0,) * np.max([arr.ndim, 2])
self.write_header(shape, mxCHAR_CLASS)
self.write_smalldata_element(arr, miUTF8, 0)
return
# non-empty string.
#
# Convert to char array
arr = arr_to_chars(arr)
# We have to write the shape directly, because we are going to
# recode the characters, and the resulting stream of chars
# may have a different length
shape = arr.shape
self.write_header(shape, mxCHAR_CLASS)
if arr.dtype.kind == 'U' and arr.size:
# Make one long string from all the characters. We need to
# transpose here, because we're flattening the array, before
# we write the bytes. The bytes have to be written in
# Fortran order.
n_chars = np.prod(shape)
st_arr = np.ndarray(shape=(),
dtype=arr_dtype_number(arr, n_chars),
buffer=arr.T.copy()) # Fortran order
# Recode with codec to give byte string
st = st_arr.item().encode(codec)
# Reconstruct as 1-D byte array
arr = np.ndarray(shape=(len(st),),
dtype='S1',
buffer=st)
self.write_element(arr, mdtype=miUTF8)
def write_sparse(self, arr):
''' Sparse matrices are 2D
'''
A = arr.tocsc() # convert to sparse CSC format
A.sort_indices() # MATLAB expects sorted row indices
is_complex = (A.dtype.kind == 'c')
is_logical = (A.dtype.kind == 'b')
nz = A.nnz
self.write_header(matdims(arr, self.oned_as),
mxSPARSE_CLASS,
is_complex=is_complex,
is_logical=is_logical,
# matlab won't load file with 0 nzmax
nzmax=1 if nz == 0 else nz)
self.write_element(A.indices.astype('i4'))
self.write_element(A.indptr.astype('i4'))
self.write_element(A.data.real)
if is_complex:
self.write_element(A.data.imag)
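# For reference (illustrative, not from the original source): the 2x2
# matrix [[1, 0], [0, 2]] stored as CSC has data == [1.0, 2.0],
# indices == [0, 1] (row of each stored value) and indptr == [0, 1, 2]
# (per-column offsets) - exactly the three arrays written above.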
def write_cells(self, arr):
self.write_header(matdims(arr, self.oned_as),
mxCELL_CLASS)
# loop over data, column major
A = np.atleast_2d(arr).flatten('F')
for el in A:
self.write(el)
def write_empty_struct(self):
self.write_header((1, 1), mxSTRUCT_CLASS)
# max field name length set to 1 in an example matlab struct
self.write_element(np.array(1, dtype=np.int32))
# Field names element is empty
self.write_element(np.array([], dtype=np.int8))
def write_struct(self, arr):
self.write_header(matdims(arr, self.oned_as),
mxSTRUCT_CLASS)
self._write_items(arr)
def _write_items(self, arr):
# write fieldnames
fieldnames = [f[0] for f in arr.dtype.descr]
length = max([len(fieldname) for fieldname in fieldnames])+1
max_length = (self.long_field_names and 64) or 32
if length > max_length:
raise ValueError("Field names are restricted to %d characters" %
(max_length-1))
self.write_element(np.array([length], dtype='i4'))
self.write_element(
np.array(fieldnames, dtype='S%d' % (length)),
mdtype=miINT8)
A = np.atleast_2d(arr).flatten('F')
for el in A:
for f in fieldnames:
self.write(el[f])
def write_object(self, arr):
'''Same as writing structs, except different mx class, and extra
classname element after header
'''
self.write_header(matdims(arr, self.oned_as),
mxOBJECT_CLASS)
self.write_element(np.array(arr.classname, dtype='S'),
mdtype=miINT8)
self._write_items(arr)
class MatFile5Writer(object):
''' Class for writing mat5 files '''
@docfiller
def __init__(self, file_stream,
do_compression=False,
unicode_strings=False,
global_vars=None,
long_field_names=False,
oned_as='row'):
''' Initialize writer for matlab 5 format files
Parameters
----------
%(do_compression)s
%(unicode_strings)s
global_vars : None or sequence of strings, optional
Names of variables to be marked as global for matlab
%(long_fields)s
%(oned_as)s
'''
self.file_stream = file_stream
self.do_compression = do_compression
self.unicode_strings = unicode_strings
if global_vars:
self.global_vars = global_vars
else:
self.global_vars = []
self.long_field_names = long_field_names
self.oned_as = oned_as
self._matrix_writer = None
def write_file_header(self):
# write header
hdr = np.zeros((), NDT_FILE_HDR)
hdr['description'] = 'MATLAB 5.0 MAT-file Platform: %s, Created on: %s' \
% (os.name,time.asctime())
hdr['version'] = 0x0100
hdr['endian_test'] = np.ndarray(shape=(),
dtype='S2',
buffer=np.uint16(0x4d49))
self.file_stream.write(hdr.tobytes())
def put_variables(self, mdict, write_header=None):
''' Write variables in `mdict` to stream
Parameters
----------
mdict : mapping
mapping whose ``items`` method returns (name, contents) pairs, where
``name`` is the variable name in the matlab workspace on file load, and
``contents`` is something writeable to a matlab file, such as a NumPy
array.
write_header : {None, True, False}, optional
If True, then write the matlab file header before writing the
variables. If None (the default) then write the file header
if we are at position 0 in the stream. By setting False
here, and setting the stream position to the end of the file,
you can append variables to a matlab file
'''
# write header if requested, or if write_header is None and we are at the start of the file
if write_header is None:
write_header = self.file_stream.tell() == 0
if write_header:
self.write_file_header()
self._matrix_writer = VarWriter5(self)
for name, var in mdict.items():
if name[0] == '_':
continue
is_global = name in self.global_vars
if self.do_compression:
stream = BytesIO()
self._matrix_writer.file_stream = stream
self._matrix_writer.write_top(var, asbytes(name), is_global)
out_str = zlib.compress(stream.getvalue())
tag = np.empty((), NDT_TAG_FULL)
tag['mdtype'] = miCOMPRESSED
tag['byte_count'] = len(out_str)
self.file_stream.write(tag.tobytes())
self.file_stream.write(out_str)
else: # not compressing
self._matrix_writer.write_top(var, asbytes(name), is_global) | var_str = file_obj.read(byte_count)
# write to stringio object
out_obj = BytesIO()
out_obj.write(raw_hdr) |
07_tensor_linear_regresssion.py | #! /usr/bin/env python
# PyTorch Tutorial 07 - Linear Regression
# https://www.youtube.com/watch?v=YAJ5XBwlN4o&list=PLqnslRFeH2UrcDBWF5mfPGpqQDSta6VK4&index=7
#from __future__ import print_function
import torch
print("\n" * 20)
print("-" * 80)
print("-" * 80)
print("\n" * 2)
#### Steps in Torch ML pipeline
# 1) Design Model (input, output size, forward pass)
# 2) Construct the loss & optimiser
# 3) Training Loop
# - forward pass: compute prediction
# - backward pass: gradients
# - update weights
# 0m - review Steps in Torch ML pipeline
# 1m - library imports
# 2m - coding starts - prepare data
# 4m30 - 1) Design Model (input, output size, forward pass)
# 5m40 - 2) Construct the loss & optimiser
# 7m - 3) Training Loop
# 10m - plot
import torch
import torch.nn as nn # PyTorch nn module has high-level APIs to build a neural network.
# Torch. nn module uses Tensors and Automatic differentiation modules for training and building layers such as input,
# hidden, and output layers - DOCS: https://pytorch.org/docs/stable/nn.html
import numpy as np # NumPy is a library for the Python programming language, adding support for large,
# multi-dimensional arrays and matrices, along with a large collection of high-level mathematical functions to operate
# on these arrays - DOCS: https://numpy.org/doc/stable/user/whatisnumpy.html
from sklearn import datasets # to generate a regression dataset
# Scikit-learn is a library in Python that provides many unsupervised and supervised
# learning algorithms. It contains a lot of efficient tools for machine learning and statistical modeling including
# classification, regression, clustering and dimensionality reduction. Built upon some of the technology you might
# already be familiar with, like NumPy, pandas, and Matplotlib!
# DOCS: https://scikit-learn.org/stable/
import matplotlib.pyplot as plt # Matplotlib is a plotting library for the Python programming language. It provides an
# object-oriented API for embedding plots into applications using general-purpose GUI toolkits like Tkinter,
# wxPython, Qt, or GTK - DOCS:
# cheatsheets: https://github.com/matplotlib/cheatsheets#cheatsheets
# How to plot & save graph hello world: https://github.com/UnacceptableBehaviour/latex_maths#python---matplotlib-numpy
# 0) prepare data - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
x_numpy, y_numpy = datasets.make_regression(n_samples=100, n_features=1, noise=20, random_state=1)
# returned from ^
# change data type from double to float32 - avoid errors later
X = torch.from_numpy(x_numpy.astype(np.float32)) # create torch tensor from numpy array
Y = torch.from_numpy(y_numpy.astype(np.float32))
print(f"\n Y = torch.from_numpy(y_numpy.astype(np.float32)) \n{ Y }")
# Y = torch.from_numpy(y_numpy.astype(np.float32)) # tensor w a single row - see square brackets
# tensor([-5.5539e+01, -1.0662e+01, 2.2757e+01, 1.0110e+02, 1.4434e+02,
# 3.3289e+01, 3.3015e+01, -2.5887e+01, -9.9639e+01, 2.3803e+01,
# -4.5589e+01, -8.3388e+00, -9.5315e+01, 3.6407e+01, -8.7293e+01,
# 6.7669e+01, -1.3687e+01, -5.5441e+01, -6.5340e+01, -5.4450e+01,
# -2.8835e+01, 1.7884e+02, 6.5084e+01, 2.6668e+01, -1.8546e+01,
# -4.1499e+01, 8.5583e-01, 4.4562e+01, 1.1598e+02, -6.4620e+01,
# -2.5931e+01, -6.0882e+01, 1.8720e+01, 7.5070e+01, 1.1720e+02,
# -2.2698e+01, -5.6363e+01, 1.8084e+02, -1.9257e+02, 6.8503e+01,
# 1.6552e+02, 1.0500e+02, -7.0434e+01, -5.8769e+01, -4.1576e+01,
# 7.3247e+01, 4.0966e+01, 8.0462e+01, -2.8794e+01, 3.4234e+01,
# -4.1715e+01, 1.4355e+01, 7.9336e+01, 2.7129e+01, -3.9487e+01,
# 6.6805e+01, 9.5531e+01, 3.5610e+00, 1.0857e-01, 5.6495e+01,
# 5.1575e+01, -2.0974e+00, -2.6656e+01, 3.9742e+01, 3.6101e+01,
# -7.5602e+01, 1.9713e+01, -7.1601e+01, -1.9904e+01, -7.6708e+01,
# -1.1834e+02, -2.9825e+01, 1.5108e+02, 5.2923e+01, -5.9552e+01,
# 3.0721e+01, -2.9355e+01, -4.4786e+01, 1.0006e+02, 1.5058e+02,
# 1.2200e+02, -1.8186e+02, 3.4739e+00, -2.2980e+01, 4.5184e+01,
# 9.8606e+01, -9.2779e+00, -5.2478e+01, 3.8593e+01, -1.9997e+02,
# -9.5201e+00, -3.4724e+00, -3.5312e+01, 7.5406e+01, 1.7570e+01,
# -2.3960e+01, 1.3209e+02, 2.0608e+01, 5.1111e+01, -2.6306e+01])
print(f"\n Y.shape[0] \n{ Y.shape[0] }") # 100
y = Y.view(Y.shape[0], 1) # reshape to a column tensor Y.view(ROW, COL) Y.view(100, 1)
print(f"\n y = Y.view(y.shape[0], 1) \n{ y }")
# tensor([[-5.5539e+01],
# [-1.0662e+01],
# [ 2.2757e+01],
# [ 1.0110e+02],
# .
# 100 in total
# .
# [ 1.3209e+02],
# [ 2.0608e+01],
# [ 5.1111e+01],
# [-2.6306e+01]])
print(f"\n y.shape \n{ y.shape }") # new little y shape = torch.Size([100, 1]) ROWS, COLS
print(f"\n X.shape \n{ X.shape }")
n_samples, n_features = X.shape
#print(f"\n \n{ }") |
# 1) model - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# in LINEAR REGRESSION case this is ONE layer
input_size = n_features
output_size = 1
model = nn.Linear(input_size, output_size) # built in Linear model
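# Under the hood nn.Linear just computes y = x @ W.T + b. A minimal manual
# sketch of the same forward pass (hypothetical names, for intuition only):
# w = torch.randn(output_size, input_size, requires_grad=True)
# b = torch.zeros(output_size, requires_grad=True)
# y_hat = X @ w.t() + b # equivalent to model(X) up to initialisation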
# 2) loss optimizer - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
learning_rate = 0.01
criterion = nn.MSELoss() # for LINEAR REGRESSION - BUILT IN Loss function Mean Squared Error Loss
# nn.MSELoss() creates a criterion - https://pytorch.org/docs/stable/generated/torch.nn.MSELoss.html
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) # SGD - Stocastic Gradient Descent
# https://pytorch.org/docs/stable/optim.html?highlight=torch%20optim%20sgd#torch.optim.SGD
# w/ optional Nesterov momentum :o
# 3) training loop - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
num_epochs = 100
for epoch in range(num_epochs):
# - forward pass: compute prediction
y_predicted = model(X) # call model passing in data X
loss = criterion(y_predicted, y) # actual labels & predicted - output = criterion(input, target)
# - backward pass: gradients
loss.backward()
# - update weights
optimizer.step()
optimizer.zero_grad()
if (epoch+1) % 10 == 0:
print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')
# plot
predicted = model(X).detach().numpy() # detach from the autograd graph before converting to numpy for plotting
label_data = plt.plot(x_numpy, y_numpy, 'ro')
label_model = plt.plot(x_numpy, predicted, 'b')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend(['data','model'])
plt.show()
print('plt.show')
print(f"\n x_numpy \n{ x_numpy }")
print(f"\n y_numpy \n{ y_numpy }")
print(f"\n predicted \n{ predicted }")
#print(f"\n \n{ }")
#print(f"\n \n{ }")
print('\n') | |
util.py | from lxml.etree import fromstring, tostring
from lxml import builder
from openpack.basepack import ooxml_namespaces
docx_namespaces = {
'w': "http://schemas.openxmlformats.org/wordprocessingml/2006/main",
'r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships',
've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing'),
'v': 'urn:schemas-microsoft-com:vml',
}
docx_namespaces.update(ooxml_namespaces)
def expand_namespace(tag):
"""
>>> expand_namespace('w:document')
'{http://schemas.openxmlformats.org/wordprocessingml/2006/main}document'
"""
namespace, sep, tag = tag.rpartition(':')
fmt = '{%(namespace)s}%(tag)s' if namespace else '%(tag)s'
namespace = docx_namespaces[namespace]
return fmt % vars()
class ElementMaker(builder.ElementMaker):
def __getitem__(self, name):
return "%s%s" % (self._namespace, name)
w = ElementMaker(namespace=docx_namespaces['w'], nsmap=docx_namespaces)
dcterms = ElementMaker(namespace=docx_namespaces['dcterms'], nsmap=docx_namespaces)
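# Usage sketch (illustrative, relying on lxml's ElementMaker semantics):
# attribute access builds a namespaced element, item access (the
# __getitem__ override above) returns the expanded tag string.
#
# >>> w['p']
# '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}p'
# >>> tostring(w.p('hello')) # a <w:p> element containing 'hello'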
class TemplateSource(object):
template = None
def __init__(self, variables, encoding='utf-8'):
self.variables = variables
self.encoding = encoding
self.xml = self._from_template()
self.element = self._to_element()
def _from_template(self):
"""Use self.template and self.variables to generate XML content.""" |
def _to_element(self):
return fromstring(self.xml)
def __str__(self):
return tostring(self.element, encoding=self.encoding) | raise NotImplementedError |
tree.js | /**
*
* create by ligx
*
* @flow
*/
import '../common/shirm';
import type { ExpandInfo, NodeId2ExtendInfo, NodeId2SelectInfo, QueryType } from '@lugia/lugia-web';
import animation from '../common/openAnimation';
import ThemeHoc from '@lugia/theme-hoc';
import Empty from '../empty';
import * as React from 'react';
import { TreeNode } from './rc-tree';
import Support from '../common/FormFieldWidgetSupport';
import ThrottleTree from './ThrottleTree';
import Widget from '../consts/index';
import './index.css';
import TreeUtils from './utils';
import { deleteValue } from '../utils/index';
import styled from 'styled-components';
import { FontSize, FontSizeNumber } from '../css';
import { px2emcss } from '../css/units';
const em = px2emcss(FontSizeNumber);
type RowData = { [key: string]: any };
export type TreeProps = {
start: number,
end: number,
query: string,
pathSeparator?: string,
onScroller?: Function,
/** Whether multiple selection is supported */
mutliple?: boolean,
limitCount?: number,
/** Expand all tree nodes by default */
expandAll: boolean,
onlySelectLeaf: boolean,
displayField: string,
valueField: string,
igronSelectField?: string,
value: ?Array<string>,
displayValue: ?Array<string>,
defaultValue: ?Array<string> | string,
svThemVersion?: number,
/** Triggered when a node is expanded or collapsed */
onExpand?: Function,
/** Triggered when a tree node is clicked */
onSelect?: Function,
/**
* Fired when the value changes
*/
onChange?: Function,
/**
* Whether the parent chain is highlighted when a child item is selected
*/
parentIsHighlight?: boolean,
getTreeData?: Function,
splitQuery?: string,
current: number,
data?: Array<RowData>,
inlineType: 'primary' | 'ellipse',
blackList: ?(string[]),
whiteList: ?(string[]),
searchType?: QueryType,
shape: 'default' | 'round',
showSwitch: boolean,
__navmenu: boolean,
__dontShowEmpty?: boolean,
switchAtEnd?: boolean,
switchIconNames?: Object,
getPartOfThemeProps: Function,
renderSuffixItems?: Function,
onRightClick?: Function,
theme?: Object,
};
export type TreeState = {
start: number,
expand: ExpandInfo,
selectedInfo: NodeId2SelectInfo,
expandedKeys: Array<string>,
selectValue?: Array<string>,
hasError: boolean,
parentHighlightKeys: Array<string>,
};
const EmptyBox = styled.span`
font-size: ${FontSize};
line-height: ${em(20)};
text-align: center;
display: block;
`;
const ErrorTooltip = styled(EmptyBox)`
color: red;
`;
class Tree extends React.Component<TreeProps, TreeState> {
static displayName = Widget.Tree;
static defaultP | = {
expandAll: false,
mutliple: false,
pathSeparator: '|',
defaultValue: '',
displayField: 'title',
valueField: 'key',
onlySelectLeaf: false,
query: '',
current: -1,
openAnimation: animation,
igronSelectField: 'disabled',
inlineType: 'primary',
parentIsHighlight: false,
shape: 'default',
showSwitch: true,
__navmenu: false,
switchAtEnd: false,
switchIconNames: {
open: 'lugia-icon-direction_caret_down',
close: 'lugia-icon-direction_caret_right',
},
};
static TreeNode: TreeNode;
allExpandKeys: Array<string> | null;
allExpandInfo: ExpandInfo;
allStart: number;
queryAllUtils: TreeUtils;
utils: TreeUtils;
value: any;
data: Array<RowData>;
end: number;
canSeeCount: number;
constructor(props: TreeProps) {
super(props);
this.allExpandInfo = this.getEmptyExpandInfo();
this.allStart = 0;
this.end = 0;
this.canSeeCount = 0;
this.createQueryAllTreelUtils(props);
if (this.isEmpty(props)) {
this.state = {
start: 0,
hasError: false,
expandedKeys: [],
expand: this.getEmptyExpandInfo(),
selectValue: [],
selectedInfo: this.getEmptyNodeId2SelectInfo(),
parentHighlightKeys: [],
__dontShowEmpty: false,
};
return;
}
const expand = this.updateExpandInfo(props);
const { id2ExtendInfo } = expand;
const state = {
hasError: false,
start: Support.getInitStart(props, 0),
expandedKeys: this.getExpandedKeys(props, id2ExtendInfo),
expand,
selectValue: [],
selectedInfo: this.getEmptyNodeId2SelectInfo(),
parentHighlightKeys: [],
};
this.updateStateValuForLimitValue(props, state, id2ExtendInfo, this.getInitValue(props));
this.state = state;
}
getViewData(): Array<RowData> {
const { data = [] } = this;
return data;
}
getQueryData(): Array<RowData> {
const { props } = this;
if (this.isQueryAll(props)) {
const { data = [] } = props;
return data;
}
return this.getViewData();
}
isSelectAll() {
const { expand } = this.state;
const { id2ExtendInfo } = expand;
const { props } = this;
const { limitCount = 9999999 } = props;
const utils = this.getUtils(props);
const userInput = Object.keys(this.getNotInTree()).length;
const canSelect = Math.min(utils.getCanTotal(id2ExtendInfo), limitCount);
if (canSelect <= 0) {
return false;
}
return utils.selCount + userInput >= canSelect;
}
isChecked(key: string) {
const { selectedInfo } = this.state;
const { checked, halfchecked } = selectedInfo;
return checked[key] || halfchecked[key];
}
getInitValue(props: TreeProps): Array<string> {
return Support.getInitValueArray(props);
}
isNotLimit(props: TreeProps) {
return Support.isNotLimit(props);
}
componentWillReceiveProps(props: TreeProps) {
const dataChanged = JSON.stringify(props.data) !== JSON.stringify(this.props.data);
if (dataChanged === true) {
this.allExpandInfo = this.getEmptyExpandInfo();
this.createQueryAllTreelUtils(props);
}
const queryChanged = this.props.query !== props.query;
const blackListChange = this.props.blackList !== props.blackList;
const whiteListChange = this.props.whiteList !== props.whiteList;
const valueChanged = props.value != this.props.value;
if (dataChanged || queryChanged || valueChanged || blackListChange || whiteListChange) {
const expand = this.updateExpandInfo(props);
const { id2ExtendInfo } = expand;
const newState: TreeState = {
hasError: false,
start: this.isQueryAll(props)
? this.allStart
: Support.getInitStart(props, this.state.start),
selectedInfo: this.getEmptyNodeId2SelectInfo(),
expandedKeys: this.getExpandedKeys(props, id2ExtendInfo),
expand,
selectValue: [],
};
if (this.isNotLimit(props)) {
const { selectValue = [], selectedInfo } = this.state;
const { value } = selectedInfo;
this.updateStateValue(
props,
newState,
id2ExtendInfo,
selectValue,
value,
Object.keys(value)
);
} else {
this.updateStateValuForLimitValue(props, newState, id2ExtendInfo, this.getInitValue(props));
}
this.setState(newState);
}
const startChange = this.props.start !== props.start;
if (startChange) {
this.setState({ start: Support.getInitStart(props, this.state.start) });
}
const { current } = this.props;
const currentChange = current !== props.current;
if (currentChange) {
if (current > this.end - 2) {
const start = Math.min(this.state.start + this.canSeeCount, this.getViewData().length - 1);
this.setState({ start });
}
if (current < this.state.start) {
this.setState({ start: Math.max(this.state.start - this.canSeeCount, 0) });
}
}
this.setState({ hasError: false });
}
getEmptyNodeId2SelectInfo(): NodeId2SelectInfo {
return {
checked: {},
value: {},
halfchecked: {},
};
}
getNotInTree(): Object {
return this.getUtils(this.props).getNotInTree();
}
getInTree(): Object {
return this.getUtils(this.props).getInTree();
}
updateStateValuForLimitValue(
props: TreeProps,
state: TreeState,
id2ExtendInfo: NodeId2ExtendInfo,
value: Array<string>
) {
const { obj, val } = this.getValueObject(props, value);
this.updateStateValue(props, state, id2ExtendInfo, value, obj, val);
}
getValueObject(props: TreeProps, value: Array<string>) {
if (this.isSingleSelectForProps(props)) {
if (!value || value.length === 0) {
return { obj: {}, val: [] };
}
const first = value[0];
return { obj: { [first]: true }, val: [first] };
}
const len = value.length;
const result = {};
for (let i = 0; i < len; i++) {
const oneValue = value[i];
if (oneValue !== '') {
result[oneValue] = true;
}
}
return { obj: result, val: value };
}
updateStateValue(
props: TreeProps,
state: TreeState,
id2ExtendInfo: NodeId2ExtendInfo,
selectValue: Array<string>,
valueObject: Object,
val: Array<string>
) {
const { displayValue = [] } = props;
if (this.isSingleSelectForProps(props)) {
state.selectValue = selectValue;
} else {
state.selectedInfo = this.getUtils(props).value2SelectInfo(
val,
displayValue ? displayValue : [],
valueObject,
id2ExtendInfo
);
}
}
updateExpandInfo(props: TreeProps): ExpandInfo {
let result = this.getEmptyExpandInfo();
if (this.isQueryAll(props)) {
result = this.allExpandInfo;
}
this.createQueryTreeUtils(props);
const { query, blackList, whiteList, searchType = 'include' } = props;
const utils = this.getUtils(props);
this.search(utils, result, query, searchType, blackList, whiteList);
if (this.state) {
this.allExpandKeys = this.state.expandedKeys;
const usableExpandKeys = this.filterUsableExpandKeys(
this.state.expandedKeys,
result.id2ExtendInfo
);
if (usableExpandKeys.length > 0) {
usableExpandKeys.forEach(item => {
utils.expandNode(item, result.id2ExtendInfo);
});
}
}
return result;
}
filterUsableExpandKeys(source, id2ExtendInfo) {
return source.filter(item => id2ExtendInfo[item] && item !== 'lugia_tree_root');
}
getEmptyExpandInfo(): ExpandInfo {
return { id2ExtendInfo: {} };
}
getExpandedKeys(props: TreeProps, id2ExtendInfo): Array<string> {
if (this.isQueryAll(props)) {
const utils = this.getUtils(props);
if (this.allExpandKeys == undefined || utils.isWhiteOrBlackListChanged()) {
const { expandAll } = this.props;
this.allExpandKeys = expandAll
? Object.keys(id2ExtendInfo)
: this.state
? this.filterUsableExpandKeys(this.state.expandedKeys, id2ExtendInfo)
: [];
}
return this.allExpandKeys;
}
const newAllExpandKeys = this.allExpandKeys || [];
return Array.from(
new Set([
...newAllExpandKeys.filter(item => item !== 'lugia_tree_root'),
...Object.keys(id2ExtendInfo).filter(item => item !== 'lugia_tree_root'),
])
);
}
isQueryAll({ query }): boolean {
return query === '';
}
shouldComponentUpdate(nextProps: TreeProps, nextState: TreeState) {
const { props } = this;
const dataChanged = props.data !== nextProps.data;
const blackListChange = props.blackList !== nextProps.blackList;
const whiteListChange = props.whiteList !== nextProps.whiteList;
const themeChange = nextProps.theme !== props.theme;
const { state } = this;
return (
props.query !== nextProps.query ||
dataChanged ||
blackListChange ||
whiteListChange ||
props.current !== nextProps.current ||
state.hasError !== nextState.hasError ||
state.start !== nextState.start ||
props.svThemVersion !== nextProps.svThemVersion ||
props.mutliple !== nextProps.mutliple ||
state.selectValue !== nextState.selectValue ||
state.expand !== nextState.expand ||
state.selectedInfo !== nextState.selectedInfo ||
state.parentHighlightKeys !== nextState.parentHighlightKeys ||
themeChange
);
}
createQueryTreeUtils(props: TreeProps) {
const utils = this.createUtils(props, true);
if (utils) {
this.utils = utils;
}
}
createQueryAllTreelUtils(props: TreeProps) {
const utils = this.createUtils(props);
if (utils) {
this.queryAllUtils = utils;
this.allExpandKeys = null;
this.allStart = 0;
}
}
createUtils(
{
data,
onlySelectLeaf,
expandAll,
displayField,
valueField,
limitCount,
splitQuery,
igronSelectField,
pathSeparator,
pathField,
pidField,
},
realyExpandAll: boolean = expandAll
): ?TreeUtils {
if (!data) {
return null;
}
return new TreeUtils(data, {
expandAll: realyExpandAll,
onlySelectLeaf,
displayField,
valueField,
limitCount,
splitQuery,
igronSelectField,
pathSeparator,
pathField,
pidField,
});
}
getUtils(props: TreeProps) {
if (this.isQueryAll(props)) {
return this.queryAllUtils;
}
return this.utils;
}
render() {
const { props, state } = this;
const empty = <Empty themeInfo={props.getPartOfThemeProps('Container')} />;
const { __dontShowEmpty } = props;
if (this.isEmpty(props) && !__dontShowEmpty) {
return empty;
}
if (this.state.hasError) {
return <ErrorTooltip>Tree data error</ErrorTooltip>;
}
const {
query,
current,
igronSelectField,
blackList,
whiteList,
searchType = 'include',
valueField,
getTreeData,
} = props;
const {
expand,
expandedKeys,
selectedInfo,
start,
selectValue = [],
parentHighlightKeys = [],
} = state;
const { id2ExtendInfo } = expand;
const { checked, halfchecked } = selectedInfo;
const utils = this.getUtils(props);
const data = this.search(utils, expand, query, searchType, blackList, whiteList);
this.data = data;
getTreeData && getTreeData(data);
if (data.length === 0 && !__dontShowEmpty) {
return empty;
}
if (this.isQueryAll(props)) {
this.allStart = start;
}
const highlight = [];
const row = data[current];
if (row) {
const { [valueField]: key } = row;
highlight.push(key + '');
}
return (
<ThrottleTree
{...props}
id2ExtendInfo={id2ExtendInfo}
start={start}
igronSelectField={igronSelectField}
onScroller={this.onScroller}
onScrollerEndChange={this.onScrollerEndChange}
onCanSeeCountChange={this.onCanSeeCountChange}
onCheck={this.onCheck}
onSelect={this.onSelect}
data={data}
selectable={this.isSingleSelect()}
highlight={highlight}
selectedKeys={selectValue}
parentHighlightKeys={parentHighlightKeys}
checkedKeys={Object.keys(checked)}
halfCheckedKeys={Object.keys(halfchecked)}
utils={utils}
expandedKeys={expandedKeys}
onExpand={this.onExpand}
/>
);
}
onScrollerEndChange = (end: number) => {
this.end = end;
};
onCanSeeCountChange = (count: number) => {
this.canSeeCount = count;
};
search(
utils: TreeUtils,
expand: ExpandInfo,
query: string,
searchType: QueryType = 'include',
blackList: ?(string[]),
whiteList: ?(string[])
): Array<RowData> {
return (this.data = utils.search(expand, query, searchType, blackList, whiteList));
}
onSelect = (selectValue: Array<string>, eventObject: any, itemObj: Object) => {
const { parentIsHighlight, pathField, pathSeparator } = this.props; // whether selecting a child node should highlight its parent nodes
const {
node: {
props: {
item: { [pathField]: nodePath = '' },
isLeaf,
},
},
} = eventObject;
const parentHighlightKeys = parentIsHighlight ? nodePath.split(pathSeparator) : [];
const { onSelect } = this.props;
onSelect && onSelect(selectValue, itemObj);
this.select(selectValue, parentHighlightKeys, itemObj);
};
select(selectValue: Array<string>, parentHighlightKeys: Array<string>, itemObj: Object) {
if (this.isSingleSelect() === false) {
return;
}
const selVal = selectValue[0];
const value = selVal !== undefined && selVal !== null ? selVal : '';
const { props } = this;
const { onlySelectLeaf = false, igronSelectField = '', limitCount } = props;
if (limitCount != undefined && limitCount <= 0) {
return;
}
if (onlySelectLeaf === true || igronSelectField) {
const utils = this.getUtils(props);
const { expand } = this.state;
const { id2ExtendInfo } = expand;
if (onlySelectLeaf && !utils.isLeaf(value, id2ExtendInfo)) {
return;
}
if (igronSelectField != '' && igronSelectField != undefined) {
const row = utils.getRow(value, id2ExtendInfo);
if (row && row[igronSelectField] === true) {
return;
}
}
}
this.onChange([value], itemObj);
if (this.isNotLimit(props)) {
this.setState({ selectValue, parentHighlightKeys });
} else {
this.setState({ parentHighlightKeys });
}
}
onCheck = (_, event, item) => {
const { node, checked, shiftKey } = event;
const { eventKey } = node.props;
this.check(eventKey, checked, shiftKey, item);
};
check(eventKey: string, checked: boolean, shiftKey: boolean = false, item: Object) {
const { state, props } = this;
const { selectedInfo } = state;
const { halfchecked, value } = selectedInfo;
const isHalfSelect = halfchecked[eventKey] === undefined;
const isSelected = isHalfSelect && checked;
const utils = this.getUtils(props);
const { selectDirNode, unSelectNode, selectNode } = utils;
const onlyProcessYouself = isSelected ? selectDirNode : unSelectNode;
const processAllNode = isSelected ? selectNode : unSelectNode;
const { expand } = state;
const { id2ExtendInfo } = expand;
const check = shiftKey ? onlyProcessYouself : processAllNode;
check.call(utils, eventKey, selectedInfo, id2ExtendInfo);
this.onChange(Object.keys(value), item, selectedInfo);
if (this.isNotLimit(props)) {
const newState: TreeState = {
start: this.state.start,
hasError: false,
selectedInfo: this.getEmptyNodeId2SelectInfo(),
expandedKeys: this.state.expandedKeys,
expand: this.state.expand,
selectValue: [],
parentHighlightKeys: [],
};
const { value } = selectedInfo;
this.updateStateValue(props, newState, id2ExtendInfo, [], value, Object.keys(value));
this.setState(newState);
}
}
getRows(valArray: Array<any> = []): Array<any> {
if (!valArray || valArray.length <= 0) {
return [];
}
const result = [];
const { props, state } = this;
const { expand } = state;
const { id2ExtendInfo } = expand;
const utils = this.getUtils(props);
const len = valArray.length;
for (let i = 0; i < len; i++) {
const val = valArray[i];
const row = utils.getRow(val, id2ExtendInfo);
if (row) {
result.push(row);
} else {
result.push(null);
}
}
return result;
}
onChange = (value: any, item: Object, selectedInfo?: Object) => {
this.value = value;
const { props } = this;
const { onChange } = props;
const checkedItems = this.getCheckedItems(value);
onChange && onChange(value, this.getTitle(value), { item, checkedItems, selectedInfo });
};
getCheckedItems = (keys: any) => {
const checkedItems = [];
if ((keys || []).length === 0) {
return checkedItems;
}
const { data, valueField } = this.props;
data.forEach(item => {
const value = item[valueField];
if (keys.indexOf(value) !== -1) {
checkedItems.push(item);
}
});
return checkedItems;
};
getTitle(value: Array<string>): Array<string> {
const { id2ExtendInfo } = this.allExpandInfo;
return this.queryAllUtils.getTitle(value, id2ExtendInfo);
}
onExpand = (expandedKeys: Array<string>, event: { expanded: boolean, node: Object }) => {
const { expanded, node } = event;
const key = node.props.eventKey;
this.expandOrCollapse(key, expandedKeys, expanded);
};
expand(key: string) {
if (this.isExpand(key)) {
return;
}
this.state.expandedKeys.push(key + '');
this.expandOrCollapse(key, [...this.state.expandedKeys], true);
}
collapse(key: string) {
if (!this.isExpand(key)) {
return;
}
deleteValue(this.state.expandedKeys, key + '');
this.expandOrCollapse(key, [...this.state.expandedKeys], false);
}
isExpand(key: string): boolean {
return this.state.expandedKeys.indexOf(key + '') !== -1;
}
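// Usage sketch (hypothetical ref name, for illustration only): these three
// methods let a parent component drive expansion imperatively, e.g.
// if (!treeRef.isExpand('node-1')) treeRef.expand('node-1');
// treeRef.collapse('node-1');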
expandOrCollapse(key: string, expandedKeys: Array<string>, expanded: boolean) {
const { props, state } = this;
const utils = this.getUtils(props);
const { expand } = state;
const { id2ExtendInfo } = expand;
expanded ? utils.expandNode(key, id2ExtendInfo) : utils.colapseNode(key, id2ExtendInfo);
if (this.isQueryAll(props)) {
this.allExpandKeys = expandedKeys;
}
const newExpand = Object.assign({}, expand, { id2ExtendInfo });
this.setState({
expand: newExpand,
expandedKeys,
});
const { onExpand, data = [] } = props;
onExpand && onExpand(expandedKeys, data);
}
onScroller = (start: number, end: number) => {
if (!this.isLimitStart()) {
this.setState({ start });
}
const { onScroller } = this.props;
onScroller && onScroller(start, end);
};
isLimitStart() {
return 'start' in this.props;
}
isEmpty({ data }) {
return !data || data.length === 0;
}
isSingleSelect() {
return this.isSingleSelectForProps(this.props);
}
canSelect(key: string) {
return this.state.expand.id2ExtendInfo[key].can;
}
isSingleSelectForProps({ mutliple }) {
return mutliple === false;
}
componentDidCatch() {
this.setState({ hasError: true });
}
}
export default ThemeHoc(Tree, Widget.Tree, { hover: true });
| rops |
fss.py | """
This is an implementation of Function Secret Sharing
Useful papers are:
- Function Secret Sharing- Improvements and Extensions, Boyle 2017
Link: https://eprint.iacr.org/2018/707.pdf
- Secure Computation with Preprocessing via Function Secret Sharing, Boyle 2019
Link: https://eprint.iacr.org/2019/1095
Note that the protocols implemented here differ in several respects from those papers
"""
import hashlib
import torch as th
import syft as sy
λ = 110 # 6 # 110 or 63 # security parameter
n = 32 # 8 # 32 # bit precision
dtype = th.int32
no_wrap = {"no_wrap": True}
def initialize_crypto_plans(worker):
"""
This is called manually for the moment, to build the plan used to perform
Function Secret Sharing on a specific worker.
"""
eq_plan_1 = sy.Plan(
forward_func=lambda x, y: mask_builder(x, y, "eq"),
owner=worker,
tags=["#fss_eq_plan_1"],
is_built=True,
)
worker.register_obj(eq_plan_1)
eq_plan_2 = sy.Plan(
forward_func=eq_eval_plan, owner=worker, tags=["#fss_eq_plan_2"], is_built=True
)
worker.register_obj(eq_plan_2)
comp_plan_1 = sy.Plan(
forward_func=lambda x, y: mask_builder(x, y, "comp"),
owner=worker,
tags=["#fss_comp_plan_1"],
is_built=True,
)
worker.register_obj(comp_plan_1)
comp_plan_2 = sy.Plan(
forward_func=comp_eval_plan, owner=worker, tags=["#fss_comp_plan_2"], is_built=True
)
worker.register_obj(comp_plan_2)
xor_add_plan = sy.Plan(
forward_func=xor_add_convert_1, owner=worker, tags=["#xor_add_1"], is_built=True
)
worker.register_obj(xor_add_plan)
xor_add_plan = sy.Plan(
forward_func=xor_add_convert_2, owner=worker, tags=["#xor_add_2"], is_built=True
)
worker.register_obj(xor_add_plan)
def request_run_plan(worker, plan_tag, location, return_value, args=(), kwargs={}):
response_ids = (sy.ID_PROVIDER.pop(),)
args = (args, response_ids)
response = worker.send_command(
cmd_name="run",
target=plan_tag,
recipient=location,
return_ids=response_ids,
return_value=return_value,
kwargs_=kwargs,
args_=args,
)
return response
def fss_op(x1, x2, type_op="eq"):
"""
Define the workflow for a binary operation using Function Secret Sharing
Currently supported operand are = & <=, respectively corresponding to
type_op = 'eq' and 'comp'
Args:
x1: first AST
x2: second AST
type_op: type of operation to perform, should be 'eq' or 'comp'
Returns:
shares of the comparison
"""
me = sy.local_worker
locations = x1.locations
shares = []
for location in locations:
args = (x1.child[location.id], x2.child[location.id])
share = request_run_plan(
me, f"#fss_{type_op}_plan_1", location, return_value=True, args=args
)
shares.append(share)
mask_value = sum(shares) % 2 ** n
shares = []
for i, location in enumerate(locations):
args = (th.IntTensor([i]), mask_value)
share = request_run_plan(
me, f"#fss_{type_op}_plan_2", location, return_value=False, args=args
)
shares.append(share)
if type_op == "comp":
prev_shares = shares
shares = []
for prev_share, location in zip(prev_shares, locations):
share = request_run_plan(
me, "#xor_add_1", location, return_value=True, args=(prev_share,)
)
shares.append(share)
masked_value = shares[0] ^ shares[1] # TODO case >2 workers ?
shares = {}
for i, prev_share, location in zip(range(len(locations)), prev_shares, locations):
share = request_run_plan(
me,
"#xor_add_2",
location,
return_value=False,
args=(th.IntTensor([i]), masked_value),
)
shares[location.id] = share
else:
shares = {loc.id: share for loc, share in zip(locations, shares)}
response = sy.AdditiveSharingTensor(shares, **x1.get_class_attributes())
return response
# share level
def mask_builder(x1, x2, type_op):
x = x1 - x2
# Keep the primitive in store as we use it after
alpha, s_0, *CW = x1.owner.crypto_store.get_keys(
f"fss_{type_op}", n_instances=x1.numel(), remove=False
)
return x + alpha.reshape(x.shape)
# share level
def eq_eval_plan(b, x_masked):
alpha, s_0, *CW = x_masked.owner.crypto_store.get_keys(
type_op="fss_eq", n_instances=x_masked.numel(), remove=True
)
result_share = DPF.eval(b, x_masked, s_0, *CW)
return result_share
# share level
def comp_eval_plan(b, x_masked):
alpha, s_0, *CW = x_masked.owner.crypto_store.get_keys(
type_op="fss_comp", n_instances=x_masked.numel(), remove=True
)
result_share = DIF.eval(b, x_masked, s_0, *CW)
return result_share
def xor_add_convert_1(x):
xor_share, add_share = x.owner.crypto_store.get_keys(
type_op="xor_add_couple", n_instances=x.numel(), remove=False
)
return x ^ xor_share.reshape(x.shape)
def xor_add_convert_2(b, x):
xor_share, add_share = x.owner.crypto_store.get_keys(
type_op="xor_add_couple", n_instances=x.numel(), remove=True
)
return add_share.reshape(x.shape) * (1 - 2 * x) + x * b
def eq(x1, x2):
return fss_op(x1, x2, "eq")
def le(x1, x2):
return fss_op(x1, x2, "comp")
class DPF:
"""Distributed Point Function - used for equality"""
def __init__(self):
pass
@staticmethod
def keygen(n_values=1):
beta = th.tensor([1], dtype=dtype)
alpha = th.randint(0, 2 ** n, (n_values,))
α = bit_decomposition(alpha)
s, t, CW = (
Array(n + 1, 2, λ, n_values),
Array(n + 1, 2, n_values),
Array(n, 2 * (λ + 1), n_values),
)
s[0] = randbit(size=(2, λ, n_values))
t[0] = th.tensor([[0, 1]] * n_values, dtype=th.uint8).t()
for i in range(0, n):
g0 = G(s[i, 0])
g1 = G(s[i, 1])
# Re-use useless randomness
sL_0, _, sR_0, _ = split(g0, [λ, 1, λ, 1])
sL_1, _, sR_1, _ = split(g1, [λ, 1, λ, 1])
s_rand = (sL_0 ^ sL_1) * α[i] + (sR_0 ^ sR_1) * (1 - α[i])
cw_i = TruthTableDPF(s_rand, α[i])
CW[i] = cw_i ^ g0 ^ g1
for b in (0, 1):
τ = [g0, g1][b] ^ (t[i, b] * CW[i])
τ = τ.reshape(2, λ + 1, n_values)
# filtered_τ = τ[α[i]] OLD
α_i = α[i].unsqueeze(0).expand(λ + 1, n_values).unsqueeze(0).long()
filtered_τ = th.gather(τ, 0, α_i).squeeze(0)
s[i + 1, b], t[i + 1, b] = split(filtered_τ, [λ, 1])
CW_n = (-1) ** t[n, 1].to(dtype) * (beta - Convert(s[n, 0]) + Convert(s[n, 1]))
return (alpha,) + s[0].unbind() + (CW, CW_n)
@staticmethod
def eval(b, x, *k_b):
original_shape = x.shape
x = x.reshape(-1)
n_values = x.shape[0]
x = bit_decomposition(x)
s, t = Array(n + 1, λ, n_values), Array(n + 1, 1, n_values)
s[0] = k_b[0]
# here k[1:] is (CW, CW_n)
CW = k_b[1].unbind() + (k_b[2],)
t[0] = b
for i in range(0, n):
τ = G(s[i]) ^ (t[i] * CW[i])
τ = τ.reshape(2, λ + 1, n_values)
x_i = x[i].unsqueeze(0).expand(λ + 1, n_values).unsqueeze(0).long()
filtered_τ = th.gather(τ, 0, x_i).squeeze(0)
s[i + 1], t[i + 1] = split(filtered_τ, [λ, 1])
flat_result = (-1) ** b * (Convert(s[n]) + t[n].squeeze() * CW[n])
return flat_result.reshape(original_shape)
class DIF:
"""Distributed Interval Function - used | )
gen_list = []
for seed_bit in seed_t:
enc_str = str(seed_bit).encode()
h = hashlib.sha3_256(enc_str)
r = h.digest()
binary_str = bin(int.from_bytes(r, byteorder="big"))[2 : 2 + (2 * (λ + 1))]
gen_list.append(list(map(int, binary_str)))
return th.tensor(gen_list, dtype=th.uint8).t()
def H(seed):
assert seed.shape[0] == λ
seed_t = seed.t().tolist()
gen_list = []
for seed_bit in seed_t:
enc_str = str(seed_bit).encode()
h = hashlib.sha3_256(enc_str)
r = h.digest()
binary_str = bin(int.from_bytes(r, byteorder="big"))[2 : 2 + 2 + (2 * (λ + 1))]
gen_list.append(list(map(int, binary_str)))
return th.tensor(gen_list, dtype=th.uint8).t()
def Convert(bits):
bit_pow_lambda = th.flip(2 ** th.arange(λ), (0,)).unsqueeze(-1).to(th.long)
return (bits.to(th.long) * bit_pow_lambda).sum(dim=0).to(dtype)
def Array(*shape):
return th.empty(shape, dtype=th.uint8)
bit_pow_n = th.flip(2 ** th.arange(n), (0,))
def bit_decomposition(x):
x = x.unsqueeze(-1)
z = bit_pow_n & x
z = z.t()
return (z > 0).to(th.uint8)
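# Example (illustrative): with n == 32, bit_decomposition(th.tensor([5]))
# returns a (32, 1) uint8 tensor, MSB first, whose last three rows are
# 1, 0, 1 - the binary ...000101 of 5.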
def randbit(size):
return th.randint(2, size=size)
def concat(*args, **kwargs):
return th.cat(args, **kwargs)
def split(x, idx):
return th.split(x, idx)
def TruthTableDPF(s, α_i):
one = th.ones((1, s.shape[1])).to(th.uint8)
s_one = concat(s, one)
Table = th.zeros((2, λ + 1, len(α_i)), dtype=th.uint8)
for j, el in enumerate(α_i):
Table[el.item(), :, j] = s_one[:, j]
return Table.reshape(-1, Table.shape[2])
def TruthTableDIF(s, α_i):
leafTable = th.zeros((2, 1, len(α_i)), dtype=th.uint8)
# TODO optimize: just put alpha on first line
leaf_value = α_i
for j, el in enumerate(α_i):
leafTable[(1 - el).item(), 0, j] = leaf_value[j]
one = th.ones((1, s.shape[1])).to(th.uint8)
s_one = concat(s, one)
nextTable = th.zeros((2, λ + 1, len(α_i)), dtype=th.uint8)
for j, el in enumerate(α_i):
nextTable[el.item(), :, j] = s_one[:, j]
Table = concat(leafTable, nextTable, axis=1)
Table = Table.reshape(-1, Table.shape[2])
return Table
| for comparison <="""
def __init__(self):
pass
@staticmethod
def keygen(n_values=1):
alpha = th.randint(0, 2 ** n, (n_values,))
α = bit_decomposition(alpha)
s, t, CW = (
Array(n + 1, 2, λ, n_values),
Array(n + 1, 2, n_values),
Array(n, 2 + 2 * (λ + 1), n_values),
)
s[0] = randbit(size=(2, λ, n_values))
t[0] = th.tensor([[0, 1]] * n_values, dtype=th.uint8).t()
for i in range(0, n):
h0 = H(s[i, 0])
h1 = H(s[i, 1])
# Re-use useless randomness
_, _, sL_0, _, sR_0, _ = split(h0, [1, 1, λ, 1, λ, 1])
_, _, sL_1, _, sR_1, _ = split(h1, [1, 1, λ, 1, λ, 1])
s_rand = (sL_0 ^ sL_1) * α[i] + (sR_0 ^ sR_1) * (1 - α[i])
cw_i = TruthTableDIF(s_rand, α[i])
CW[i] = cw_i ^ h0 ^ h1
for b in (0, 1):
τ = [h0, h1][b] ^ (t[i, b] * CW[i])
τ = τ.reshape(2, λ + 2, n_values)
# filtered_τ = τ[α[i]] OLD
α_i = α[i].unsqueeze(0).expand(λ + 2, n_values).unsqueeze(0).long()
filtered_τ = th.gather(τ, 0, α_i).squeeze(0)
τ_leaf, s[i + 1, b], t[i + 1, b] = split(filtered_τ, [1, λ, 1])
return (alpha,) + s[0].unbind() + (CW,)
@staticmethod
def eval(b, x, *k_b):
original_shape = x.shape
x = x.reshape(-1)
n_values = x.shape[0]
x = bit_decomposition(x)
FnOutput = Array(n + 1, n_values)
s, t = Array(n + 1, λ, n_values), Array(n + 1, 1, n_values)
s[0] = k_b[0]
CW = k_b[1].unbind()
t[0] = b
for i in range(0, n):
τ = H(s[i]) ^ (t[i] * CW[i])
τ = τ.reshape(2, λ + 2, n_values)
x_i = x[i].unsqueeze(0).expand(λ + 2, n_values).unsqueeze(0).long()
filtered_τ = th.gather(τ, 0, x_i).squeeze(0)
τ_leaf, s[i + 1], t[i + 1] = split(filtered_τ, [1, λ, 1])
FnOutput[i] = τ_leaf
# Last round: the remaining τ output is also a leaf:
FnOutput[n] = t[n]
flat_result = FnOutput.sum(axis=0) % 2
return flat_result.reshape(original_shape)
# PRG
def G(seed):
assert seed.shape[0] == λ
seed_t = seed.t().tolist( |
DomainStore.ts | import { NewsStore, ContentStore, UserStore, AppStore } from './';
import { TransportLayer } from './../models';
export default class | {
appStore: AppStore;
newsStore: NewsStore;
contentStore: ContentStore;
userStore: UserStore;
transportLayer: TransportLayer;
// Configuration
host: string = 'http://localhost/';
constructor() {
this.transportLayer = new TransportLayer(this.host);
this.appStore = new AppStore(this);
this.newsStore = new NewsStore(this);
this.contentStore = new ContentStore(this);
this.userStore = new UserStore(this);
}
}
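// Usage sketch (illustrative): a single root store wires every sub-store to
// one shared TransportLayer, so consumers only construct the root:
// const root = new DomainStore();
// root.newsStore; // shares root.transportLayer with the other stores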
| DomainStore |
targets.py | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.jvm.rules.targets import COMMON_JVM_FIELDS
from pants.engine.target import Sources, StringField, Target
class JaxbJavaPackage(StringField):
"""Java package (com.company.package) in which to generate the output Java files.
If unspecified, Pants guesses it from the file path leading to the schema (xsd) file. This guess
is accurate only if the .xsd file is in a path like `.../com/company/package/schema.xsd`. Pants |
alias = "package"
class JaxbLanguage(StringField):
"""The language to use, which currently can only be `java`."""
alias = "language"
valid_choices = ("java",)
default = "java"
value: str
class JaxbLibrary(Target):
"""A Java library generated from JAXB xsd files."""
alias = "jaxb_library"
core_fields = (*COMMON_JVM_FIELDS, Sources, JaxbJavaPackage, JaxbLanguage)
v1_only = True | looks for packages that start with 'com', 'org', or 'net'.
""" |
l2_normalization.py | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.common.partial_infer.elemental import copy_shape_infer
| node_attrs = {
'op': 'Normalize',
'type': 'Normalize',
'eps': eps,
'across_spatial': 0,
'channel_shared': 0,
'infer': copy_shape_infer
}
return node_attrs | def l2_normalization_ext(attrs):
eps = attrs.float('eps', 1e-10)
|
TestExpressionParser.py | import TestConstants
from generator.ExpressionParser import ExpressionParser
import unittest
class TestExpressionParser(unittest.TestCase):
# Test to verify the minute functionality & */multiple expression check.
def test_valid_minute_parsing(self):
expressionParser = ExpressionParser(TestConstants.Valid_cron_expression)
self.assertListEqual(expressionParser._parse_minutes(TestConstants.Minutes),[0,20,40])
# Test to verify the invalid minute functionality & */multiple expression check.
def test_invalid_minute_parsing(self):
expressionParser = ExpressionParser(TestConstants.Valid_cron_expression)
self.assertNotEqual(expressionParser._parse_minutes(TestConstants.Minutes),[1,20,40])
# Test to verify the hour functionality & [a-b] (hyphen) expression check.
def test_valid_hour_parsing(self):
expressionParser = ExpressionParser(TestConstants.Valid_cron_expression)
self.assertListEqual(expressionParser._parse_hours(TestConstants.Hours),[2,3,4])
# Test to verify the month parsing functionality & comma-separated expression check.
def test_valid_day_in_month_parsing(self):
expressionParser = ExpressionParser(TestConstants.Valid_cron_expression)
self.assertListEqual(expressionParser._parse_month(TestConstants.Days_in_month),[6,8,9])
# Test to verify the week functionality & * expression check.
def test_valid_day_in_week_parsing(self):
expressionParser = ExpressionParser(TestConstants.Valid_cron_expression)
self.assertListEqual(expressionParser._parse_day_of_week(TestConstants.Days_in_week),[1,2,3,4,5,6,7])
# Test to verify the command functionality check.
def test_valid_command(self): | unittest.main() | expressionParser = ExpressionParser(TestConstants.Valid_cron_expression)
self.assertListEqual(expressionParser._parse_command(TestConstants.Command),['/usr/bin/find'])
if __name__ == '__main__': |
tensor.go | //go:generate cgotorch/build.sh
package gotorch
// #cgo CFLAGS: -I ${SRCDIR}
// #cgo LDFLAGS: -L ${SRCDIR}/cgotorch -Wl,-rpath ${SRCDIR}/cgotorch -lcgotorch
// #cgo LDFLAGS: -L ${SRCDIR}/cgotorch/libtorch/lib -Wl,-rpath ${SRCDIR}/cgotorch/libtorch/lib -lc10 -ltorch -ltorch_cpu
// #include "cgotorch/cgotorch.h"
import "C"
import (
"log"
"unsafe"
)
// Tensor wraps a pointer to C.Tensor
type Tensor struct {
T *unsafe.Pointer
}
// MustNil asserts error to be nil
func MustNil(err unsafe.Pointer) {
if err != nil {
msg := C.GoString((*C.char)(err))
C.FreeString((*C.char)(err))
panic(msg)
}
}
// Detach tensor.detach
func (a *Tensor) Detach() Tensor {
var t C.Tensor
MustNil(unsafe.Pointer(C.Tensor_Detach(C.Tensor(*a.T), &t)))
SetTensorFinalizer((*unsafe.Pointer)(&t))
return Tensor{(*unsafe.Pointer)(&t)}
} | // String returns the Tensor as a string
func (a Tensor) String() string {
s := C.Tensor_String(C.Tensor(*a.T))
r := C.GoString(s)
C.FreeString(s)
return r
}
// Print the tensor
func (a Tensor) Print() {
C.Tensor_Print(C.Tensor(*a.T))
}
// Save the tensor to a file
func (a Tensor) Save(path string) {
C.Tensor_Save(C.Tensor(*a.T), C.CString(path))
}
// Load tensor from a file
func Load(path string) Tensor {
var t C.Tensor
MustNil(unsafe.Pointer(C.Tensor_Load(C.CString(path), &t)))
SetTensorFinalizer((*unsafe.Pointer)(&t))
return Tensor{(*unsafe.Pointer)(&t)}
}
// Dim returns dim
func (a Tensor) Dim() int64 {
var dim int64
MustNil(unsafe.Pointer(C.Tensor_Dim(C.Tensor(*a.T), (*C.int64_t)(&dim))))
return dim
}
// Shape returns shape
func (a Tensor) Shape() []int64 {
shape := make([]int64, a.Dim())
if len(shape) == 0 {
return shape
}
MustNil(unsafe.Pointer(C.Tensor_Shape(C.Tensor(*a.T), (*C.int64_t)(unsafe.Pointer(&shape[0])))))
return shape
}
// Dtype returns data type
func (a Tensor) Dtype() int8 {
var t int8
MustNil(unsafe.Pointer(C.Tensor_Dtype(C.Tensor(*a.T), (*C.int8_t)(unsafe.Pointer(&t)))))
return t
}
// Backward compute the gradient of current tensor
func (a Tensor) Backward() {
C.Tensor_Backward(C.Tensor(*a.T))
}
// Grad returns a reference of the gradient
func (a Tensor) Grad() Tensor {
t := C.Tensor_Grad(C.Tensor(*a.T))
SetTensorFinalizer((*unsafe.Pointer)(&t))
return Tensor{(*unsafe.Pointer)(&t)}
}
// To returns a Tensor on the specified device with the same content as the a.
// If the specified device doesn't exist, To panics.
func (a Tensor) To(device Device, dtype ...int8) Tensor {
var t C.Tensor
var d int8
if len(dtype) == 0 {
d = a.Dtype()
} else {
d = dtype[0]
}
MustNil(unsafe.Pointer(C.Tensor_To(C.Tensor(*a.T), device.T, C.int8_t(d), &t)))
SetTensorFinalizer((*unsafe.Pointer)(&t))
return Tensor{(*unsafe.Pointer)(&t)}
}
// CUDA returns a Tensor on CUDA device
func (a Tensor) CUDA(device Device, nonBlocking bool) Tensor {
var t C.Tensor
n := int8(0)
if nonBlocking {
n = 1
}
MustNil(unsafe.Pointer(C.Tensor_CUDA(C.Tensor(*a.T), device.T, C.int8_t(n), &t)))
SetTensorFinalizer((*unsafe.Pointer)(&t))
return Tensor{(*unsafe.Pointer)(&t)}
}
// CastTo casts the tensor to the given dtype
func (a Tensor) CastTo(dtype int8) Tensor {
var t C.Tensor
MustNil(unsafe.Pointer(C.Tensor_CastTo(C.Tensor(*a.T), C.int8_t(dtype), &t)))
SetTensorFinalizer((*unsafe.Pointer)(&t))
return Tensor{(*unsafe.Pointer)(&t)}
}
// CopyTo copies the tensor to the given device
func (a Tensor) CopyTo(device Device) Tensor {
var t C.Tensor
MustNil(unsafe.Pointer(C.Tensor_CopyTo(C.Tensor(*a.T), device.T, &t)))
SetTensorFinalizer((*unsafe.Pointer)(&t))
return Tensor{(*unsafe.Pointer)(&t)}
}
// PinMemory returns a tensor in pinned memory. Pinned memory requires CUDA.
func (a Tensor) PinMemory() Tensor {
if !IsCUDAAvailable() {
return a
}
var t C.Tensor
MustNil(unsafe.Pointer(C.Tensor_PinMemory(C.Tensor(*a.T), &t)))
SetTensorFinalizer((*unsafe.Pointer)(&t))
return Tensor{(*unsafe.Pointer)(&t)}
}
// SetData sets a's underlying data to the data held by b
func (a Tensor) SetData(b Tensor) {
MustNil(unsafe.Pointer(C.Tensor_SetData(C.Tensor(*a.T), C.Tensor(*b.T))))
}
// To returns a Tensor on the specified device with the same content as the a.
// If the specified device doesn't exist, To panics.
func To(a Tensor, device Device, dtype int8) Tensor {
return a.To(device, dtype)
}
// FromBlob returns a deep copy Tensor with the given data memory
func FromBlob(data unsafe.Pointer, dtype int8, sizes []int64) Tensor {
var t C.Tensor
MustNil(unsafe.Pointer(C.Tensor_FromBlob(
data,
C.int8_t(dtype),
(*C.int64_t)(unsafe.Pointer(&sizes[0])),
C.int64_t(len(sizes)),
&t)))
SetTensorFinalizer((*unsafe.Pointer)(&t))
return Tensor{(*unsafe.Pointer)(&t)}
}
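// Sketch (added for illustration; the dtype constant Float is an assumption,
// use this package's real dtype codes): building a 2x3 tensor from a Go
// slice. Because FromBlob deep-copies, the slice may be reused or collected
// afterwards:
//
//	data := []float32{1, 2, 3, 4, 5, 6}
//	t := FromBlob(unsafe.Pointer(&data[0]), Float, []int64{2, 3})
//	fmt.Println(t.Shape()) // [2 3]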
// Index calls Tensor::index to return a single-element tensor of the element at
// the given index.
func (a Tensor) Index(index ...int64) Tensor {
if int64(len(index)) != a.Dim() {
log.Panicf("Index %v has length that differs from the tenosr dim %d", index, a.Dim())
}
var t C.Tensor
MustNil(unsafe.Pointer(C.Tensor_Index(
C.Tensor(*a.T),
(*C.int64_t)(unsafe.Pointer(&index[0])),
C.int64_t(len(index)), &t)))
SetTensorFinalizer((*unsafe.Pointer)(&t))
return Tensor{(*unsafe.Pointer)(&t)}
}
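// End-to-end sketch (added; not part of the original file) tying these
// bindings together -- the file paths are purely illustrative:
//
//	t := Load("weights.pt")
//	fmt.Println(t.Dim(), t.Shape(), t.Dtype())
//	t.Print()
//	t.Save("weights-copy.pt")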
walk.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/types"
"cmd/internal/objabi"
"cmd/internal/sys"
"encoding/binary"
"fmt"
"strings"
)
// The constant is known to the runtime.
const tmpstringbufsize = 32
func walk(fn *Node) {
Curfn = fn
if Debug['W'] != 0 {
s := fmt.Sprintf("\nbefore walk %v", Curfn.Func.Nname.Sym)
dumplist(s, Curfn.Nbody)
}
lno := lineno
// Final typecheck for any unused variables.
for i, ln := range fn.Func.Dcl {
if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) {
ln = typecheck(ln, Erv|Easgn)
fn.Func.Dcl[i] = ln
}
}
// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
for _, ln := range fn.Func.Dcl {
if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Name.Used() {
ln.Name.Defn.Left.Name.SetUsed(true)
}
}
for _, ln := range fn.Func.Dcl {
if ln.Op != ONAME || (ln.Class() != PAUTO && ln.Class() != PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() {
continue
}
if defn := ln.Name.Defn; defn != nil && defn.Op == OTYPESW {
if defn.Left.Name.Used() {
continue
}
yyerrorl(defn.Left.Pos, "%v declared and not used", ln.Sym)
defn.Left.Name.SetUsed(true) // suppress repeats
} else {
yyerrorl(ln.Pos, "%v declared and not used", ln.Sym)
}
}
lineno = lno
if nerrors != 0 {
return
}
walkstmtlist(Curfn.Nbody.Slice())
if Debug['W'] != 0 {
s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
dumplist(s, Curfn.Nbody)
}
zeroResults()
heapmoves()
if Debug['W'] != 0 && Curfn.Func.Enter.Len() > 0 {
s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
dumplist(s, Curfn.Func.Enter)
}
}
func walkstmtlist(s []*Node) {
for i := range s {
s[i] = walkstmt(s[i])
}
}
func samelist(a, b []*Node) bool {
if len(a) != len(b) {
return false
}
for i, n := range a {
if n != b[i] {
return false
}
}
return true
}
func paramoutheap(fn *Node) bool {
for _, ln := range fn.Func.Dcl {
switch ln.Class() {
case PPARAMOUT:
if ln.isParamStackCopy() || ln.Addrtaken() {
return true
}
case PAUTO:
// stop early - parameters are over
return false
}
}
return false
}
// adds "adjust" to all the argument locations for the call n.
// n must be a defer or go node that has already been walked.
func adjustargs(n *Node, adjust int) {
callfunc := n.Left
for _, arg := range callfunc.List.Slice() {
if arg.Op != OAS {
Fatalf("call arg not assignment")
}
lhs := arg.Left
if lhs.Op == ONAME {
// This is a temporary introduced by reorder1.
// The real store to the stack appears later in the arg list.
continue
}
if lhs.Op != OINDREGSP {
Fatalf("call argument store does not use OINDREGSP")
}
// can't really check this in machine-indep code.
//if(lhs->val.u.reg != D_SP)
// Fatalf("call arg assign not indreg(SP)")
lhs.Xoffset += int64(adjust)
}
}
// The result of walkstmt MUST be assigned back to n, e.g.
// n.Left = walkstmt(n.Left)
func walkstmt(n *Node) *Node {
if n == nil {
return n
}
setlineno(n)
walkstmtlist(n.Ninit.Slice())
switch n.Op {
default:
if n.Op == ONAME {
yyerror("%v is not a top level statement", n.Sym)
} else {
yyerror("%v is not a top level statement", n.Op)
}
Dump("nottop", n)
case OAS,
OASOP,
OAS2,
OAS2DOTTYPE,
OAS2RECV,
OAS2FUNC,
OAS2MAPR,
OCLOSE,
OCOPY,
OCALLMETH,
OCALLINTER,
OCALL,
OCALLFUNC,
ODELETE,
OSEND,
OPRINT,
OPRINTN,
OPANIC,
OEMPTY,
ORECOVER,
OGETG:
if n.Typecheck() == 0 {
Fatalf("missing typecheck: %+v", n)
}
wascopy := n.Op == OCOPY
init := n.Ninit
n.Ninit.Set(nil)
n = walkexpr(n, &init)
n = addinit(n, init.Slice())
if wascopy && n.Op == OCONVNOP {
n.Op = OEMPTY // don't leave plain values as statements.
}
// special case for a receive where we throw away
// the value received.
case ORECV:
if n.Typecheck() == 0 {
Fatalf("missing typecheck: %+v", n)
}
init := n.Ninit
n.Ninit.Set(nil)
n.Left = walkexpr(n.Left, &init)
n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, n.Left, nodnil())
n = walkexpr(n, &init)
n = addinit(n, init.Slice())
case OBREAK,
OCONTINUE,
OFALL,
OGOTO,
OLABEL,
ODCLCONST,
ODCLTYPE,
OCHECKNIL,
OVARKILL,
OVARLIVE:
break
case ODCL:
v := n.Left
if v.Class() == PAUTOHEAP {
if compiling_runtime {
yyerror("%v escapes to heap, not allowed in runtime.", v)
}
if prealloc[v] == nil {
prealloc[v] = callnew(v.Type)
}
nn := nod(OAS, v.Name.Param.Heapaddr, prealloc[v])
nn.SetColas(true)
nn = typecheck(nn, Etop)
return walkstmt(nn)
}
case OBLOCK:
walkstmtlist(n.List.Slice())
case OXCASE:
yyerror("case statement out of place")
n.Op = OCASE
fallthrough
case OCASE:
n.Right = walkstmt(n.Right)
case ODEFER:
Curfn.Func.SetHasDefer(true)
fallthrough
case OPROC:
switch n.Left.Op {
case OPRINT, OPRINTN:
n.Left = wrapCall(n.Left, &n.Ninit)
case ODELETE:
if mapfast(n.Left.List.First().Type) == mapslow {
n.Left = wrapCall(n.Left, &n.Ninit)
} else {
n.Left = walkexpr(n.Left, &n.Ninit)
}
case OCOPY:
n.Left = copyany(n.Left, &n.Ninit, true)
default:
n.Left = walkexpr(n.Left, &n.Ninit)
}
// make room for size & fn arguments.
adjustargs(n, 2*Widthptr)
case OFOR, OFORUNTIL:
if n.Left != nil {
walkstmtlist(n.Left.Ninit.Slice())
init := n.Left.Ninit
n.Left.Ninit.Set(nil)
n.Left = walkexpr(n.Left, &init)
n.Left = addinit(n.Left, init.Slice())
}
n.Right = walkstmt(n.Right)
if n.Op == OFORUNTIL {
walkstmtlist(n.List.Slice())
}
walkstmtlist(n.Nbody.Slice())
case OIF:
n.Left = walkexpr(n.Left, &n.Ninit)
walkstmtlist(n.Nbody.Slice())
walkstmtlist(n.Rlist.Slice())
case ORETURN:
if n.List.Len() == 0 {
break
}
if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) {
// assign to the function out parameters,
// so that reorder3 can fix up conflicts
var rl []*Node
for _, ln := range Curfn.Func.Dcl {
cl := ln.Class()
if cl == PAUTO || cl == PAUTOHEAP {
break
}
if cl == PPARAMOUT {
if ln.isParamStackCopy() {
ln = walkexpr(typecheck(nod(OIND, ln.Name.Param.Heapaddr, nil), Erv), nil)
}
rl = append(rl, ln)
}
}
if got, want := n.List.Len(), len(rl); got != want {
// order should have rewritten multi-value function calls
// with explicit OAS2FUNC nodes.
Fatalf("expected %v return arguments, have %v", want, got)
}
if samelist(rl, n.List.Slice()) {
// special return in disguise
// TODO(josharian, 1.12): is "special return" still relevant?
// Tests still pass w/o this. See comments on https://go-review.googlesource.com/c/go/+/118318
walkexprlist(n.List.Slice(), &n.Ninit)
n.List.Set(nil)
break
}
// move function calls out, to make reorder3's job easier.
walkexprlistsafe(n.List.Slice(), &n.Ninit)
ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit)
n.List.Set(reorder3(ll))
break
}
walkexprlist(n.List.Slice(), &n.Ninit)
ll := ascompatte(nil, false, Curfn.Type.Results(), n.List.Slice(), 1, &n.Ninit)
n.List.Set(ll)
case ORETJMP:
break
case OSELECT:
walkselect(n)
case OSWITCH:
walkswitch(n)
case ORANGE:
n = walkrange(n)
}
if n.Op == ONAME {
Fatalf("walkstmt ended up with name: %+v", n)
}
return n
}
func isSmallMakeSlice(n *Node) bool {
if n.Op != OMAKESLICE {
return false
}
l := n.Left
r := n.Right
if r == nil {
r = l
}
t := n.Type
return smallintconst(l) && smallintconst(r) && (t.Elem().Width == 0 || r.Int64() < (1<<16)/t.Elem().Width)
}
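// Illustration (added comment): under these rules, make([]byte, 10, 64) and
// make([]int64, 0, 100) count as small (constant bounds, total size under
// 64KB), while make([]byte, n) with non-constant n or make([]int32, 1<<20)
// does not.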
// walk the whole tree of the body of an
// expression or simple statement.
// the types of expressions are calculated.
// compile-time constants are evaluated.
// complex side effects like statements are appended to init
func walkexprlist(s []*Node, init *Nodes) {
for i := range s {
s[i] = walkexpr(s[i], init)
}
}
func walkexprlistsafe(s []*Node, init *Nodes) {
for i, n := range s {
s[i] = safeexpr(n, init)
s[i] = walkexpr(s[i], init)
}
}
func walkexprlistcheap(s []*Node, init *Nodes) {
for i, n := range s {
s[i] = cheapexpr(n, init)
s[i] = walkexpr(s[i], init)
}
}
// convFuncName builds the runtime function name for interface conversion.
// It also reports whether the function expects the data by address.
// Not all names are possible. For example, we never generate convE2E or convE2I.
func convFuncName(from, to *types.Type) (fnname string, needsaddr bool) {
tkind := to.Tie()
switch from.Tie() {
case 'I':
switch tkind {
case 'I':
return "convI2I", false
}
case 'T':
switch tkind {
case 'E':
switch {
case from.Size() == 2 && from.Align == 2:
return "convT2E16", false
case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from):
return "convT2E32", false
case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from):
return "convT2E64", false
case from.IsString():
return "convT2Estring", true
case from.IsSlice():
return "convT2Eslice", true
case !types.Haspointers(from):
return "convT2Enoptr", true
}
return "convT2E", true
case 'I':
switch {
case from.Size() == 2 && from.Align == 2:
return "convT2I16", false
case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from):
return "convT2I32", false
case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from):
return "convT2I64", false
case from.IsString():
return "convT2Istring", true
case from.IsSlice():
return "convT2Islice", true
case !types.Haspointers(from):
return "convT2Inoptr", true
}
return "convT2I", true
}
}
Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
panic("unreachable")
}
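// Worked examples (added comment, not in the original source):
//
//	var e1 interface{} = uint16(7) // 'T', size 2, align 2 -> convT2E16 (by value)
//	var e2 interface{} = "hi"      // string               -> convT2Estring (by address)
//	var r io.Reader = rw           // rw an io.ReadWriter: 'I' -> 'I' -> convI2I
//
// Pointer-shaped values and interface-to-empty-interface conversions are
// inlined by the OCONVIFACE case of walkexpr instead, which is why
// convE2E and convE2I are never generated.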
// The result of walkexpr MUST be assigned back to n, e.g.
// n.Left = walkexpr(n.Left, init)
func walkexpr(n *Node, init *Nodes) *Node {
if n == nil {
return n
}
// Eagerly checkwidth all expressions for the back end.
if n.Type != nil && !n.Type.WidthCalculated() {
switch n.Type.Etype {
case TBLANK, TNIL, TIDEAL:
default:
checkwidth(n.Type)
}
}
if init == &n.Ninit {
// not okay to use n->ninit when walking n,
// because we might replace n with some other node
// and would lose the init list.
Fatalf("walkexpr init == &n->ninit")
}
if n.Ninit.Len() != 0 {
walkstmtlist(n.Ninit.Slice())
init.AppendNodes(&n.Ninit)
}
lno := setlineno(n)
if Debug['w'] > 1 {
Dump("before walk expr", n)
}
if n.Typecheck() != 1 {
Fatalf("missed typecheck: %+v", n)
}
if n.Type.IsUntyped() {
Fatalf("expression has untyped type: %+v", n)
}
if n.Op == ONAME && n.Class() == PAUTOHEAP {
nn := nod(OIND, n.Name.Param.Heapaddr, nil)
nn = typecheck(nn, Erv)
nn = walkexpr(nn, init)
nn.Left.SetNonNil(true)
return nn
}
opswitch:
switch n.Op {
default:
Dump("walk", n)
Fatalf("walkexpr: switch 1 unknown op %+S", n)
case ONONAME, OINDREGSP, OEMPTY, OGETG:
case OTYPE, ONAME, OLITERAL:
// TODO(mdempsky): Just return n; see discussion on CL 38655.
// Perhaps refactor to use Node.mayBeShared for these instead.
// If these return early, make sure to still call
// stringsym for constant strings.
case ONOT, OMINUS, OPLUS, OCOM, OREAL, OIMAG, ODOTMETH, ODOTINTER,
OIND, OSPTR, OITAB, OIDATA, OADDR:
n.Left = walkexpr(n.Left, init)
case OEFACE, OAND, OSUB, OMUL, OLT, OLE, OGE, OGT, OADD, OOR, OXOR:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
case ODOT:
usefield(n)
n.Left = walkexpr(n.Left, init)
case ODOTTYPE, ODOTTYPE2:
n.Left = walkexpr(n.Left, init)
// Set up interface type addresses for back end.
n.Right = typename(n.Type)
if n.Op == ODOTTYPE {
n.Right.Right = typename(n.Left.Type)
}
if !n.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() {
n.List.Set1(itabname(n.Type, n.Left.Type))
}
case ODOTPTR:
usefield(n)
if n.Op == ODOTPTR && n.Left.Type.Elem().Width == 0 {
// No actual copy will be generated, so emit an explicit nil check.
n.Left = cheapexpr(n.Left, init)
checknil(n.Left, init)
}
n.Left = walkexpr(n.Left, init)
case OLEN, OCAP:
if isRuneCount(n) {
// Replace len([]rune(string)) with runtime.countrunes(string).
n = mkcall("countrunes", n.Type, init, conv(n.Left.Left, types.Types[TSTRING]))
break
}
n.Left = walkexpr(n.Left, init)
// replace len(*[10]int) with 10.
// delayed until now to preserve side effects.
t := n.Left.Type
if t.IsPtr() {
t = t.Elem()
}
if t.IsArray() {
safeexpr(n.Left, init)
setintconst(n, t.NumElem())
n.SetTypecheck(1)
}
case OLSH, ORSH:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
t := n.Left.Type
n.SetBounded(bounded(n.Right, 8*t.Width))
if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
Warn("shift bounds check elided")
}
case OCOMPLEX:
// Use results from call expression as arguments for complex.
if n.Left == nil && n.Right == nil {
n.Left = n.List.First()
n.Right = n.List.Second()
}
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
case OEQ, ONE:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
// Disable safemode while compiling this code: the code we
// generate internally can refer to unsafe.Pointer.
// In this case it can happen if we need to generate an ==
// for a struct containing a reflect.Value, which itself has
// an unexported field of type unsafe.Pointer.
old_safemode := safemode
safemode = false
n = walkcompare(n, init)
safemode = old_safemode
case OANDAND, OOROR:
n.Left = walkexpr(n.Left, init)
// cannot put side effects from n.Right on init,
// because they cannot run before n.Left is checked.
// save elsewhere and store on the eventual n.Right.
var ll Nodes
n.Right = walkexpr(n.Right, &ll)
n.Right = addinit(n.Right, ll.Slice())
n = walkinrange(n, init)
case OPRINT, OPRINTN:
walkexprlist(n.List.Slice(), init)
n = walkprint(n, init)
case OPANIC:
n = mkcall("gopanic", nil, init, n.Left)
case ORECOVER:
n = mkcall("gorecover", n.Type, init, nod(OADDR, nodfp, nil))
case OCLOSUREVAR, OCFUNC:
n.SetAddable(true)
case OCALLINTER:
usemethod(n)
t := n.Left.Type
if n.List.Len() != 0 && n.List.First().Op == OAS {
break
}
n.Left = walkexpr(n.Left, init)
walkexprlist(n.List.Slice(), init)
ll := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init)
n.List.Set(reorder1(ll))
case OCALLFUNC:
if n.Left.Op == OCLOSURE {
// Transform direct call of a closure to call of a normal function.
// transformclosure already did all preparation work.
// Prepend captured variables to argument list.
n.List.Prepend(n.Left.Func.Enter.Slice()...)
n.Left.Func.Enter.Set(nil)
// Replace OCLOSURE with ONAME/PFUNC.
n.Left = n.Left.Func.Closure.Func.Nname
// Update type of OCALLFUNC node.
// Output arguments had not changed, but their offsets could.
if n.Left.Type.NumResults() == 1 {
n.Type = n.Left.Type.Results().Field(0).Type
} else {
n.Type = n.Left.Type.Results()
}
}
t := n.Left.Type
if n.List.Len() != 0 && n.List.First().Op == OAS {
break
}
n.Left = walkexpr(n.Left, init)
walkexprlist(n.List.Slice(), init)
ll := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init)
n.List.Set(reorder1(ll))
case OCALLMETH:
t := n.Left.Type
if n.List.Len() != 0 && n.List.First().Op == OAS {
break
}
n.Left = walkexpr(n.Left, init)
walkexprlist(n.List.Slice(), init)
ll := ascompatte(n, false, t.Recvs(), []*Node{n.Left.Left}, 0, init)
lr := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init)
ll = append(ll, lr...)
n.Left.Left = nil
updateHasCall(n.Left)
n.List.Set(reorder1(ll))
case OAS, OASOP:
init.AppendNodes(&n.Ninit)
// Recognize m[k] = append(m[k], ...) so we can reuse
// the mapassign call.
mapAppend := n.Left.Op == OINDEXMAP && n.Right.Op == OAPPEND
if mapAppend && !samesafeexpr(n.Left, n.Right.List.First()) {
Fatalf("not same expressions: %v != %v", n.Left, n.Right.List.First())
}
n.Left = walkexpr(n.Left, init)
n.Left = safeexpr(n.Left, init)
if mapAppend {
n.Right.List.SetFirst(n.Left)
}
if n.Op == OASOP {
// Rewrite x op= y into x = x op y.
n.Right = nod(n.SubOp(), n.Left, n.Right)
n.Right = typecheck(n.Right, Erv)
n.Op = OAS
n.ResetAux()
}
if oaslit(n, init) {
break
}
if n.Right == nil {
// TODO(austin): Check all "implicit zeroing"
break
}
if !instrumenting && isZero(n.Right) {
break
}
switch n.Right.Op {
default:
n.Right = walkexpr(n.Right, init)
case ORECV:
// x = <-c; n.Left is x, n.Right.Left is c.
// orderstmt made sure x is addressable.
n.Right.Left = walkexpr(n.Right.Left, init)
n1 := nod(OADDR, n.Left, nil)
r := n.Right.Left // the channel
n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, r, n1)
n = walkexpr(n, init)
break opswitch
case OAPPEND:
// x = append(...)
r := n.Right
if r.Type.Elem().NotInHeap() {
yyerror("%v is go:notinheap; heap allocation disallowed", r.Type.Elem())
}
switch {
case isAppendOfMake(r):
// x = append(y, make([]T, y)...)
r = extendslice(r, init)
case r.Isddd():
r = appendslice(r, init) // also works for append(slice, string).
default:
r = walkappend(r, init, n)
}
n.Right = r
if r.Op == OAPPEND {
// Left in place for back end.
// Do not add a new write barrier.
// Set up address of type for back end.
r.Left = typename(r.Type.Elem())
break opswitch
}
// Otherwise, lowered for race detector.
// Treat as ordinary assignment.
}
if n.Left != nil && n.Right != nil {
n = convas(n, init)
}
case OAS2:
init.AppendNodes(&n.Ninit)
walkexprlistsafe(n.List.Slice(), init)
walkexprlistsafe(n.Rlist.Slice(), init)
ll := ascompatee(OAS, n.List.Slice(), n.Rlist.Slice(), init)
ll = reorder3(ll)
n = liststmt(ll)
// a,b,... = fn()
case OAS2FUNC:
init.AppendNodes(&n.Ninit)
r := n.Rlist.First()
walkexprlistsafe(n.List.Slice(), init)
r = walkexpr(r, init)
if isIntrinsicCall(r) {
n.Rlist.Set1(r)
break
}
init.Append(r)
ll := ascompatet(n.List, r.Type)
n = liststmt(ll)
// x, y = <-c
// orderstmt made sure x is addressable.
case OAS2RECV:
init.AppendNodes(&n.Ninit)
r := n.Rlist.First()
walkexprlistsafe(n.List.Slice(), init)
r.Left = walkexpr(r.Left, init)
var n1 *Node
if n.List.First().isBlank() {
n1 = nodnil()
} else {
n1 = nod(OADDR, n.List.First(), nil)
}
fn := chanfn("chanrecv2", 2, r.Left.Type)
ok := n.List.Second()
call := mkcall1(fn, ok.Type, init, r.Left, n1)
n = nod(OAS, ok, call)
n = typecheck(n, Etop)
// a,b = m[i]
case OAS2MAPR:
init.AppendNodes(&n.Ninit)
r := n.Rlist.First()
walkexprlistsafe(n.List.Slice(), init)
r.Left = walkexpr(r.Left, init)
r.Right = walkexpr(r.Right, init)
t := r.Left.Type
fast := mapfast(t)
var key *Node
if fast != mapslow {
// fast versions take key by value
key = r.Right
} else {
// standard version takes key by reference
// orderexpr made sure key is addressable.
key = nod(OADDR, r.Right, nil)
}
// from:
// a,b = m[i]
// to:
// var,b = mapaccess2*(t, m, i)
// a = *var
a := n.List.First()
if w := t.Elem().Width; w <= 1024 { // 1024 must match runtime/map.go:maxZero
fn := mapfn(mapaccess2[fast], t)
r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key)
} else {
fn := mapfn("mapaccess2_fat", t)
z := zeroaddr(w)
r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z)
}
// mapaccess2* returns a typed bool, but due to spec changes,
// the boolean result of i.(T) is now untyped so we make it the
// same type as the variable on the lhs.
if ok := n.List.Second(); !ok.isBlank() && ok.Type.IsBoolean() {
r.Type.Field(1).Type = ok.Type
}
n.Rlist.Set1(r)
n.Op = OAS2FUNC
// don't generate a = *var if a is _
if !a.isBlank() {
var_ := temp(types.NewPtr(t.Elem()))
var_.SetTypecheck(1)
var_.SetNonNil(true) // mapaccess always returns a non-nil pointer
n.List.SetFirst(var_)
n = walkexpr(n, init)
init.Append(n)
n = nod(OAS, a, nod(OIND, var_, nil))
}
n = typecheck(n, Etop)
n = walkexpr(n, init)
case ODELETE:
init.AppendNodes(&n.Ninit)
map_ := n.List.First()
key := n.List.Second()
map_ = walkexpr(map_, init)
key = walkexpr(key, init)
t := map_.Type
fast := mapfast(t)
if fast == mapslow {
// orderstmt made sure key is addressable.
key = nod(OADDR, key, nil)
}
n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key)
case OAS2DOTTYPE:
walkexprlistsafe(n.List.Slice(), init)
n.Rlist.SetFirst(walkexpr(n.Rlist.First(), init))
case OCONVIFACE:
n.Left = walkexpr(n.Left, init)
// Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
if isdirectiface(n.Left.Type) {
var t *Node
if n.Type.IsEmptyInterface() {
t = typename(n.Left.Type)
} else {
t = itabname(n.Left.Type, n.Type)
}
l := nod(OEFACE, t, n.Left)
l.Type = n.Type
l.SetTypecheck(n.Typecheck())
n = l
break
}
if staticbytes == nil {
staticbytes = newname(Runtimepkg.Lookup("staticbytes"))
staticbytes.SetClass(PEXTERN)
staticbytes.Type = types.NewArray(types.Types[TUINT8], 256)
zerobase = newname(Runtimepkg.Lookup("zerobase"))
zerobase.SetClass(PEXTERN)
zerobase.Type = types.Types[TUINTPTR]
}
// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
// by using an existing addressable value identical to n.Left
// or creating one on the stack.
var value *Node
switch {
case n.Left.Type.Size() == 0:
// n.Left is zero-sized. Use zerobase.
cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246.
value = zerobase
case n.Left.Type.IsBoolean() || (n.Left.Type.Size() == 1 && n.Left.Type.IsInteger()):
// n.Left is a bool/byte. Use staticbytes[n.Left].
n.Left = cheapexpr(n.Left, init)
value = nod(OINDEX, staticbytes, byteindex(n.Left))
value.SetBounded(true)
case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly():
// n.Left is a readonly global; use it directly.
value = n.Left
case !n.Left.Type.IsInterface() && n.Esc == EscNone && n.Left.Type.Width <= 1024:
// n.Left does not escape. Use a stack temporary initialized to n.Left.
value = temp(n.Left.Type)
init.Append(typecheck(nod(OAS, value, n.Left), Etop))
}
if value != nil {
// Value is identical to n.Left.
// Construct the interface directly: {type/itab, &value}.
var t *Node
if n.Type.IsEmptyInterface() {
t = typename(n.Left.Type)
} else {
t = itabname(n.Left.Type, n.Type)
}
l := nod(OEFACE, t, typecheck(nod(OADDR, value, nil), Erv))
l.Type = n.Type
l.SetTypecheck(n.Typecheck())
n = l
break
}
// Implement interface to empty interface conversion.
// tmp = i.itab
// if tmp != nil {
// tmp = tmp.type
// }
// e = iface{tmp, i.data}
if n.Type.IsEmptyInterface() && n.Left.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() {
// Evaluate the input interface.
c := temp(n.Left.Type)
init.Append(nod(OAS, c, n.Left))
// Get the itab out of the interface.
tmp := temp(types.NewPtr(types.Types[TUINT8]))
init.Append(nod(OAS, tmp, typecheck(nod(OITAB, c, nil), Erv)))
// Get the type out of the itab.
nif := nod(OIF, typecheck(nod(ONE, tmp, nodnil()), Erv), nil)
nif.Nbody.Set1(nod(OAS, tmp, itabType(tmp)))
init.Append(nif)
// Build the result.
e := nod(OEFACE, tmp, ifaceData(c, types.NewPtr(types.Types[TUINT8])))
e.Type = n.Type // assign type manually, typecheck doesn't understand OEFACE.
e.SetTypecheck(1)
n = e
break
}
var ll []*Node
if n.Type.IsEmptyInterface() {
if !n.Left.Type.IsInterface() {
ll = append(ll, typename(n.Left.Type))
}
} else {
if n.Left.Type.IsInterface() {
ll = append(ll, typename(n.Type))
} else {
ll = append(ll, itabname(n.Left.Type, n.Type))
}
}
fnname, needsaddr := convFuncName(n.Left.Type, n.Type)
v := n.Left
if needsaddr {
// Types of large or unknown size are passed by reference.
// Orderexpr arranged for n.Left to be a temporary for all
// the conversions it could see. Comparison of an interface
// with a non-interface, especially in a switch on interface value
// with non-interface cases, is not visible to orderstmt, so we
// have to fall back on allocating a temp here.
if !islvalue(v) {
v = copyexpr(v, v.Type, init)
}
v = nod(OADDR, v, nil)
}
ll = append(ll, v)
dowidth(n.Left.Type)
fn := syslook(fnname)
fn = substArgTypes(fn, n.Left.Type, n.Type)
dowidth(fn.Type)
n = nod(OCALL, fn, nil)
n.List.Set(ll)
n = typecheck(n, Erv)
n = walkexpr(n, init)
case OCONV, OCONVNOP:
if thearch.SoftFloat {
// For the soft-float case, ssa.go handles these conversions.
n.Left = walkexpr(n.Left, init)
break
}
switch thearch.LinkArch.Family {
case sys.ARM, sys.MIPS:
if n.Left.Type.IsFloat() {
switch n.Type.Etype {
case TINT64:
n = mkcall("float64toint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
break opswitch
case TUINT64:
n = mkcall("float64touint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
break opswitch
}
}
if n.Type.IsFloat() {
switch n.Left.Type.Etype {
case TINT64:
n = conv(mkcall("int64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TINT64])), n.Type)
break opswitch
case TUINT64:
n = conv(mkcall("uint64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT64])), n.Type)
break opswitch
}
}
case sys.I386:
if n.Left.Type.IsFloat() {
switch n.Type.Etype {
case TINT64:
n = mkcall("float64toint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
break opswitch
case TUINT64:
n = mkcall("float64touint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
break opswitch
case TUINT32, TUINT, TUINTPTR:
n = mkcall("float64touint32", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
break opswitch
}
}
if n.Type.IsFloat() {
switch n.Left.Type.Etype {
case TINT64:
n = conv(mkcall("int64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TINT64])), n.Type)
break opswitch
case TUINT64:
n = conv(mkcall("uint64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT64])), n.Type)
break opswitch
case TUINT32, TUINT, TUINTPTR:
n = conv(mkcall("uint32tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT32])), n.Type)
break opswitch
}
}
}
n.Left = walkexpr(n.Left, init)
case OANDNOT:
n.Left = walkexpr(n.Left, init)
n.Op = OAND
n.Right = nod(OCOM, n.Right, nil)
n.Right = typecheck(n.Right, Erv)
n.Right = walkexpr(n.Right, init)
case ODIV, OMOD:
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
// rewrite complex div into function call.
et := n.Left.Type.Etype
if isComplex[et] && n.Op == ODIV {
t := n.Type
n = mkcall("complex128div", types.Types[TCOMPLEX128], init, conv(n.Left, types.Types[TCOMPLEX128]), conv(n.Right, types.Types[TCOMPLEX128]))
n = conv(n, t)
break
}
// Nothing to do for float divisions.
if isFloat[et] {
break
}
// rewrite 64-bit div and mod on 32-bit architectures.
// TODO: Remove this code once we can introduce
// runtime calls late in SSA processing.
if Widthreg < 8 && (et == TINT64 || et == TUINT64) {
if n.Right.Op == OLITERAL {
// Leave div/mod by constant powers of 2.
// The SSA backend will handle those.
switch et {
case TINT64:
c := n.Right.Int64()
if c < 0 {
c = -c
}
if c != 0 && c&(c-1) == 0 {
break opswitch
}
case TUINT64:
c := uint64(n.Right.Int64())
if c != 0 && c&(c-1) == 0 {
break opswitch
}
}
}
var fn string
if et == TINT64 {
fn = "int64"
} else {
fn = "uint64"
}
if n.Op == ODIV {
fn += "div"
} else {
fn += "mod"
}
n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et]))
}
case OINDEX:
n.Left = walkexpr(n.Left, init)
// save the original node for bounds checking elision.
// If it was a ODIV/OMOD walk might rewrite it.
r := n.Right
n.Right = walkexpr(n.Right, init)
// if range of type cannot exceed static array bound,
// disable bounds check.
if n.Bounded() {
break
}
t := n.Left.Type
if t != nil && t.IsPtr() {
t = t.Elem()
}
if t.IsArray() {
n.SetBounded(bounded(r, t.NumElem()))
if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
Warn("index bounds check elided")
}
if smallintconst(n.Right) && !n.Bounded() {
yyerror("index out of bounds")
}
} else if Isconst(n.Left, CTSTR) {
n.SetBounded(bounded(r, int64(len(n.Left.Val().U.(string)))))
if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
Warn("index bounds check elided")
}
if smallintconst(n.Right) && !n.Bounded() {
yyerror("index out of bounds")
}
}
if Isconst(n.Right, CTINT) {
if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
yyerror("index out of bounds")
}
}
case OINDEXMAP:
// Replace m[k] with *map{access1,assign}(maptype, m, &k)
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
map_ := n.Left
key := n.Right
t := map_.Type
if n.IndexMapLValue() {
// This m[k] expression is on the left-hand side of an assignment.
fast := mapfast(t)
if fast == mapslow {
// standard version takes key by reference.
// orderexpr made sure key is addressable.
key = nod(OADDR, key, nil)
}
n = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key)
} else {
// m[k] is not the target of an assignment.
fast := mapfast(t)
if fast == mapslow {
// standard version takes key by reference.
// orderexpr made sure key is addressable.
key = nod(OADDR, key, nil)
}
if w := t.Elem().Width; w <= 1024 { // 1024 must match runtime/map.go:maxZero
n = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, typename(t), map_, key)
} else {
z := zeroaddr(w)
n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z)
}
}
n.Type = types.NewPtr(t.Elem())
n.SetNonNil(true) // mapaccess1* and mapassign always return non-nil pointers.
n = nod(OIND, n, nil)
n.Type = t.Elem()
n.SetTypecheck(1)
case ORECV:
Fatalf("walkexpr ORECV") // should see inside OAS only
case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
n.Left = walkexpr(n.Left, init)
low, high, max := n.SliceBounds()
low = walkexpr(low, init)
if low != nil && isZero(low) {
// Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k].
low = nil
}
high = walkexpr(high, init)
max = walkexpr(max, init)
n.SetSliceBounds(low, high, max)
if n.Op.IsSlice3() {
if max != nil && max.Op == OCAP && samesafeexpr(n.Left, max.Left) {
// Reduce x[i:j:cap(x)] to x[i:j].
if n.Op == OSLICE3 {
n.Op = OSLICE
} else {
n.Op = OSLICEARR
}
n = reduceSlice(n)
}
} else {
n = reduceSlice(n)
}
case ONEW:
if n.Esc == EscNone {
if n.Type.Elem().Width >= 1<<16 {
Fatalf("large ONEW with EscNone: %v", n)
}
r := temp(n.Type.Elem())
r = nod(OAS, r, nil) // zero temp
r = typecheck(r, Etop)
init.Append(r)
r = nod(OADDR, r.Left, nil)
r = typecheck(r, Erv)
n = r
} else {
n = callnew(n.Type.Elem())
}
case OCMPSTR:
// s + "badgerbadgerbadger" == "badgerbadgerbadger"
if (n.SubOp() == OEQ || n.SubOp() == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && n.Left.List.Len() == 2 && Isconst(n.Left.List.Second(), CTSTR) && strlit(n.Right) == strlit(n.Left.List.Second()) {
r := nod(n.SubOp(), nod(OLEN, n.Left.List.First(), nil), nodintconst(0))
n = finishcompare(n, r, init)
break
}
// Rewrite comparisons to short constant strings as length+byte-wise comparisons.
var cs, ncs *Node // const string, non-const string
switch {
case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR):
// ignore; will be constant evaluated
case Isconst(n.Left, CTSTR):
cs = n.Left
ncs = n.Right
case Isconst(n.Right, CTSTR):
cs = n.Right
ncs = n.Left
}
if cs != nil {
cmp := n.SubOp()
// Our comparison below assumes that the non-constant string
// is on the left hand side, so rewrite "" cmp x to x cmp "".
// See issue 24817.
if Isconst(n.Left, CTSTR) {
cmp = brrev(cmp)
}
// maxRewriteLen was chosen empirically.
// It is the value that minimizes cmd/go file size
// across most architectures.
// See the commit description for CL 26758 for details.
maxRewriteLen := 6
// Some architectures can load unaligned byte sequence as 1 word.
// So we can cover longer strings with the same amount of code.
canCombineLoads := canMergeLoads()
combine64bit := false
if canCombineLoads {
// Keep this low enough to generate less code than a function call.
maxRewriteLen = 2 * thearch.LinkArch.RegSize
combine64bit = thearch.LinkArch.RegSize >= 8
}
var and Op
switch cmp {
case OEQ:
and = OANDAND
case ONE:
and = OOROR
default:
// Don't do byte-wise comparisons for <, <=, etc.
// They're fairly complicated.
// Length-only checks are ok, though.
maxRewriteLen = 0
}
if s := cs.Val().U.(string); len(s) <= maxRewriteLen {
if len(s) > 0 {
ncs = safeexpr(ncs, init)
}
r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s))))
remains := len(s)
for i := 0; remains > 0; {
if remains == 1 || !canCombineLoads {
cb := nodintconst(int64(s[i]))
ncb := nod(OINDEX, ncs, nodintconst(int64(i)))
r = nod(and, r, nod(cmp, ncb, cb))
remains--
i++
continue
}
var step int
var convType *types.Type
switch {
case remains >= 8 && combine64bit:
convType = types.Types[TINT64]
step = 8
case remains >= 4:
convType = types.Types[TUINT32]
step = 4
case remains >= 2:
convType = types.Types[TUINT16]
step = 2
}
ncsubstr := nod(OINDEX, ncs, nodintconst(int64(i)))
ncsubstr = conv(ncsubstr, convType)
csubstr := int64(s[i])
// Calculate large constant from bytes as sequence of shifts and ors.
// Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
// ssa will combine this into a single large load.
for offset := 1; offset < step; offset++ {
b := nod(OINDEX, ncs, nodintconst(int64(i+offset)))
b = conv(b, convType)
b = nod(OLSH, b, nodintconst(int64(8*offset)))
ncsubstr = nod(OOR, ncsubstr, b)
csubstr = csubstr | int64(s[i+offset])<<uint8(8*offset)
}
csubstrPart := nodintconst(csubstr)
// Compare "step" bytes as once
r = nod(and, r, nod(cmp, csubstrPart, ncsubstr))
remains -= step
i += step
}
n = finishcompare(n, r, init)
break
}
}
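// Illustration (added comment): on a 64-bit target with combinable loads,
// s == "abcd" has by this point been rewritten as roughly
//
//	len(s) == 4 && uint32(s[0])|uint32(s[1])<<8|uint32(s[2])<<16|uint32(s[3])<<24 == 0x64636261
//
// i.e. one length test plus a single four-byte comparison, with no call
// into the runtime string routines.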
var r *Node
if n.SubOp() == OEQ || n.SubOp() == ONE {
// prepare for rewrite below
n.Left = cheapexpr(n.Left, init)
n.Right = cheapexpr(n.Right, init)
lstr := conv(n.Left, types.Types[TSTRING])
rstr := conv(n.Right, types.Types[TSTRING])
lptr := nod(OSPTR, lstr, nil)
rptr := nod(OSPTR, rstr, nil)
llen := conv(nod(OLEN, lstr, nil), types.Types[TUINTPTR])
rlen := conv(nod(OLEN, rstr, nil), types.Types[TUINTPTR])
fn := syslook("memequal")
fn = substArgTypes(fn, types.Types[TUINT8], types.Types[TUINT8])
r = mkcall1(fn, types.Types[TBOOL], init, lptr, rptr, llen)
// quick check of len before full compare for == or !=.
// memequal then tests equality up to length len.
if n.SubOp() == OEQ {
// len(left) == len(right) && memequal(left, right, len)
r = nod(OANDAND, nod(OEQ, llen, rlen), r)
} else {
// len(left) != len(right) || !memequal(left, right, len)
r = nod(ONOT, r, nil)
r = nod(OOROR, nod(ONE, llen, rlen), r)
}
} else {
// Lower to cmpstring(s1, s2) compared against 0.
r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING]))
r = nod(n.SubOp(), r, nodintconst(0))
}
n = finishcompare(n, r, init)
case OADDSTR:
n = addstr(n, init)
case OAPPEND:
// order should make sure we only see OAS(node, OAPPEND), which we handle above.
Fatalf("append outside assignment")
case OCOPY:
n = copyany(n, init, instrumenting && !compiling_runtime)
// cannot use chanfn - closechan takes any, not chan any
case OCLOSE:
fn := syslook("closechan")
fn = substArgTypes(fn, n.Left.Type)
n = mkcall1(fn, nil, init, n.Left)
case OMAKECHAN:
// When size fits into int, use makechan instead of
// makechan64, which is faster and shorter on 32 bit platforms.
size := n.Left
fnname := "makechan64"
argtype := types.Types[TINT64]
// Type checking guarantees that TIDEAL size is positive and fits in an int.
// The case of size overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makechan during runtime.
if size.Type.IsKind(TIDEAL) || maxintval[size.Type.Etype].Cmp(maxintval[TUINT]) <= 0 {
fnname = "makechan"
argtype = types.Types[TINT]
}
n = mkcall1(chanfn(fnname, 1, n.Type), n.Type, init, typename(n.Type), conv(size, argtype))
case OMAKEMAP:
t := n.Type
hmapType := hmap(t)
hint := n.Left
// var h *hmap
var h *Node
if n.Esc == EscNone {
// Allocate hmap on stack.
// var hv hmap
hv := temp(hmapType)
zero := nod(OAS, hv, nil)
zero = typecheck(zero, Etop)
init.Append(zero)
// h = &hv
h = nod(OADDR, hv, nil)
// Allocate one bucket pointed to by hmap.buckets on stack if hint
// is not larger than BUCKETSIZE. In case hint is larger than
// BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
// Maximum key and value size is 128 bytes, larger objects
// are stored with an indirection. So max bucket size is 2048+eps.
if !Isconst(hint, CTINT) ||
!(hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) > 0) {
// var bv bmap
bv := temp(bmap(t))
zero = nod(OAS, bv, nil)
zero = typecheck(zero, Etop)
init.Append(zero)
// b = &bv
b := nod(OADDR, bv, nil)
// h.buckets = b
bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
na := nod(OAS, nodSym(ODOT, h, bsym), b)
na = typecheck(na, Etop)
init.Append(na)
}
}
if Isconst(hint, CTINT) && hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 {
// Handling make(map[any]any) and
// make(map[any]any, hint) where hint <= BUCKETSIZE
// special allows for faster map initialization and
// improves binary size by using calls with fewer arguments.
// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
// and no buckets will be allocated by makemap. Therefore,
// no buckets need to be allocated in this code path.
if n.Esc == EscNone {
// Only need to initialize h.hash0 since
// hmap h has been allocated on the stack already.
// h.hash0 = fastrand()
rand := mkcall("fastrand", types.Types[TUINT32], init)
hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
a := nod(OAS, nodSym(ODOT, h, hashsym), rand)
a = typecheck(a, Etop)
a = walkexpr(a, init)
init.Append(a)
n = nod(OCONVNOP, h, nil)
n.Type = t
n = typecheck(n, Erv)
} else {
// Call runtime.makehmap to allocate an
// hmap on the heap and initialize hmap's hash0 field.
fn := syslook("makemap_small")
fn = substArgTypes(fn, t.Key(), t.Elem())
n = mkcall1(fn, n.Type, init)
}
} else {
if n.Esc != EscNone {
h = nodnil()
}
// Map initialization with a variable or large hint is
// more complicated. We therefore generate a call to
// runtime.makemap to initialize hmap and allocate the
// map buckets.
// When hint fits into int, use makemap instead of
// makemap64, which is faster and shorter on 32 bit platforms.
fnname := "makemap64"
argtype := types.Types[TINT64]
// Type checking guarantees that TIDEAL hint is positive and fits in an int.
// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
// The case of hint overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makemap during runtime.
if hint.Type.IsKind(TIDEAL) || maxintval[hint.Type.Etype].Cmp(maxintval[TUINT]) <= 0 {
fnname = "makemap"
argtype = types.Types[TINT]
}
fn := syslook(fnname)
fn = substArgTypes(fn, hmapType, t.Key(), t.Elem())
n = mkcall1(fn, n.Type, init, typename(n.Type), conv(hint, argtype), h)
}
case OMAKESLICE:
l := n.Left
r := n.Right
if r == nil {
r = safeexpr(l, init)
l = r
}
t := n.Type
if n.Esc == EscNone {
if !isSmallMakeSlice(n) {
Fatalf("non-small OMAKESLICE with EscNone: %v", n)
}
// var arr [r]T
// n = arr[:l]
t = types.NewArray(t.Elem(), nonnegintconst(r)) // [r]T
var_ := temp(t)
a := nod(OAS, var_, nil) // zero temp
a = typecheck(a, Etop)
init.Append(a)
r := nod(OSLICE, var_, nil) // arr[:l]
r.SetSliceBounds(nil, l, nil)
r = conv(r, n.Type) // in case n.Type is named.
r = typecheck(r, Erv)
r = walkexpr(r, init)
n = r
} else {
// n escapes; set up a call to makeslice.
// When len and cap can fit into int, use makeslice instead of
// makeslice64, which is faster and shorter on 32 bit platforms.
if t.Elem().NotInHeap() {
yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem())
}
len, cap := l, r
fnname := "makeslice64"
argtype := types.Types[TINT64]
// Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
// will be handled by the negative range checks in makeslice during runtime.
if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) &&
(cap.Type.IsKind(TIDEAL) || maxintval[cap.Type.Etype].Cmp(maxintval[TUINT]) <= 0) {
fnname = "makeslice"
argtype = types.Types[TINT]
}
fn := syslook(fnname)
fn = substArgTypes(fn, t.Elem()) // any-1
n = mkcall1(fn, t, init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))
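// Illustration (added comment): on a 32-bit target, make([]byte, n) with n
// of type int lowers to makeslice, while an int64 length forces makeslice64;
// on 64-bit targets every integer length fits the makeslice fast path.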
}
case ORUNESTR:
a := nodnil()
if n.Esc == EscNone {
t := types.NewArray(types.Types[TUINT8], 4)
var_ := temp(t)
a = nod(OADDR, var_, nil)
}
// intstring(*[4]byte, rune)
n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[TINT64]))
case OARRAYBYTESTR:
a := nodnil()
if n.Esc == EscNone {
// Create temporary buffer for string on stack.
t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
a = nod(OADDR, temp(t), nil)
}
// slicebytetostring(*[32]byte, []byte) string;
n = mkcall("slicebytetostring", n.Type, init, a, n.Left)
// slicebytetostringtmp([]byte) string;
case OARRAYBYTESTRTMP:
n.Left = walkexpr(n.Left, init)
if !instrumenting {
// Let the backend handle OARRAYBYTESTRTMP directly
// to avoid a function call to slicebytetostringtmp.
break
}
n = mkcall("slicebytetostringtmp", n.Type, init, n.Left)
// slicerunetostring(*[32]byte, []rune) string;
case OARRAYRUNESTR:
a := nodnil()
if n.Esc == EscNone {
// Create temporary buffer for string on stack.
t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
a = nod(OADDR, temp(t), nil)
}
n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
// stringtoslicebyte(*[32]byte, string) []byte;
case OSTRARRAYBYTE:
a := nodnil()
if n.Esc == EscNone {
// Create temporary buffer for slice on stack.
t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
a = nod(OADDR, temp(t), nil)
}
n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, types.Types[TSTRING]))
case OSTRARRAYBYTETMP:
// []byte(string) conversion that creates a slice
// referring to the actual string bytes.
// This conversion is handled later by the backend and
// is only for use by internal compiler optimizations
// that know that the slice won't be mutated.
// The only such case today is:
// for i, c := range []byte(string)
n.Left = walkexpr(n.Left, init)
// stringtoslicerune(*[32]rune, string) []rune
case OSTRARRAYRUNE:
a := nodnil()
if n.Esc == EscNone {
// Create temporary buffer for slice on stack.
t := types.NewArray(types.Types[TINT32], tmpstringbufsize)
a = nod(OADDR, temp(t), nil)
}
n = mkcall("stringtoslicerune", n.Type, init, a, conv(n.Left, types.Types[TSTRING]))
// ifaceeq(i1 any-1, i2 any-2) (ret bool);
case OCMPIFACE:
if !eqtype(n.Left.Type, n.Right.Type) {
Fatalf("ifaceeq %v %v %v", n.Op, n.Left.Type, n.Right.Type)
}
var fn *Node
if n.Left.Type.IsEmptyInterface() {
fn = syslook("efaceeq")
} else {
fn = syslook("ifaceeq")
}
n.Right = cheapexpr(n.Right, init)
n.Left = cheapexpr(n.Left, init)
lt := nod(OITAB, n.Left, nil)
rt := nod(OITAB, n.Right, nil)
ld := nod(OIDATA, n.Left, nil)
rd := nod(OIDATA, n.Right, nil)
ld.Type = types.Types[TUNSAFEPTR]
rd.Type = types.Types[TUNSAFEPTR]
ld.SetTypecheck(1)
rd.SetTypecheck(1)
call := mkcall1(fn, n.Type, init, lt, ld, rd)
// Check itable/type before full compare.
// Note: short-circuited because order matters.
var cmp *Node
if n.SubOp() == OEQ {
cmp = nod(OANDAND, nod(OEQ, lt, rt), call)
} else {
cmp = nod(OOROR, nod(ONE, lt, rt), nod(ONOT, call, nil))
}
n = finishcompare(n, cmp, init)
case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT, OPTRLIT:
if isStaticCompositeLiteral(n) && !canSSAType(n.Type) {
// n can be directly represented in the read-only data section.
// Make direct reference to the static data. See issue 12841.
vstat := staticname(n.Type)
vstat.Name.SetReadonly(true)
fixedlit(inInitFunction, initKindStatic, n, vstat, init)
n = vstat
n = typecheck(n, Erv)
break
}
var_ := temp(n.Type)
anylit(n, var_, init)
n = var_
case OSEND:
n1 := n.Right
n1 = assignconv(n1, n.Left.Type.Elem(), "chan send")
n1 = walkexpr(n1, init)
n1 = nod(OADDR, n1, nil)
n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, n.Left, n1)
case OCLOSURE:
n = walkclosure(n, init)
case OCALLPART:
n = walkpartialcall(n, init)
}
// Expressions that are constant at run time but not
// considered const by the language spec are not turned into
// constants until walk. For example, if n is y%1 == 0, the
// walk of y%1 may have replaced it by 0.
// Check whether n with its updated args is itself now a constant.
t := n.Type
evconst(n)
if n.Type != t {
Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type)
}
if n.Op == OLITERAL {
n = typecheck(n, Erv)
// Emit string symbol now to avoid emitting
// any concurrently during the backend.
if s, ok := n.Val().U.(string); ok {
_ = stringsym(n.Pos, s)
}
}
updateHasCall(n)
if Debug['w'] != 0 && n != nil {
Dump("after walk expr", n)
}
lineno = lno
return n
}
// TODO(josharian): combine this with its caller and simplify
func reduceSlice(n *Node) *Node {
low, high, max := n.SliceBounds()
if high != nil && high.Op == OLEN && samesafeexpr(n.Left, high.Left) {
// Reduce x[i:len(x)] to x[i:].
high = nil
}
n.SetSliceBounds(low, high, max)
if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil {
// Reduce x[:] to x.
if Debug_slice > 0 {
Warn("slice: omit slice operation")
}
return n.Left
}
return n
}
func ascompatee1(l *Node, r *Node, init *Nodes) *Node {
// convas will turn map assigns into function calls,
// making it impossible for reorder3 to work.
n := nod(OAS, l, r)
if l.Op == OINDEXMAP {
return n
}
return convas(n, init)
}
func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
// check assign expression list to
// an expression list. called in
// expr-list = expr-list
// ensure order of evaluation for function calls
for i := range nl {
nl[i] = safeexpr(nl[i], init)
}
for i1 := range nr {
nr[i1] = safeexpr(nr[i1], init)
}
var nn []*Node
i := 0
for ; i < len(nl); i++ {
if i >= len(nr) {
break
}
// Do not generate 'x = x' during return. See issue 4014.
if op == ORETURN && samesafeexpr(nl[i], nr[i]) {
continue
}
nn = append(nn, ascompatee1(nl[i], nr[i], init))
}
// cannot happen: caller checked that lists had same length
if i < len(nl) || i < len(nr) {
var nln, nrn Nodes
nln.Set(nl)
nrn.Set(nr)
Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.funcname())
}
return nn
}
// fncall reports whether assigning an rvalue of type rt to an lvalue l might involve a function call.
func fncall(l *Node, rt *types.Type) bool {
if l.HasCall() || l.Op == OINDEXMAP {
return true
}
if eqtype(l.Type, rt) {
return false
}
// There might be a conversion required, which might involve a runtime call.
return true
}
// check assign type list to
// an expression list. called in
// expr-list = func()
func ascompatet(nl Nodes, nr *types.Type) []*Node {
if nl.Len() != nr.NumFields() {
Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
}
var nn, mm Nodes
for i, l := range nl.Slice() {
if l.isBlank() {
continue
}
r := nr.Field(i)
// Any assignment to an lvalue that might cause a function call must be
// deferred until all the returned values have been read.
if fncall(l, r.Type) {
tmp := temp(r.Type)
tmp = typecheck(tmp, Erv)
a := nod(OAS, l, tmp)
a = convas(a, &mm)
mm.Append(a)
l = tmp
}
a := nod(OAS, l, nodarg(r, 0))
a = convas(a, &nn)
updateHasCall(a)
if a.HasCall() {
Dump("ascompatet ucount", a)
Fatalf("ascompatet: too many function calls evaluating parameters")
}
nn.Append(a)
}
return append(nn.Slice(), mm.Slice()...)
}
// nodarg returns a Node for the function argument denoted by t,
// which is either the entire function argument or result struct (t is a struct *types.Type)
// or a specific argument (t is a *types.Field within a struct *types.Type).
//
// If fp is 0, the node is for use by a caller invoking the given
// function, preparing the arguments before the call
// or retrieving the results after the call.
// In this case, the node will correspond to an outgoing argument
// slot like 8(SP).
//
// If fp is 1, the node is for use by the function itself
// (the callee), to retrieve its arguments or write its results.
// In this case the node will be an ONAME with an appropriate
// type and offset.
func nodarg(t interface{}, fp int) *Node {
var n *Node
switch t := t.(type) {
default:
Fatalf("bad nodarg %T(%v)", t, t)
case *types.Type:
// Entire argument struct, not just one arg
if !t.IsFuncArgStruct() {
Fatalf("nodarg: bad type %v", t)
}
// Build fake variable name for whole arg struct.
n = newname(lookup(".args"))
n.Type = t
first := t.Field(0)
if first == nil {
Fatalf("nodarg: bad struct")
}
if first.Offset == BADWIDTH {
Fatalf("nodarg: offset not computed for %v", t)
}
n.Xoffset = first.Offset
case *types.Field:
if fp == 1 {
// NOTE(rsc): This should be using t.Nname directly,
// except in the case where t.Nname.Sym is the blank symbol and
// so the assignment would be discarded during code generation.
// In that case we need to make a new node, and there is no harm
// in optimization passes to doing so. But otherwise we should
// definitely be using the actual declaration and not a newly built node.
// The extra Fatalf checks here are verifying that this is the case,
// without changing the actual logic (at time of writing, it's getting
// toward time for the Go 1.7 beta).
// At some quieter time (assuming we've never seen these Fatalfs happen)
// we could change this code to use "expect" directly.
expect := asNode(t.Nname)
if expect.isParamHeapCopy() {
expect = expect.Name.Param.Stackcopy
}
for _, n := range Curfn.Func.Dcl {
if (n.Class() == PPARAM || n.Class() == PPARAMOUT) && !t.Sym.IsBlank() && n.Sym == t.Sym {
if n != expect {
Fatalf("nodarg: unexpected node: %v (%p %v) vs %v (%p %v)", n, n, n.Op, asNode(t.Nname), asNode(t.Nname), asNode(t.Nname).Op)
}
return n
}
}
if !expect.Sym.IsBlank() {
Fatalf("nodarg: did not find node in dcl list: %v", expect)
}
}
// Build fake name for individual variable.
// This is safe because if there was a real declared name
// we'd have used it above.
n = newname(lookup("__"))
n.Type = t.Type
if t.Offset == BADWIDTH {
Fatalf("nodarg: offset not computed for %v", t)
}
n.Xoffset = t.Offset
n.Orig = asNode(t.Nname)
}
// Rewrite argument named _ to __,
// or else the assignment to _ will be
// discarded during code generation.
if n.isBlank() {
n.Sym = lookup("__")
}
if fp != 0 {
Fatalf("bad fp: %v", fp)
}
// preparing arguments for call
n.Op = OINDREGSP
n.Xoffset += Ctxt.FixedFrameSize()
n.SetTypecheck(1)
n.SetAddrtaken(true) // keep optimizers at bay
return n
}
// package all the arguments that match a ... T parameter into a []T.
func mkdotargslice(typ *types.Type, args []*Node, init *Nodes, ddd *Node) *Node {
esc := uint16(EscUnknown)
if ddd != nil {
esc = ddd.Esc
}
if len(args) == 0 {
n := nodnil()
n.Type = typ
return n
}
n := nod(OCOMPLIT, nil, typenod(typ))
if ddd != nil && prealloc[ddd] != nil {
prealloc[n] = prealloc[ddd] // temporary to use
}
n.List.Set(args)
n.Esc = esc
n = typecheck(n, Erv)
if n.Type == nil {
Fatalf("mkdotargslice: typecheck failed")
}
n = walkexpr(n, init)
return n
}
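// Illustration (added comment): for func f(x int, ys ...int), the call
// f(a, b, c) reaches ascompatte below with isddd false, and the trailing
// arguments are packaged here as
//
//	f(a, []int{b, c})
//
// while f(a) takes the len(args) == 0 branch above and becomes f(a, nil).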
// check assign expression list to
// a type list. called in
// return expr-list
// func(expr-list)
func ascompatte(call *Node, isddd bool, lhs *types.Type, rhs []*Node, fp int, init *Nodes) []*Node {
// f(g()) where g has multiple return values
if len(rhs) == 1 && rhs[0].Type.IsFuncArgStruct() {
// optimization - can do block copy
if eqtypenoname(rhs[0].Type, lhs) {
nl := nodarg(lhs, fp)
nr := nod(OCONVNOP, rhs[0], nil)
nr.Type = nl.Type
n := convas(nod(OAS, nl, nr), init)
n.SetTypecheck(1)
return []*Node{n}
}
// conversions involved.
// copy into temporaries.
var tmps []*Node
for _, nr := range rhs[0].Type.FieldSlice() {
tmps = append(tmps, temp(nr.Type))
}
a := nod(OAS2, nil, nil)
a.List.Set(tmps)
a.Rlist.Set(rhs)
a = typecheck(a, Etop)
a = walkstmt(a)
init.Append(a)
rhs = tmps
}
// For each parameter (LHS), assign its corresponding argument (RHS).
// If there's a ... parameter (which is only valid as the final
// parameter) and this is not a ... call expression,
// then assign the remaining arguments as a slice.
var nn []*Node
for i, nl := range lhs.FieldSlice() {
var nr *Node
if nl.Isddd() && !isddd {
nr = mkdotargslice(nl.Type, rhs[i:], init, call.Right)
} else {
nr = rhs[i]
}
a := nod(OAS, nodarg(nl, fp), nr)
a = convas(a, init)
a.SetTypecheck(1)
nn = append(nn, a)
}
return nn
}
// generate code for print
func walkprint(nn *Node, init *Nodes) *Node {
// Hoist all the argument evaluation up before the lock.
walkexprlistcheap(nn.List.Slice(), init)
// For println, add " " between elements and "\n" at the end.
if nn.Op == OPRINTN {
s := nn.List.Slice()
t := make([]*Node, 0, len(s)*2)
for i, n := range s {
if i != 0 {
t = append(t, nodstr(" "))
}
t = append(t, n)
}
t = append(t, nodstr("\n"))
nn.List.Set(t)
}
// Collapse runs of constant strings.
s := nn.List.Slice()
t := make([]*Node, 0, len(s))
for i := 0; i < len(s); {
var strs []string
for i < len(s) && Isconst(s[i], CTSTR) {
strs = append(strs, s[i].Val().U.(string))
i++
}
if len(strs) > 0 {
t = append(t, nodstr(strings.Join(strs, "")))
}
if i < len(s) {
t = append(t, s[i])
i++
}
}
nn.List.Set(t)
calls := []*Node{mkcall("printlock", nil, init)}
for i, n := range nn.List.Slice() {
if n.Op == OLITERAL {
switch n.Val().Ctype() {
case CTRUNE:
n = defaultlit(n, types.Runetype)
case CTINT:
n = defaultlit(n, types.Types[TINT64])
case CTFLT:
n = defaultlit(n, types.Types[TFLOAT64])
}
}
if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
n = defaultlit(n, types.Types[TINT64])
}
n = defaultlit(n, nil)
nn.List.SetIndex(i, n)
if n.Type == nil || n.Type.Etype == TFORW {
continue
}
var on *Node
switch n.Type.Etype {
case TINTER:
if n.Type.IsEmptyInterface() {
on = syslook("printeface")
} else {
on = syslook("printiface")
}
on = substArgTypes(on, n.Type) // any-1
case TPTR32, TPTR64, TCHAN, TMAP, TFUNC, TUNSAFEPTR:
on = syslook("printpointer")
on = substArgTypes(on, n.Type) // any-1
case TSLICE:
on = syslook("printslice")
on = substArgTypes(on, n.Type) // any-1
case TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR:
if isRuntimePkg(n.Type.Sym.Pkg) && n.Type.Sym.Name == "hex" {
on = syslook("printhex")
} else {
on = syslook("printuint")
}
case TINT, TINT8, TINT16, TINT32, TINT64:
on = syslook("printint")
case TFLOAT32, TFLOAT64:
on = syslook("printfloat")
case TCOMPLEX64, TCOMPLEX128:
on = syslook("printcomplex")
case TBOOL:
on = syslook("printbool")
case TSTRING:
cs := ""
if Isconst(n, CTSTR) {
cs = n.Val().U.(string)
}
switch cs {
case " ":
on = syslook("printsp")
case "\n":
on = syslook("printnl")
default:
on = syslook("printstring")
}
default:
badtype(OPRINT, n.Type, nil)
continue
}
r := nod(OCALL, on, nil)
if params := on.Type.Params().FieldSlice(); len(params) > 0 {
t := params[0].Type
if !eqtype(t, n.Type) {
n = nod(OCONV, n, nil)
n.Type = t
}
r.List.Append(n)
}
calls = append(calls, r)
}
calls = append(calls, mkcall("printunlock", nil, init))
typecheckslice(calls, Etop)
walkexprlist(calls, init)
r := nod(OEMPTY, nil, nil)
r = typecheck(r, Etop)
r = walkexpr(r, init)
r.Ninit.Set(calls)
return r
}
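// Illustration (added comment): after walkprint above,
//
//	println("x =", x) // x an int
//
// is lowered, in effect, to
//
//	printlock(); printstring("x = "); printint(int64(x)); printnl(); printunlock()
//
// with the inserted " " separator folded into the preceding constant string
// and the trailing "\n" emitted as printnl.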
func callnew(t *types.Type) *Node {
if t.NotInHeap() {
yyerror("%v is go:notinheap; heap allocation disallowed", t)
}
dowidth(t)
fn := syslook("newobject")
fn = substArgTypes(fn, t)
v := mkcall1(fn, types.NewPtr(t), nil, typename(t))
v.SetNonNil(true)
return v
}
// isReflectHeaderDataField reports whether l is an expression p.Data
// where p has type reflect.SliceHeader or reflect.StringHeader.
func isReflectHeaderDataField(l *Node) bool {
if l.Type != types.Types[TUINTPTR] {
return false
}
var tsym *types.Sym
switch l.Op {
case ODOT:
tsym = l.Left.Type.Sym
case ODOTPTR:
tsym = l.Left.Type.Elem().Sym
default:
return false
}
if tsym == nil || l.Sym.Name != "Data" || tsym.Pkg.Path != "reflect" {
return false
}
return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
}
func convas(n *Node, init *Nodes) *Node {
if n.Op != OAS {
Fatalf("convas: not OAS %v", n.Op)
}
defer updateHasCall(n)
n.SetTypecheck(1)
if n.Left == nil || n.Right == nil {
return n
}
lt := n.Left.Type
rt := n.Right.Type
if lt == nil || rt == nil {
return n
}
if n.Left.isBlank() {
n.Right = defaultlit(n.Right, nil)
return n
}
if !eqtype(lt, rt) {
n.Right = assignconv(n.Right, lt, "assignment")
n.Right = walkexpr(n.Right, init)
}
dowidth(n.Right.Type)
return n
}
// from ascompat[te]
// evaluating actual function arguments.
// f(a,b)
// if there is exactly one function expr,
// then it is done first. otherwise must
// make temp variables
func reorder1(all []*Node) []*Node {
// When instrumenting, force all arguments into temporary
// variables to prevent instrumentation calls from clobbering
// arguments already on the stack.
funcCalls := 0
if !instrumenting {
if len(all) == 1 {
return all
}
for _, n := range all {
updateHasCall(n)
if n.HasCall() {
funcCalls++
}
}
if funcCalls == 0 {
return all
}
}
var g []*Node // fncalls assigned to tempnames
var f *Node // last fncall assigned to stack
var r []*Node // non fncalls and tempnames assigned to stack
d := 0
for _, n := range all {
if !instrumenting {
if !n.HasCall() {
r = append(r, n)
continue
}
d++
if d == funcCalls {
f = n
continue
}
}
// make assignment of fncall to tempname
a := temp(n.Right.Type)
a = nod(OAS, a, n.Right)
g = append(g, a)
// put normal arg assignment on list
// with fncall replaced by tempname
n.Right = a.Left
r = append(r, n)
}
if f != nil {
g = append(g, f)
}
return append(g, r...)
}
// from ascompat[ee]
// a,b = c,d
// simultaneous assignment. there cannot
// be later use of an earlier lvalue.
//
// function calls have been removed.
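// For example, in a, b = b, a the right-hand a must be copied to a
// temporary before the write to a in the first assignment clobbers it.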
func reorder3(all []*Node) []*Node {
// If a needed expression may be affected by an
// earlier assignment, make an early copy of that
// expression and use the copy instead.
var early []*Node
var mapinit Nodes
for i, n := range all {
l := n.Left
// Save subexpressions needed on left side.
// Drill through non-dereferences.
for {
if l.Op == ODOT || l.Op == OPAREN {
l = l.Left
continue
}
if l.Op == OINDEX && l.Left.Type.IsArray() {
l.Right = reorder3save(l.Right, all, i, &early)
l = l.Left
continue
}
break
}
switch l.Op {
default:
Fatalf("reorder3 unexpected lvalue %#v", l.Op)
case ONAME:
break
case OINDEX, OINDEXMAP:
l.Left = reorder3save(l.Left, all, i, &early)
l.Right = reorder3save(l.Right, all, i, &early)
if l.Op == OINDEXMAP {
all[i] = convas(all[i], &mapinit)
}
case OIND, ODOTPTR:
l.Left = reorder3save(l.Left, all, i, &early)
}
// Save expression on right side.
all[i].Right = reorder3save(all[i].Right, all, i, &early)
}
early = append(mapinit.Slice(), early...)
return append(early, all...)
}
// If the evaluation of n would be affected by the
// assignments in all up to but not including the ith assignment,
// reorder3save copies n into a temporary emitted via early and
// returns that temp in place of n.
// The result of reorder3save MUST be assigned back to n, e.g.
// n.Left = reorder3save(n.Left, all, i, early)
func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node {
if !aliased(n, all, i) {
return n
}
q := temp(n.Type)
q = nod(OAS, q, n)
q = typecheck(q, Etop)
*early = append(*early, q)
return q.Left
}
// what's the outer value that a write to n affects?
// outer value means containing struct or array.
func outervalue(n *Node) *Node {
for {
switch n.Op {
case OXDOT:
Fatalf("OXDOT in walk")
case ODOT, OPAREN, OCONVNOP:
n = n.Left
continue
case OINDEX:
if n.Left.Type != nil && n.Left.Type.IsArray() {
n = n.Left
continue
}
}
return n
}
}
// aliased reports whether the computation of n might be
// affected by writes in all up to but not including the ith element.
func aliased(n *Node, all []*Node, i int) bool {
if n == nil {
return false
}
// Treat all fields of a struct as referring to the whole struct.
// We could do better but we would have to keep track of the fields.
for n.Op == ODOT {
n = n.Left
}
// Look for obvious aliasing: a variable being assigned
// during the all list and appearing in n.
// Also record whether there are any writes to main memory.
// Also record whether there are any writes to variables
// whose addresses have been taken.
memwrite := false
varwrite := false
for _, an := range all[:i] {
a := outervalue(an.Left)
for a.Op == ODOT {
a = a.Left
}
if a.Op != ONAME {
memwrite = true
continue
}
switch n.Class() {
default:
varwrite = true
continue
case PAUTO, PPARAM, PPARAMOUT:
if n.Addrtaken() {
varwrite = true
continue
}
if vmatch2(a, n) {
// Direct hit.
return true
}
}
}
// The variables being written do not appear in n.
// However, n might refer to computed addresses
// that are being written.
// If no computed addresses are affected by the writes, no aliasing.
if !memwrite && !varwrite {
return false
}
// If n does not refer to computed addresses
// (that is, if n only refers to variables whose addresses
// have not been taken), no aliasing.
if varexpr(n) {
return false
}
// Otherwise, both the writes and n refer to computed memory addresses.
// Assume that they might conflict.
return true
}
// does the evaluation of n only refer to variables
// whose addresses have not been taken?
// (and no other memory)
func varexpr(n *Node) bool {
if n == nil {
return true
}
switch n.Op {
case OLITERAL:
return true
case ONAME:
switch n.Class() {
case PAUTO, PPARAM, PPARAMOUT:
if !n.Addrtaken() {
return true
}
}
return false
case OADD,
OSUB,
OOR,
OXOR,
OMUL,
ODIV,
OMOD,
OLSH,
ORSH,
OAND,
OANDNOT,
OPLUS,
OMINUS,
OCOM,
OPAREN,
OANDAND,
OOROR,
OCONV,
OCONVNOP,
OCONVIFACE,
ODOTTYPE:
return varexpr(n.Left) && varexpr(n.Right)
case ODOT: // but not ODOTPTR
// Should have been handled in aliased.
Fatalf("varexpr unexpected ODOT")
}
// Be conservative.
return false
}
// is the name l mentioned in r?
func vmatch2(l *Node, r *Node) bool {
if r == nil {
return false
}
switch r.Op {
// match each right given left
case ONAME:
return l == r
case OLITERAL:
return false
}
if vmatch2(l, r.Left) {
return true
}
if vmatch2(l, r.Right) {
return true
}
for _, n := range r.List.Slice() {
if vmatch2(l, n) {
return true
}
}
return false
}
// is any name mentioned in l also mentioned in r?
// called by sinit.go
func vmatch1(l *Node, r *Node) bool {
// isolate all left sides
if l == nil || r == nil {
return false
}
switch l.Op {
case ONAME:
switch l.Class() {
case PPARAM, PAUTO:
break
default:
// assignment to non-stack variable must be
// delayed if right has function calls.
if r.HasCall() {
return true
}
}
return vmatch2(l, r)
case OLITERAL:
return false
}
if vmatch1(l.Left, r) {
return true
}
if vmatch1(l.Right, r) {
return true
}
for _, n := range l.List.Slice() {
if vmatch1(n, r) {
return true
}
}
return false
}
// paramstoheap returns code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
func paramstoheap(params *types.Type) []*Node {
var nn []*Node
for _, t := range params.Fields().Slice() {
v := asNode(t.Nname)
if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result
v = nil
}
if v == nil {
continue
}
if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil {
nn = append(nn, walkstmt(nod(ODCL, v, nil)))
if stackcopy.Class() == PPARAM {
nn = append(nn, walkstmt(typecheck(nod(OAS, v, stackcopy), Etop)))
}
}
}
return nn
}
// zeroResults zeros the return values at the start of the function.
// We need to do this very early in the function. Defer might stop a
// panic and show the return values as they exist at the time of
// panic. For precise stacks, the garbage collector assumes results
// are always live, so we need to zero them before any allocations,
// even allocations to move params/results to the heap.
// The generated code is added to Curfn's Enter list.
func zeroResults() {
for _, f := range Curfn.Type.Results().Fields().Slice() {
if v := asNode(f.Nname); v != nil && v.Name.Param.Heapaddr != nil {
// The local which points to the return value is the
// thing that needs zeroing. This is already handled
// by a Needzero annotation in plive.go:livenessepilogue.
continue
}
// Zero the stack location containing f.
Curfn.Func.Enter.Append(nodl(Curfn.Pos, OAS, nodarg(f, 1), nil))
}
}
// returnsfromheap returns code to copy values for heap-escaped parameters
// back to the stack.
func returnsfromheap(params *types.Type) []*Node {
var nn []*Node
for _, t := range params.Fields().Slice() {
v := asNode(t.Nname)
if v == nil {
continue
}
if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class() == PPARAMOUT {
nn = append(nn, walkstmt(typecheck(nod(OAS, stackcopy, v), Etop)))
}
}
return nn
}
// heapmoves generates code to handle migrating heap-escaped parameters
// between the stack and the heap. The generated code is added to Curfn's
// Enter and Exit lists.
func heapmoves() {
lno := lineno
lineno = Curfn.Pos
nn := paramstoheap(Curfn.Type.Recvs())
nn = append(nn, paramstoheap(Curfn.Type.Params())...)
nn = append(nn, paramstoheap(Curfn.Type.Results())...)
Curfn.Func.Enter.Append(nn...)
lineno = Curfn.Func.Endlineno
Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...)
lineno = lno
}
func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node {
if fn.Type == nil || fn.Type.Etype != TFUNC {
Fatalf("mkcall %v %v", fn, fn.Type)
}
n := fn.Type.NumParams()
if n != len(va) {
Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
}
r := nod(OCALL, fn, nil)
r.List.Set(va)
if fn.Type.NumResults() > 0 {
r = typecheck(r, Erv|Efnstruct)
} else {
r = typecheck(r, Etop)
}
r = walkexpr(r, init)
r.Type = t
return r
}
func mkcall(name string, t *types.Type, init *Nodes, args ...*Node) *Node {
return vmkcall(syslook(name), t, init, args)
}
func mkcall1(fn *Node, t *types.Type, init *Nodes, args ...*Node) *Node {
return vmkcall(fn, t, init, args)
}
func conv(n *Node, t *types.Type) *Node {
if eqtype(n.Type, t) {
return n
}
n = nod(OCONV, n, nil)
n.Type = t
n = typecheck(n, Erv)
return n
}
// byteindex converts n, which is byte-sized, to a uint8.
// We cannot use conv, because we allow converting bool to uint8 here,
// which is forbidden in user code.
func byteindex(n *Node) *Node {
if eqtype(n.Type, types.Types[TUINT8]) {
return n
}
n = nod(OCONV, n, nil)
n.Type = types.Types[TUINT8]
n.SetTypecheck(1)
return n
}
func chanfn(name string, n int, t *types.Type) *Node {
if !t.IsChan() {
Fatalf("chanfn %v", t)
}
fn := syslook(name)
switch n {
default:
Fatalf("chanfn %d", n)
case 1:
fn = substArgTypes(fn, t.Elem())
case 2:
fn = substArgTypes(fn, t.Elem(), t.Elem())
}
return fn
}
func mapfn(name string, t *types.Type) *Node {
if !t.IsMap() {
Fatalf("mapfn %v", t)
}
fn := syslook(name)
fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
return fn
}
func mapfndel(name string, t *types.Type) *Node {
if !t.IsMap() {
Fatalf("mapfn %v", t)
}
fn := syslook(name)
fn = substArgTypes(fn, t.Key(), t.Elem(), t.Key())
return fn
}
const (
mapslow = iota
mapfast32
mapfast32ptr
mapfast64
mapfast64ptr
mapfaststr
nmapfast
)
type mapnames [nmapfast]string
func mkmapnames(base string, ptr string) mapnames {
return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
}
var mapaccess1 = mkmapnames("mapaccess1", "")
var mapaccess2 = mkmapnames("mapaccess2", "")
var mapassign = mkmapnames("mapassign", "ptr")
var mapdelete = mkmapnames("mapdelete", "")
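// For example, mapassign[mapfast32ptr] names the runtime helper "mapassign_fast32ptr".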
func mapfast(t *types.Type) int {
// Check runtime/map.go:maxValueSize before changing.
if t.Elem().Width > 128 {
return mapslow
}
switch algtype(t.Key()) {
case AMEM32:
if !t.Key().HasHeapPointer() {
return mapfast32
}
if Widthptr == 4 {
return mapfast32ptr
}
Fatalf("small pointer %v", t.Key())
case AMEM64:
if !t.Key().HasHeapPointer() {
return mapfast64
}
if Widthptr == 8 {
return mapfast64ptr
}
// Two-word object, at least one of which is a pointer.
// Use the slow path.
case ASTRING:
return mapfaststr
}
return mapslow
}
func writebarrierfn(name string, l *types.Type, r *types.Type) *Node {
fn := syslook(name)
fn = substArgTypes(fn, l, r)
return fn
}
func addstr(n *Node, init *Nodes) *Node {
// orderexpr rewrote OADDSTR to have a list of strings.
c := n.List.Len()
if c < 2 {
Fatalf("addstr count %d too small", c)
}
buf := nodnil()
if n.Esc == EscNone {
sz := int64(0)
for _, n1 := range n.List.Slice() {
if n1.Op == OLITERAL {
sz += int64(len(n1.Val().U.(string)))
}
}
// Don't allocate the buffer if the result won't fit.
if sz < tmpstringbufsize {
// Create temporary buffer for result string on stack.
t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
buf = nod(OADDR, temp(t), nil)
}
}
// build list of string arguments
args := []*Node{buf}
for _, n2 := range n.List.Slice() {
args = append(args, conv(n2, types.Types[TSTRING]))
}
var fn string
if c <= 5 {
// small numbers of strings use direct runtime helpers.
// note: orderexpr knows this cutoff too.
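// For example, a+b+c (three operands) calls concatstring3.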
fn = fmt.Sprintf("concatstring%d", c)
} else {
// large numbers of strings are passed to the runtime as a slice.
fn = "concatstrings"
t := types.NewSlice(types.Types[TSTRING])
slice := nod(OCOMPLIT, nil, typenod(t))
if prealloc[n] != nil {
prealloc[slice] = prealloc[n]
}
slice.List.Set(args[1:]) // skip buf arg
args = []*Node{buf, slice}
slice.Esc = EscNone
}
cat := syslook(fn)
r := nod(OCALL, cat, nil)
r.List.Set(args)
r = typecheck(r, Erv)
r = walkexpr(r, init)
r.Type = n.Type
return r
}
func walkAppendArgs(n *Node, init *Nodes) {
walkexprlistsafe(n.List.Slice(), init)
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
// modifying here. Fix explicitly.
ls := n.List.Slice()
for i1, n1 := range ls {
ls[i1] = cheapexpr(n1, init)
}
}
// expand append(l1, l2...) to
// init {
// s := l1
// n := len(s) + len(l2)
// // Compare as uint so growslice can panic on overflow.
// if uint(n) > uint(cap(s)) {
// s = growslice(s, n)
// }
// s = s[:n]
// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
// }
// s
//
// l2 is allowed to be a string.
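// For example, append(b, s...) with b of type []byte and s of type string
// compiles through this path (see the slicestringcopy branch below).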
func appendslice(n *Node, init *Nodes) *Node {
walkAppendArgs(n, init)
l1 := n.List.First()
l2 := n.List.Second()
var l []*Node
// var s []T
s := temp(l1.Type)
l = append(l, nod(OAS, s, l1)) // s = l1
// n := len(s) + len(l2)
nn := temp(types.Types[TINT])
l = append(l, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), nod(OLEN, l2, nil))))
// if uint(n) > uint(cap(s))
nif := nod(OIF, nil, nil)
nif.Left = nod(OGT, nod(OCONV, nn, nil), nod(OCONV, nod(OCAP, s, nil), nil))
nif.Left.Left.Type = types.Types[TUINT]
nif.Left.Right.Type = types.Types[TUINT]
// instantiate growslice(Type*, []any, int) []any
fn := syslook("growslice")
fn = substArgTypes(fn, s.Type.Elem(), s.Type.Elem())
// s = growslice(T, s, n)
nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(s.Type.Elem()), s, nn)))
l = append(l, nif)
// s = s[:n]
nt := nod(OSLICE, s, nil)
nt.SetSliceBounds(nil, nn, nil)
l = append(l, nod(OAS, s, nt))
if l1.Type.Elem().HasHeapPointer() {
// copy(s[len(l1):], l2)
nptr1 := nod(OSLICE, s, nil)
nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
nptr2 := l2
Curfn.Func.setWBPos(n.Pos)
fn := syslook("typedslicecopy")
fn = substArgTypes(fn, l1.Type, l2.Type)
var ln Nodes
ln.Set(l)
nt := mkcall1(fn, types.Types[TINT], &ln, typename(l1.Type.Elem()), nptr1, nptr2)
l = append(ln.Slice(), nt)
} else if instrumenting && !compiling_runtime {
// rely on runtime to instrument copy.
// copy(s[len(l1):], l2)
nptr1 := nod(OSLICE, s, nil)
nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
nptr2 := l2
var ln Nodes
ln.Set(l)
var nt *Node
if l2.Type.IsString() {
fn := syslook("slicestringcopy")
fn = substArgTypes(fn, l1.Type, l2.Type)
nt = mkcall1(fn, types.Types[TINT], &ln, nptr1, nptr2)
} else {
fn := syslook("slicecopy")
fn = substArgTypes(fn, l1.Type, l2.Type)
nt = mkcall1(fn, types.Types[TINT], &ln, nptr1, nptr2, nodintconst(s.Type.Elem().Width))
}
l = append(ln.Slice(), nt)
} else {
// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
nptr1 := nod(OINDEX, s, nod(OLEN, l1, nil))
nptr1.SetBounded(true)
nptr1 = nod(OADDR, nptr1, nil)
nptr2 := nod(OSPTR, l2, nil)
fn := syslook("memmove")
fn = substArgTypes(fn, s.Type.Elem(), s.Type.Elem())
var ln Nodes
ln.Set(l)
nwid := cheapexpr(conv(nod(OLEN, l2, nil), types.Types[TUINTPTR]), &ln)
nwid = nod(OMUL, nwid, nodintconst(s.Type.Elem().Width))
nt := mkcall1(fn, nil, &ln, nptr1, nptr2, nwid)
l = append(ln.Slice(), nt)
}
typecheckslice(l, Etop)
walkstmtlist(l)
init.Append(l...)
return s
}
// isAppendOfMake reports whether n is of the form append(x , make([]T, y)...).
// isAppendOfMake assumes n has already been typechecked.
func isAppendOfMake(n *Node) bool {
if Debug['N'] != 0 || instrumenting {
return false
}
if n.Typecheck() == 0 {
Fatalf("missing typecheck: %+v", n)
}
if n.Op != OAPPEND || !n.Isddd() || n.List.Len() != 2 {
return false
}
second := n.List.Second()
if second.Op != OMAKESLICE || second.Right != nil {
return false
}
// y must be either an integer constant or a variable of type int.
// typecheck checks that constant arguments to make are not negative and
// fit into an int.
// runtime.growslice uses int as type for the newcap argument.
// Constraining variables to be type int avoids the need for runtime checks
// that e.g. check if an int64 value fits into an int.
// TODO(moehrmann): support other integer types that always fit in an int
y := second.Left
if !Isconst(y, CTINT) && y.Type.Etype != TINT {
return false
}
return true
}
// extendslice rewrites append(l1, make([]T, l2)...) to
// init {
// if l2 < 0 {
// panicmakeslicelen()
// }
// s := l1
// n := len(s) + l2
// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
// // cap is a positive int and n can become negative when len(s) + l2
// // overflows int. Interpreting n when negative as uint makes it larger
// // than cap(s). growslice will check the int n arg and panic if n is
// // negative. This prevents the overflow from being undetected.
// if uint(n) > uint(cap(s)) {
// s = growslice(T, s, n)
// }
// s = s[:n]
// lptr := &l1[0]
// sptr := &s[0]
// if lptr == sptr || !hasPointers(T) {
// // growslice did not clear the whole underlying array (or did not get called)
// hp := &s[len(l1)]
// hn := l2 * sizeof(T)
// memclr(hp, hn)
// }
// }
// s
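// For example, append(b, make([]T, n)...) grows b once and zeroes only the
// new tail, so the make never becomes a separate allocation.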
func extendslice(n *Node, init *Nodes) *Node {
// isAppendOfMake made sure l2 fits in an int.
l2 := conv(n.List.Second().Left, types.Types[TINT])
l2 = typecheck(l2, Erv)
n.List.SetSecond(l2) // walkAppendArgs expects l2 in n.List.Second().
walkAppendArgs(n, init)
l1 := n.List.First()
l2 = n.List.Second() // re-read l2, as it may have been updated by walkAppendArgs
var nodes []*Node
// if l2 < 0
nifneg := nod(OIF, nod(OLT, l2, nodintconst(0)), nil)
nifneg.SetLikely(false)
// panicmakeslicelen()
nifneg.Nbody.Set1(mkcall("panicmakeslicelen", nil, init))
nodes = append(nodes, nifneg)
// s := l1
s := temp(l1.Type)
nodes = append(nodes, nod(OAS, s, l1))
elemtype := s.Type.Elem()
// n := len(s) + l2
nn := temp(types.Types[TINT])
nodes = append(nodes, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), l2)))
// if uint(n) > uint(cap(s))
nuint := conv(nn, types.Types[TUINT])
capuint := conv(nod(OCAP, s, nil), types.Types[TUINT])
nif := nod(OIF, nod(OGT, nuint, capuint), nil)
// instantiate growslice(typ *type, old []any, newcap int) []any
fn := syslook("growslice")
fn = substArgTypes(fn, elemtype, elemtype)
// s = growslice(T, s, n)
nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(elemtype), s, nn)))
nodes = append(nodes, nif)
// s = s[:n]
nt := nod(OSLICE, s, nil)
nt.SetSliceBounds(nil, nn, nil)
nodes = append(nodes, nod(OAS, s, nt))
// lptr := &l1[0]
l1ptr := temp(l1.Type.Elem().PtrTo())
tmp := nod(OSPTR, l1, nil)
nodes = append(nodes, nod(OAS, l1ptr, tmp))
// sptr := &s[0]
sptr := temp(elemtype.PtrTo())
tmp = nod(OSPTR, s, nil)
nodes = append(nodes, nod(OAS, sptr, tmp))
// hp := &s[len(l1)]
hp := nod(OINDEX, s, nod(OLEN, l1, nil))
hp.SetBounded(true)
hp = nod(OADDR, hp, nil)
hp = nod(OCONVNOP, hp, nil)
hp.Type = types.Types[TUNSAFEPTR]
// hn := l2 * sizeof(elem(s))
hn := nod(OMUL, l2, nodintconst(elemtype.Width))
hn = conv(hn, types.Types[TUINTPTR])
clrname := "memclrNoHeapPointers"
hasPointers := types.Haspointers(elemtype)
if hasPointers {
clrname = "memclrHasPointers"
}
var clr Nodes
clrfn := mkcall(clrname, nil, &clr, hp, hn)
clr.Append(clrfn)
if hasPointers {
// if l1ptr == sptr
nifclr := nod(OIF, nod(OEQ, l1ptr, sptr), nil)
nifclr.Nbody = clr
nodes = append(nodes, nifclr)
} else {
nodes = append(nodes, clr.Slice()...)
}
typecheckslice(nodes, Etop)
walkstmtlist(nodes)
init.Append(nodes...)
return s
}
// Rewrite append(src, x, y, z) so that any side effects in
// x, y, z (including runtime panics) are evaluated in
// initialization statements before the append.
// For normal code generation, stop there and leave the
// rest to cgen_append.
//
// For race detector, expand append(src, a [, b]* ) to
//
// init {
// s := src
// const argc = len(args) - 1
// if cap(s) - len(s) < argc {
// s = growslice(s, len(s)+argc)
// }
// n := len(s)
// s = s[:n+argc]
// s[n] = a
// s[n+1] = b
// ...
// }
// s
func walkappend(n *Node, init *Nodes, dst *Node) *Node {
if !samesafeexpr(dst, n.List.First()) {
n.List.SetFirst(safeexpr(n.List.First(), init))
n.List.SetFirst(walkexpr(n.List.First(), init))
}
walkexprlistsafe(n.List.Slice()[1:], init)
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
// modifying here. Fix explicitly.
// Using cheapexpr also makes sure that the evaluation
// of all arguments (and especially any panics) happen
// before we begin to modify the slice in a visible way.
ls := n.List.Slice()[1:]
for i, n := range ls {
ls[i] = cheapexpr(n, init)
}
nsrc := n.List.First()
argc := n.List.Len() - 1
if argc < 1 {
return nsrc
}
// General case, with no function calls left as arguments.
// Leave for gen, except that instrumentation requires old form.
if !instrumenting || compiling_runtime {
return n
}
var l []*Node
ns := temp(nsrc.Type)
l = append(l, nod(OAS, ns, nsrc)) // s = src
na := nodintconst(int64(argc)) // const argc
nx := nod(OIF, nil, nil) // if cap(s) - len(s) < argc
nx.Left = nod(OLT, nod(OSUB, nod(OCAP, ns, nil), nod(OLEN, ns, nil)), na)
fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem())
nx.Nbody.Set1(nod(OAS, ns,
mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns,
nod(OADD, nod(OLEN, ns, nil), na))))
l = append(l, nx)
nn := temp(types.Types[TINT])
l = append(l, nod(OAS, nn, nod(OLEN, ns, nil))) // n = len(s)
nx = nod(OSLICE, ns, nil) // ...s[:n+argc]
nx.SetSliceBounds(nil, nod(OADD, nn, na), nil)
l = append(l, nod(OAS, ns, nx)) // s = s[:n+argc]
ls = n.List.Slice()[1:]
for i, n := range ls {
nx = nod(OINDEX, ns, nn) // s[n] ...
nx.SetBounded(true)
l = append(l, nod(OAS, nx, n)) // s[n] = arg
if i+1 < len(ls) {
l = append(l, nod(OAS, nn, nod(OADD, nn, nodintconst(1)))) // n = n + 1
}
}
typecheckslice(l, Etop)
walkstmtlist(l)
init.Append(l...)
return ns
}
// Lower copy(a, b) to a memmove call or a runtime call.
//
// init {
// n := len(a)
// if n > len(b) { n = len(b) }
// if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
// }
// n;
//
// Also works if b is a string.
//
func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
if n.Left.Type.Elem().HasHeapPointer() {
Curfn.Func.setWBPos(n.Pos)
fn := writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type)
return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), n.Left, n.Right)
}
if runtimecall {
if n.Right.Type.IsString() {
fn := syslook("slicestringcopy")
fn = substArgTypes(fn, n.Left.Type, n.Right.Type)
return mkcall1(fn, n.Type, init, n.Left, n.Right)
}
fn := syslook("slicecopy")
fn = substArgTypes(fn, n.Left.Type, n.Right.Type)
return mkcall1(fn, n.Type, init, n.Left, n.Right, nodintconst(n.Left.Type.Elem().Width))
}
n.Left = walkexpr(n.Left, init)
n.Right = walkexpr(n.Right, init)
nl := temp(n.Left.Type)
nr := temp(n.Right.Type)
var l []*Node
l = append(l, nod(OAS, nl, n.Left))
l = append(l, nod(OAS, nr, n.Right))
nfrm := nod(OSPTR, nr, nil)
nto := nod(OSPTR, nl, nil)
nlen := temp(types.Types[TINT])
// n = len(to)
l = append(l, nod(OAS, nlen, nod(OLEN, nl, nil)))
// if n > len(frm) { n = len(frm) }
nif := nod(OIF, nil, nil)
nif.Left = nod(OGT, nlen, nod(OLEN, nr, nil))
nif.Nbody.Append(nod(OAS, nlen, nod(OLEN, nr, nil)))
l = append(l, nif)
// if to.ptr != frm.ptr { memmove( ... ) }
ne := nod(OIF, nod(ONE, nto, nfrm), nil)
ne.SetLikely(true)
l = append(l, ne)
fn := syslook("memmove")
fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem())
nwid := temp(types.Types[TUINTPTR])
setwid := nod(OAS, nwid, conv(nlen, types.Types[TUINTPTR]))
ne.Nbody.Append(setwid)
nwid = nod(OMUL, nwid, nodintconst(nl.Type.Elem().Width))
call := mkcall1(fn, nil, init, nto, nfrm, nwid)
ne.Nbody.Append(call)
typecheckslice(l, Etop)
walkstmtlist(l)
init.Append(l...)
return nlen
}
func eqfor(t *types.Type) (n *Node, needsize bool) {
// Should only arrive here with large memory or
// a struct/array containing a non-memory field/element.
// Small memory is handled inline, and single non-memory
// is handled during type check (OCMPSTR etc).
switch a, _ := algtype1(t); a {
case AMEM:
n := syslook("memequal")
n = substArgTypes(n, t, t)
return n, true
case ASPECIAL:
sym := typesymprefix(".eq", t)
n := newname(sym)
n.SetClass(PFUNC)
n.Type = functype(nil, []*Node{
anonfield(types.NewPtr(t)),
anonfield(types.NewPtr(t)),
}, []*Node{
anonfield(types.Types[TBOOL]),
})
return n, false
}
Fatalf("eqfor %v", t)
return nil, false
}
// The result of walkcompare MUST be assigned back to n, e.g.
// n.Left = walkcompare(n.Left, init)
func walkcompare(n *Node, init *Nodes) *Node {
// Given interface value l and concrete value r, rewrite
// l == r
// into types-equal && data-equal.
// This is efficient, avoids allocations, and avoids runtime calls.
var l, r *Node
if n.Left.Type.IsInterface() && !n.Right.Type.IsInterface() {
l = n.Left
r = n.Right
} else if !n.Left.Type.IsInterface() && n.Right.Type.IsInterface() {
l = n.Right
r = n.Left
}
if l != nil {
// Handle both == and !=.
eq := n.Op
var andor Op
if eq == OEQ {
andor = OANDAND
} else {
andor = OOROR
}
// Check for types equal.
// For empty interface, this is:
// l.tab == type(r)
// For non-empty interface, this is:
// l.tab != nil && l.tab._type == type(r)
var eqtype *Node
tab := nod(OITAB, l, nil)
rtyp := typename(r.Type)
if l.Type.IsEmptyInterface() {
tab.Type = types.NewPtr(types.Types[TUINT8])
tab.SetTypecheck(1)
eqtype = nod(eq, tab, rtyp)
} else {
nonnil := nod(brcom(eq), nodnil(), tab)
match := nod(eq, itabType(tab), rtyp)
eqtype = nod(andor, nonnil, match)
}
// Check for data equal.
eqdata := nod(eq, ifaceData(l, r.Type), r)
// Put it all together.
expr := nod(andor, eqtype, eqdata)
n = finishcompare(n, expr, init)
return n
}
// Must be comparison of array or struct.
// Otherwise back end handles it.
// While we're here, decide whether to
// inline or call an eq alg.
t := n.Left.Type
var inline bool
maxcmpsize := int64(4)
unalignedLoad := canMergeLoads()
if unalignedLoad {
// Keep this low enough to generate less code than a function call.
maxcmpsize = 2 * int64(thearch.LinkArch.RegSize)
}
switch t.Etype {
default:
return n
case TARRAY:
// We can compare several elements at once with 2/4/8 byte integer compares
inline = t.NumElem() <= 1 || (issimple[t.Elem().Etype] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
case TSTRUCT:
inline = t.NumComponents(types.IgnoreBlankFields) <= 4
}
cmpl := n.Left
for cmpl != nil && cmpl.Op == OCONVNOP {
cmpl = cmpl.Left
}
cmpr := n.Right
for cmpr != nil && cmpr.Op == OCONVNOP {
cmpr = cmpr.Left
}
// Chose not to inline. Call equality function directly.
if !inline {
if isvaluelit(cmpl) {
var_ := temp(cmpl.Type)
anylit(cmpl, var_, init)
cmpl = var_
}
if isvaluelit(cmpr) {
var_ := temp(cmpr.Type)
anylit(cmpr, var_, init)
cmpr = var_
}
if !islvalue(cmpl) || !islvalue(cmpr) {
Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
}
// eq algs take pointers
pl := temp(types.NewPtr(t))
al := nod(OAS, pl, nod(OADDR, cmpl, nil))
al = typecheck(al, Etop)
init.Append(al)
pr := temp(types.NewPtr(t))
ar := nod(OAS, pr, nod(OADDR, cmpr, nil))
ar = typecheck(ar, Etop)
init.Append(ar)
fn, needsize := eqfor(t)
call := nod(OCALL, fn, nil)
call.List.Append(pl)
call.List.Append(pr)
if needsize {
call.List.Append(nodintconst(t.Width))
}
res := call
if n.Op != OEQ {
res = nod(ONOT, res, nil)
}
n = finishcompare(n, res, init)
return n
}
// inline: build boolean expression comparing element by element
andor := OANDAND
if n.Op == ONE {
andor = OOROR
}
var expr *Node
compare := func(el, er *Node) {
a := nod(n.Op, el, er)
if expr == nil {
expr = a
} else {
expr = nod(andor, expr, a)
}
}
cmpl = safeexpr(cmpl, init)
cmpr = safeexpr(cmpr, init)
if t.IsStruct() {
for _, f := range t.Fields().Slice() {
sym := f.Sym
if sym.IsBlank() {
continue
}
compare(
nodSym(OXDOT, cmpl, sym),
nodSym(OXDOT, cmpr, sym),
)
}
} else {
step := int64(1)
remains := t.NumElem() * t.Elem().Width
combine64bit := unalignedLoad && Widthreg == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger()
combine32bit := unalignedLoad && t.Elem().Width <= 2 && t.Elem().IsInteger()
combine16bit := unalignedLoad && t.Elem().Width == 1 && t.Elem().IsInteger()
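// For example, with mergeable loads a [4]byte comparison combines into a single 32-bit compare.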
for i := int64(0); remains > 0; {
var convType *types.Type
switch {
case remains >= 8 && combine64bit:
convType = types.Types[TINT64]
step = 8 / t.Elem().Width
case remains >= 4 && combine32bit:
convType = types.Types[TUINT32]
step = 4 / t.Elem().Width
case remains >= 2 && combine16bit:
convType = types.Types[TUINT16]
step = 2 / t.Elem().Width
default:
step = 1
}
if step == 1 {
compare(
nod(OINDEX, cmpl, nodintconst(i)),
nod(OINDEX, cmpr, nodintconst(i)),
)
i++
remains -= t.Elem().Width
} else {
elemType := t.Elem().ToUnsigned()
cmplw := nod(OINDEX, cmpl, nodintconst(i))
cmplw = conv(cmplw, elemType) // convert to unsigned
cmplw = conv(cmplw, convType) // widen
cmprw := nod(OINDEX, cmpr, nodintconst(i))
cmprw = conv(cmprw, elemType)
cmprw = conv(cmprw, convType)
// For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
// ssa will generate a single large load.
for offset := int64(1); offset < step; offset++ {
lb := nod(OINDEX, cmpl, nodintconst(i+offset))
lb = conv(lb, elemType)
lb = conv(lb, convType)
lb = nod(OLSH, lb, nodintconst(8*t.Elem().Width*offset))
cmplw = nod(OOR, cmplw, lb)
rb := nod(OINDEX, cmpr, nodintconst(i+offset))
rb = conv(rb, elemType)
rb = conv(rb, convType)
rb = nod(OLSH, rb, nodintconst(8*t.Elem().Width*offset))
cmprw = nod(OOR, cmprw, rb)
}
compare(cmplw, cmprw)
i += step
remains -= step * t.Elem().Width
}
}
}
if expr == nil {
expr = nodbool(n.Op == OEQ)
}
n = finishcompare(n, expr, init)
return n
}
// The result of finishcompare MUST be assigned back to n, e.g.
// n.Left = finishcompare(n.Left, x, r, init)
func finishcompare(n, r *Node, init *Nodes) *Node {
r = typecheck(r, Erv)
r = conv(r, n.Type)
r = walkexpr(r, init)
return r
}
// isIntOrdering reports whether n is a <, โค, >, or โฅ ordering between integers.
func (n *Node) isIntOrdering() bool {
switch n.Op {
case OLE, OLT, OGE, OGT:
default:
return false
}
return n.Left.Type.IsInteger() && n.Right.Type.IsInteger()
}
// walkinrange optimizes integer-in-range checks, such as 4 <= x && x < 10.
// n must be an OANDAND or OOROR node.
// The result of walkinrange MUST be assigned back to n, e.g.
// n.Left = walkinrange(n.Left)
func walkinrange(n *Node, init *Nodes) *Node {
// We are looking for something equivalent to a opl b OP b opr c, where:
// * a, b, and c have integer type
// * b is side-effect-free
// * opl and opr are each < or โค
// * OP is &&
l := n.Left
r := n.Right
if !l.isIntOrdering() || !r.isIntOrdering() {
return n
}
// Find b, if it exists, and rename appropriately.
// Input is: l.Left l.Op l.Right ANDAND/OROR r.Left r.Op r.Right
// Output is: a opl b(==x) ANDAND/OROR b(==x) opr c
a, opl, b := l.Left, l.Op, l.Right
x, opr, c := r.Left, r.Op, r.Right
for i := 0; ; i++ {
if samesafeexpr(b, x) {
break
}
if i == 3 {
// Tried all permutations and couldn't find an appropriate b == x.
return n
}
if i&1 == 0 {
a, opl, b = b, brrev(opl), a
} else {
x, opr, c = c, brrev(opr), x
}
}
// If n.Op is ||, apply de Morgan.
// Negate the internal ops now; we'll negate the top level op at the end.
// Henceforth assume &&.
negateResult := n.Op == OOROR
if negateResult {
opl = brcom(opl)
opr = brcom(opr)
}
cmpdir := func(o Op) int {
switch o {
case OLE, OLT:
return -1
case OGE, OGT:
return +1
}
Fatalf("walkinrange cmpdir %v", o)
return 0
}
if cmpdir(opl) != cmpdir(opr) {
// Not a range check; something like b < a && b < c.
return n
}
switch opl {
case OGE, OGT:
// We have something like a > b && b โฅ c.
// Switch and reverse ops and rename constants,
// to make it look like a โค b && b < c.
a, c = c, a
opl, opr = brrev(opr), brrev(opl)
}
// We must ensure that c-a is non-negative.
// For now, require a and c to be constants.
// In the future, we could also support a == 0 and c == len/cap(...).
// Unfortunately, by this point, most len/cap expressions have been
// stored into temporary variables.
if !Isconst(a, CTINT) || !Isconst(c, CTINT) {
return n
}
if opl == OLT {
// We have a < b && ...
// We need a โค b && ... to safely use unsigned comparison tricks.
// If a is not the maximum constant for b's type,
// we can increment a and switch to โค.
if a.Int64() >= maxintval[b.Type.Etype].Int64() {
return n
}
a = nodintconst(a.Int64() + 1)
opl = OLE
}
bound := c.Int64() - a.Int64()
if bound < 0 {
// Bad news. Something like 5 <= x && x < 3.
// Rare in practice, and we still need to generate side-effects,
// so just leave it alone.
return n
}
// We have a โค b && b < c (or a โค b && b โค c).
// This is equivalent to (a-a) โค (b-a) && (b-a) < (c-a),
// which is equivalent to 0 โค (b-a) && (b-a) < (c-a),
// which is equivalent to uint(b-a) < uint(c-a).
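// For example, 4 <= x && x < 10 becomes uint(x-4) < 6.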
ut := b.Type.ToUnsigned()
lhs := conv(nod(OSUB, b, a), ut)
rhs := nodintconst(bound)
if negateResult {
// Negate top level.
opr = brcom(opr)
}
cmp := nod(opr, lhs, rhs)
cmp.Pos = n.Pos
cmp = addinit(cmp, l.Ninit.Slice())
cmp = addinit(cmp, r.Ninit.Slice())
// Typecheck the AST rooted at cmp...
cmp = typecheck(cmp, Erv)
// ...but then reset cmp's type to match n's type.
cmp.Type = n.Type
cmp = walkexpr(cmp, init)
return cmp
}
// bounded reports whether the integer n must be in the range [0, max).
func bounded(n *Node, max int64) bool {
if n.Type == nil || !n.Type.IsInteger() {
return false
}
sign := n.Type.IsSigned()
bits := int32(8 * n.Type.Width)
if smallintconst(n) {
v := n.Int64()
return 0 <= v && v < max
}
switch n.Op {
case OAND:
v := int64(-1)
if smallintconst(n.Left) {
v = n.Left.Int64()
} else if smallintconst(n.Right) {
v = n.Right.Int64()
}
if 0 <= v && v < max {
return true
}
case OMOD:
if !sign && smallintconst(n.Right) {
v := n.Right.Int64()
if 0 <= v && v <= max {
return true
}
}
case ODIV:
if !sign && smallintconst(n.Right) {
v := n.Right.Int64()
for bits > 0 && v >= 2 {
bits--
v >>= 1
}
}
case ORSH:
if !sign && smallintconst(n.Right) {
v := n.Right.Int64()
if v > int64(bits) {
return true
}
bits -= int32(v)
}
}
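// If the (possibly narrowed) bit width already limits n to values below max, n is bounded.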
if !sign && bits <= 62 && 1<<uint(bits) <= max {
return true
}
return false
}
// usemethod checks interface method calls for uses of reflect.Type.Method.
func usemethod(n *Node) {
t := n.Left.Type
// Looking for either of:
// Method(int) reflect.Method
// MethodByName(string) (reflect.Method, bool)
//
// TODO(crawshaw): improve precision of match by working out
// how to check the method name.
if n := t.NumParams(); n != 1 {
return
}
if n := t.NumResults(); n != 1 && n != 2 {
return
}
p0 := t.Params().Field(0)
res0 := t.Results().Field(0)
var res1 *types.Field
if t.NumResults() == 2 {
res1 = t.Results().Field(1)
}
if res1 == nil {
if p0.Type.Etype != TINT {
return
}
} else {
if !p0.Type.IsString() {
return
}
if !res1.Type.IsBoolean() {
return
}
}
// Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors
// (including global variables such as numImports - was issue #19028).
if s := res0.Type.Sym; s != nil && s.Name == "Method" && s.Pkg != nil && s.Pkg.Path == "reflect" {
Curfn.Func.SetReflectMethod(true)
}
}
func usefield(n *Node) {
if objabi.Fieldtrack_enabled == 0 {
return
}
switch n.Op {
default:
Fatalf("usefield %v", n.Op)
case ODOT, ODOTPTR:
break
}
if n.Sym == nil {
// No field name. This DOTPTR was built by the compiler for access
// to runtime data structures. Ignore.
return
}
t := n.Left.Type
if t.IsPtr() {
t = t.Elem()
}
field := dotField[typeSymKey{t.Orig, n.Sym}]
if field == nil {
Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym)
}
if !strings.Contains(field.Note, "go:\"track\"") {
return
}
outer := n.Left.Type
if outer.IsPtr() {
outer = outer.Elem()
}
if outer.Sym == nil {
yyerror("tracked field must be in named struct type")
}
if !types.IsExported(field.Sym.Name) {
yyerror("tracked field must be exported (upper case)")
}
sym := tracksym(outer, field)
if Curfn.Func.FieldTrack == nil {
Curfn.Func.FieldTrack = make(map[*types.Sym]struct{})
}
Curfn.Func.FieldTrack[sym] = struct{}{}
}
func candiscardlist(l Nodes) bool {
for _, n := range l.Slice() {
if !candiscard(n) {
return false
}
}
return true
}
func candiscard(n *Node) bool {
if n == nil {
return true
}
switch n.Op {
default:
return false
// Discardable as long as the subpieces are.
case ONAME,
ONONAME,
OTYPE,
OPACK,
OLITERAL,
OADD,
OSUB,
OOR,
OXOR,
OADDSTR,
OADDR,
OANDAND,
OARRAYBYTESTR,
OARRAYRUNESTR,
OSTRARRAYBYTE,
OSTRARRAYRUNE,
OCAP,
OCMPIFACE,
OCMPSTR,
OCOMPLIT,
OMAPLIT,
OSTRUCTLIT,
OARRAYLIT,
OSLICELIT,
OPTRLIT,
OCONV,
OCONVIFACE,
OCONVNOP,
ODOT,
OEQ,
ONE,
OLT,
OLE,
OGT,
OGE,
OKEY,
OSTRUCTKEY,
OLEN,
OMUL,
OLSH,
ORSH,
OAND,
OANDNOT,
ONEW,
ONOT,
OCOM,
OPLUS,
OMINUS,
OOROR,
OPAREN,
ORUNESTR,
OREAL,
OIMAG,
OCOMPLEX:
break
// Discardable as long as we know it's not division by zero.
case ODIV, OMOD:
if Isconst(n.Right, CTINT) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 {
break
}
if Isconst(n.Right, CTFLT) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 {
break
}
return false
// Discardable as long as we know it won't fail because of a bad size.
case OMAKECHAN, OMAKEMAP:
if Isconst(n.Left, CTINT) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 {
break
}
return false
// Difficult to tell what sizes are okay.
case OMAKESLICE:
return false
}
if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
return false
}
return true
}
// Rewrite
// go builtin(x, y, z)
// into
// go func(a1, a2, a3) {
// builtin(a1, a2, a3)
// }(x, y, z)
// for print, println, and delete.
var wrapCall_prgen int
// The result of wrapCall MUST be assigned back to n, e.g.
// n.Left = wrapCall(n.Left, init)
func wrapCall(n *Node, init *Nodes) *Node {
if n.Ninit.Len() != 0 | tutes the given list of types for
// successive occurrences of the "any" placeholder in the
// type syntax expression n.Type.
// The result of substArgTypes MUST be assigned back to old, e.g.
// n.Left = substArgTypes(n.Left, t1, t2)
func substArgTypes(old *Node, types_ ...*types.Type) *Node {
n := old.copy() // make shallow copy
for _, t := range types_ {
dowidth(t)
}
n.Type = types.SubstAny(n.Type, &types_)
if len(types_) > 0 {
Fatalf("substArgTypes: too many argument types")
}
return n
}
// canMergeLoads reports whether the backend optimization passes for
// the current architecture can combine adjacent loads into a single
// larger, possibly unaligned, load. Note that currently the
// optimizations must be able to handle little endian byte order.
func canMergeLoads() bool {
switch thearch.LinkArch.Family {
case sys.ARM64, sys.AMD64, sys.I386, sys.S390X:
return true
case sys.PPC64:
// Load combining only supported on ppc64le.
return thearch.LinkArch.ByteOrder == binary.LittleEndian
}
return false
}
// isRuneCount reports whether n is of the form len([]rune(string)).
// These are optimized into a call to runtime.countrunes.
func isRuneCount(n *Node) bool {
return Debug['N'] == 0 && !instrumenting && n.Op == OLEN && n.Left.Op == OSTRARRAYRUNE
}
| {
walkstmtlist(n.Ninit.Slice())
init.AppendNodes(&n.Ninit)
}
t := nod(OTFUNC, nil, nil)
for i, arg := range n.List.Slice() {
s := lookupN("a", i)
t.List.Append(symfield(s, arg.Type))
}
wrapCall_prgen++
sym := lookupN("wrapยท", wrapCall_prgen)
fn := dclfunc(sym, t)
a := nod(n.Op, nil, nil)
a.List.Set(paramNnames(t.Type))
a = typecheck(a, Etop)
fn.Nbody.Set1(a)
funcbody()
fn = typecheck(fn, Etop)
typecheckslice(fn.Nbody.Slice(), Etop)
xtop = append(xtop, fn)
a = nod(OCALL, nil, nil)
a.Left = fn.Func.Nname
a.List.Set(n.List.Slice())
a = typecheck(a, Etop)
a = walkexpr(a, init)
return a
}
// substArgTypes substi |
app.module.ts | import { Module } from '@nestjs/common'
import { ViewModule } from '~server/modules/view/view.module'
import { DummyModule } from '~server/modules/dummy/dummy.module'
@Module({
imports: [DummyModule, ViewModule],
controllers: [], | })
export class AppModule {} | providers: [] |
messengerdestinationpagewelcomemessage.py | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class MessengerDestinationPageWelcomeMessage(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isMessengerDestinationPageWelcomeMessage = True
super(MessengerDestinationPageWelcomeMessage, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
|
def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=MessengerDestinationPageWelcomeMessage,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
_field_types = {
'id': 'string',
'page_welcome_message_body': 'string',
'page_welcome_message_type': 'string',
'template_name': 'string',
'time_created': 'datetime',
'time_last_used': 'datetime',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info
| id = 'id'
page_welcome_message_body = 'page_welcome_message_body'
page_welcome_message_type = 'page_welcome_message_type'
template_name = 'template_name'
time_created = 'time_created'
time_last_used = 'time_last_used' |
commands.rs | use super::SystemId;
use crate::{
resource::{Resource, Resources},
Bundle, Component, ComponentError, DynamicBundle, Entity, EntityReserver, World,
};
use bevy_utils::tracing::{debug, warn};
use std::marker::PhantomData;
/// A [World] mutation
pub trait Command: Send + Sync {
fn write(self: Box<Self>, world: &mut World, resources: &mut Resources);
}
#[derive(Debug)]
pub(crate) struct Spawn<T>
where
T: DynamicBundle + Send + Sync + 'static,
{
bundle: T,
}
impl<T> Command for Spawn<T>
where
T: DynamicBundle + Send + Sync + 'static,
{
fn write(self: Box<Self>, world: &mut World, _resources: &mut Resources) {
world.spawn(self.bundle);
}
}
pub(crate) struct SpawnBatch<I>
where
I: IntoIterator,
I::Item: Bundle,
{
bundles_iter: I,
}
impl<I> Command for SpawnBatch<I>
where
I: IntoIterator + Send + Sync,
I::Item: Bundle,
{
fn write(self: Box<Self>, world: &mut World, _resources: &mut Resources) {
world.spawn_batch(self.bundles_iter);
}
}
#[derive(Debug)]
pub(crate) struct Despawn {
entity: Entity,
}
impl Command for Despawn {
fn write(self: Box<Self>, world: &mut World, _resources: &mut Resources) {
if let Err(e) = world.despawn(self.entity) {
debug!("Failed to despawn entity {:?}: {}", self.entity, e);
}
}
}
pub struct Insert<T>
where
T: DynamicBundle + Send + Sync + 'static,
{
entity: Entity,
bundle: T,
}
impl<T> Command for Insert<T>
where
T: DynamicBundle + Send + Sync + 'static,
{
fn write(self: Box<Self>, world: &mut World, _resources: &mut Resources) {
world.insert(self.entity, self.bundle).unwrap();
}
}
#[derive(Debug)]
pub(crate) struct InsertOne<T>
where
T: Component,
{
entity: Entity,
component: T,
}
impl<T> Command for InsertOne<T>
where
T: Component,
{
fn write(self: Box<Self>, world: &mut World, _resources: &mut Resources) {
world.insert(self.entity, (self.component,)).unwrap();
}
}
#[derive(Debug)]
pub(crate) struct RemoveOne<T>
where
T: Component,
{
entity: Entity,
phantom: PhantomData<T>,
}
impl<T> Command for RemoveOne<T>
where
T: Component,
{
fn write(self: Box<Self>, world: &mut World, _resources: &mut Resources) {
if world.get::<T>(self.entity).is_ok() {
world.remove_one::<T>(self.entity).unwrap();
}
}
}
#[derive(Debug)]
pub(crate) struct Remove<T>
where
T: Bundle + Send + Sync + 'static,
{
entity: Entity,
phantom: PhantomData<T>,
}
impl<T> Command for Remove<T>
where
T: Bundle + Send + Sync + 'static,
{
fn write(self: Box<Self>, world: &mut World, _resources: &mut Resources) {
match world.remove::<T>(self.entity) {
Ok(_) => (),
Err(ComponentError::MissingComponent(e)) => {
warn!(
"Failed to remove components {:?} with error: {}. Falling back to inefficient one-by-one component removing.",
std::any::type_name::<T>(),
e
);
if let Err(e) = world.remove_one_by_one::<T>(self.entity) {
debug!(
"Failed to remove components {:?} with error: {}",
std::any::type_name::<T>(),
e
);
}
}
Err(e) => {
debug!(
"Failed to remove components {:?} with error: {}",
std::any::type_name::<T>(),
e
);
}
}
}
}
pub struct InsertResource<T: Resource> {
resource: T,
}
impl<T: Resource> Command for InsertResource<T> {
fn write(self: Box<Self>, _world: &mut World, resources: &mut Resources) {
resources.insert(self.resource);
}
}
#[derive(Debug)]
pub(crate) struct InsertLocalResource<T: Resource> {
resource: T,
system_id: SystemId,
}
impl<T: Resource> Command for InsertLocalResource<T> {
fn write(self: Box<Self>, _world: &mut World, resources: &mut Resources) {
resources.insert_local(self.system_id, self.resource);
}
}
/// A list of commands that will be run to populate a `World` and `Resources`.
#[derive(Default)]
pub struct Commands {
commands: Vec<Box<dyn Command>>,
current_entity: Option<Entity>,
entity_reserver: Option<EntityReserver>,
}
impl Commands {
/// Creates a new entity with the components contained in `bundle`.
///
/// Note that `bundle` is a [DynamicBundle], which is a collection of components. [DynamicBundle] is automatically implemented for tuples of components. You can also create your own bundle types by deriving [`derive@Bundle`]. If you would like to spawn an entity with a single component, consider wrapping the component in a tuple (which [DynamicBundle] is implemented for).
///
/// See [`Self::set_current_entity`], [`Self::insert`].
///
/// # Example
///
/// ```
/// use bevy_ecs::prelude::*;
///
/// struct Component1;
/// struct Component2;
///
/// #[derive(Bundle)]
/// struct ExampleBundle {
/// a: Component1,
/// b: Component2,
/// }
///
/// fn example_system(mut commands: &mut Commands) {
/// // Create a new entity with a component bundle.
/// commands.spawn(ExampleBundle {
/// a: Component1,
/// b: Component2,
/// });
///
/// // Create a new entity with a single component.
/// commands.spawn((Component1,));
/// // Create a new entity with two components.
/// commands.spawn((Component1, Component2));
/// }
/// ```
pub fn spawn(&mut self, bundle: impl DynamicBundle + Send + Sync + 'static) -> &mut Self {
let entity = self
.entity_reserver
.as_ref()
.expect("Entity reserver has not been set.")
.reserve_entity();
self.set_current_entity(entity);
self.insert(entity, bundle);
self
}
/// Equivalent to iterating `bundles_iter` and calling [`Self::spawn`] on each bundle, but slightly more performant.
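///
/// For example (a sketch, assuming a tuple bundle of plain components):
/// `commands.spawn_batch((0..10).map(|i| (i as u32, 2u64)));`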
pub fn spawn_batch<I>(&mut self, bundles_iter: I) -> &mut Self
where
I: IntoIterator + Send + Sync + 'static,
I::Item: Bundle,
{
self.add_command(SpawnBatch { bundles_iter })
}
/// Despawns only the specified entity, not including its children.
pub fn despawn(&mut self, entity: Entity) -> &mut Self {
self.add_command(Despawn { entity })
}
/// Inserts a bundle of components into `entity`.
///
/// See [`World::insert`].
pub fn insert(
&mut self,
entity: Entity,
bundle: impl DynamicBundle + Send + Sync + 'static,
) -> &mut Self {
self.add_command(Insert { entity, bundle })
}
/// Inserts a single component into `entity`.
///
/// See [`World::insert_one`].
pub fn insert_one(&mut self, entity: Entity, component: impl Component) -> &mut Self {
self.add_command(InsertOne { entity, component })
}
pub fn insert_resource<T: Resource>(&mut self, resource: T) -> &mut Self {
self.add_command(InsertResource { resource })
}
/// Insert a resource that is local to a specific system.
///
/// See [`crate::System::id`].
pub fn insert_local_resource<T: Resource>(
&mut self,
system_id: SystemId,
resource: T,
) -> &mut Self {
self.add_command(InsertLocalResource {
system_id,
resource,
})
}
/// See [`World::remove_one`].
pub fn remove_one<T>(&mut self, entity: Entity) -> &mut Self
where
T: Component,
{
self.add_command(RemoveOne::<T> {
entity,
phantom: PhantomData,
})
}
/// See [`World::remove`].
pub fn remove<T>(&mut self, entity: Entity) -> &mut Self
where
T: Bundle + Send + Sync + 'static,
{
self.add_command(Remove::<T> {
entity,
phantom: PhantomData,
})
}
/// Adds a bundle of components to the current entity.
///
/// See [`Self::with`], [`Self::current_entity`].
pub fn with_bundle(&mut self, bundle: impl DynamicBundle + Send + Sync + 'static) -> &mut Self {
let current_entity = self.current_entity.expect("Cannot add bundle because the 'current entity' is not set. You should spawn an entity first.");
self.commands.push(Box::new(Insert {
entity: current_entity,
bundle,
}));
self
}
/// Adds a single component to the current entity.
///
/// See [`Self::with_bundle`], [`Self::current_entity`].
///
/// # Warning
///
/// It's possible to call this with a bundle, but this is likely not intended and [`Self::with_bundle`] should be used instead. If `with` is called with a bundle, the bundle itself will be added as a component instead of the bundles' inner components each being added.
///
/// # Example
///
/// `with` can be chained with [`Self::spawn`].
///
/// ```
/// use bevy_ecs::prelude::*;
///
/// struct Component1;
/// struct Component2;
///
/// fn example_system(mut commands: Commands) {
/// // Create a new entity with a `Component1` and `Component2`.
/// commands.spawn((Component1,)).with(Component2);
///
/// // Psst! These are also equivalent to the line above!
/// commands.spawn((Component1, Component2));
/// commands.spawn(()).with(Component1).with(Component2);
/// #[derive(Bundle)]
/// struct ExampleBundle {
/// a: Component1,
/// b: Component2,
/// }
/// commands.spawn(()).with_bundle(ExampleBundle {
/// a: Component1,
/// b: Component2,
/// });
/// }
/// ```
pub fn with(&mut self, component: impl Component) -> &mut Self |
/// Adds a command directly to the command list. Prefer this to [`Self::add_command_boxed`] if the type of `command` is statically known.
pub fn add_command<C: Command + 'static>(&mut self, command: C) -> &mut Self {
self.commands.push(Box::new(command));
self
}
/// See [`Self::add_command`].
pub fn add_command_boxed(&mut self, command: Box<dyn Command>) -> &mut Self {
self.commands.push(command);
self
}
/// Runs all the stored commands on `world` and `resources`. The command buffer is emptied as a part of this call.
pub fn apply(&mut self, world: &mut World, resources: &mut Resources) {
for command in self.commands.drain(..) {
command.write(world, resources);
}
}
/// Returns the current entity, set by [`Self::spawn`] or with [`Self::set_current_entity`].
pub fn current_entity(&self) -> Option<Entity> {
self.current_entity
}
pub fn set_current_entity(&mut self, entity: Entity) {
self.current_entity = Some(entity);
}
pub fn clear_current_entity(&mut self) {
self.current_entity = None;
}
pub fn for_current_entity(&mut self, f: impl FnOnce(Entity)) -> &mut Self {
let current_entity = self
.current_entity
.expect("The 'current entity' is not set. You should spawn an entity first.");
f(current_entity);
self
}
pub fn set_entity_reserver(&mut self, entity_reserver: EntityReserver) {
self.entity_reserver = Some(entity_reserver);
}
}
#[cfg(test)]
mod tests {
use crate::{resource::Resources, Commands, World};
#[test]
fn command_buffer() {
let mut world = World::default();
let mut resources = Resources::default();
let mut command_buffer = Commands::default();
command_buffer.set_entity_reserver(world.get_entity_reserver());
command_buffer.spawn((1u32, 2u64));
let entity = command_buffer.current_entity().unwrap();
command_buffer.insert_resource(3.14f32);
command_buffer.apply(&mut world, &mut resources);
let results = world
.query::<(&u32, &u64)>()
.map(|(a, b)| (*a, *b))
.collect::<Vec<_>>();
assert_eq!(results, vec![(1u32, 2u64)]);
assert_eq!(*resources.get::<f32>().unwrap(), 3.14f32);
// test entity despawn
command_buffer.despawn(entity);
command_buffer.despawn(entity); // double despawn shouldn't panic
command_buffer.apply(&mut world, &mut resources);
let results2 = world
.query::<(&u32, &u64)>()
.map(|(a, b)| (*a, *b))
.collect::<Vec<_>>();
assert_eq!(results2, vec![]);
}
#[test]
fn remove_components() {
let mut world = World::default();
let mut resources = Resources::default();
let mut command_buffer = Commands::default();
command_buffer.set_entity_reserver(world.get_entity_reserver());
command_buffer.spawn((1u32, 2u64));
let entity = command_buffer.current_entity().unwrap();
command_buffer.apply(&mut world, &mut resources);
let results_before = world
.query::<(&u32, &u64)>()
.map(|(a, b)| (*a, *b))
.collect::<Vec<_>>();
assert_eq!(results_before, vec![(1u32, 2u64)]);
// test component removal
command_buffer.remove_one::<u32>(entity);
command_buffer.remove::<(u32, u64)>(entity);
command_buffer.apply(&mut world, &mut resources);
let results_after = world
.query::<(&u32, &u64)>()
.map(|(a, b)| (*a, *b))
.collect::<Vec<_>>();
assert_eq!(results_after, vec![]);
let results_after_u64 = world.query::<&u64>().map(|a| *a).collect::<Vec<_>>();
assert_eq!(results_after_u64, vec![]);
}
}
| {
let current_entity = self.current_entity.expect("Cannot add component because the 'current entity' is not set. You should spawn an entity first.");
self.commands.push(Box::new(InsertOne {
entity: current_entity,
component,
}));
self
} |
test_meetup.py | # # Third party imports
# import unittest
# import json
# # Local imports
# from app import create_app
# class TestMeetups(unittest.TestCase):
# def setUp(self):
# self.app = create_app("testing")
# self.client = self.app.test_client()
# self.meetup_incomplete ={
# "topic" : "Programming"
# }
# self.meetup_complete ={
# "id": "1",
# "topic" : "Udacity welcom",
# "location" : "San Fransisco or remotely via zoom",
# "happeningOn" : "Tommorow"
# }
# # Test validity of json data in request
# def test_post_meetup(self):
# response = self.client.post('api/v1/meetups')
# result = json.loads(response.data)
# self.assertEqual(result["message"],"Only Application/JSON input expected")
# self.assertEqual(response.status_code, 400)
# # Test empty fields
# def test_post_empty_meetup(self):
# response = self.client.post('api/v1/meetups',data=json.dumps(self.meetup_incomplete),
# content_type="application/json")
# result = json.loads(response.data)
# self.assertEqual(result["message"],"All fields must be populated with data")
# self.assertEqual(response.status_code, 400)
# # Test valid input for meetup creation
# def test_post_meetup_success(self):
# response = self.client.post('api/v1/meetups', data=json.dumps(self.meetup_complete),
# content_type="application/json")
# result = json.loads(response.data)
# self.assertEqual(result["message"],"Meetup created succesfully")
# self.assertEqual(response.status_code, 201)
import unittest
import json
from app import create_app
| def setUp(self):
self.app = create_app("testing")
self.client = self.app.test_client()
def create_record(self):
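# Helper: POST a sample meetup record, reused by the creation and fetch tests below.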
response = self.client.post('/api/v1/meetups',
data=json.dumps({
"title": "Football",
"description": "Playing football on 25th",
"date": "25th of November",
"location": "Kasarani"
}),
headers={"content-type": "application/json"})
return response
# Test meetups creation
def test_01_post_meetups(self):
response = self.create_record()
self.assertEqual(response.status_code, 201)
# Test for fetching all meetup records
def test_02_get_all(self):
response = self.client.get('/api/v1/meetups',
headers={"content-type": "application/json"})
self.assertEqual(response.status_code, 200)
# Test for getting a specific meetup record
def test_03_get_specific(self):
self.create_record()
response = self.client.get('/api/v1/meetups/1',
headers={"content-type": "application/json"})
self.assertEqual(response.status_code, 200)
self.assertNotEqual(response.status_code, 404) | class TestMeetups(unittest.TestCase):
|
run-testing.py | import cv2
import numpy as np
import torch
from path import Path
from tqdm import tqdm
from dvmvs.config import Config
from dvmvs.dataset_loader import PreprocessImage, load_image
from dvmvs.pairnet.model import FeatureExtractor, FeatureShrinker, CostVolumeEncoder, CostVolumeDecoder
from dvmvs.utils import cost_volume_fusion, save_results, visualize_predictions, InferenceTimer, get_warp_grid_for_cost_volume_calculation
def | ():
print("System: PAIRNET")
device = torch.device("cuda")
feature_extractor = FeatureExtractor()
feature_shrinker = FeatureShrinker()
cost_volume_encoder = CostVolumeEncoder()
cost_volume_decoder = CostVolumeDecoder()
feature_extractor = feature_extractor.to(device)
feature_shrinker = feature_shrinker.to(device)
cost_volume_encoder = cost_volume_encoder.to(device)
cost_volume_decoder = cost_volume_decoder.to(device)
model = [feature_extractor, feature_shrinker, cost_volume_encoder, cost_volume_decoder]
for i in range(len(model)):
try:
checkpoint = sorted(Path("weights").files())[i]
weights = torch.load(checkpoint)
model[i].load_state_dict(weights)
model[i].eval()
print("Loaded weights for", checkpoint)
except Exception as e:
print(e)
print("Could not find the checkpoint for module", i)
exit(1)
feature_extractor = model[0]
feature_shrinker = model[1]
cost_volume_encoder = model[2]
cost_volume_decoder = model[3]
warp_grid = get_warp_grid_for_cost_volume_calculation(width=int(Config.test_image_width / 2),
height=int(Config.test_image_height / 2),
device=device)
scale_rgb = 255.0
mean_rgb = [0.485, 0.456, 0.406]
std_rgb = [0.229, 0.224, 0.225]
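    # Plane-sweep settings: the cost volume scores n_depth_levels candidate
    # depths between min_depth and max_depth (in meters); how the hypotheses
    # are spaced is decided inside cost_volume_fusion.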
min_depth = 0.25
max_depth = 20.0
n_depth_levels = 64
data_path = Path(Config.test_offline_data_path)
if Config.test_dataset_name is None:
keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files())
else:
keyframe_index_files = sorted((Path(Config.test_offline_data_path) / "indices").files("*" + Config.test_dataset_name + "*"))
for iteration, keyframe_index_file in enumerate(keyframe_index_files):
keyframing_type, dataset_name, scene_name, _, n_measurement_frames = keyframe_index_file.split("/")[-1].split("+")
scene_folder = data_path / dataset_name / scene_name
print("Predicting for scene:", dataset_name + "-" + scene_name, " - ", iteration, "/", len(keyframe_index_files))
keyframe_index_file_lines = np.loadtxt(keyframe_index_file, dtype=str, delimiter="\n")
K = np.loadtxt(scene_folder / 'K.txt').astype(np.float32)
poses = np.fromfile(scene_folder / "poses.txt", dtype=float, sep="\n ").reshape((-1, 4, 4))
image_filenames = sorted((scene_folder / 'images').files("*.png"))
depth_filenames = sorted((scene_folder / 'depth').files("*.png"))
input_filenames = []
for image_filename in image_filenames:
input_filenames.append(image_filename.split("/")[-1])
inference_timer = InferenceTimer()
predictions = []
reference_depths = []
with torch.no_grad():
for i in tqdm(range(0, len(keyframe_index_file_lines))):
keyframe_index_file_line = keyframe_index_file_lines[i]
if keyframe_index_file_line == "TRACKING LOST":
continue
else:
current_input_filenames = keyframe_index_file_line.split(" ")
current_indices = [input_filenames.index(current_input_filenames[x]) for x in range(len(current_input_filenames))]
reference_index = current_indices[0]
measurement_indices = current_indices[1:]
reference_pose = poses[reference_index]
reference_image = load_image(image_filenames[reference_index])
reference_depth = cv2.imread(depth_filenames[reference_index], -1).astype(float) / 1000.0
preprocessor = PreprocessImage(K=K,
old_width=reference_image.shape[1],
old_height=reference_image.shape[0],
new_width=Config.test_image_width,
new_height=Config.test_image_height,
distortion_crop=Config.test_distortion_crop,
perform_crop=Config.test_perform_crop)
reference_image = preprocessor.apply_rgb(image=reference_image,
scale_rgb=scale_rgb,
mean_rgb=mean_rgb,
std_rgb=std_rgb)
reference_depth = preprocessor.apply_depth(reference_depth)
reference_image_torch = torch.from_numpy(np.transpose(reference_image, (2, 0, 1))).float().to(device).unsqueeze(0)
reference_pose_torch = torch.from_numpy(reference_pose).float().to(device).unsqueeze(0)
measurement_poses_torch = []
measurement_images_torch = []
for measurement_index in measurement_indices:
measurement_image = load_image(image_filenames[measurement_index])
measurement_image = preprocessor.apply_rgb(image=measurement_image,
scale_rgb=scale_rgb,
mean_rgb=mean_rgb,
std_rgb=std_rgb)
measurement_image_torch = torch.from_numpy(np.transpose(measurement_image, (2, 0, 1))).float().to(device).unsqueeze(0)
measurement_pose_torch = torch.from_numpy(poses[measurement_index]).float().to(device).unsqueeze(0)
measurement_images_torch.append(measurement_image_torch)
measurement_poses_torch.append(measurement_pose_torch)
full_K_torch = torch.from_numpy(preprocessor.get_updated_intrinsics()).float().to(device).unsqueeze(0)
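                        # Features are computed at half resolution, so halve the
                        # focal lengths and principal point (rows 0-1 of K).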
half_K_torch = full_K_torch.clone().cuda()
half_K_torch[:, 0:2, :] = half_K_torch[:, 0:2, :] / 2.0
inference_timer.record_start_time()
measurement_feature_halfs = []
for measurement_image_torch in measurement_images_torch:
measurement_feature_half, _, _, _ = feature_shrinker(*feature_extractor(measurement_image_torch))
measurement_feature_halfs.append(measurement_feature_half)
reference_feature_half, reference_feature_quarter, \
reference_feature_one_eight, reference_feature_one_sixteen = feature_shrinker(*feature_extractor(reference_image_torch))
cost_volume = cost_volume_fusion(image1=reference_feature_half,
image2s=measurement_feature_halfs,
pose1=reference_pose_torch,
pose2s=measurement_poses_torch,
K=half_K_torch,
warp_grid=warp_grid,
min_depth=min_depth,
max_depth=max_depth,
n_depth_levels=n_depth_levels,
device=device,
dot_product=True)
skip0, skip1, skip2, skip3, bottom = cost_volume_encoder(features_half=reference_feature_half,
features_quarter=reference_feature_quarter,
features_one_eight=reference_feature_one_eight,
features_one_sixteen=reference_feature_one_sixteen,
cost_volume=cost_volume)
prediction, _, _, _, _ = cost_volume_decoder(reference_image_torch, skip0, skip1, skip2, skip3, bottom)
inference_timer.record_end_time_and_elapsed_time()
prediction = prediction.cpu().numpy().squeeze()
reference_depths.append(reference_depth)
predictions.append(prediction)
if Config.test_visualize:
visualize_predictions(numpy_reference_image=reference_image,
numpy_measurement_image=measurement_image,
numpy_predicted_depth=prediction,
normalization_mean=mean_rgb,
normalization_std=std_rgb,
normalization_scale=scale_rgb)
inference_timer.print_statistics()
system_name = "{}_{}_{}_{}_{}_dvmvs_pairnet".format(keyframing_type,
dataset_name,
Config.test_image_width,
Config.test_image_height,
n_measurement_frames)
save_results(predictions=predictions,
groundtruths=reference_depths,
system_name=system_name,
scene_name=scene_name,
save_folder=Config.test_result_folder)
if __name__ == '__main__':
predict()
| predict |
lib.rs | use whistle_common::Keyword;
use whistle_common::Literal;
use whistle_common::Operator;
use whistle_common::Punc;
use whistle_common::Range;
use whistle_common::Tip;
use whistle_common::Token;
use whistle_common::TokenItem;
mod error;
pub use error::LexerError;
pub use error::LexerErrorKind;
mod tokenizer;
use tokenizer::Tokenizer;
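/// Runs one token matcher, capturing the source `Range` it consumed. On
/// success it returns `Some(Ok(TokenItem))` from the surrounding function;
/// a terminable error becomes `Some(Err(LexerError))`; any other error
/// rewinds the tokenizer so the next matcher can try the same input.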
macro_rules! ok_or_term {
($self:ident, $token:expr) => {
let start = $self.tokenizer.index;
let token: Result<Token, LexerErrorKind> = $token;
let end = $self.tokenizer.index;
if let Ok(token) = token {
return Some(Ok(TokenItem {
token,
range: Range { start, end },
}));
} else if let Err(err) = token {
if err.is_terminable() {
return Some(Err(LexerError::new(err, Range { start, end })));
} else {
$self.tokenizer.index = start;
}
}
};
}
#[derive(Debug, Clone)]
pub struct Lexer {
tokenizer: Tokenizer,
}
impl Lexer {
pub fn new(source: &str) -> Self {
Self {
tokenizer: Tokenizer::new(source),
}
}
fn usize_from_binary(bin: &str) -> usize {
usize::from_str_radix(&*bin, 2).unwrap()
}
fn usize_from_octal(oct: &str) -> usize {
usize::from_str_radix(&*oct, 8).unwrap()
}
fn usize_from_hex(hex: &str) -> usize {
usize::from_str_radix(&*hex, 16).unwrap()
}
fn usize_from_decimal(dec: &str) -> usize {
usize::from_str_radix(dec, 10).unwrap()
}
fn read_ident(&mut self) -> Option<String> {
if let Some(ch) = self.tokenizer.peek() {
if Lexer::is_letter(ch) {
let mut ident = String::new();
if let Some(ch) = self.tokenizer.step() {
ident.push(ch);
}
ident.push_str(
&self
.tokenizer
.read_while(|c| Lexer::is_letter(c) || Lexer::is_number(c))
.unwrap_or_default(),
);
return Some(ident);
}
}
None
}
fn read_esc(&mut self) -> Option<char> {
if self.tokenizer.eat_char('\\').is_some() {
match self.tokenizer.peek() {
Some('"') => {
self.tokenizer.step();
Some('"')
}
Some('\\') => {
self.tokenizer.step();
Some('\\')
}
Some('r') => {
self.tokenizer.step();
Some('\r')
}
Some('n') => {
self.tokenizer.step();
Some('\n')
}
Some('t') => {
self.tokenizer.step();
Some('\t')
}
Some('0') => {
self.tokenizer.step();
Some('\0')
}
Some('\'') => {
self.tokenizer.step();
Some('\'')
}
Some(_) => None,
None => None,
}
} else {
None
}
}
fn read_inner(&mut self) -> Option<String> {
let mut inner = self
.tokenizer
.read_while(|ch| ch != '\\' && ch != '"')
.unwrap_or_default();
if let Some(esc) = self.read_esc() {
inner.push(esc);
if let Some(string) = self.read_inner() {
inner.push_str(&*string);
}
}
Some(inner)
}
fn is_letter(ch: char) -> bool {
ch == '_' || ch.is_alphabetic()
}
fn is_number(ch: char) -> bool {
ch.is_numeric()
}
fn is_decimal(ch: char) -> bool {
Lexer::is_octal(ch) || ch == '8' || ch == '9'
}
fn is_binary(ch: char) -> bool {
ch == '0' || ch == '1'
}
fn is_octal(ch: char) -> bool {
Lexer::is_binary(ch)
|| ch == '2'
|| ch == '3'
|| ch == '4'
|| ch == '5'
|| ch == '6'
|| ch == '7'
}
fn is_hex(ch: char) -> bool {
Lexer::is_decimal(ch)
|| ch == 'a'
|| ch == 'b'
|| ch == 'c'
|| ch == 'd'
|| ch == 'e'
|| ch == 'f'
|| ch == 'A'
|| ch == 'B'
|| ch == 'C'
|| ch == 'D'
|| ch == 'E'
|| ch == 'F'
}
fn whitespace(&mut self) -> bool {
let index = self.tokenizer.index;
while let Some(' ') | Some('\t') | Some('\r') | Some('\n') = self.tokenizer.peek() {
self.tokenizer.step();
}
index != self.tokenizer.index
}
fn comment(&mut self) -> bool {
self.comment_line().is_ok() || self.comment_inline().is_ok()
}
fn comment_line(&mut self) -> Result<Token, LexerErrorKind> {
let mut comment = String::new();
if self.tokenizer.eat_str("//").is_some() {
        loop {
            if let Some(ch) = self.tokenizer.peek() {
                match ch {
                    '\n' => break,
                    _ => comment.push(self.tokenizer.step().unwrap()),
                }
            } else {
                // EOF also terminates a line comment; avoid spinning forever.
                break;
            }
        }
Ok(Token::CommentLine(comment))
} else {
Err(LexerErrorKind::ExpectedCommentLine)
}
}
fn comment_inline(&mut self) -> Result<Token, LexerErrorKind> {
let mut comment = String::new();
if self.tokenizer.eat_str("/*").is_some() {
let mut depth = 1;
loop {
if self.tokenizer.eat_str("/*").is_some() {
depth += 1;
} else if self.tokenizer.eat_str("*/").is_some() {
depth -= 1;
                } else if let Some(ch) = self.tokenizer.step() {
                    comment.push(ch);
                } else {
                    // EOF inside an unterminated block comment; stop rather than spin.
                    break;
                }
if depth == 0 {
break;
}
}
Ok(Token::CommentInline(comment))
} else {
Err(LexerErrorKind::ExpectedCommentInline)
}
}
fn ident_or_keyword(&mut self) -> Result<Token, LexerErrorKind> {
if let Some(ident) = self.read_ident() {
if let Some(keyword) = Keyword::from(&*ident) {
Ok(Token::Keyword(keyword))
} else {
Ok(Token::Ident(ident))
}
} else {
Err(LexerErrorKind::ExpectedIdentOrKeyword)
}
}
fn operator(&mut self) -> Result<Token, LexerErrorKind> {
for operator in Operator::operators().iter() {
if self.tokenizer.eat_str(operator).is_some() {
if let Some(op) = Operator::from(operator) {
return Ok(Token::Operator(op));
}
}
}
Err(LexerErrorKind::ExpectedOperator)
}
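    /// Lexes float literals such as `1.5`, `123e10`, or `2.5e-3`; a decimal
    /// part or an exponent is required to distinguish them from int literals.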
fn float_lit(&mut self) -> Result<Token, LexerErrorKind> {
let mut float = String::new();
let mut dec_or_exp = false;
if let Some(start) = self.tokenizer.read_while(Lexer::is_decimal) {
float.push_str(&*start);
if self.tokenizer.eat_char('.').is_some() {
float.push('.');
if let Some(dec) = self.tokenizer.read_while(Lexer::is_decimal) {
float.push_str(&*dec);
} else {
return Err(LexerErrorKind::ExpectedDec);
}
dec_or_exp = true;
}
if let Some(next) = self.tokenizer.peek() {
if next == 'e' || next == 'E' {
self.tokenizer.step();
float.push('e');
if self.tokenizer.eat_char('+').is_some() {
float.push('+');
} else if self.tokenizer.eat_char('-').is_some() {
float.push('-');
}
if let Some(dec) = self.tokenizer.read_while(Lexer::is_decimal) {
float.push_str(&*dec);
} else {
return Err(LexerErrorKind::ExpectedExp);
}
dec_or_exp = true;
}
}
if !dec_or_exp {
return Err(LexerErrorKind::ExpectedDecOrExp);
}
} else {
return Err(LexerErrorKind::ExpectedFloatLit);
}
if let Ok(float) = float.parse::<f64>() {
Ok(Token::Literal(Literal::Float(float)))
} else {
Err(LexerErrorKind::CouldNotParseFloat)
}
}
fn int_lit(&mut self) -> Result<Token, LexerErrorKind> {
if self.tokenizer.eat_str("0b").is_some() {
if let Some(bin) = self.tokenizer.read_while(Lexer::is_binary) {
Ok(Token::Literal(Literal::Int(Lexer::usize_from_binary(
&*bin,
))))
} else {
Err(LexerErrorKind::ExpectedBin)
}
} else if self.tokenizer.eat_str("0o").is_some() {
if let Some(oct) = self.tokenizer.read_while(Lexer::is_octal) {
Ok(Token::Literal(Literal::Int(Lexer::usize_from_octal(&*oct))))
} else {
Err(LexerErrorKind::ExpectedOct)
}
} else if self.tokenizer.eat_str("0x").is_some() {
if let Some(hex) = self.tokenizer.read_while(Lexer::is_hex) {
Ok(Token::Literal(Literal::Int(Lexer::usize_from_hex(&*hex))))
} else {
Err(LexerErrorKind::ExpectedHex)
}
} else if let Some(dec) = self.tokenizer.read_while(Lexer::is_decimal) {
Ok(Token::Literal(Literal::Int(Lexer::usize_from_decimal(
&*dec,
))))
} else {
Err(LexerErrorKind::ExpectedIntLit)
}
}
fn str_lit(&mut self) -> Result<Token, LexerErrorKind> {
let mut inner = String::new();
if self.tokenizer.eat_char('"').is_none() {
return Err(LexerErrorKind::ExpectedStringStartDelim);
}
if let Some(string) = self.read_inner() {
inner.push_str(&*string);
} else {
return Err(LexerErrorKind::ExpectedStringInner);
}
if self.tokenizer.eat_char('"').is_none() {
return Err(LexerErrorKind::ExpectedStringEndDelim);
}
Ok(Token::Literal(Literal::Str(inner)))
}
fn char_lit(&mut self) -> Result<Token, LexerErrorKind> {
if self.tokenizer.eat_char('\'').is_none() {
return Err(LexerErrorKind::ExpectedCharStartDelim);
}
let inner = if let Some(esc) = self.read_esc() {
esc
} else if let Some(ch) = self.tokenizer.step() {
ch
} else {
return Err(LexerErrorKind::UnexpectedEOF);
};
if self.tokenizer.eat_char('\'').is_none() {
return Err(LexerErrorKind::ExpectedCharEndDelim);
}
Ok(Token::Literal(Literal::Char(inner)))
}
fn bool_lit(&mut self) -> Result<Token, LexerErrorKind> {
if self.tokenizer.eat_str("true").is_some() {
Ok(Token::Literal(Literal::Bool(true)))
} else if self.tokenizer.eat_str("false").is_some() {
Ok(Token::Literal(Literal::Bool(false)))
} else {
Err(LexerErrorKind::ExpectedBoolLit)
}
}
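    /// Lexes a tip of the form `#(ident) value-until-end-of-line` or, for
    /// multi-line values, `#(ident) { braced value }` (braces may nest).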
fn tip(&mut self) -> Result<Token, LexerErrorKind> {
if self.tokenizer.eat_char('#').is_none() |
if self.tokenizer.eat_char('(').is_none() {
return Err(LexerErrorKind::ExpectedLeftParen);
}
self.whitespace();
let ident = if let Some(i) = self.read_ident() {
i
} else {
return Err(LexerErrorKind::ExpectedTipIdent);
};
self.whitespace();
if self.tokenizer.eat_char(')').is_none() {
return Err(LexerErrorKind::ExpectedRightParen);
}
self.whitespace();
let value = if self.tokenizer.eat_char('{').is_some() {
let mut val = String::new();
let mut depth = 1;
loop {
if self.tokenizer.eat_char('{').is_some() {
depth += 1;
} else if self.tokenizer.eat_char('}').is_some() {
depth -= 1;
                } else if let Some(ch) = self.tokenizer.step() {
                    val.push(ch);
                } else {
                    // EOF inside an unterminated braced tip value; stop rather than spin.
                    break;
                }
if depth == 0 {
break;
}
}
val
} else if let Some(val) = self.tokenizer.read_while(|ch| ch != '\n') {
val
} else {
return Err(LexerErrorKind::ExpectedNewline);
};
Ok(Token::Tip(Tip { ident, value }))
}
fn punc(&mut self) -> Result<Token, LexerErrorKind> {
if let Some(ch) = self.tokenizer.peek() {
if let Some(punc) = Punc::from(ch) {
self.tokenizer.step();
Ok(Token::Punc(punc))
} else {
Err(LexerErrorKind::ExpectedPunc)
}
} else {
Err(LexerErrorKind::UnexpectedEOF)
}
}
}
impl Iterator for Lexer {
type Item = Result<TokenItem, LexerError>;
fn next(&mut self) -> Option<Result<TokenItem, LexerError>> {
if !self.tokenizer.within() {
return None;
}
if self.whitespace() || self.comment() {
return self.next();
}
ok_or_term!(self, self.bool_lit());
ok_or_term!(self, self.ident_or_keyword());
ok_or_term!(self, self.operator());
ok_or_term!(self, self.float_lit());
ok_or_term!(self, self.int_lit());
ok_or_term!(self, self.str_lit());
ok_or_term!(self, self.char_lit());
ok_or_term!(self, self.tip());
ok_or_term!(self, self.punc());
Some(Err(LexerError::new(
LexerErrorKind::NoMatch,
Range {
start: self.tokenizer.index,
end: self.tokenizer.index,
},
)))
}
}
#[cfg(test)]
mod tests {
use crate::*;
use whistle_common::Range;
#[test]
fn whitespace() {
let mut lexer = Lexer::new(" \t\r\n");
assert_eq!(lexer.next(), None);
}
#[test]
fn comments() {
let mut lexer = Lexer::new(
"// line comment
/* inline comment */",
);
assert_eq!(lexer.next(), None);
}
#[test]
fn ident() {
let mut lexer = Lexer::new("hello_w0r1d ไฝ ๅฅฝๅ");
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Ident("hello_w0r1d".to_string()),
range: Range { start: 0, end: 11 }
}))
);
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Ident("ไฝ ๅฅฝๅ".to_string()),
range: Range { start: 12, end: 15 }
}))
);
}
#[test]
fn keyword() {
let lexer = Lexer::new("import as from export fun return if else while break continue var val for in match type struct trait");
for tok in lexer {
assert!(tok.is_ok());
assert!(matches!(tok.unwrap().token, Token::Keyword(_)));
}
}
#[test]
fn operator() {
let lexer = Lexer::new("~ ! + - * / % ** == != <= < > >= && || << >> & | ^ += -= * /= %= **= &&= ||= <<= >>= &= |= ^=");
for tok in lexer {
assert!(tok.is_ok());
assert!(matches!(tok.unwrap().token, Token::Operator(_)));
}
}
#[test]
fn float_lit() {
let mut lexer = Lexer::new("123e10 123.123e10 123.123");
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Float(123e10)),
range: Range { start: 0, end: 6 }
}))
);
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Float(123.123e10)),
range: Range { start: 7, end: 17 }
}))
);
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Float(123.123)),
range: Range { start: 18, end: 25 }
}))
);
}
#[test]
fn int_lit() {
let mut lexer = Lexer::new("123 0b01 0o07 0x0f");
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Int(123)),
range: Range { start: 0, end: 3 }
}))
);
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Int(1)),
range: Range { start: 4, end: 8 }
}))
);
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Int(7)),
range: Range { start: 9, end: 13 }
}))
);
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Int(15)),
range: Range { start: 14, end: 18 }
}))
);
}
#[test]
fn string_lit() {
let mut lexer = Lexer::new("\"\" \"asd\" \"\\\"\"");
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Str(String::new())),
range: Range { start: 0, end: 2 }
}))
);
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Str("asd".to_string())),
range: Range { start: 3, end: 8 }
}))
);
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Str("\"".to_string())),
range: Range { start: 9, end: 13 }
}))
);
}
#[test]
fn char_lit() {
let mut lexer = Lexer::new("'a' '\''");
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Char('a')),
range: Range { start: 0, end: 3 }
}))
);
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Char('\'')),
range: Range { start: 4, end: 7 }
}))
);
}
#[test]
fn bool_lit() {
let mut lexer = Lexer::new("true false");
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Bool(true)),
range: Range { start: 0, end: 4 }
}))
);
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Literal(Literal::Bool(false)),
range: Range { start: 5, end: 10 }
}))
);
}
#[test]
fn tip() {
let mut lexer = Lexer::new(
"#(tip) tip
#(tip) { tip }",
);
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Tip(Tip {
ident: "tip".to_string(),
value: "tip".to_string()
}),
range: Range { start: 0, end: 10 }
}))
);
assert_eq!(
lexer.next(),
Some(Ok(TokenItem {
token: Token::Tip(Tip {
ident: "tip".to_string(),
value: " tip ".to_string()
}),
range: Range { start: 43, end: 57 }
}))
);
}
#[test]
fn punc() {
let lexer = Lexer::new(", : . [ ] { } ( )");
for tok in lexer {
assert!(tok.is_ok());
assert!(matches!(tok.unwrap().token, Token::Punc(_)));
}
}
#[test]
fn no_match() {
let mut lexer = Lexer::new("ยจ");
assert_eq!(
lexer.next(),
Some(Err(LexerError::new(
LexerErrorKind::NoMatch,
Range { start: 0, end: 0 }
)))
);
}
}
| {
return Err(LexerErrorKind::ExpectedHash);
} |
index.ts | import { combineReducers } from "redux";
import azureProfileData from "./azureLoginReducers";
import modals from "./modalReducers";
import vscodeApi from "./vscodeApiReducer";
import wizardContent from "./wizardContentReducers";
import selection from "./wizardSelectionReducers";
import wizardRoutes from "./wizardRoutes";
import generationStatus from "./generationStatus"; |
const rootReducer = combineReducers({
vscode: vscodeApi,
wizardContent,
selection,
azureProfileData,
modals,
wizardRoutes,
generationStatus,
versions
});
export type AppState = ReturnType<typeof rootReducer>;
export default rootReducer; | import versions from "./versionsReducer"; |
issues.go | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"context"
"fmt"
"github.com/gocarina/gocsv"
"github.com/google/pullsheet/pkg/summary"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/google/pullsheet/pkg/client"
)
// issuesCmd represents the subcommand for `pullsheet issues`
var issuesCmd = &cobra.Command{
Use: "issues",
Short: "Generate data around issues",
SilenceUsage: true,
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runIssues(rootOpts)
},
}
func init() |
func runIssues(rootOpts *rootOptions) error {
ctx := context.Background()
c, err := client.New(ctx, client.Config{GitHubTokenPath: rootOpts.tokenPath})
if err != nil {
return err
}
data, err := summary.Issues(ctx, c, rootOpts.repos, rootOpts.users, rootOpts.sinceParsed, rootOpts.untilParsed)
if err != nil {
return err
}
out, err := gocsv.MarshalString(&data)
if err != nil {
return err
}
logrus.Infof("%d bytes of issue output", len(out))
fmt.Print(out)
return nil
}
| {
rootCmd.AddCommand(issuesCmd)
} |
example02.rs | use std::thread;
use std::time::Duration;
fn | () {
thread::spawn(|| {
for i in 1..100 {
println!("T1: {}", i);
thread::sleep(Duration::from_millis(1));
}
});
thread::spawn(|| {
for i in 1..100 {
println!("T2: {}", i);
thread::sleep(Duration::from_millis(1));
}
});
// Wait 1 second, for the other threads to finish
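    // (A sturdier approach: keep the JoinHandle returned by each spawn
    // and call .join() on it instead of sleeping.)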
thread::sleep(Duration::from_millis(1000));
}
| main |
data_format_options.rs | use serde::Serialize;
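/// With `rename_all = "camelCase"`, a populated value serializes as
/// `{"useInt64Timestamp":true}`; a `None` field is omitted entirely via
/// `skip_serializing_if`.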
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct | {
/// Output timestamp as usec int64. Default is false.
#[serde(skip_serializing_if = "Option::is_none")]
use_int64_timestamp: Option<bool>,
}
| DataFormatOptions |
IntegratesWithSection.tsx | import React from 'react'
import { SupportedProgrammingLanguagesLink } from './SupportedProgrammingLanguagesLink'
interface IntegrationEntry {
type: 'codeHost' | 'service' | 'plugin' | 'language'
codeReviewIntegration?: true
iconUrl: string
description: string
width?: number
url?: string
}
const ENTRIES: IntegrationEntry[] = [
{
type: 'codeHost',
codeReviewIntegration: true,
iconUrl: '/external-logos/github-horizontal-logo.svg',
description: 'GitHub.com and GitHub Enterprise Server (code hosting and review)',
width: 95,
url: '/blog/universal-code-intelligence-github-sourcegraph-browser-extension',
},
{
type: 'codeHost',
codeReviewIntegration: true,
iconUrl: '/external-logos/gitlab-logo.svg',
description: 'GitLab.com and self-managed GitLab CE/EE instances (code hosting and review)',
},
{
type: 'codeHost',
codeReviewIntegration: true,
iconUrl: '/external-logos/bitbucket-server-logo.svg',
description: 'Bitbucket Server (code hosting and review)',
},
{
type: 'codeHost',
codeReviewIntegration: true,
iconUrl: '/external-logos/phabricator-logo.svg',
description: 'Phabricator (code hosting and review)',
},
{
type: 'codeHost',
iconUrl: '/external-logos/azure-devops.svg',
description: 'Azure DevOps (code hosting)',
width: 45,
},
{
type: 'codeHost',
iconUrl: '/external-logos/aws-codecommit.svg',
description: 'AWS CodeCommit (code hosting)',
width: 45,
},
{
type: 'codeHost',
iconUrl: '/external-logos/git-logo.svg',
description: 'Any Git repository',
width: 70,
},
{
type: 'service',
iconUrl: '/external-logos/datadog-logo.svg',
description: 'Datadog (APM)',
},
{
type: 'service',
iconUrl: '/external-logos/sentry-logo.svg',
description: 'Sentry (error monitoring)',
},
{
type: 'service',
iconUrl: '/external-logos/lightstep-logo.svg',
description: 'LightStep (APM and tracing)',
},
{
type: 'service',
iconUrl: '/external-logos/codecov-logo.svg',
description: 'Codecov (code coverage)',
width: 40,
},
{
type: 'service',
iconUrl: '/external-logos/jira-logo.svg',
description: 'Jira (project management)',
},
{
type: 'service',
iconUrl: '/external-logos/hubspot-logo.svg',
description: 'HubSpot (customer relationship management)',
width: 90,
},
{
type: 'plugin',
iconUrl: '/integrations/chrome.svg',
description: 'Google Chrome (browser extension)',
},
{
type: 'plugin',
iconUrl: '/integrations/firefox.svg',
description: 'Mozilla Firefox (browser extension)',
},
{
type: 'plugin',
iconUrl: '/integrations/vscode.svg',
description: 'Visual Studio Code (editor extension)',
},
{
type: 'plugin',
iconUrl: '/integrations/jetbrains.svg',
description: 'IntelliJ, WebStorm, PyCharm, GoLand, and other JetBrains editors (editor extension)',
},
{
type: 'plugin',
iconUrl: '/integrations/vim.svg',
description: 'Vim (editor extension)',
width: 50,
},
{
type: 'plugin',
iconUrl: '/integrations/sublime.svg',
description: 'Sublime Text 3 (editor extension)',
},
{
type: 'plugin',
iconUrl: '/integrations/atom.svg',
description: 'Atom (editor extension)',
},
]
const IntegrationEntriesRow: React.FunctionComponent<{
text: string
entries?: IntegrationEntry[]
children?: React.ReactNode
}> = ({ text, entries, children }) => (
<div className="integration-entries-row row my-5 pt-3 border-top mx-md-5">
<div className="integration-entries-row__text col-md-3 my-2 small text-muted text-uppercase font-weight-bold">
{text}
</div>
<div className="col-md-9">
{children}
{entries && (
<div className="d-flex flex-wrap align-items-center">
{entries.map(({ description, iconUrl, width, url = '' }, i) =>
url ? (
                        <a key={i} href={url}>
<img
key={i}
className="integrates-with-section__logo mx-2"
src={iconUrl} | </a>
) : (
<img
key={i}
className="integrates-with-section__logo mx-2"
src={iconUrl}
title={description}
style={width !== undefined ? { width: `${width}px` } : undefined}
/>
)
)}
</div>
)}
</div>
</div>
)
export const IntegratesWithSection: React.FunctionComponent<{
className?: string
showTypes?: IntegrationEntry['type'][]
showOnlyCodeHostsWithCodeReviewIntegration?: boolean
customTypeLabels?: Partial<Record<IntegrationEntry['type'], string>>
}> = ({ className = '', showTypes, showOnlyCodeHostsWithCodeReviewIntegration, customTypeLabels }) => (
<div className={`integrates-with-section ${className} mx-auto px-4`}>
<h4 className="text-center font-weight-light">Integrates with your existing tools and workflow</h4>
<div className="mt-6 mb-2">
{(!showTypes || showTypes.includes('codeHost')) && (
<IntegrationEntriesRow
text={(customTypeLabels && customTypeLabels.codeHost) || 'Code hosting & review'}
entries={ENTRIES.filter(
e =>
e.type === 'codeHost' &&
(!showOnlyCodeHostsWithCodeReviewIntegration || e.codeReviewIntegration)
)}
/>
)}
{(!showTypes || showTypes.includes('service')) && (
<IntegrationEntriesRow
text={(customTypeLabels && customTypeLabels.service) || 'Other services in your workflow'}
entries={ENTRIES.filter(e => e.type === 'service')}
/>
)}
{(!showTypes || showTypes.includes('plugin')) && (
<IntegrationEntriesRow
text={(customTypeLabels && customTypeLabels.plugin) || 'Browser & editor integrations'}
entries={ENTRIES.filter(e => e.type === 'plugin')}
/>
)}
{(!showTypes || showTypes.includes('language')) && (
<IntegrationEntriesRow
text={(customTypeLabels && customTypeLabels.language) || 'Programming languages'}
>
<div className="mt-1">
All programming languages are supported. <SupportedProgrammingLanguagesLink /> have additional
code intelligence support.
</div>
</IntegrationEntriesRow>
)}
</div>
</div>
) | title={description}
style={width !== undefined ? { width: `${width}px` } : undefined}
/> |
dc.py | class palindrome:
    def __init__(self):
        self.a = ""
    def input(self, k1):
        self.a = k1
    def calculate(self):
        f = 0
        i = 0
        j = len(self.a) - 1
        # compare characters from both ends toward the middle
        while i < len(self.a) // 2:
            if self.a[i] != self.a[j]:
| f = 1
                break
            else:
                i = i + 1
                j = j - 1
        if f == 0:
            print(self.a, "is a palindrome")
        else:
            print(self.a, "is not a palindrome")
x = palindrome()
a = input("enter string:")
x.input(a)
x.calculate() | 
lib.rs | //! Handling of data in Basic Encoding Rules.
//!
//! This crate allows decoding and encoding of data encoded in ASN.1's _Basic
//! Encoding Rules_ as defined in ITU recommendation X.690 as well as their
//! stricter companions _Canonical Encoding Rules_ and _Distinguished
//! Encoding Rules._
//!
//! You will find a short introduction to ASN.1 and encoding rules as well
//! as a discussion of how decoding and encoding with the crate work in
//! the [guide] module. The documentation of all the other
//! modules serves as reference documentation.
//!
//! The most important modules of the crate are [decode] and [encode] that | //! Additionally, the crate provides a number of types that help dealing
//! with the more complex universal types in ASN.1. Specifically, the
//! module [int] provides variable length integers, the module
//! [string] contains types for the various kinds of strings defined in
//! ASN.1, and [oid] deals with object identifiers. Finally, [captured]
//! provides a way to keep encoded data around for later processing.
//! The most important types from these modules are also re-exported at
//! library level.
//!
//! [guide]: guide/index.html
//! [decode]: decode/index.html
//! [encode]: encode/index.html
//! [about_asn1]: about_asn1/index.html
//! [int]: int/index.html
//! [string]: string/index.html
//! [oid]: oid/index.html
//! [captured]: captured/index.html
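//!
//! As a quick orientation, here is a minimal sketch of the decoding shape
//! (not taken from this crate's own docs; the `Integer::take_from` helper
//! and the `&[u8]` source are assumptions):
//!
//! ```ignore
//! use bcder::{Integer, Mode};
//!
//! // DER encoding of INTEGER 42: tag 0x02, length 1, value 0x2a.
//! let data: &[u8] = &[0x02, 0x01, 0x2a];
//! let res = Mode::Der.decode(data, |cons| Integer::take_from(cons));
//! assert!(res.is_ok());
//! ```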
// We have seemingly redundant closures (i.e., closures where just providing
// a function would also work) that cannot be removed due to lifetime issues.
#![allow(clippy::redundant_closure)]
//--- Re-exports
pub use self::captured::Captured;
pub use self::int::{Integer, Unsigned};
pub use self::mode::Mode;
pub use self::oid::{ConstOid, Oid};
pub use self::string::{
BitString, OctetString,
Ia5String, NumericString, PrintableString, Utf8String,
};
pub use self::tag::Tag;
//--- Public modules
#[macro_use] pub mod debug;
pub mod decode;
pub mod encode;
pub mod captured;
pub mod int;
pub mod oid;
pub mod string;
//--- Private modules
mod length;
mod mode;
mod tag;
//--- Elaborate documentation
//
pub mod guide; | //! provide the machinery for implementing decoding and encoding of data.
//! |
smart-table.component_20190313230933.ts | import { Component } from '@angular/core';
import { Day, CardService } from '../../card.service';
import { formatDate } from '@angular/common';
import { LocalDataSource } from 'ng2-smart-table';
import { CurrencyIndex } from '@angular/common/src/i18n/locale_data';
@Component({
selector: 'ngx-smart-table',
templateUrl: './smart-table.component.html',
styles: [`
nb-card {
transform: translate3d(0, 0, 0);
}
`],
})
export class | {
myDate = '2019-03-09';
// myDate = formatDate(new Date(), 'yyyy-MM-dd', 'en');
tables: Day[] = [];
abc = [];
settings = {
columns: {
      index: {
        title: 'No.',
        type: 'string',
        valuePrepareFunction(value: any, row: any, cell: { row: { index: number; }; }) {
          return cell.row.index + 1;
        },
      },
      username: {
        title: 'Username',
        type: 'string',
      },
      solvedQuestion: {
        title: 'Problems solved',
        type: 'number',
      },
      submission: {
        title: 'Check-in days (past year)',
        type: 'number',
      },
      isChecked: {
        title: 'Today\'s check-in',
        type: 'string',
        valuePrepareFunction(isChecked) {
          return isChecked === 1 ? 'Checked in' : 'Missed';
        },
      },
      gmt_modified: {
        title: 'Data updated at',
        type: 'string',
        valuePrepareFunction(gmt_modified) {
          return formatDate(new Date(gmt_modified), 'medium', 'en');
        },
      },
},
};
source: LocalDataSource = new LocalDataSource();
constructor(private infoService: CardService) {
this.infoService.getCheckDayInfoDay(this.myDate).subscribe((res) => {
this.tables = res;
this.source.load(this.tables);
// tslint:disable-next-line: no-console
console.log(this.tables.map(table => (table)));
});
}
}
| SmartTableComponent |
useProductDescription.d.ts | interface Props {
slug: string;
suspense?: boolean;
} | declare const useProductDescription: ({ slug, suspense }: Props) => {
data: string;
};
export declare const query: never;
export default useProductDescription; | |
sessions_controller_test.go | package web_test
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"testing"
"time"
"github.com/smartcontractkit/chainlink/core/internal/cltest"
"github.com/smartcontractkit/chainlink/core/services/pg"
"github.com/smartcontractkit/chainlink/core/sessions"
"github.com/smartcontractkit/chainlink/core/web"
"github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSessionsController_Create(t *testing.T) {
t.Parallel()
app := cltest.NewApplicationEVMDisabled(t)
require.NoError(t, app.Start())
config := app.GetConfig()
client := http.Client{}
tests := []struct {
name string
email string
password string
wantSession bool
}{
{"incorrect pwd", cltest.APIEmail, "incorrect", false},
{"incorrect email", "[email protected]", cltest.Password, false},
{"correct", cltest.APIEmail, cltest.Password, true},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
body := fmt.Sprintf(`{"email":"%s","password":"%s"}`, test.email, test.password)
request, err := http.NewRequest("POST", config.ClientNodeURL()+"/sessions", bytes.NewBufferString(body))
assert.NoError(t, err)
resp, err := client.Do(request)
assert.NoError(t, err)
defer resp.Body.Close()
if test.wantSession {
require.Equal(t, http.StatusOK, resp.StatusCode)
cookies := resp.Cookies()
sessionCookie := web.FindSessionCookie(cookies)
require.NotNil(t, sessionCookie)
decrypted, err := cltest.DecodeSessionCookie(sessionCookie.Value)
require.NoError(t, err)
user, err := app.SessionORM().AuthorizedUserWithSession(decrypted)
assert.NoError(t, err)
assert.Equal(t, test.email, user.Email)
b, err := ioutil.ReadAll(resp.Body)
assert.NoError(t, err)
assert.Contains(t, string(b), `"attributes":{"authenticated":true}`)
} else {
require.True(t, resp.StatusCode >= 400, "Should not be able to create session")
// Ignore fixture session
sessions, err := app.SessionORM().Sessions(1, 2)
assert.NoError(t, err)
assert.Empty(t, sessions)
}
})
}
}
func mustInsertSession(t *testing.T, q pg.Q, session *sessions.Session) {
err := q.GetNamed(`INSERT INTO sessions (id, last_used, created_at) VALUES (:id, :last_used, :created_at) RETURNING *`, session, session)
require.NoError(t, err)
}
func TestSessionsController_Create_ReapSessions(t *testing.T) {
t.Parallel()
app := cltest.NewApplicationEVMDisabled(t)
require.NoError(t, app.Start())
staleSession := cltest.NewSession()
staleSession.LastUsed = time.Now().Add(-cltest.MustParseDuration(t, "241h"))
q := pg.NewQ(app.GetSqlxDB(), app.GetLogger(), app.GetConfig())
mustInsertSession(t, q, &staleSession)
body := fmt.Sprintf(`{"email":"%s","password":"%s"}`, cltest.APIEmail, cltest.Password)
resp, err := http.Post(app.Config.ClientNodeURL()+"/sessions", "application/json", bytes.NewBufferString(body))
assert.NoError(t, err)
defer resp.Body.Close()
assert.Equal(t, http.StatusOK, resp.StatusCode)
var s []sessions.Session
gomega.NewWithT(t).Eventually(func() []sessions.Session {
s, err = app.SessionORM().Sessions(0, 10)
assert.NoError(t, err)
return s
}).Should(gomega.HaveLen(1))
for _, session := range s {
assert.NotEqual(t, session.ID, staleSession.ID)
}
}
func TestSessionsController_Destroy(t *testing.T) {
t.Parallel()
app := cltest.NewApplicationEVMDisabled(t)
require.NoError(t, app.Start())
correctSession := sessions.NewSession()
q := pg.NewQ(app.GetSqlxDB(), app.GetLogger(), app.GetConfig())
mustInsertSession(t, q, &correctSession)
config := app.GetConfig()
client := http.Client{}
tests := []struct {
name, sessionID string
success bool
}{
{"correct cookie", correctSession.ID, true},
{"incorrect cookie", "wrongsessionid", false},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
cookie := cltest.MustGenerateSessionCookie(t, test.sessionID)
request, err := http.NewRequest("DELETE", config.ClientNodeURL()+"/sessions", nil)
assert.NoError(t, err)
request.AddCookie(cookie)
resp, err := client.Do(request)
assert.NoError(t, err)
_, err = app.SessionORM().AuthorizedUserWithSession(test.sessionID)
assert.Error(t, err)
if test.success {
assert.Equal(t, http.StatusOK, resp.StatusCode)
} else {
assert.True(t, resp.StatusCode >= 400, "Should get an erroneous status code for deleting a nonexistent session id")
}
})
}
}
func TestSessionsController_Destroy_ReapSessions(t *testing.T) {
t.Parallel()
client := http.Client{}
app := cltest.NewApplicationEVMDisabled(t)
q := pg.NewQ(app.GetSqlxDB(), app.GetLogger(), app.GetConfig()) | require.NoError(t, app.Start())
correctSession := sessions.NewSession()
mustInsertSession(t, q, &correctSession)
cookie := cltest.MustGenerateSessionCookie(t, correctSession.ID)
staleSession := cltest.NewSession()
staleSession.LastUsed = time.Now().Add(-cltest.MustParseDuration(t, "241h"))
mustInsertSession(t, q, &staleSession)
request, err := http.NewRequest("DELETE", app.Config.ClientNodeURL()+"/sessions", nil)
assert.NoError(t, err)
request.AddCookie(cookie)
resp, err := client.Do(request)
assert.NoError(t, err)
assert.Equal(t, http.StatusOK, resp.StatusCode)
gomega.NewWithT(t).Eventually(func() []sessions.Session {
sessions, err := app.SessionORM().Sessions(0, 10)
assert.NoError(t, err)
return sessions
}).Should(gomega.HaveLen(0))
} | |
libstd_collections_hash_set_rs_0016.rs | fn | () {
use std::collections::HashSet;
let mut v = HashSet::new();
assert!(v.is_empty());
v.insert(1);
assert!(!v.is_empty());
}
| main |
formatting.rs | //! # Formatting
//!
//! There are three main forms of formatting within Polar:
//!
//! 1. Debug strings: super verbose, mostly Rust-auto derived from fmt::Debug trait
//! 2. Display string: nice user-facing versions, which could be used for things like a debugger
//! 3. Polar strings: not always implemented, but is same syntax the parser accepts
//!
//! In addition, there are special cases like traces and sources that have their own
//! formatting requirements.
use crate::rules::*;
use crate::sources::*;
use crate::terms::*;
use crate::traces::*;
pub use display::*;
pub use to_polar::*;
impl Trace {
/// Return the string representation of this `Trace`
pub fn draw(&self, vm: &crate::vm::PolarVirtualMachine) -> String {
let mut res = String::new();
self.draw_trace(vm, 0, &mut res);
res
}
fn draw_trace(&self, vm: &crate::vm::PolarVirtualMachine, nest: usize, res: &mut String) {
if matches!(&self.node, Node::Term(term)
if matches!(term.value(), Value::Expression(Operation { operator: Operator::And, ..})))
{
for c in &self.children {
c.draw_trace(vm, nest + 1, res);
}
} else {
let polar_str = match self.node {
Node::Rule(ref r) => vm.rule_source(r),
Node::Term(ref t) => vm.term_source(t, false),
};
let indented = polar_str
.split('\n')
.map(|s| " ".repeat(nest) + s)
.collect::<Vec<String>>()
.join("\n");
res.push_str(&indented);
res.push_str(" [");
if !self.children.is_empty() {
res.push('\n');
for c in &self.children {
c.draw_trace(vm, nest + 1, res);
}
for _ in 0..nest {
res.push_str(" ");
}
}
res.push_str("]\n");
}
}
}
/// Traverse a [`Source`](../types/struct.Source.html) line by line until `offset` is reached,
/// and return the source line containing the `offset` character as well as `num_lines` lines
/// above and below it.
// @TODO: Can we have the caret under the whole range of the expression instead of just the beginning.
pub fn source_lines(source: &Source, offset: usize, num_lines: usize) -> String {
// Sliding window of lines: current line + indicator + additional context above + below.
let max_lines = num_lines * 2 + 2;
let push_line = |lines: &mut Vec<String>, line: String| {
if lines.len() == max_lines {
lines.remove(0);
}
lines.push(line);
};
let mut index = 0;
let mut lines = Vec::new();
let mut target = None;
let prefix_len = "123: ".len();
for (lineno, line) in source.src.lines().enumerate() {
push_line(&mut lines, format!("{:03}: {}", lineno + 1, line));
let end = index + line.len() + 1; // Adding one to account for new line byte.
if target.is_none() && end >= offset {
target = Some(lineno);
let spaces = " ".repeat(offset - index + prefix_len);
push_line(&mut lines, format!("{}^", spaces));
}
index = end;
if target.is_some() && lineno == target.unwrap() + num_lines {
break;
}
}
lines.join("\n")
}
/// Formats a vector of terms as a string-separated list
/// When providing an operator, parentheses are applied suitably
/// (see: to_polar_parens)
pub fn format_args(op: Operator, args: &[Term], sep: &str) -> String {
args.iter()
.map(|t| to_polar_parens(op, t))
.collect::<Vec<String>>()
.join(sep)
}
/// Formats a vector of parameters
pub fn format_params(args: &[Parameter], sep: &str) -> String {
args.iter()
.map(|parameter| parameter.to_polar())
.collect::<Vec<String>>()
.join(sep)
}
/// Formats a vector of rules as a string-separated list.
#[allow(clippy::ptr_arg)]
pub fn format_rules(rules: &Rules, sep: &str) -> String {
rules
.iter()
.map(|rule| rule.to_polar())
.collect::<Vec<String>>()
.join(sep)
}
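/// Relative binding strength used when re-printing expressions; higher
/// numbers bind tighter (e.g. `Mul` at 7 beats `Add` at 6).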
fn precedence(o: &Operator) -> i32 {
match o {
Operator::Print => 11,
Operator::Debug => 11,
Operator::New => 10,
Operator::Cut => 10,
Operator::ForAll => 10,
Operator::Dot => 9,
Operator::In => 8,
Operator::Isa => 8,
Operator::Mul => 7,
Operator::Div => 7,
Operator::Mod => 7,
Operator::Rem => 7,
Operator::Add => 6,
Operator::Sub => 6,
Operator::Eq => 5,
Operator::Geq => 5,
Operator::Leq => 5,
Operator::Neq => 5,
Operator::Gt => 5,
Operator::Lt => 5,
Operator::Unify => 4,
Operator::Assign => 4,
Operator::Not => 3,
Operator::And => 2,
Operator::Or => 1,
}
}
/// Helper method: uses the operator precedence to determine if `t`
/// has a lower precedence than `op`.
fn has_lower_pred(op: Operator, t: &Term) -> bool {
match t.value() {
Value::Expression(Operation {
operator: other, ..
}) => precedence(&op) > precedence(other),
_ => false,
}
}
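/// Wraps `t` in parentheses when it binds looser than the enclosing
/// operator `op`, so the printed string re-parses with the same structure:
/// printing the `*` in `(1 + 2) * 3` sees `Add` (6) below `Mul` (7) and
/// emits the left argument as `"(1 + 2)"`.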
pub fn to_polar_parens(op: Operator, t: &Term) -> String {
if has_lower_pred(op, t) {
format!("({})", t.to_polar())
} else {
t.to_polar()
}
}
pub mod display {
use crate::formatting::{format_args, format_params};
use std::fmt;
use std::sync::Arc;
use super::ToPolarString;
use crate::bindings::Binding;
use crate::numerics::Numeric; | use crate::rules::Rule;
use crate::terms::{Operation, Operator, Symbol, Term, Value};
use crate::vm::*;
impl fmt::Display for Binding {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "{} = {}", self.0.to_polar(), self.1.to_polar())
}
}
impl fmt::Display for Symbol {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "{}", self.0)
}
}
impl fmt::Display for Term {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "{}", self.to_polar())
}
}
impl fmt::Display for Choice {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
fmt,
"[{}] ++ [{}]",
self.goals
.iter()
.map(|g| g.to_string())
.collect::<Vec<String>>()
.join(", "),
self.alternatives
.iter()
.map(|alt| format!(
"[{}]",
alt.iter()
.map(|g| g.to_string())
.collect::<Vec<String>>()
.join(",")
))
.collect::<Vec<String>>()
.join(", ")
)
}
}
impl fmt::Display for Goal {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fn fmt_rules(rules: &[Arc<Rule>]) -> String {
rules
.iter()
.map(|rule| rule.to_polar())
.collect::<Vec<String>>()
.join(" ")
}
match self {
Goal::Isa { left, right } => {
write!(fmt, "Isa({}, {})", left.to_polar(), right.to_polar())
}
Goal::IsMoreSpecific { left, right, args } => write!(
fmt,
"IsMoreSpecific({} {} ({}))",
left.to_polar(),
right.to_polar(),
args.iter()
.map(|a| a.to_polar())
.collect::<Vec<String>>()
.join(", ")
),
Goal::IsSubspecializer {
left, right, arg, ..
} => write!(
fmt,
"IsSubspecializer({}, {}, {})",
left.to_polar(),
right.to_polar(),
arg.to_polar()
),
Goal::Lookup { dict, field, value } => write!(
fmt,
"Lookup({}.{} = {})",
dict.to_polar(),
field.to_polar(),
value.to_polar()
),
Goal::LookupExternal {
instance, field, ..
} => write!(
fmt,
"LookupExternal({}.{})",
instance.to_polar(),
field.to_polar(),
),
Goal::PopQuery { term } => write!(fmt, "PopQuery({})", term.to_polar()),
Goal::Query { term } => write!(fmt, "Query({})", term.to_polar()),
Goal::Run { .. } => write!(fmt, "Run(...)"),
Goal::FilterRules {
applicable_rules,
unfiltered_rules,
..
} => write!(
fmt,
"FilterRules([{}], [{}])",
fmt_rules(applicable_rules),
fmt_rules(unfiltered_rules),
),
Goal::SortRules {
rules,
outer,
inner,
..
} => write!(
fmt,
"SortRules([{}], outer={}, inner={})",
fmt_rules(rules),
outer,
inner,
),
Goal::TraceRule { trace: _ } => write!(
fmt,
"TraceRule(...)" // FIXME: draw trace?
),
Goal::Unify { left, right } => {
write!(fmt, "Unify({}, {})", left.to_polar(), right.to_polar())
}
g => write!(fmt, "{:?}", g),
}
}
}
impl fmt::Display for Rule {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match &self.body.value() {
Value::Expression(Operation {
operator: Operator::And,
args,
}) => {
if args.is_empty() {
write!(
fmt,
"{}({});",
self.name.to_polar(),
format_params(&self.params, ", ")
)
} else {
write!(
fmt,
"{}({}) if {};",
self.name.to_polar(),
format_params(&self.params, ", "),
format_args(Operator::And, args, ",\n "),
)
}
}
_ => panic!("Not any sorta rule I parsed"),
}
}
}
impl fmt::Display for Numeric {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Self::Integer(i) => write!(f, "{}", i),
Self::Float(float) => write!(f, "{}", float),
}
}
}
}
pub mod to_polar {
use crate::formatting::{format_args, format_params, to_polar_parens};
use crate::resource_block::{BlockType, ResourceBlock, ShorthandRule};
use crate::rules::*;
use crate::terms::*;
/// Effectively works as a reverse-parser. Allows types to be turned
/// back into polar-parseable strings.
pub trait ToPolarString {
fn to_polar(&self) -> String;
}
impl ToPolarString for Dictionary {
fn to_polar(&self) -> String {
let fields = self
.fields
.iter()
.map(|(k, v)| format!("{}: {}", k.to_polar(), v.to_polar()))
.collect::<Vec<String>>()
.join(", ");
format!("{{{}}}", fields)
}
}
impl ToPolarString for ExternalInstance {
fn to_polar(&self) -> String {
if let Some(ref repr) = self.repr {
repr.clone()
} else {
// Print out external instances like ^{id: 123}
// NOTE: this format is used by host libraries to enrich output
// messages with native representations of the instances.
format!("^{{id: {}}}", self.instance_id)
}
}
}
impl ToPolarString for InstanceLiteral {
fn to_polar(&self) -> String {
format!("{}{}", self.tag.to_polar(), self.fields.to_polar())
}
}
impl ToPolarString for Operator {
fn to_polar(&self) -> String {
use Operator::*;
match self {
Not => "not",
Mul => "*",
Div => "/",
Mod => "mod",
Rem => "rem",
Add => "+",
Sub => "-",
Eq => "==",
Geq => ">=",
Leq => "<=",
Neq => "!=",
Gt => ">",
Lt => "<",
Or => "or",
And => "and",
New => "new",
Dot => ".",
Unify => "=",
Assign => ":=",
In => "in",
Cut => "cut",
ForAll => "forall",
Debug => "debug",
Print => "print",
Isa => "matches",
}
.to_string()
}
}
impl ToPolarString for Operation {
fn to_polar(&self) -> String {
use Operator::*;
            // Add parentheses when sub-expressions have lower precedence
            // (mirroring the parentheses the original parse required), so the
            // emitted string re-parses to the same AST.
match self.operator {
Debug => "debug()".to_owned(),
Print => format!("print({})", format_args(self.operator, &self.args, ", ")),
Cut => "cut".to_owned(),
ForAll => format!(
"forall({}, {})",
self.args[0].to_polar(),
self.args[1].to_polar()
),
New => {
if self.args.len() == 1 {
format!("new {}", to_polar_parens(self.operator, &self.args[0]))
} else {
format!(
"new ({}, {})",
to_polar_parens(self.operator, &self.args[0]),
self.args[1].to_polar()
)
}
}
// Lookup operator
Dot => {
let call_term = if let Value::String(s) = self.args[1].value() {
s.to_string()
} else {
self.args[1].to_polar()
};
match self.args.len() {
2 => format!("{}.{}", self.args[0].to_polar(), call_term),
3 => format!(
"{}.{} = {}",
self.args[0].to_polar(),
call_term,
self.args[2].to_polar()
),
// Invalid
_ => format!(".({})", format_args(self.operator, &self.args, ", ")),
}
}
// Unary operators
Not => format!(
"{} {}",
self.operator.to_polar(),
to_polar_parens(self.operator, &self.args[0])
),
// Binary operators
Mul | Div | Mod | Rem | Add | Sub | Eq | Geq | Leq | Neq | Gt | Lt | Unify
| Isa | In | Assign => match self.args.len() {
2 => format!(
"{} {} {}",
to_polar_parens(self.operator, &self.args[0]),
self.operator.to_polar(),
to_polar_parens(self.operator, &self.args[1]),
),
3 => format!(
"{} {} {} = {}",
to_polar_parens(self.operator, &self.args[0]),
self.operator.to_polar(),
to_polar_parens(self.operator, &self.args[1]),
to_polar_parens(self.operator, &self.args[2]),
),
// Invalid
_ => format!(
"{}({})",
self.operator.to_polar(),
format_args(self.operator, &self.args, ", ")
),
},
// n-ary operators
And if self.args.is_empty() => "(true)".to_string(),
And => format_args(
self.operator,
&self.args,
&format!(" {} ", self.operator.to_polar()),
),
Or if self.args.is_empty() => "(false)".to_string(),
Or => format_args(
self.operator,
&self.args,
&format!(" {} ", self.operator.to_polar()),
),
}
}
}
impl ToPolarString for Parameter {
fn to_polar(&self) -> String {
match &self.specializer {
None => self.parameter.to_polar(),
Some(specializer) => {
format!("{}: {}", self.parameter.to_polar(), specializer.to_polar())
}
}
}
}
impl ToPolarString for Call {
fn to_polar(&self) -> String {
let args = format_args(Operator::And, &self.args, ", ");
let combined_args = match &self.kwargs {
Some(dict) => {
let kwargs = dict
.iter()
.map(|(k, v)| format!("{}: {}", k.to_polar(), v.to_polar()))
.collect::<Vec<String>>()
.join(", ");
if args.is_empty() {
kwargs
} else {
vec![args, kwargs].join(", ")
}
}
None => args,
};
format!("{}({})", self.name.to_polar(), combined_args)
}
}
impl ToPolarString for Rule {
fn to_polar(&self) -> String {
match &self.body.value() {
Value::Expression(Operation {
operator: Operator::And,
args,
}) => {
if args.is_empty() {
format!(
"{}({});",
self.name.to_polar(),
format_params(&self.params, ", ")
)
} else {
format!(
"{}({}) if {};",
self.name.to_polar(),
format_params(&self.params, ", "),
format_args(Operator::And, args, " and "),
)
}
}
_ => panic!("Not any sorta rule I parsed"),
}
}
}
impl ToPolarString for Symbol {
fn to_polar(&self) -> String {
self.0.to_string()
}
}
impl ToPolarString for Term {
fn to_polar(&self) -> String {
self.value().to_polar()
}
}
impl ToPolarString for Pattern {
fn to_polar(&self) -> String {
match self {
Pattern::Dictionary(d) => d.to_polar(),
Pattern::Instance(i) => i.to_polar(),
}
}
}
impl ToPolarString for Value {
fn to_polar(&self) -> String {
match self {
Value::Number(i) => format!("{}", i),
Value::String(s) => format!("\"{}\"", s),
Value::Boolean(b) => {
if *b {
"true".to_string()
} else {
"false".to_string()
}
}
Value::Dictionary(i) => i.to_polar(),
Value::Pattern(i) => i.to_polar(),
Value::ExternalInstance(i) => i.to_polar(),
Value::Call(c) => c.to_polar(),
Value::List(l) => format!("[{}]", format_args(Operator::And, l, ", "),),
Value::Variable(s) => s.to_polar(),
Value::RestVariable(s) => format!("*{}", s.to_polar()),
Value::Expression(e) => e.to_polar(),
}
}
}
impl ToPolarString for ShorthandRule {
fn to_polar(&self) -> String {
let Self {
head,
body: (implier, relation),
} = self;
if let Some(relation) = relation {
format!(
"{} if {} on {};",
head.to_polar(),
implier.to_polar(),
relation.to_polar()
)
} else {
format!("{} if {};", head.to_polar(), implier.to_polar())
}
}
}
impl ToPolarString for BlockType {
fn to_polar(&self) -> String {
match self {
Self::Actor => "actor".to_owned(),
Self::Resource => "resource".to_owned(),
}
}
}
impl ToPolarString for ResourceBlock {
fn to_polar(&self) -> String {
let mut s = format!(
"{} {} {{\n",
self.block_type.to_polar(),
self.resource.to_polar()
);
if let Some(ref roles) = self.roles {
s += &format!(" roles = {};\n", roles.to_polar());
}
if let Some(ref permissions) = self.permissions {
s += &format!(" permissions = {};\n", permissions.to_polar());
}
if let Some(ref relations) = self.relations {
s += &format!(" relations = {};\n", relations.to_polar());
}
for rule in &self.shorthand_rules {
s += &format!(" {}\n", rule.to_polar());
}
s += "}";
s
}
}
} | |
workspace.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package latest
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// A workspace
// Latest API Version: 2020-12-01.
//
// Deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-native:synapse:Workspace'.
type Workspace struct {
pulumi.CustomResourceState
// The ADLA resource ID.
AdlaResourceId pulumi.StringOutput `pulumi:"adlaResourceId"`
// Connectivity endpoints
ConnectivityEndpoints pulumi.StringMapOutput `pulumi:"connectivityEndpoints"`
// Workspace default data lake storage account details
DefaultDataLakeStorage DataLakeStorageAccountDetailsResponsePtrOutput `pulumi:"defaultDataLakeStorage"`
// The encryption details of the workspace
Encryption EncryptionDetailsResponsePtrOutput `pulumi:"encryption"`
// Workspace level configs and feature flags
ExtraProperties pulumi.MapOutput `pulumi:"extraProperties"`
// Identity of the workspace
Identity ManagedIdentityResponsePtrOutput `pulumi:"identity"`
// The geo-location where the resource lives
Location pulumi.StringOutput `pulumi:"location"`
// Workspace managed resource group. The resource group name uniquely identifies the resource group within the user subscriptionId. The resource group name must be no longer than 90 characters long, and must be alphanumeric characters (Char.IsLetterOrDigit()) and '-', '_', '(', ')' and'.'. Note that the name cannot end with '.'
ManagedResourceGroupName pulumi.StringPtrOutput `pulumi:"managedResourceGroupName"`
// Setting this to 'default' will ensure that all compute for this workspace is in a virtual network managed on behalf of the user.
ManagedVirtualNetwork pulumi.StringPtrOutput `pulumi:"managedVirtualNetwork"`
// Managed Virtual Network Settings
ManagedVirtualNetworkSettings ManagedVirtualNetworkSettingsResponsePtrOutput `pulumi:"managedVirtualNetworkSettings"`
// The name of the resource
Name pulumi.StringOutput `pulumi:"name"`
// Private endpoint connections to the workspace
PrivateEndpointConnections PrivateEndpointConnectionResponseArrayOutput `pulumi:"privateEndpointConnections"`
// Resource provisioning state
ProvisioningState pulumi.StringOutput `pulumi:"provisioningState"`
// Purview Configuration
PurviewConfiguration PurviewConfigurationResponsePtrOutput `pulumi:"purviewConfiguration"`
// Login for workspace SQL active directory administrator
SqlAdministratorLogin pulumi.StringPtrOutput `pulumi:"sqlAdministratorLogin"`
// SQL administrator login password
SqlAdministratorLoginPassword pulumi.StringPtrOutput `pulumi:"sqlAdministratorLoginPassword"`
// Resource tags.
Tags pulumi.StringMapOutput `pulumi:"tags"`
// The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
Type pulumi.StringOutput `pulumi:"type"`
// Virtual Network profile
VirtualNetworkProfile VirtualNetworkProfileResponsePtrOutput `pulumi:"virtualNetworkProfile"`
// Git integration settings
WorkspaceRepositoryConfiguration WorkspaceRepositoryConfigurationResponsePtrOutput `pulumi:"workspaceRepositoryConfiguration"`
// The workspace unique identifier
WorkspaceUID pulumi.StringOutput `pulumi:"workspaceUID"`
}
// NewWorkspace registers a new resource with the given unique name, arguments, and options.
func NewWorkspace(ctx *pulumi.Context,
name string, args *WorkspaceArgs, opts ...pulumi.ResourceOption) (*Workspace, error) {
	if args == nil {
		return nil, errors.New("missing one or more required arguments")
	}
if args.ResourceGroupName == nil {
return nil, errors.New("invalid value for required argument 'ResourceGroupName'")
}
aliases := pulumi.Aliases([]pulumi.Alias{
{
Type: pulumi.String("azure-nextgen:synapse/latest:Workspace"),
},
{
Type: pulumi.String("azure-native:synapse:Workspace"),
},
{
Type: pulumi.String("azure-nextgen:synapse:Workspace"),
},
{
Type: pulumi.String("azure-native:synapse/v20190601preview:Workspace"),
},
{
Type: pulumi.String("azure-nextgen:synapse/v20190601preview:Workspace"),
},
{
Type: pulumi.String("azure-native:synapse/v20201201:Workspace"),
},
{
Type: pulumi.String("azure-nextgen:synapse/v20201201:Workspace"),
},
{
Type: pulumi.String("azure-native:synapse/v20210301:Workspace"),
},
{
Type: pulumi.String("azure-nextgen:synapse/v20210301:Workspace"),
},
})
opts = append(opts, aliases)
var resource Workspace
err := ctx.RegisterResource("azure-native:synapse/latest:Workspace", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
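// A minimal usage sketch (hypothetical scaffolding — the package alias
// "synapse" and all argument values below are assumptions, not part of this
// generated file):
//
//	ws, err := synapse.NewWorkspace(ctx, "example-ws", &synapse.WorkspaceArgs{
//		ResourceGroupName: pulumi.String("example-rg"),
//	})
//	if err != nil {
//		return err
//	}
//	ctx.Export("workspaceName", ws.Name)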
// GetWorkspace gets an existing Workspace resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetWorkspace(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *WorkspaceState, opts ...pulumi.ResourceOption) (*Workspace, error) {
var resource Workspace
err := ctx.ReadResource("azure-native:synapse/latest:Workspace", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering Workspace resources.
type workspaceState struct {
// The ADLA resource ID.
AdlaResourceId *string `pulumi:"adlaResourceId"`
// Connectivity endpoints
ConnectivityEndpoints map[string]string `pulumi:"connectivityEndpoints"`
// Workspace default data lake storage account details
DefaultDataLakeStorage *DataLakeStorageAccountDetailsResponse `pulumi:"defaultDataLakeStorage"`
// The encryption details of the workspace
Encryption *EncryptionDetailsResponse `pulumi:"encryption"`
// Workspace level configs and feature flags
ExtraProperties map[string]interface{} `pulumi:"extraProperties"`
// Identity of the workspace
Identity *ManagedIdentityResponse `pulumi:"identity"`
// The geo-location where the resource lives
Location *string `pulumi:"location"`
// Workspace managed resource group. The resource group name uniquely identifies the resource group within the user subscriptionId. The resource group name must be no longer than 90 characters long, and must be alphanumeric characters (Char.IsLetterOrDigit()) and '-', '_', '(', ')' and'.'. Note that the name cannot end with '.'
ManagedResourceGroupName *string `pulumi:"managedResourceGroupName"`
// Setting this to 'default' will ensure that all compute for this workspace is in a virtual network managed on behalf of the user.
ManagedVirtualNetwork *string `pulumi:"managedVirtualNetwork"`
// Managed Virtual Network Settings
ManagedVirtualNetworkSettings *ManagedVirtualNetworkSettingsResponse `pulumi:"managedVirtualNetworkSettings"`
// The name of the resource
Name *string `pulumi:"name"`
// Private endpoint connections to the workspace
PrivateEndpointConnections []PrivateEndpointConnectionResponse `pulumi:"privateEndpointConnections"`
// Resource provisioning state
ProvisioningState *string `pulumi:"provisioningState"`
// Purview Configuration
PurviewConfiguration *PurviewConfigurationResponse `pulumi:"purviewConfiguration"`
// Login for workspace SQL active directory administrator
SqlAdministratorLogin *string `pulumi:"sqlAdministratorLogin"`
// SQL administrator login password
SqlAdministratorLoginPassword *string `pulumi:"sqlAdministratorLoginPassword"`
// Resource tags.
Tags map[string]string `pulumi:"tags"`
// The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
Type *string `pulumi:"type"`
// Virtual Network profile
VirtualNetworkProfile *VirtualNetworkProfileResponse `pulumi:"virtualNetworkProfile"`
// Git integration settings
WorkspaceRepositoryConfiguration *WorkspaceRepositoryConfigurationResponse `pulumi:"workspaceRepositoryConfiguration"`
// The workspace unique identifier
WorkspaceUID *string `pulumi:"workspaceUID"`
}
type WorkspaceState struct {
// The ADLA resource ID.
AdlaResourceId pulumi.StringPtrInput
// Connectivity endpoints
ConnectivityEndpoints pulumi.StringMapInput
// Workspace default data lake storage account details
DefaultDataLakeStorage DataLakeStorageAccountDetailsResponsePtrInput
// The encryption details of the workspace
Encryption EncryptionDetailsResponsePtrInput
// Workspace level configs and feature flags
ExtraProperties pulumi.MapInput
// Identity of the workspace
Identity ManagedIdentityResponsePtrInput
// The geo-location where the resource lives
Location pulumi.StringPtrInput
// Workspace managed resource group. The resource group name uniquely identifies the resource group within the user subscriptionId. The resource group name must be no longer than 90 characters long, and must be alphanumeric characters (Char.IsLetterOrDigit()) and '-', '_', '(', ')' and'.'. Note that the name cannot end with '.'
ManagedResourceGroupName pulumi.StringPtrInput
// Setting this to 'default' will ensure that all compute for this workspace is in a virtual network managed on behalf of the user.
ManagedVirtualNetwork pulumi.StringPtrInput
// Managed Virtual Network Settings
ManagedVirtualNetworkSettings ManagedVirtualNetworkSettingsResponsePtrInput
// The name of the resource
Name pulumi.StringPtrInput
// Private endpoint connections to the workspace
PrivateEndpointConnections PrivateEndpointConnectionResponseArrayInput
// Resource provisioning state
ProvisioningState pulumi.StringPtrInput
// Purview Configuration
PurviewConfiguration PurviewConfigurationResponsePtrInput
// Login for workspace SQL active directory administrator
SqlAdministratorLogin pulumi.StringPtrInput
// SQL administrator login password
SqlAdministratorLoginPassword pulumi.StringPtrInput
// Resource tags.
Tags pulumi.StringMapInput
// The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
Type pulumi.StringPtrInput
// Virtual Network profile
VirtualNetworkProfile VirtualNetworkProfileResponsePtrInput
// Git integration settings
WorkspaceRepositoryConfiguration WorkspaceRepositoryConfigurationResponsePtrInput
// The workspace unique identifier
WorkspaceUID pulumi.StringPtrInput
}
func (WorkspaceState) ElementType() reflect.Type {
return reflect.TypeOf((*workspaceState)(nil)).Elem()
}
type workspaceArgs struct {
// Connectivity endpoints
ConnectivityEndpoints map[string]string `pulumi:"connectivityEndpoints"`
// Workspace default data lake storage account details
DefaultDataLakeStorage *DataLakeStorageAccountDetails `pulumi:"defaultDataLakeStorage"`
// The encryption details of the workspace
Encryption *EncryptionDetails `pulumi:"encryption"`
// Identity of the workspace
Identity *ManagedIdentity `pulumi:"identity"`
// The geo-location where the resource lives
Location *string `pulumi:"location"`
// Workspace managed resource group. The resource group name uniquely identifies the resource group within the user subscriptionId. The resource group name must be no longer than 90 characters long, and must be alphanumeric characters (Char.IsLetterOrDigit()) and '-', '_', '(', ')' and'.'. Note that the name cannot end with '.'
ManagedResourceGroupName *string `pulumi:"managedResourceGroupName"`
// Setting this to 'default' will ensure that all compute for this workspace is in a virtual network managed on behalf of the user.
ManagedVirtualNetwork *string `pulumi:"managedVirtualNetwork"`
// Managed Virtual Network Settings
ManagedVirtualNetworkSettings *ManagedVirtualNetworkSettings `pulumi:"managedVirtualNetworkSettings"`
// Private endpoint connections to the workspace
PrivateEndpointConnections []PrivateEndpointConnectionType `pulumi:"privateEndpointConnections"`
// Purview Configuration
PurviewConfiguration *PurviewConfiguration `pulumi:"purviewConfiguration"`
// The name of the resource group. The name is case insensitive.
ResourceGroupName string `pulumi:"resourceGroupName"`
// Login for workspace SQL active directory administrator
SqlAdministratorLogin *string `pulumi:"sqlAdministratorLogin"`
// SQL administrator login password
SqlAdministratorLoginPassword *string `pulumi:"sqlAdministratorLoginPassword"`
// Resource tags.
Tags map[string]string `pulumi:"tags"`
// Virtual Network profile
VirtualNetworkProfile *VirtualNetworkProfile `pulumi:"virtualNetworkProfile"`
// The name of the workspace
WorkspaceName *string `pulumi:"workspaceName"`
// Git integration settings
WorkspaceRepositoryConfiguration *WorkspaceRepositoryConfiguration `pulumi:"workspaceRepositoryConfiguration"`
}
// The set of arguments for constructing a Workspace resource.
type WorkspaceArgs struct {
// Connectivity endpoints
ConnectivityEndpoints pulumi.StringMapInput
// Workspace default data lake storage account details
DefaultDataLakeStorage DataLakeStorageAccountDetailsPtrInput
// The encryption details of the workspace
Encryption EncryptionDetailsPtrInput
// Identity of the workspace
Identity ManagedIdentityPtrInput
// The geo-location where the resource lives
Location pulumi.StringPtrInput
// Workspace managed resource group. The resource group name uniquely identifies the resource group within the user subscriptionId. The resource group name must be no longer than 90 characters long, and must be alphanumeric characters (Char.IsLetterOrDigit()) and '-', '_', '(', ')' and'.'. Note that the name cannot end with '.'
ManagedResourceGroupName pulumi.StringPtrInput
// Setting this to 'default' will ensure that all compute for this workspace is in a virtual network managed on behalf of the user.
ManagedVirtualNetwork pulumi.StringPtrInput
// Managed Virtual Network Settings
ManagedVirtualNetworkSettings ManagedVirtualNetworkSettingsPtrInput
// Private endpoint connections to the workspace
PrivateEndpointConnections PrivateEndpointConnectionTypeArrayInput
// Purview Configuration
PurviewConfiguration PurviewConfigurationPtrInput
// The name of the resource group. The name is case insensitive.
ResourceGroupName pulumi.StringInput
// Login for workspace SQL active directory administrator
SqlAdministratorLogin pulumi.StringPtrInput
// SQL administrator login password
SqlAdministratorLoginPassword pulumi.StringPtrInput
// Resource tags.
Tags pulumi.StringMapInput
// Virtual Network profile
VirtualNetworkProfile VirtualNetworkProfilePtrInput
// The name of the workspace
WorkspaceName pulumi.StringPtrInput
// Git integration settings
WorkspaceRepositoryConfiguration WorkspaceRepositoryConfigurationPtrInput
}
func (WorkspaceArgs) ElementType() reflect.Type {
return reflect.TypeOf((*workspaceArgs)(nil)).Elem()
}
type WorkspaceInput interface {
pulumi.Input
ToWorkspaceOutput() WorkspaceOutput
ToWorkspaceOutputWithContext(ctx context.Context) WorkspaceOutput
}
func (*Workspace) ElementType() reflect.Type {
return reflect.TypeOf((*Workspace)(nil))
}
func (i *Workspace) ToWorkspaceOutput() WorkspaceOutput {
return i.ToWorkspaceOutputWithContext(context.Background())
}
func (i *Workspace) ToWorkspaceOutputWithContext(ctx context.Context) WorkspaceOutput {
return pulumi.ToOutputWithContext(ctx, i).(WorkspaceOutput)
}
type WorkspaceOutput struct {
*pulumi.OutputState
}
func (WorkspaceOutput) ElementType() reflect.Type {
return reflect.TypeOf((*Workspace)(nil))
}
func (o WorkspaceOutput) ToWorkspaceOutput() WorkspaceOutput {
return o
}
func (o WorkspaceOutput) ToWorkspaceOutputWithContext(ctx context.Context) WorkspaceOutput {
return o
}
func init() {
pulumi.RegisterOutputType(WorkspaceOutput{})
}
genesis_test.go
package types
import (
"bytes"
"testing"
"github.com/stretchr/testify/require"
)
func TestValidateGenesisState(t *testing.T) {
specs := map[string]struct {
srcMutator func(*GenesisState)
expError bool
}{
"all good": {
srcMutator: func(s *GenesisState) {},
},
"params invalid": {
srcMutator: func(s *GenesisState) {
s.Params = Params{}
},
expError: true,
},
"codeinfo invalid": {
srcMutator: func(s *GenesisState) {
s.Codes[0].CodeInfo.CodeHash = nil
},
expError: true,
},
"contract invalid": {
srcMutator: func(s *GenesisState) {
s.Contracts[0].ContractAddress = "invalid"
},
expError: true,
},
"sequence invalid": {
srcMutator: func(s *GenesisState) {
s.Sequences[0].IDKey = nil
},
expError: true,
},
}
for msg, spec := range specs {
t.Run(msg, func(t *testing.T) {
state := GenesisFixture(spec.srcMutator)
got := state.ValidateBasic()
if spec.expError {
require.Error(t, got)
return
}
require.NoError(t, got)
})
}
}
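// The tests in this file share one table-driven shape: each named case mutates
// a single field of an otherwise-valid fixture and asserts that ValidateBasic
// rejects (expError: true) or accepts the result.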
func TestCodeValidateBasic(t *testing.T) {
specs := map[string]struct {
srcMutator func(*Code)
expError bool
}{
"all good": {srcMutator: func(_ *Code) {}},
"code id invalid": {
srcMutator: func(c *Code) {
c.CodeID = 0
},
expError: true,
},
"codeinfo invalid": {
srcMutator: func(c *Code) {
c.CodeInfo.CodeHash = nil
},
expError: true,
},
"codeBytes empty": {
srcMutator: func(c *Code) {
c.CodeBytes = []byte{}
},
expError: true,
},
"codeBytes nil": {
srcMutator: func(c *Code) {
c.CodeBytes = nil
},
expError: true,
},
"codeBytes greater limit": {
srcMutator: func(c *Code) {
c.CodeBytes = bytes.Repeat([]byte{0x1}, MaxWasmSize+1)
},
expError: true,
},
}
for msg, spec := range specs {
t.Run(msg, func(t *testing.T) {
state := CodeFixture(spec.srcMutator)
got := state.ValidateBasic()
if spec.expError {
require.Error(t, got)
return
}
require.NoError(t, got)
})
}
}
func TestContractValidateBasic(t *testing.T) {
specs := map[string]struct {
srcMutator func(*Contract)
expError bool
}{
"all good": {srcMutator: func(_ *Contract) {}},
"contract address invalid": {
srcMutator: func(c *Contract) {
c.ContractAddress = "invalid"
},
expError: true,
},
"contract info invalid": {
srcMutator: func(c *Contract) {
c.ContractInfo.Creator = "invalid"
},
expError: true,
},
"contract with created set": {
srcMutator: func(c *Contract) {
c.ContractInfo.Created = &AbsoluteTxPosition{}
},
expError: true,
},
"contract state invalid": {
srcMutator: func(c *Contract) {
c.ContractState = append(c.ContractState, Model{})
},
expError: true,
},
}
for msg, spec := range specs {
t.Run(msg, func(t *testing.T) {
state := ContractFixture(spec.srcMutator)
got := state.ValidateBasic()
if spec.expError {
require.Error(t, got)
return
}
require.NoError(t, got)
})
}
}
comparison-store.ts
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Commit, ActionTree, Dispatch } from 'vuex';
import axios, { AxiosResponse } from 'axios';
import graph from '@/graph';
import { cancelToken } from '@/utils/cancelToken';
import * as types from '../../mutation-types';
import { DurationTime } from '@/types/global';
import { queryChartData } from '@/utils/queryChartData';
import fragmentAll from '@/graph/query/comparison';
import { ICurrentOptions, DataSourceType, ISelectConfig, MetricsType } from '@/types/comparison';
import {
ComparisonOption, InitSource, LinearType, ComparisonType,
ObjectType, ServiceType, ChangeType, StatusType, PercentileItem,
} from './comparison-const';
type GenericIdentityFn<T> = (arg: T) => T;
function identity<T>(arg: T): T {
return arg;
}
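// identity appears to serve only as a typing seed: initState.chartSource
// starts as this no-op function so the field is non-null, and the
// SET_CHARTVAL mutation later replaces it with plain chart-data objects.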
export interface State {
currentOptions: ICurrentOptions;
dataSource: DataSourceType;
chartSource: GenericIdentityFn<any>;
isPrevious: StatusType;
metricSource: MetricsType;
}
interface ActionsParamType {
duration: DurationTime;
}
const initState: State = {
currentOptions: ComparisonOption,
dataSource: InitSource,
chartSource: identity,
isPrevious: StatusType.Init,
metricSource: {} as MetricsType,
};
// getters
const getters = {
queryPreValue(state: State) {
const { preMetrics } = state.currentOptions;
const fragments = [];
let variable = null;
for (const metric of preMetrics) {
const preMetric = metric.key;
const preParam = (fragmentAll as any)[preMetric];
if (preParam) {
variable = preParam.variable;
fragments.push(preParam.fragment);
}
}
return `query queryData(${variable}) {${fragments.join(',')}}`;
},
queryNextValue(state: State) {
const { nextMetrics } = state.currentOptions;
const fragments = [];
let variable = null;
for (const metric of nextMetrics) {
const nextParam = (fragmentAll as any)[metric.key];
if (nextParam) {
variable = nextParam.variable;
fragments.push(nextParam.fragment);
}
}
return `query queryData(${variable}) {${fragments.join(',')}}`;
},
preConfig(state: State) {
const { currentOptions } = state;
const variablesData = {
serviceId: currentOptions.preService.key,
} as any;
const { key } = currentOptions.preType;
if (key === ObjectType.ServiceEndpoint) {
variablesData.endpointId = currentOptions.preObject.key;
variablesData.endpointName = currentOptions.preObject.label;
} else if (key === ObjectType.ServiceInstance) {
variablesData.instanceId = currentOptions.preObject.key;
} else if (key === ObjectType.Database) {
delete variablesData.serviceId;
variablesData.databaseId = currentOptions.preObject.key;
} else if (key === ObjectType.ServiceDependency) {
delete variablesData.serviceId;
variablesData.id = currentOptions.preObject.key;
}
return variablesData;
},
nextConfig(state: State) {
const { currentOptions } = state;
const { nextType, nextService, nextObject } = currentOptions;
let variablesData = {serviceId: nextService.key} as any;
if (nextType.key === ObjectType.ServiceEndpoint) {
variablesData = {
...variablesData,
endpointId: nextObject.key,
endpointName: nextObject.label,
};
} else if (nextType.key === ObjectType.ServiceInstance) {
variablesData = {
...variablesData,
instanceId: nextObject.key,
};
} else if (nextType.key === ObjectType.Database) {
delete variablesData.serviceId;
variablesData.databaseId = nextObject.key;
} else if (nextType.key === ObjectType.ServiceDependency) {
delete variablesData.serviceId;
variablesData.id = nextObject.key;
}
return variablesData;
},
ChangeType() {
return {
PreService: ChangeType.PreService,
PreType: ChangeType.PreType,
PreObject: ChangeType.PreObject,
PreMetrics: ChangeType.PreMetrics,
NextService: ChangeType.NextService,
NextType: ChangeType.NextType,
NextObject: ChangeType.NextObject,
NextMetrics: ChangeType.NextMetrics,
};
},
AllMetrics() {
const { service, database } = queryChartData;
const MetricsObj = {
Service: [],
ServiceEndpoint: [],
ServiceInstance: [],
Database: [],
ServiceDependency: [],
} as MetricsType;
for (const item of service) {
if (!LinearType.includes(item.c)) {
continue;
}
if (item.o === ObjectType.Service) {
MetricsObj.Service.push({
label: item.t,
key: item.d,
});
} else if (item.o === ObjectType.ServiceInstance) {
MetricsObj.ServiceInstance.push({
label: item.t,
key: item.d,
});
} else if (item.o === ObjectType.ServiceEndpoint) {
MetricsObj.ServiceEndpoint.push({
label: item.t,
key: item.d,
});
} else if (item.o === ObjectType.ServiceDependency) {
MetricsObj.ServiceDependency.push({
label: item.t,
key: item.d,
});
}
}
for (const data of database) {
if (!LinearType.includes(data.c)) {
continue;
}
if (data.o === ObjectType.Database) {
MetricsObj.Database.push({
label: data.t,
key: data.d,
});
}
}
return MetricsObj;
},
};
// mutations
const mutations = {
[types.SET_ISPREVIOUS](state: State, data: StatusType) {
state.isPrevious = data;
},
[types.SET_METRICSOURCE](state: State, source: MetricsType) {
state.metricSource = source;
},
[types.SET_SERVICES](state: State, data: {services: any[]}) {
const { services } = data;
if (!services.length) {
return;
}
state.dataSource.preServiceSource = services;
state.dataSource.nextServiceSource = services;
state.currentOptions.preService = services[0];
state.currentOptions.nextService = services[0];
},
[types.SET_CONFIG](state: State, data: any[]) {
if (!data.length) {
data = [{
key: '',
label: '',
}];
}
const { isPrevious, currentOptions, metricSource } = state as any;
const type = isPrevious === StatusType.Pre ? currentOptions.preType.key : currentOptions.nextType.key;
if (isPrevious === StatusType.Pre) {
state.dataSource.preObjectSource = data;
state.currentOptions.preObject = data[0];
state.dataSource.preMetricsSource = metricSource[type];
state.currentOptions.preMetrics = [metricSource[type][0]];
} else if (isPrevious === StatusType.Next) {
state.dataSource.nextObjectSource = data;
state.currentOptions.nextObject = data[0];
state.dataSource.nextMetricsSource = metricSource[type];
state.currentOptions.nextMetrics = [metricSource[type][1]];
} else {
state.currentOptions = {
...state.currentOptions,
nextObject: data[0],
preObject: data[0],
preMetrics: [metricSource[type][0]],
nextMetrics: [metricSource[type][1]],
preType: ComparisonType[2],
nextType: ComparisonType[2],
};
state.dataSource = {
...state.dataSource,
nextObjectSource: data,
preObjectSource: data,
preMetricsSource: metricSource[type],
nextMetricsSource: metricSource[type],
};
}
},
[types.SET_CHARTVAL](state: State, data: {value: any, type: string}) {
const { preObject, preService, preType } = state.currentOptions;
const { nextObject, nextService, nextType } = state.currentOptions;
const obj = {} as any;
for (const key of Object.keys(data.value)) {
let value = [] as any;
if (Array.isArray(data.value[key].values)) {
value = data.value[key].values.map((d: {value: number}) => d.value);
} else {
value = {};
PercentileItem.forEach((item, index) => {
value[item] = data.value[key][index].values.map((d: {value: number}) => d.value);
});
}
obj[key] = value;
}
for (const key of Object.keys(obj)) {
if (data.type === ServiceType.PREVIOUS) {
const str = `${preService.label}_`;
const strKeyPre = `${preType.key === ObjectType.Database ?
'' : str}${preType.key === ObjectType.Service ? '' : preObject.label}_${key}`;
obj[strKeyPre] = obj[key];
delete obj[key];
} else {
const str = `${nextObject.label}`;
const servicesLabel = `${nextService.label}_`;
const strKeyNext = `${nextType.key === ObjectType.Database ?
'' : servicesLabel}${nextType.key === ObjectType.Service ? '' : str}_${key}`;
obj[strKeyNext] = obj[key];
delete obj[key];
}
}
state.chartSource = {
...obj,
...state.chartSource,
};
},
[types.UPDATE_CONFIG](state: any, data: ISelectConfig) {
const {type, option} = data;
const { currentOptions, isPrevious } = state;
const { nextType, preType } = currentOptions;
if (type === ChangeType.NextMetrics || type === ChangeType.PreMetrics) {
const metrics = currentOptions[type];
const item = metrics.findIndex((d: any) => d.key === option.key);
if (item > -1) {
state.currentOptions[type] = metrics.filter((d: any) => d.key !== option.key);
} else {
state.currentOptions[type].push(option);
}
} else {
state.currentOptions[type] = option;
}
},
[types.CLEAR_CHART_VAL](state: State) {
state.chartSource = {} as any;
},
[types.SELECT_TYPE_SERVICES](state: State) {
const { preType, nextType } = state.currentOptions;
const { isPrevious, metricSource } = state as any;
if (isPrevious === StatusType.Pre) {
state.dataSource.preMetricsSource = metricSource[preType.key] || [];
state.currentOptions.preMetrics = [metricSource[preType.key][0]];
} else {
state.dataSource.nextMetricsSource = metricSource[nextType.key] || [];
state.currentOptions.nextMetrics = [metricSource[nextType.key][0]];
}
},
[types.SELECT_INSTANCE_DATABASE](state: State, data: any) {
const { preType, nextType } = state.currentOptions;
const { metricSource, isPrevious } = state as any;
if (isPrevious === StatusType.Next) {
state.dataSource.nextMetricsSource = metricSource[nextType.key];
state.currentOptions.nextMetrics = [metricSource[nextType.key][0]];
state.currentOptions.nextObject = data[0];
state.dataSource.nextObjectSource = data;
} else if (isPrevious === StatusType.Pre) {
state.dataSource.preMetricsSource = metricSource[preType.key];
state.currentOptions.preMetrics = [metricSource[preType.key][0]];
state.currentOptions.preObject = data[0];
state.dataSource.preObjectSource = data;
}
},
[types.SET_SERVICE_TOPOLOGY](state: State, data: any) {
const { calls, nodes } = data;
const { metricSource } = state as any;
const { preType, nextType } = state.currentOptions;
for (const call of calls) {
for (const node of nodes) {
if (node.id === call.source) {
call.sourceLabel = node.name;
}
if (node.id === call.target) {
call.targetLabel = node.name;
}
}
}
const objectSource = calls.map((call: any) => {
return {
key: call.id,
label: `${call.sourceLabel}-${call.targetLabel}`,
};
});
if (state.isPrevious === StatusType.Next) {
state.dataSource.nextMetricsSource = metricSource[nextType.key];
state.currentOptions.nextMetrics = [metricSource[nextType.key][0]];
state.currentOptions.nextObject = objectSource[0];
state.dataSource.nextObjectSource = objectSource;
} else {
state.dataSource.preMetricsSource = metricSource[preType.key];
state.currentOptions.preMetrics = [metricSource[preType.key][0]];
state.currentOptions.preObject = objectSource[0];
state.dataSource.preObjectSource = objectSource;
}
},
};
// actions
const actions: ActionTree<State, ActionsParamType> = {
GET_SERVICES(context: {commit: Commit, dispatch: Dispatch, getters: any, state: State}, params: {
duration: string;
}) {
if (context.state.isPrevious !== StatusType.Init) {
return;
}
context.commit(types.SET_METRICSOURCE, context.getters.AllMetrics);
context.commit(types.SET_ISPREVIOUS, StatusType.Init);
return graph.query('queryServices').params(params)
.then((res: AxiosResponse) => {
if (!res.data.data) {
return;
}
context.commit(types.SET_SERVICES, {services: res.data.data.services});
}).then(() => {
context.dispatch('GET_SERVICE_ENDPOINTS', params.duration);
});
},
GET_SERVICE_ENDPOINTS(context: { commit: Commit, state: State, dispatch: Dispatch }, date: string) {
if (!context.state.currentOptions.preService) {
return new Promise((resolve) => resolve());
}
const { isPrevious, currentOptions } = context.state;
const servicesId = isPrevious === StatusType.Pre ? currentOptions.preService.key : currentOptions.nextService.key;
graph
.query('queryEndpoints')
.params({serviceId: servicesId, keyword: ''})
.then((res: AxiosResponse) => {
if (!res.data.data) {
return;
}
context.commit(types.SET_CONFIG, res.data.data.getEndpoints);
return res.data.data;
}).then((data) => {
if (!data.getEndpoints) {
return;
}
if (isPrevious === StatusType.Init) {
context.dispatch('RENDER_CHART', date);
}
});
},
GET_SERVICE_INSTANCES(context: { commit: Commit, state: State }, params) {
const { isPrevious, currentOptions } = context.state;
params.serviceId = isPrevious === StatusType.Pre ? currentOptions.preService.key : currentOptions.nextService.key;
return graph
.query('queryInstances')
.params(params)
.then((res: AxiosResponse) => {
if (!res.data) {
return;
}
context.commit(types.SELECT_INSTANCE_DATABASE, res.data.data.getServiceInstances);
});
},
GET_DATABASES(context: { commit: Commit, state: State }, params: {duration: string}) {
return graph
.query('queryDatabases')
.params(params)
.then((res: AxiosResponse) => {
if (!res.data) {
return;
}
context.commit(types.SELECT_INSTANCE_DATABASE, res.data.data.services);
});
},
GET_SERVICE_TOPOLOGY(context: { commit: Commit, state: State }, params) {
const { isPrevious, currentOptions } = context.state;
params.serviceId = isPrevious === StatusType.Pre ? currentOptions.preService.key : currentOptions.nextService.key;
return graph
.query('queryServiceTopo')
.params(params)
.then((res: AxiosResponse) => {
if (!res.data.data) {
return;
}
context.commit(types.SET_SERVICE_TOPOLOGY, res.data.data.topo);
});
},
RENDER_CHART(context: {dispatch: Dispatch, commit: Commit}, date: string) {
context.commit(types.CLEAR_CHART_VAL);
context.dispatch('GET_COMPARISON', {duration: date, type: ServiceType.PREVIOUS});
context.dispatch('GET_COMPARISON', {duration: date, type: ServiceType.NEXT});
},
SELECT_CONFIG(context: {commit: Commit, state: State, dispatch: Dispatch}, params: any) {
const isPrevious = params.type.includes(StatusType.Next) ? StatusType.Next : StatusType.Pre;
context.commit(types.SET_ISPREVIOUS, isPrevious);
context.commit(types.UPDATE_CONFIG, params);
const { currentOptions } = context.state;
const objType = isPrevious === StatusType.Next ? currentOptions.nextType : currentOptions.preType;
const typeList = [ChangeType.PreService, ChangeType.NextService, ChangeType.PreType, ChangeType.NextType];
if (typeList.includes(params.type)) {
if (objType.key === ObjectType.Service) {
context.commit(types.SELECT_TYPE_SERVICES);
} else if (objType.key === ObjectType.ServiceInstance) {
context.dispatch('GET_SERVICE_INSTANCES', {
duration: params.duration,
});
} else if (objType.key === ObjectType.ServiceEndpoint) {
context.dispatch('GET_SERVICE_ENDPOINTS', params.duration);
} else if (objType.key === ObjectType.Database) {
context.dispatch('GET_DATABASES', {duration: params.duration});
} else if (objType.key === ObjectType.ServiceDependency) {
context.dispatch('GET_SERVICE_TOPOLOGY', {duration: params.duration});
}
}
},
GET_COMPARISON(
context: {commit: Commit, state: State, dispatch: Dispatch, getters: any}, param: {duration: string, type: string},
) {
let variablesData = {
duration: param.duration,
} as any;
let queryVal = '';
if (param.type === ServiceType.PREVIOUS) {
variablesData = {
...variablesData,
...context.getters.preConfig,
};
queryVal = context.getters.queryPreValue;
} else {
variablesData = {
...variablesData,
...context.getters.nextConfig,
};
queryVal = context.getters.queryNextValue;
}
return axios.post('/graphql', {
query: queryVal,
variables: variablesData,
}, {cancelToken: cancelToken()}).then((res: AxiosResponse<any>) => {
const data = res.data.data;
if (!data) {
return;
}
context.dispatch('FORMAT_VALUE', {value: data, type: param.type});
});
},
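  // FORMAT_VALUE below normalizes raw backend units before charting: SLA
  // series arrive as basis points (divided by 100 to get a percentage), the
  // Apdex score as a 0-10000 integer (divided by 10000), and JVM/CLR memory
  // in bytes (divided by 1048576 to convert to MiB).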
FORMAT_VALUE(context: {commit: Commit, state: State, dispatch: Dispatch}, params: {value: any, type: string}) {
if (!(params && params.value)) {
return;
}
if (params.value.endpointSLA) {
params.value.endpointSLA.values = params.value.endpointSLA.values.map((i: any) => {
return {value: i.value / 100};
});
}
if (params.value.databaseSLA) {
params.value.databaseSLA.values = params.value.databaseSLA.values.map((i: any) => {
return {value: i.value / 100};
});
}
if (params.value.serviceSLA) {
params.value.serviceSLA.values = params.value.serviceSLA.values.map((i: any) => {
return {value: i.value / 100};
});
}
if (params.value.instanceSLA) {
params.value.instanceSLA.values = params.value.instanceSLA.values.map((i: any) => {
return {value: i.value / 100};
});
}
if (params.value.serviceApdexScore) {
params.value.serviceApdexScore.values = params.value.serviceApdexScore.values.map((i: any) => {
return {value: (i.value / 10000).toFixed(2)};
});
}
if (params.value.heap && params.value.maxHeap) {
params.value.heap.values = params.value.heap.values.map((i: any) => {
return {value: (i.value / 1048576).toFixed(2)};
});
params.value.maxHeap.values = params.value.maxHeap.values.map((i: any, index: number) => {
const val = i.value > -1 ? ((i.value / 1048576) - params.value.heap.values[index].value).toFixed(2) : 0;
return {value: val};
});
if (Math.max.apply(Math, params.value.maxHeap.values) === -1) {
params.value.maxHeap.values = 'Max Heap Unlimited';
}
}
if (params.value.nonheap && params.value.maxNonHeap) {
params.value.nonheap.values = params.value.nonheap.values.map((i: any) => {
return {value : (i.value / 1048576).toFixed(2)};
});
params.value.maxNonHeap.values = params.value.maxNonHeap.values
.map((i: any, index: number) => {
const val = i.value > -1 ? ((i.value / 1048576) - params.value.nonheap.values[index].value).toFixed(2) : 0;
return {value: val};
});
if (Math.max.apply(Math, params.value.maxNonHeap.values) === -1) {
params.value.maxNonHeap.values = 'Max NonHeap Unlimited';
}
}
if (params.value.clrHeap) {
params.value.clrHeap.values =
params.value.clrHeap.values.map((i: any) => {
return { value: (i.value / 1048576 ).toFixed(2)};
});
}
context.commit(types.SET_CHARTVAL, params);
},
};
export default {
namespaced: true,
state: initState,
getters,
actions,
mutations,
};
helheim_test.go
// This file was generated by github.com/nelsam/hel. Do not
// edit this code by hand unless you *really* know what you're
// doing. Expect any changes made manually to be overwritten
// the next time hel regenerates this file.
package conns_test
import (
"time"
"github.com/cloudfoundry/dropsonde/metricbatcher"
)
type mockReader struct {
ReadCalled chan bool
ReadInput struct {
Buf chan []byte
}
ReadOutput struct {
Len chan int
Err chan error
}
}
func newMockReader() *mockReader {
m := &mockReader{}
m.ReadCalled = make(chan bool, 100)
m.ReadInput.Buf = make(chan []byte, 100)
m.ReadOutput.Len = make(chan int, 100)
m.ReadOutput.Err = make(chan error, 100)
return m
}
func (m *mockReader) Read(buf []byte) (len int, err error) {
m.ReadCalled <- true
m.ReadInput.Buf <- buf
return <-m.ReadOutput.Len, <-m.ReadOutput.Err
}
type mockRanger struct {
DelayRangeCalled chan bool
DelayRangeOutput struct {
Min, Max chan time.Duration
}
}
func newMockRanger() *mockRanger {
m := &mockRanger{}
m.DelayRangeCalled = make(chan bool, 100)
m.DelayRangeOutput.Min = make(chan time.Duration, 100)
m.DelayRangeOutput.Max = make(chan time.Duration, 100)
return m
}
func (m *mockRanger) DelayRange() (min, max time.Duration) {
m.DelayRangeCalled <- true
return <-m.DelayRangeOutput.Min, <-m.DelayRangeOutput.Max
}
type mockMetricBatcher struct {
BatchCounterCalled chan bool
BatchCounterInput struct {
Name chan string
}
BatchCounterOutput struct {
Ret0 chan metricbatcher.BatchCounterChainer
}
}
func newMockMetricBatcher() *mockMetricBatcher {
m := &mockMetricBatcher{}
m.BatchCounterCalled = make(chan bool, 100)
m.BatchCounterInput.Name = make(chan string, 100)
m.BatchCounterOutput.Ret0 = make(chan metricbatcher.BatchCounterChainer, 100)
return m
}
func (m *mockMetricBatcher) BatchCounter(name string) metricbatcher.BatchCounterChainer {
m.BatchCounterCalled <- true
m.BatchCounterInput.Name <- name
return <-m.BatchCounterOutput.Ret0
}
type mockBatchCounterChainer struct {
SetTagCalled chan bool
SetTagInput struct {
Key, Value chan string
}
SetTagOutput struct {
Ret0 chan metricbatcher.BatchCounterChainer
}
IncrementCalled chan bool
AddCalled chan bool
AddInput struct {
Value chan uint64
}
}
func newMockBatchCounterChainer() *mockBatchCounterChainer {
m := &mockBatchCounterChainer{}
m.SetTagCalled = make(chan bool, 100)
m.SetTagInput.Key = make(chan string, 100)
m.SetTagInput.Value = make(chan string, 100)
m.SetTagOutput.Ret0 = make(chan metricbatcher.BatchCounterChainer, 100)
m.IncrementCalled = make(chan bool, 100)
m.AddCalled = make(chan bool, 100)
m.AddInput.Value = make(chan uint64, 100)
return m
}
func (m *mockBatchCounterChainer) SetTag(key, value string) metricbatcher.BatchCounterChainer {
m.SetTagCalled <- true
m.SetTagInput.Key <- key
m.SetTagInput.Value <- value
return <-m.SetTagOutput.Ret0
}
func (m *mockBatchCounterChainer) Increment() {
m.IncrementCalled <- true
}
func (m *mockBatchCounterChainer) Add(value uint64) {
m.AddCalled <- true
m.AddInput.Value <- value
}
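// The mocks above follow hel's channel-based style: methods record each call
// on a buffered *Called channel, push their arguments onto *Input channels,
// and pull canned return values from pre-seeded *Output channels, letting
// tests assert on calls without locks.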
applications.py | from corehq.apps.app_manager.dbaccessors import (
get_brief_apps_in_domain,
get_latest_released_app,
get_latest_released_app_versions_by_app_id,
)
from corehq.apps.linked_domain.models import DomainLink
from corehq.apps.linked_domain.remote_accessors import (
get_brief_apps,
get_latest_released_versions_by_app_id,
get_released_app,
)
def get_master_app_briefs(domain_link, family_id):
if domain_link.is_remote:
apps = get_brief_apps(domain_link)
else:
apps = get_brief_apps_in_domain(domain_link.master_domain, include_remote=False)
# Ignore deleted, linked and remote apps
return [app for app in apps if family_id in [app._id, app.family_id] and app.doc_type == 'Application']
def get_latest_master_app_release(domain_link, app_id):
master_domain = domain_link.master_domain
linked_domain = domain_link.linked_domain
if domain_link.is_remote:
return get_released_app(master_domain, app_id, linked_domain, domain_link.remote_details)
else:
return get_latest_released_app(master_domain, app_id)
def get_latest_master_releases_versions(domain_link):
if domain_link.is_remote:
return get_latest_released_versions_by_app_id(domain_link)
else:
return get_latest_released_app_versions_by_app_id(domain_link.master_domain)
def create_linked_app(master_domain, master_id, target_domain, target_name, remote_details=None):
|
def link_app(linked_app, master_domain, master_id, remote_details=None):
DomainLink.link_domains(linked_app.domain, master_domain, remote_details)
linked_app.family_id = master_id
linked_app.doc_type = 'LinkedApplication'
linked_app.save()
    return linked_app
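# Note: create_linked_app imports LinkedApplication locally, presumably to
# avoid a circular import with app_manager.models; link_app both persists the
# app and records the DomainLink between the two domains.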
structural.py
from typing import Tuple, Optional, List, Union
import torch
from torch.nn import *
import math
def gmm(x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
return torch.einsum('ndo,bnd->bno', w, x)
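# gmm applies a separate linear map per node: w has shape (nodes, d, o) and
# x has shape (batch, nodes, d), so the einsum yields (batch, nodes, o).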
class GraphLinear(Module):
def __init__(self, in_features: int, out_features: int):
super().__init__()
self.in_features = in_features
self.out_features = out_features
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
#stdv = 1. / math.sqrt(self.weight.size(1))
#self.weight.data.uniform_(-stdv, stdv)
#if self.learn_influence:
# self.G.data.uniform_(-stdv, stdv)
if len(self.weight.shape) == 3:
self.weight.data[1:] = self.weight.data[0]
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input: torch.Tensor, g: Optional[torch.Tensor] = None) -> torch.Tensor:
if g is None and self.learn_influence:
g = torch.nn.functional.normalize(self.G, p=1., dim=1)
#g = torch.softmax(self.G, dim=1)
elif g is None:
g = self.G
w = self.weight[self.node_type_index]
output = self.mm(input, w.transpose(-2, -1))
if self.bias is not None:
bias = self.bias[self.node_type_index]
output += bias
output = g.matmul(output)
return output
class DynamicGraphLinear(GraphLinear):
def __init__(self, num_node_types: int = 1, *args):
super().__init__(*args)
def forward(self, input: torch.Tensor, g: torch.Tensor = None, t: torch.Tensor = None) -> torch.Tensor:
assert g is not None or t is not None, "Either Graph Influence Matrix or Node Type Vector is needed"
if g is None:
g = self.G[t][:, t]
return super().forward(input, g)
class StaticGraphLinear(GraphLinear):
def __init__(self, *args, bias: bool = True, num_nodes: int = None, graph_influence: Union[torch.Tensor, Parameter] = None,
learn_influence: bool = False, node_types: torch.Tensor = None, weights_per_type: bool = False):
"""
:param in_features: Size of each input sample
:param out_features: Size of each output sample
:param num_nodes: Number of nodes.
:param graph_influence: Graph Influence Matrix
:param learn_influence: If set to ``False``, the layer will not learn an the Graph Influence Matrix.
:param node_types: List of Type for each node. All nodes of same type will share weights.
Default: All nodes have unique types.
:param weights_per_type: If set to ``False``, the layer will not learn weights for each node type.
:param bias: If set to ``False``, the layer will not learn an additive bias.
"""
super().__init__(*args)
self.learn_influence = learn_influence
if graph_influence is not None:
assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'
num_nodes = graph_influence.shape[0]
if type(graph_influence) is Parameter:
assert learn_influence, "Graph Influence Matrix is a Parameter, therefore it must be learnable."
self.G = graph_influence
elif learn_influence:
self.G = Parameter(graph_influence)
else:
self.register_buffer('G', graph_influence)
else:
assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'
eye_influence = torch.eye(num_nodes, num_nodes)
if learn_influence:
self.G = Parameter(eye_influence)
else:
self.register_buffer('G', eye_influence)
if weights_per_type and node_types is None:
node_types = torch.tensor([i for i in range(num_nodes)])
if node_types is not None:
num_node_types = node_types.max() + 1
self.weight = Parameter(torch.Tensor(num_node_types, self.out_features, self.in_features))
self.mm = gmm
self.node_type_index = node_types
else:
self.weight = Parameter(torch.Tensor(self.out_features, self.in_features))
self.mm = torch.matmul
self.node_type_index = None
if bias:
if node_types is not None:
self.bias = Parameter(torch.Tensor(num_node_types, self.out_features))
else:
self.bias = Parameter(torch.Tensor(self.out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
GraphLSTMState = Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]
class BN(Module):
def __init__(self, num_nodes, num_features):
super().__init__()
self.num_nodes = num_nodes
self.num_features = num_features
self.bn = BatchNorm1d(num_nodes * num_features)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.bn(x.view(-1, self.num_nodes * self.num_features)).view(-1, self.num_nodes, self.num_features)
class LinearX(Module):
def __init__(self):
super().__init__()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return input
class StaticGraphLSTMCell_(Module):
def __init__(self, input_size: int, hidden_size: int, num_nodes: int = None, dropout: float = 0.,
recurrent_dropout: float = 0., graph_influence: Union[torch.Tensor, Parameter] = None,
learn_influence: bool = False, additive_graph_influence: Union[torch.Tensor, Parameter] = None,
learn_additive_graph_influence: bool = False, node_types: torch.Tensor = None,
weights_per_type: bool = False, clockwork: bool = False, bias: bool = True):
"""
:param input_size: The number of expected features in the input `x`
:param hidden_size: The number of features in the hidden state `h`
:param num_nodes:
:param dropout:
:param recurrent_dropout:
:param graph_influence:
:param learn_influence:
:param additive_graph_influence:
:param learn_additive_graph_influence:
:param node_types:
:param weights_per_type:
:param bias:
"""
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.learn_influence = learn_influence
self.learn_additive_graph_influence = learn_additive_graph_influence
if graph_influence is not None:
assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'
num_nodes = graph_influence.shape[0]
if type(graph_influence) is Parameter:
assert learn_influence, "Graph Influence Matrix is a Parameter, therefore it must be learnable."
self.G = graph_influence
elif learn_influence:
self.G = Parameter(graph_influence)
else:
self.register_buffer('G', graph_influence)
else:
assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'
eye_influence = torch.eye(num_nodes, num_nodes)
if learn_influence:
self.G = Parameter(eye_influence)
else:
self.register_buffer('G', eye_influence)
if additive_graph_influence is not None:
if type(additive_graph_influence) is Parameter:
self.G_add = additive_graph_influence
elif learn_additive_graph_influence:
self.G_add = Parameter(additive_graph_influence)
else:
self.register_buffer('G_add', additive_graph_influence)
else:
if learn_additive_graph_influence:
self.G_add = Parameter(torch.zeros_like(self.G))
else:
self.G_add = 0.
if weights_per_type and node_types is None:
node_types = torch.tensor([i for i in range(num_nodes)])
if node_types is not None:
num_node_types = node_types.max() + 1
self.weight_ih = Parameter(torch.Tensor(num_node_types, 4 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(num_node_types, 4 * hidden_size, hidden_size))
self.mm = gmm
self.register_buffer('node_type_index', node_types)
else:
self.weight_ih = Parameter(torch.Tensor(4 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(4 * hidden_size, hidden_size))
self.mm = torch.matmul
self.register_buffer('node_type_index', None)
if bias:
if node_types is not None:
self.bias_ih = Parameter(torch.Tensor(num_node_types, 4 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(num_node_types, 4 * hidden_size))
else:
self.bias_ih = Parameter(torch.Tensor(4 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(4 * hidden_size))
else:
self.bias_ih = None
self.bias_hh = None
self.clockwork = clockwork
if clockwork:
phase = torch.arange(0., hidden_size)
phase = phase - phase.min()
phase = (phase / phase.max()) * 8.
phase += 1.
phase = torch.floor(phase)
self.register_buffer('phase', phase)
else:
phase = torch.ones(hidden_size)
self.register_buffer('phase', phase)
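        # With clockwork enabled, hidden units are assigned integer phases
        # 1..9; forward() only lets a unit update its cell state at time steps
        # where (t + 1) is divisible by its phase (see c_mask below).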
self.dropout = Dropout(dropout)
self.r_dropout = Dropout(recurrent_dropout)
self.num_nodes = num_nodes
self.init_weights()
def init_weights(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
if weight is self.G:
continue
if weight is self.G_add:
continue
weight.data.uniform_(-stdv, stdv)
if weight is self.weight_hh or weight is self.weight_ih and len(self.weight_ih.shape) == 3:
weight.data[1:] = weight.data[0]
def forward(self, input: torch.Tensor, state: GraphLSTMState, t: int = 0) -> Tuple[torch.Tensor, GraphLSTMState]:
hx, cx, gx = state
if hx is None:
hx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)
if cx is None:
cx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)
if gx is None and self.learn_influence:
gx = torch.nn.functional.normalize(self.G, p=1., dim=1)
#gx = torch.softmax(self.G, dim=1)
elif gx is None:
gx = self.G
hx = self.r_dropout(hx)
weight_ih = self.weight_ih[self.node_type_index]
weight_hh = self.weight_hh[self.node_type_index]
if self.bias_hh is not None:
bias_hh = self.bias_hh[self.node_type_index]
else:
bias_hh = 0.
c_mask = (torch.remainder(torch.tensor(t + 1., device=input.device), self.phase) < 0.01).type_as(cx)
gates = (self.dropout(self.mm(input, weight_ih.transpose(-2, -1))) +
self.mm(hx, weight_hh.transpose(-2, -1)) + bias_hh)
gates = torch.matmul(gx, gates)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 2)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = c_mask * ((forgetgate * cx) + (ingate * cellgate)) + (1 - c_mask) * cx
hy = outgate * torch.tanh(cy)
gx = gx + self.G_add
if self.learn_influence or self.learn_additive_graph_influence:
gx = torch.nn.functional.normalize(gx, p=1., dim=1)
#gx = torch.softmax(gx, dim=1)
return hy, (hy, cy, gx)
class StaticGraphLSTM_(Module):
def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, layer_dropout: float = 0.0, **kwargs):
super().__init__()
self.layers = ModuleList([StaticGraphLSTMCell_(input_size, hidden_size, **kwargs)]
+ [StaticGraphLSTMCell_(hidden_size, hidden_size, **kwargs) for _ in range(num_layers - 1)])
self.dropout = Dropout(layer_dropout)
def forward(self, input: torch.Tensor, states: Optional[List[GraphLSTMState]] = None, t_i: int = 0) -> Tuple[torch.Tensor, List[GraphLSTMState]]:
if states is None:
n: Optional[torch.Tensor] = None
states = [(n, n, n)] * len(self.layers)
output_states: List[GraphLSTMState] = []
output = input
i = 0
for rnn_layer in self.layers:
state = states[i]
inputs = output.unbind(1)
outputs: List[torch.Tensor] = []
for t, input in enumerate(inputs):
out, state = rnn_layer(input, state, t_i+t)
outputs += [out]
output = torch.stack(outputs, dim=1)
output = self.dropout(output)
output_states += [state]
i += 1
return output, output_states
def StaticGraphLSTM(*args, **kwargs):
return torch.jit.script(StaticGraphLSTM_(*args, **kwargs))
GraphGRUState = Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]
class StaticGraphGRUCell_(Module):
    def __init__(self, input_size: int, hidden_size: int, num_nodes: int = None, dropout: float = 0.,
recurrent_dropout: float = 0., graph_influence: Union[torch.Tensor, Parameter] = None,
learn_influence: bool = False, additive_graph_influence: Union[torch.Tensor, Parameter] = None,
learn_additive_graph_influence: bool = False, node_types: torch.Tensor = None,
weights_per_type: bool = False, clockwork: bool = False, bias: bool = True):
"""
:param input_size: The number of expected features in the input `x`
:param hidden_size: The number of features in the hidden state `h`
:param num_nodes:
:param dropout:
:param recurrent_dropout:
:param graph_influence:
:param learn_influence:
:param additive_graph_influence:
:param learn_additive_graph_influence:
:param node_types:
:param weights_per_type:
:param bias:
"""
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.learn_influence = learn_influence
self.learn_additive_graph_influence = learn_additive_graph_influence
if graph_influence is not None:
assert num_nodes == graph_influence.shape[0] or num_nodes is None, 'Number of Nodes or Graph Influence Matrix has to be given.'
num_nodes = graph_influence.shape[0]
if type(graph_influence) is Parameter:
assert learn_influence, "Graph Influence Matrix is a Parameter, therefore it must be learnable."
self.G = graph_influence
elif learn_influence:
self.G = Parameter(graph_influence)
else:
self.register_buffer('G', graph_influence)
else:
assert num_nodes, 'Number of Nodes or Graph Influence Matrix has to be given.'
eye_influence = torch.eye(num_nodes, num_nodes)
if learn_influence:
self.G = Parameter(eye_influence)
else:
self.register_buffer('G', eye_influence)
if additive_graph_influence is not None:
if type(additive_graph_influence) is Parameter:
self.G_add = additive_graph_influence
elif learn_additive_graph_influence:
self.G_add = Parameter(additive_graph_influence)
else:
self.register_buffer('G_add', additive_graph_influence)
else:
if learn_additive_graph_influence:
self.G_add = Parameter(torch.zeros_like(self.G))
else:
self.G_add = 0.
if weights_per_type and node_types is None:
node_types = torch.tensor([i for i in range(num_nodes)])
if node_types is not None:
num_node_types = node_types.max() + 1
self.weight_ih = Parameter(torch.Tensor(num_node_types, 3 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(num_node_types, 3 * hidden_size, hidden_size))
self.mm = gmm
self.register_buffer('node_type_index', node_types)
else:
self.weight_ih = Parameter(torch.Tensor(3 * hidden_size, input_size))
self.weight_hh = Parameter(torch.Tensor(3 * hidden_size, hidden_size))
self.mm = torch.matmul
self.register_buffer('node_type_index', None)
if bias:
if node_types is not None:
self.bias_ih = Parameter(torch.Tensor(num_node_types, 3 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(num_node_types, 3 * hidden_size))
else:
self.bias_ih = Parameter(torch.Tensor(3 * hidden_size))
self.bias_hh = Parameter(torch.Tensor(3 * hidden_size))
else:
self.bias_ih = None
self.bias_hh = None
self.clockwork = clockwork
if clockwork:
phase = torch.arange(0., hidden_size)
phase = phase - phase.min()
phase = (phase / phase.max()) * 8.
phase += 1.
phase = torch.floor(phase)
self.register_buffer('phase', phase)
else:
phase = torch.ones(hidden_size)
self.register_buffer('phase', phase)
self.dropout = Dropout(dropout)
self.r_dropout = Dropout(recurrent_dropout)
self.num_nodes = num_nodes
self.init_weights()
def init_weights(self):
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
if weight is self.G:
continue
if weight is self.G_add:
continue
weight.data.uniform_(-stdv, stdv)
#if weight is self.weight_hh or weight is self.weight_ih and len(self.weight_ih.shape) == 3:
# weight.data[1:] = weight.data[0]
def forward(self, input: torch.Tensor, state: GraphGRUState, t: int = 0) -> Tuple[torch.Tensor, GraphGRUState]:
hx, gx = state
if hx is None:
hx = torch.zeros(input.shape[0], self.num_nodes, self.hidden_size, dtype=input.dtype, device=input.device)
if gx is None and self.learn_influence:
gx = torch.nn.functional.normalize(self.G, p=1., dim=1)
#gx = torch.softmax(self.G, dim=1)
elif gx is None:
gx = self.G
hx = self.r_dropout(hx)
weight_ih = self.weight_ih[self.node_type_index]
weight_hh = self.weight_hh[self.node_type_index]
if self.bias_hh is not None:
bias_hh = self.bias_hh[self.node_type_index]
else:
bias_hh = 0.
if self.bias_ih is not None:
bias_ih = self.bias_ih[self.node_type_index]
else:
bias_ih = 0.
c_mask = (torch.remainder(torch.tensor(t + 1., device=input.device), self.phase) < 0.01).type_as(hx)
x_results = self.dropout(self.mm(input, weight_ih.transpose(-2, -1))) + bias_ih
h_results = self.mm(hx, weight_hh.transpose(-2, -1)) + bias_hh
x_results = torch.matmul(gx, x_results)
h_results = torch.matmul(gx, h_results)
i_r, i_z, i_n = x_results.chunk(3, 2)
h_r, h_z, h_n = h_results.chunk(3, 2)
r = torch.sigmoid(i_r + h_r)
z = torch.sigmoid(i_z + h_z)
n = torch.tanh(i_n + r * h_n)
hy = n - torch.mul(n, z) + torch.mul(z, hx)
hy = c_mask * hy + (1 - c_mask) * hx
gx = gx + self.G_add
if self.learn_influence or self.learn_additive_graph_influence:
gx = torch.nn.functional.normalize(gx, p=1., dim=1)
#gx = torch.softmax(gx, dim=1)
return hy, (hy, gx)
class StaticGraphGRU_(Module):
def __init__(self, input_size: int, hidden_size: int, num_layers: int = 1, layer_dropout: float = 0.0, **kwargs):
super().__init__()
self.layers = ModuleList([StaticGraphGRUCell_(input_size, hidden_size, **kwargs)]
+ [StaticGraphGRUCell_(hidden_size, hidden_size, **kwargs) for _ in range(num_layers - 1)])
self.dropout = Dropout(layer_dropout)
def forward(self, input: torch.Tensor, states: Optional[List[GraphGRUState]] = None, t_i: int = 0) -> Tuple[torch.Tensor, List[GraphGRUState]]:
if states is None:
n: Optional[torch.Tensor] = None
states = [(n, n)] * len(self.layers)
output_states: List[GraphGRUState] = []
output = input
i = 0
for rnn_layer in self.layers:
state = states[i]
inputs = output.unbind(1)
outputs: List[torch.Tensor] = []
for t, input in enumerate(inputs):
out, state = rnn_layer(input, state, t_i+t)
outputs += [out]
output = torch.stack(outputs, dim=1)
output = self.dropout(output)
output_states += [state]
i += 1
return output, output_states
def StaticGraphGRU(*args, **kwargs):
return torch.jit.script(StaticGraphGRU_(*args, **kwargs))
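# A minimal smoke test of StaticGraphLinear (not from the original project;
# the shapes and hyperparameters below are illustrative assumptions):
if __name__ == "__main__":
    layer = StaticGraphLinear(3, 8, num_nodes=4, learn_influence=True)
    x = torch.rand(2, 4, 3)  # (batch, nodes, in_features)
    # The influence matrix G mixes node outputs, so the result keeps the
    # node dimension: (2, 4, 8).
    print(layer(x).shape)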
rf_numpy.py
import numpy as np
from .. import pad
from .base import Refocus
class RefocusNumpy(Refocus):
"""Refocusing with numpy-based Fourier transform
.. versionadded:: 0.3.0
"""
def _init_fft(self, field, padding):
        """Perform initial Fourier transform of the input field

        Parameters
        ----------
        field: 2d complex-valued ndarray
            Input field to be refocused
        padding: bool
            Whether or not to perform zero-padding

        Returns
        -------
        fft_field0: 2d complex-valued ndarray
            Fourier transform of the initial field
        """
        if padding:
            field = pad.pad_add(field)
        return np.fft.fft2(field)
def propagate(self, distance):
fft_kernel = self.get_kernel(distance=distance)
refoc = np.fft.ifft2(self.fft_origin * fft_kernel)
if self.padding:
refoc = pad.pad_rem(refoc)
        return refoc
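# Hypothetical usage sketch (the Refocus base-class constructor signature is
# assumed from context, not shown in this file):
#
#   rf = RefocusNumpy(field=field, wavelength=550e-9, pixel_size=0.1e-6)
#   refocused = rf.propagate(distance=5e-6)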
util.py
from Crypto.Cipher import Blowfish
def encode_dataset_user(trans, dataset, user):
# encode dataset id as usual
# encode user id using the dataset create time as the key
dataset_hash = trans.security.encode_id(dataset.id)
if user is None:
user_hash = 'None'
else:
user_hash = str(user.id)
# Pad to a multiple of 8 with leading "!"
user_hash = ("!" * (8 - len(user_hash) % 8)) + user_hash
cipher = Blowfish.new(str(dataset.create_time))
user_hash = cipher.encrypt(user_hash).encode('hex') |
def decode_dataset_user(trans, dataset_hash, user_hash):
# decode dataset id as usual
# decode user id using the dataset create time as the key
dataset_id = trans.security.decode_id(dataset_hash)
dataset = trans.sa_session.query(trans.app.model.HistoryDatasetAssociation).get(dataset_id)
assert dataset, "Bad Dataset id provided to decode_dataset_user"
if user_hash in [None, 'None']:
user = None
else:
cipher = Blowfish.new(str(dataset.create_time))
user_id = cipher.decrypt(user_hash.decode('hex')).lstrip("!")
user = trans.sa_session.query(trans.app.model.User).get(int(user_id))
        assert user, "A bad user id was passed to decode_dataset_user"
return dataset, user | return dataset_hash, user_hash |
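# Hedged round-trip sketch of the padding + Blowfish scheme above, written
# against the modern pycryptodome API (the original targets Python 2 PyCrypto,
# where Blowfish.new(key) defaulted to ECB mode and str was a byte string):
#
#     from Crypto.Cipher import Blowfish
#     key = b"2009-01-01 00:00:00"       # stand-in for str(dataset.create_time)
#     padded = b"!" * (8 - len(b"42") % 8) + b"42"    # pad user id to 8 bytes
#     token = Blowfish.new(key, Blowfish.MODE_ECB).encrypt(padded).hex()
#     plain = Blowfish.new(key, Blowfish.MODE_ECB).decrypt(bytes.fromhex(token))
#     assert plain.lstrip(b"!") == b"42"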
config_reader.go | package datasources
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/maksimmernikov/grafana/pkg/log"
"gopkg.in/yaml.v2"
)
type configReader struct {
log log.Logger
}
func (cr *configReader) readConfig(path string) ([]*DatasourcesAsConfig, error) {
var datasources []*DatasourcesAsConfig
files, err := ioutil.ReadDir(path)
if err != nil {
cr.log.Error("can't read datasource provisioning files from directory", "path", path, "error", err)
return datasources, nil
}
for _, file := range files {
if strings.HasSuffix(file.Name(), ".yaml") || strings.HasSuffix(file.Name(), ".yml") {
datasource, err := cr.parseDatasourceConfig(path, file)
if err != nil {
return nil, err
}
if datasource != nil {
datasources = append(datasources, datasource)
}
}
}
err = validateDefaultUniqueness(datasources)
if err != nil {
return nil, err
}
return datasources, nil
}
func (cr *configReader) parseDatasourceConfig(path string, file os.FileInfo) (*DatasourcesAsConfig, error) {
filename, _ := filepath.Abs(filepath.Join(path, file.Name()))
yamlFile, err := ioutil.ReadFile(filename)
if err != nil { | err = yaml.Unmarshal(yamlFile, &apiVersion)
if err != nil {
return nil, err
}
if apiVersion == nil {
apiVersion = &ConfigVersion{ApiVersion: 0}
}
if apiVersion.ApiVersion > 0 {
v1 := &DatasourcesAsConfigV1{log: cr.log}
err = yaml.Unmarshal(yamlFile, v1)
if err != nil {
return nil, err
}
return v1.mapToDatasourceFromConfig(apiVersion.ApiVersion), nil
}
var v0 *DatasourcesAsConfigV0
err = yaml.Unmarshal(yamlFile, &v0)
if err != nil {
return nil, err
}
cr.log.Warn("[Deprecated] the datasource provisioning config is outdated. please upgrade", "filename", filename)
return v0.mapToDatasourceFromConfig(apiVersion.ApiVersion), nil
}
func validateDefaultUniqueness(datasources []*DatasourcesAsConfig) error {
defaultCount := map[int64]int{}
for i := range datasources {
if datasources[i].Datasources == nil {
continue
}
for _, ds := range datasources[i].Datasources {
if ds.OrgId == 0 {
ds.OrgId = 1
}
if ds.IsDefault {
defaultCount[ds.OrgId] = defaultCount[ds.OrgId] + 1
if defaultCount[ds.OrgId] > 1 {
return ErrInvalidConfigToManyDefault
}
}
}
for _, ds := range datasources[i].DeleteDatasources {
if ds.OrgId == 0 {
ds.OrgId = 1
}
}
}
return nil
} | return nil, err
}
var apiVersion *ConfigVersion |
ThriftHadoopFileSystem.py | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def setInactivityTimeoutPeriod(self, periodInSeconds):
"""
Parameters:
- periodInSeconds
"""
pass
def shutdown(self, status):
"""
Parameters:
- status
"""
pass
def create(self, path):
"""
Parameters:
- path
"""
pass
def createFile(self, path, mode, overwrite, bufferSize, block_replication, blocksize):
"""
Parameters:
- path
- mode
- overwrite
- bufferSize
- block_replication
- blocksize
"""
pass
def open(self, path):
"""
Parameters:
- path
"""
pass
def append(self, path):
"""
Parameters:
- path
"""
pass
def write(self, handle, data):
"""
Parameters:
- handle
- data
"""
pass
def read(self, handle, offset, size):
"""
Parameters:
- handle
- offset
- size
"""
pass
def close(self, out):
"""
Parameters:
- out
"""
pass
def rm(self, path, recursive):
"""
Parameters:
- path
- recursive
"""
pass
def rename(self, path, dest):
"""
Parameters:
- path
- dest
"""
pass
def mkdirs(self, path):
"""
Parameters:
- path
"""
pass
def exists(self, path):
"""
Parameters:
- path
"""
pass
def stat(self, path):
"""
Parameters:
- path
"""
pass
def listStatus(self, path):
"""
Parameters:
- path
"""
pass
def chmod(self, path, mode):
"""
Parameters:
- path
- mode
"""
pass
def chown(self, path, owner, group):
"""
Parameters:
- path
- owner
- group
"""
pass
def setReplication(self, path, replication):
"""
Parameters:
- path
- replication
"""
pass
def getFileBlockLocations(self, path, start, length):
"""
Parameters:
- path
- start
- length
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot != None:
self._oprot = oprot
self._seqid = 0
def setInactivityTimeoutPeriod(self, periodInSeconds):
"""
Parameters:
- periodInSeconds
"""
self.send_setInactivityTimeoutPeriod(periodInSeconds)
self.recv_setInactivityTimeoutPeriod()
def send_setInactivityTimeoutPeriod(self, periodInSeconds):
self._oprot.writeMessageBegin('setInactivityTimeoutPeriod', TMessageType.CALL, self._seqid)
args = setInactivityTimeoutPeriod_args()
args.periodInSeconds = periodInSeconds
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setInactivityTimeoutPeriod(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = setInactivityTimeoutPeriod_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def shutdown(self, status):
"""
Parameters:
- status
"""
self.send_shutdown(status)
self.recv_shutdown()
def send_shutdown(self, status):
self._oprot.writeMessageBegin('shutdown', TMessageType.CALL, self._seqid)
args = shutdown_args()
args.status = status
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_shutdown(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = shutdown_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def create(self, path):
"""
Parameters:
- path
"""
self.send_create(path)
return self.recv_create()
def send_create(self, path):
self._oprot.writeMessageBegin('create', TMessageType.CALL, self._seqid)
args = create_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_create(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = create_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "create failed: unknown result");
def createFile(self, path, mode, overwrite, bufferSize, block_replication, blocksize):
"""
Parameters:
- path
- mode
- overwrite
- bufferSize
- block_replication
- blocksize
"""
self.send_createFile(path, mode, overwrite, bufferSize, block_replication, blocksize)
return self.recv_createFile()
def send_createFile(self, path, mode, overwrite, bufferSize, block_replication, blocksize):
self._oprot.writeMessageBegin('createFile', TMessageType.CALL, self._seqid)
args = createFile_args()
args.path = path
args.mode = mode
args.overwrite = overwrite
args.bufferSize = bufferSize
args.block_replication = block_replication
args.blocksize = blocksize
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_createFile(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = createFile_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "createFile failed: unknown result");
def open(self, path):
"""
Parameters:
- path
"""
self.send_open(path)
return self.recv_open()
def | (self, path):
self._oprot.writeMessageBegin('open', TMessageType.CALL, self._seqid)
args = open_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_open(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = open_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "open failed: unknown result");
def append(self, path):
"""
Parameters:
- path
"""
self.send_append(path)
return self.recv_append()
def send_append(self, path):
self._oprot.writeMessageBegin('append', TMessageType.CALL, self._seqid)
args = append_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_append(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = append_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "append failed: unknown result");
def write(self, handle, data):
"""
Parameters:
- handle
- data
"""
self.send_write(handle, data)
return self.recv_write()
def send_write(self, handle, data):
self._oprot.writeMessageBegin('write', TMessageType.CALL, self._seqid)
args = write_args()
args.handle = handle
args.data = data
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_write(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = write_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "write failed: unknown result");
def read(self, handle, offset, size):
"""
Parameters:
- handle
- offset
- size
"""
self.send_read(handle, offset, size)
return self.recv_read()
def send_read(self, handle, offset, size):
self._oprot.writeMessageBegin('read', TMessageType.CALL, self._seqid)
args = read_args()
args.handle = handle
args.offset = offset
args.size = size
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_read(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = read_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "read failed: unknown result");
def close(self, out):
"""
Parameters:
- out
"""
self.send_close(out)
return self.recv_close()
def send_close(self, out):
self._oprot.writeMessageBegin('close', TMessageType.CALL, self._seqid)
args = close_args()
args.out = out
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_close(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = close_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "close failed: unknown result");
def rm(self, path, recursive):
"""
Parameters:
- path
- recursive
"""
self.send_rm(path, recursive)
return self.recv_rm()
def send_rm(self, path, recursive):
self._oprot.writeMessageBegin('rm', TMessageType.CALL, self._seqid)
args = rm_args()
args.path = path
args.recursive = recursive
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_rm(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = rm_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "rm failed: unknown result");
def rename(self, path, dest):
"""
Parameters:
- path
- dest
"""
self.send_rename(path, dest)
return self.recv_rename()
def send_rename(self, path, dest):
self._oprot.writeMessageBegin('rename', TMessageType.CALL, self._seqid)
args = rename_args()
args.path = path
args.dest = dest
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_rename(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = rename_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "rename failed: unknown result");
def mkdirs(self, path):
"""
Parameters:
- path
"""
self.send_mkdirs(path)
return self.recv_mkdirs()
def send_mkdirs(self, path):
self._oprot.writeMessageBegin('mkdirs', TMessageType.CALL, self._seqid)
args = mkdirs_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_mkdirs(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = mkdirs_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "mkdirs failed: unknown result");
def exists(self, path):
"""
Parameters:
- path
"""
self.send_exists(path)
return self.recv_exists()
def send_exists(self, path):
self._oprot.writeMessageBegin('exists', TMessageType.CALL, self._seqid)
args = exists_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_exists(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = exists_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "exists failed: unknown result");
def stat(self, path):
"""
Parameters:
- path
"""
self.send_stat(path)
return self.recv_stat()
def send_stat(self, path):
self._oprot.writeMessageBegin('stat', TMessageType.CALL, self._seqid)
args = stat_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_stat(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = stat_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "stat failed: unknown result");
def listStatus(self, path):
"""
Parameters:
- path
"""
self.send_listStatus(path)
return self.recv_listStatus()
def send_listStatus(self, path):
self._oprot.writeMessageBegin('listStatus', TMessageType.CALL, self._seqid)
args = listStatus_args()
args.path = path
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_listStatus(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = listStatus_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "listStatus failed: unknown result");
def chmod(self, path, mode):
"""
Parameters:
- path
- mode
"""
self.send_chmod(path, mode)
self.recv_chmod()
def send_chmod(self, path, mode):
self._oprot.writeMessageBegin('chmod', TMessageType.CALL, self._seqid)
args = chmod_args()
args.path = path
args.mode = mode
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_chmod(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = chmod_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ouch != None:
raise result.ouch
return
def chown(self, path, owner, group):
"""
Parameters:
- path
- owner
- group
"""
self.send_chown(path, owner, group)
self.recv_chown()
def send_chown(self, path, owner, group):
self._oprot.writeMessageBegin('chown', TMessageType.CALL, self._seqid)
args = chown_args()
args.path = path
args.owner = owner
args.group = group
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_chown(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = chown_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ouch != None:
raise result.ouch
return
def setReplication(self, path, replication):
"""
Parameters:
- path
- replication
"""
self.send_setReplication(path, replication)
self.recv_setReplication()
def send_setReplication(self, path, replication):
self._oprot.writeMessageBegin('setReplication', TMessageType.CALL, self._seqid)
args = setReplication_args()
args.path = path
args.replication = replication
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_setReplication(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = setReplication_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.ouch != None:
raise result.ouch
return
def getFileBlockLocations(self, path, start, length):
"""
Parameters:
- path
- start
- length
"""
self.send_getFileBlockLocations(path, start, length)
return self.recv_getFileBlockLocations()
def send_getFileBlockLocations(self, path, start, length):
self._oprot.writeMessageBegin('getFileBlockLocations', TMessageType.CALL, self._seqid)
args = getFileBlockLocations_args()
args.path = path
args.start = start
args.length = length
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getFileBlockLocations(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = getFileBlockLocations_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
if result.ouch != None:
raise result.ouch
raise TApplicationException(TApplicationException.MISSING_RESULT, "getFileBlockLocations failed: unknown result");
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["setInactivityTimeoutPeriod"] = Processor.process_setInactivityTimeoutPeriod
self._processMap["shutdown"] = Processor.process_shutdown
self._processMap["create"] = Processor.process_create
self._processMap["createFile"] = Processor.process_createFile
self._processMap["open"] = Processor.process_open
self._processMap["append"] = Processor.process_append
self._processMap["write"] = Processor.process_write
self._processMap["read"] = Processor.process_read
self._processMap["close"] = Processor.process_close
self._processMap["rm"] = Processor.process_rm
self._processMap["rename"] = Processor.process_rename
self._processMap["mkdirs"] = Processor.process_mkdirs
self._processMap["exists"] = Processor.process_exists
self._processMap["stat"] = Processor.process_stat
self._processMap["listStatus"] = Processor.process_listStatus
self._processMap["chmod"] = Processor.process_chmod
self._processMap["chown"] = Processor.process_chown
self._processMap["setReplication"] = Processor.process_setReplication
self._processMap["getFileBlockLocations"] = Processor.process_getFileBlockLocations
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_setInactivityTimeoutPeriod(self, seqid, iprot, oprot):
args = setInactivityTimeoutPeriod_args()
args.read(iprot)
iprot.readMessageEnd()
result = setInactivityTimeoutPeriod_result()
self._handler.setInactivityTimeoutPeriod(args.periodInSeconds)
oprot.writeMessageBegin("setInactivityTimeoutPeriod", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_shutdown(self, seqid, iprot, oprot):
args = shutdown_args()
args.read(iprot)
iprot.readMessageEnd()
result = shutdown_result()
self._handler.shutdown(args.status)
oprot.writeMessageBegin("shutdown", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_create(self, seqid, iprot, oprot):
args = create_args()
args.read(iprot)
iprot.readMessageEnd()
result = create_result()
try:
result.success = self._handler.create(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("create", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_createFile(self, seqid, iprot, oprot):
args = createFile_args()
args.read(iprot)
iprot.readMessageEnd()
result = createFile_result()
try:
result.success = self._handler.createFile(args.path, args.mode, args.overwrite, args.bufferSize, args.block_replication, args.blocksize)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("createFile", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_open(self, seqid, iprot, oprot):
args = open_args()
args.read(iprot)
iprot.readMessageEnd()
result = open_result()
try:
result.success = self._handler.open(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("open", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_append(self, seqid, iprot, oprot):
args = append_args()
args.read(iprot)
iprot.readMessageEnd()
result = append_result()
try:
result.success = self._handler.append(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("append", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_write(self, seqid, iprot, oprot):
args = write_args()
args.read(iprot)
iprot.readMessageEnd()
result = write_result()
try:
result.success = self._handler.write(args.handle, args.data)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("write", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_read(self, seqid, iprot, oprot):
args = read_args()
args.read(iprot)
iprot.readMessageEnd()
result = read_result()
try:
result.success = self._handler.read(args.handle, args.offset, args.size)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("read", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_close(self, seqid, iprot, oprot):
args = close_args()
args.read(iprot)
iprot.readMessageEnd()
result = close_result()
try:
result.success = self._handler.close(args.out)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("close", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_rm(self, seqid, iprot, oprot):
args = rm_args()
args.read(iprot)
iprot.readMessageEnd()
result = rm_result()
try:
result.success = self._handler.rm(args.path, args.recursive)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("rm", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_rename(self, seqid, iprot, oprot):
args = rename_args()
args.read(iprot)
iprot.readMessageEnd()
result = rename_result()
try:
result.success = self._handler.rename(args.path, args.dest)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("rename", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_mkdirs(self, seqid, iprot, oprot):
args = mkdirs_args()
args.read(iprot)
iprot.readMessageEnd()
result = mkdirs_result()
try:
result.success = self._handler.mkdirs(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("mkdirs", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_exists(self, seqid, iprot, oprot):
args = exists_args()
args.read(iprot)
iprot.readMessageEnd()
result = exists_result()
try:
result.success = self._handler.exists(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("exists", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_stat(self, seqid, iprot, oprot):
args = stat_args()
args.read(iprot)
iprot.readMessageEnd()
result = stat_result()
try:
result.success = self._handler.stat(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("stat", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_listStatus(self, seqid, iprot, oprot):
args = listStatus_args()
args.read(iprot)
iprot.readMessageEnd()
result = listStatus_result()
try:
result.success = self._handler.listStatus(args.path)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("listStatus", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_chmod(self, seqid, iprot, oprot):
args = chmod_args()
args.read(iprot)
iprot.readMessageEnd()
result = chmod_result()
try:
self._handler.chmod(args.path, args.mode)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("chmod", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_chown(self, seqid, iprot, oprot):
args = chown_args()
args.read(iprot)
iprot.readMessageEnd()
result = chown_result()
try:
self._handler.chown(args.path, args.owner, args.group)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("chown", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_setReplication(self, seqid, iprot, oprot):
args = setReplication_args()
args.read(iprot)
iprot.readMessageEnd()
result = setReplication_result()
try:
self._handler.setReplication(args.path, args.replication)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("setReplication", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getFileBlockLocations(self, seqid, iprot, oprot):
args = getFileBlockLocations_args()
args.read(iprot)
iprot.readMessageEnd()
result = getFileBlockLocations_result()
try:
result.success = self._handler.getFileBlockLocations(args.path, args.start, args.length)
except ThriftIOException, ouch:
result.ouch = ouch
oprot.writeMessageBegin("getFileBlockLocations", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
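# Hedged server-side counterpart (handler is any object implementing Iface;
# the port and transport choices are assumptions):
#
#     from thrift.transport import TSocket
#     from thrift.server import TServer
#     server = TServer.TSimpleServer(Processor(handler), TSocket.TServerSocket(port=9090))
#     server.serve()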
# HELPER FUNCTIONS AND STRUCTURES
class setInactivityTimeoutPeriod_args:
"""
Attributes:
- periodInSeconds
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'periodInSeconds', None, None, ), # 1
)
def __init__(self, periodInSeconds=None,):
self.periodInSeconds = periodInSeconds
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.periodInSeconds = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setInactivityTimeoutPeriod_args')
if self.periodInSeconds != None:
oprot.writeFieldBegin('periodInSeconds', TType.I64, 1)
oprot.writeI64(self.periodInSeconds)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setInactivityTimeoutPeriod_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setInactivityTimeoutPeriod_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class shutdown_args:
"""
Attributes:
- status
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'status', None, None, ), # 1
)
def __init__(self, status=None,):
self.status = status
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.status = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('shutdown_args')
if self.status != None:
oprot.writeFieldBegin('status', TType.I32, 1)
oprot.writeI32(self.status)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class shutdown_result:
thrift_spec = (
)
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('shutdown_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class create_args:
"""
Attributes:
- path
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
)
def __init__(self, path=None,):
self.path = path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('create_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class create_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (ThriftHandle, ThriftHandle.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = ThriftHandle()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('create_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class createFile_args:
"""
Attributes:
- path
- mode
- overwrite
- bufferSize
- block_replication
- blocksize
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
(2, TType.I16, 'mode', None, None, ), # 2
(3, TType.BOOL, 'overwrite', None, None, ), # 3
(4, TType.I32, 'bufferSize', None, None, ), # 4
(5, TType.I16, 'block_replication', None, None, ), # 5
(6, TType.I64, 'blocksize', None, None, ), # 6
)
def __init__(self, path=None, mode=None, overwrite=None, bufferSize=None, block_replication=None, blocksize=None,):
self.path = path
self.mode = mode
self.overwrite = overwrite
self.bufferSize = bufferSize
self.block_replication = block_replication
self.blocksize = blocksize
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I16:
self.mode = iprot.readI16();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.overwrite = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.bufferSize = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I16:
self.block_replication = iprot.readI16();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I64:
self.blocksize = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('createFile_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
if self.mode != None:
oprot.writeFieldBegin('mode', TType.I16, 2)
oprot.writeI16(self.mode)
oprot.writeFieldEnd()
if self.overwrite != None:
oprot.writeFieldBegin('overwrite', TType.BOOL, 3)
oprot.writeBool(self.overwrite)
oprot.writeFieldEnd()
if self.bufferSize != None:
oprot.writeFieldBegin('bufferSize', TType.I32, 4)
oprot.writeI32(self.bufferSize)
oprot.writeFieldEnd()
if self.block_replication != None:
oprot.writeFieldBegin('block_replication', TType.I16, 5)
oprot.writeI16(self.block_replication)
oprot.writeFieldEnd()
if self.blocksize != None:
oprot.writeFieldBegin('blocksize', TType.I64, 6)
oprot.writeI64(self.blocksize)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class createFile_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (ThriftHandle, ThriftHandle.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = ThriftHandle()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('createFile_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class open_args:
"""
Attributes:
- path
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
)
def __init__(self, path=None,):
self.path = path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('open_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class open_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (ThriftHandle, ThriftHandle.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = ThriftHandle()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('open_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class append_args:
"""
Attributes:
- path
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
)
def __init__(self, path=None,):
self.path = path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('append_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class append_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (ThriftHandle, ThriftHandle.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = ThriftHandle()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('append_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class write_args:
"""
Attributes:
- handle
- data
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'handle', (ThriftHandle, ThriftHandle.thrift_spec), None, ), # 1
(2, TType.STRING, 'data', None, None, ), # 2
)
def __init__(self, handle=None, data=None,):
self.handle = handle
self.data = data
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.handle = ThriftHandle()
self.handle.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.data = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('write_args')
if self.handle != None:
oprot.writeFieldBegin('handle', TType.STRUCT, 1)
self.handle.write(oprot)
oprot.writeFieldEnd()
if self.data != None:
oprot.writeFieldBegin('data', TType.STRING, 2)
oprot.writeString(self.data)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class write_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('write_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class read_args:
"""
Attributes:
- handle
- offset
- size
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'handle', (ThriftHandle, ThriftHandle.thrift_spec), None, ), # 1
(2, TType.I64, 'offset', None, None, ), # 2
(3, TType.I32, 'size', None, None, ), # 3
)
def __init__(self, handle=None, offset=None, size=None,):
self.handle = handle
self.offset = offset
self.size = size
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.handle = ThriftHandle()
self.handle.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.offset = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.size = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('read_args')
if self.handle != None:
oprot.writeFieldBegin('handle', TType.STRUCT, 1)
self.handle.write(oprot)
oprot.writeFieldEnd()
if self.offset != None:
oprot.writeFieldBegin('offset', TType.I64, 2)
oprot.writeI64(self.offset)
oprot.writeFieldEnd()
if self.size != None:
oprot.writeFieldBegin('size', TType.I32, 3)
oprot.writeI32(self.size)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class read_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('read_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class close_args:
"""
Attributes:
- out
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'out', (ThriftHandle, ThriftHandle.thrift_spec), None, ), # 1
)
def __init__(self, out=None,):
self.out = out
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.out = ThriftHandle()
self.out.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('close_args')
if self.out != None:
oprot.writeFieldBegin('out', TType.STRUCT, 1)
self.out.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class close_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('close_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class rm_args:
"""
Attributes:
- path
- recursive
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
(2, TType.BOOL, 'recursive', None, None, ), # 2
)
def __init__(self, path=None, recursive=None,):
self.path = path
self.recursive = recursive
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.recursive = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('rm_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
if self.recursive != None:
oprot.writeFieldBegin('recursive', TType.BOOL, 2)
oprot.writeBool(self.recursive)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class rm_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('rm_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class rename_args:
"""
Attributes:
- path
- dest
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'dest', (Pathname, Pathname.thrift_spec), None, ), # 2
)
def __init__(self, path=None, dest=None,):
self.path = path
self.dest = dest
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.dest = Pathname()
self.dest.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('rename_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
if self.dest != None:
oprot.writeFieldBegin('dest', TType.STRUCT, 2)
self.dest.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class rename_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('rename_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mkdirs_args:
"""
Attributes:
- path
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
)
def __init__(self, path=None,):
self.path = path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mkdirs_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class mkdirs_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('mkdirs_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class exists_args:
"""
Attributes:
- path
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
)
def __init__(self, path=None,):
self.path = path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('exists_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class exists_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('exists_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class stat_args:
"""
Attributes:
- path
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
)
def __init__(self, path=None,):
self.path = path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('stat_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class stat_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (FileStatus, FileStatus.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = FileStatus()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('stat_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class listStatus_args:
"""
Attributes:
- path
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
)
def __init__(self, path=None,):
self.path = path
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('listStatus_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class listStatus_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(FileStatus, FileStatus.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype17, _size14) = iprot.readListBegin()
for _i18 in xrange(_size14):
_elem19 = FileStatus()
_elem19.read(iprot)
self.success.append(_elem19)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('listStatus_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter20 in self.success:
iter20.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class chmod_args:
"""
Attributes:
- path
- mode
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
(2, TType.I16, 'mode', None, None, ), # 2
)
def __init__(self, path=None, mode=None,):
self.path = path
self.mode = mode
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I16:
self.mode = iprot.readI16();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('chmod_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
if self.mode != None:
oprot.writeFieldBegin('mode', TType.I16, 2)
oprot.writeI16(self.mode)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class chmod_result:
"""
Attributes:
- ouch
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, ouch=None,):
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('chmod_result')
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class chown_args:
"""
Attributes:
- path
- owner
- group
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
(2, TType.STRING, 'owner', None, None, ), # 2
(3, TType.STRING, 'group', None, None, ), # 3
)
def __init__(self, path=None, owner=None, group=None,):
self.path = path
self.owner = owner
self.group = group
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.owner = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.group = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('chown_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
if self.owner != None:
oprot.writeFieldBegin('owner', TType.STRING, 2)
oprot.writeString(self.owner)
oprot.writeFieldEnd()
if self.group != None:
oprot.writeFieldBegin('group', TType.STRING, 3)
oprot.writeString(self.group)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class chown_result:
"""
Attributes:
- ouch
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, ouch=None,):
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('chown_result')
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setReplication_args:
"""
Attributes:
- path
- replication
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
(2, TType.I16, 'replication', None, None, ), # 2
)
def __init__(self, path=None, replication=None,):
self.path = path
self.replication = replication
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I16:
self.replication = iprot.readI16();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setReplication_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
if self.replication != None:
oprot.writeFieldBegin('replication', TType.I16, 2)
oprot.writeI16(self.replication)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class setReplication_result:
"""
Attributes:
- ouch
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, ouch=None,):
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('setReplication_result')
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getFileBlockLocations_args:
"""
Attributes:
- path
- start
- length
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'path', (Pathname, Pathname.thrift_spec), None, ), # 1
(2, TType.I64, 'start', None, None, ), # 2
(3, TType.I64, 'length', None, None, ), # 3
)
def __init__(self, path=None, start=None, length=None,):
self.path = path
self.start = start
self.length = length
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.path = Pathname()
self.path.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.start = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.length = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getFileBlockLocations_args')
if self.path != None:
oprot.writeFieldBegin('path', TType.STRUCT, 1)
self.path.write(oprot)
oprot.writeFieldEnd()
if self.start != None:
oprot.writeFieldBegin('start', TType.I64, 2)
oprot.writeI64(self.start)
oprot.writeFieldEnd()
if self.length != None:
oprot.writeFieldBegin('length', TType.I64, 3)
oprot.writeI64(self.length)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class getFileBlockLocations_result:
"""
Attributes:
- success
- ouch
"""
thrift_spec = (
(0, TType.LIST, 'success', (TType.STRUCT,(BlockLocation, BlockLocation.thrift_spec)), None, ), # 0
(1, TType.STRUCT, 'ouch', (ThriftIOException, ThriftIOException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, ouch=None,):
self.success = success
self.ouch = ouch
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.LIST:
self.success = []
(_etype24, _size21) = iprot.readListBegin()
for _i25 in xrange(_size21):
_elem26 = BlockLocation()
_elem26.read(iprot)
self.success.append(_elem26)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.ouch = ThriftIOException()
self.ouch.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('getFileBlockLocations_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.LIST, 0)
oprot.writeListBegin(TType.STRUCT, len(self.success))
for iter27 in self.success:
iter27.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ouch != None:
oprot.writeFieldBegin('ouch', TType.STRUCT, 1)
self.ouch.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| send_open |
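# --- Not part of the generated file above: a minimal usage sketch. ---
# Assuming the Apache Thrift Python runtime is installed and that Pathname
# is the generated struct with a single `pathname` string field, this shows
# how one of the args structs above round-trips through TBinaryProtocol
# over an in-memory transport.
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol

# Serialize an args struct into a memory buffer.
write_buf = TTransport.TMemoryBuffer()
mkdirs_args(path=Pathname(pathname='/tmp/example')).write(
    TBinaryProtocol.TBinaryProtocol(write_buf))

# Deserialize it back from the captured bytes and check the field survived.
read_buf = TTransport.TMemoryBuffer(write_buf.getvalue())
decoded = mkdirs_args()
decoded.read(TBinaryProtocol.TBinaryProtocol(read_buf))
assert decoded.path.pathname == '/tmp/example'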
paliandromLinkedListStack.py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def isPalindrome(self, head: ListNode) -> bool:
        # Push every value onto a stack (a Python list), then walk the
        # list again, comparing each node against values popped in
        # reverse order.
        num = []
        temp = head
        isPalin = True
# if head is not None and head.next is None:
# return True
while temp is not None:
num.append(temp.val)
temp = temp.next
while head is not None:
stackVal = num.pop()
| isPalin = False
break
head = head.next
return isPalin | if head.val == stackVal:
isPalin = True
else: |
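# --- A quick usage sketch for the solution above (not part of the original
# file): defines a minimal ListNode matching the commented-out stub, builds
# the list 1 -> 2 -> 2 -> 1, and checks it.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
print(Solution().isPalindrome(head))  # expected: True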
clientSecretCredential.spec.ts | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
/* eslint-disable @typescript-eslint/no-non-null-asserted-optional-chain */
import { ClientSecretCredential, TokenCachePersistenceOptions } from "../../../../identity/src";
import { MsalTestCleanup, msalNodeTestSetup } from "../../../../identity/test/msalTestUtils";
import { ConfidentialClientApplication } from "@azure/msal-node";
import { MsalNode } from "../../../../identity/src/msal/nodeFlows/msalNodeCommon";
import Sinon from "sinon";
import assert from "assert";
import { createPersistence } from "./setup.spec";
import { env } from "@azure-tools/test-recorder";
const scope = "https://graph.microsoft.com/.default";
describe("ClientSecretCredential (internal)", function (this: Mocha.Suite) {
let cleanup: MsalTestCleanup;
let getTokenSilentSpy: Sinon.SinonSpy;
let doGetTokenSpy: Sinon.SinonSpy;
beforeEach(function (this: Mocha.Context) {
const setup = msalNodeTestSetup(this);
cleanup = setup.cleanup;
getTokenSilentSpy = setup.sandbox.spy(MsalNode.prototype, "getTokenSilent");
// MsalClientSecret calls to this method underneath.
doGetTokenSpy = setup.sandbox.spy(
ConfidentialClientApplication.prototype,
"acquireTokenByClientCredential"
);
});
afterEach(async function () {
await cleanup();
});
it("Accepts tokenCachePersistenceOptions", async function (this: Mocha.Context) {
// OSX asks for passwords on CI, so we need to skip these tests from our automation
if (process.platform === "darwin") {
this.skip();
}
const tokenCachePersistenceOptions: TokenCachePersistenceOptions = {
enabled: true,
name: this.test?.title.replace(/[^a-zA-Z]/g, "_"),
unsafeAllowUnencryptedStorage: true,
};
// Emptying the token cache before we start.
const persistence = await createPersistence(tokenCachePersistenceOptions);
persistence?.save("{}");
const credential = new ClientSecretCredential(
env.AZURE_TENANT_ID,
env.AZURE_CLIENT_ID,
env.AZURE_CLIENT_SECRET,
{ tokenCachePersistenceOptions }
);
await credential.getToken(scope);
const result = await persistence?.load();
const parsedResult = JSON.parse(result!); | });
it("Authenticates silently with tokenCachePersistenceOptions", async function (this: Mocha.Context) {
// OSX asks for passwords on CI, so we need to skip these tests from our automation
if (process.platform === "darwin") {
this.skip();
}
const tokenCachePersistenceOptions: TokenCachePersistenceOptions = {
enabled: true,
name: this.test?.title.replace(/[^a-zA-Z]/g, "_"),
unsafeAllowUnencryptedStorage: true,
};
// Emptying the token cache before we start.
const persistence = await createPersistence(tokenCachePersistenceOptions);
persistence?.save("{}");
const credential = new ClientSecretCredential(
env.AZURE_TENANT_ID,
env.AZURE_CLIENT_ID,
env.AZURE_CLIENT_SECRET,
{ tokenCachePersistenceOptions }
);
await credential.getToken(scope);
assert.equal(getTokenSilentSpy.callCount, 1);
assert.equal(doGetTokenSpy.callCount, 1);
await credential.getToken(scope);
assert.equal(getTokenSilentSpy.callCount, 2);
assert.equal(doGetTokenSpy.callCount, 1);
});
}); | assert.ok(parsedResult.AccessToken); |
gatsby-browser.js | /**
* Implement Gatsby's Browser APIs in this file.
*
* See: https://www.gatsbyjs.com/docs/browser-apis/
*/
export { default as wrapRootElement } from "./src/state/ReduxWrapper" | `Reload to display the latest version?`
)
if (answer === true) {
window.location.reload()
}
} |
export const onServiceWorkerUpdateReady = () => {
const answer = window.confirm(
`This application has been updated. ` + |
app.py | import os
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class | :
watchDir = os.getcwd()
    # watchDir names the directory to be monitored.
def __init__(self):
        self.observer = Observer()  # Create the Observer object.
def run(self):
event_handler = Handler()
self.observer.schedule(event_handler, self.watchDir,
recursive=True)
self.observer.start()
try:
while True:
time.sleep(1)
except:
self.observer.stop()
print("Error")
self.observer.join()
class Handler(FileSystemEventHandler):
    # Inherits from the FileSystemEventHandler class and
    # overrides the handlers below.
    # Runs when a file or directory is moved or renamed.
def on_moved(self, event):
print(event)
    def on_created(self, event):  # Runs when a file or directory is created.
        print(event)
    def on_deleted(self, event):  # Runs when a file or directory is deleted.
        print(event)
    def on_modified(self, event):  # Runs when a file or directory is modified.
        print(event)
if __name__ == "__main__":  # Run only when this file is executed directly.
w = Target()
w.run()
| Target |
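# --- A hypothetical way to exercise the watcher above (not part of app.py):
# run app.py in one terminal, then touch files in the watched directory from
# another process; the handlers should print the corresponding events.
from pathlib import Path

p = Path("watched_example.txt")  # hypothetical file name
p.write_text("hello")            # should trigger on_created / on_modified
p.unlink()                       # should trigger on_deleted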