Columns (string lengths, min-max):
  file_name  3 - 137
  prefix     0 - 918k
  suffix     0 - 962k
  middle     0 - 812k
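Each record below is one fill-in-the-middle sample: a file name, then the prefix, then the suffix, and finally the middle span that was cut out of the original file (per the 0 minimum above, the middle may be empty). Concatenating prefix, middle, and suffix reconstructs the source file. A minimal reassembly sketch in Python, assuming each row is a dict keyed by the four column names; the helper name is hypothetical:

def reassemble(row):
    # The file is split as prefix | middle | suffix; the middle is the
    # span a fill-in-the-middle model is asked to predict. It may be "".
    return row["prefix"] + row["middle"] + row["suffix"]

# Usage: reassemble({"file_name": "x.py", "prefix": "a = ", "middle": "1", "suffix": "\n"})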
grpcClient.go
// Copyright (c) 2021 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package matching import ( "context" "go.uber.org/yarpc" matchingv1 "github.com/uber/cadence/.gen/proto/matching/v1" "github.com/uber/cadence/common/types" "github.com/uber/cadence/common/types/mapper/proto" ) type grpcClient struct { c matchingv1.MatchingAPIYARPCClient } func NewGRPCClient(c matchingv1.MatchingAPIYARPCClient) Client { return grpcClient{c} } func (g grpcClient) AddActivityTask(ctx context.Context, request *types.AddActivityTaskRequest, opts ...yarpc.CallOption) error { _, err := g.c.AddActivityTask(ctx, proto.FromMatchingAddActivityTaskRequest(request), opts...) return proto.ToError(err) } func (g grpcClient) AddDecisionTask(ctx context.Context, request *types.AddDecisionTaskRequest, opts ...yarpc.CallOption) error { _, err := g.c.AddDecisionTask(ctx, proto.FromMatchingAddDecisionTaskRequest(request), opts...) return proto.ToError(err) } func (g grpcClient) CancelOutstandingPoll(ctx context.Context, request *types.CancelOutstandingPollRequest, opts ...yarpc.CallOption) error { _, err := g.c.CancelOutstandingPoll(ctx, proto.FromMatchingCancelOutstandingPollRequest(request), opts...) return proto.ToError(err) } func (g grpcClient) DescribeTaskList(ctx context.Context, request *types.MatchingDescribeTaskListRequest, opts ...yarpc.CallOption) (*types.DescribeTaskListResponse, error) { response, err := g.c.DescribeTaskList(ctx, proto.FromMatchingDescribeTaskListRequest(request), opts...) return proto.ToMatchingDescribeTaskListResponse(response), proto.ToError(err) } func (g grpcClient) ListTaskListPartitions(ctx context.Context, request *types.MatchingListTaskListPartitionsRequest, opts ...yarpc.CallOption) (*types.ListTaskListPartitionsResponse, error) { response, err := g.c.ListTaskListPartitions(ctx, proto.FromMatchingListTaskListPartitionsRequest(request), opts...) return proto.ToMatchingListTaskListPartitionsResponse(response), proto.ToError(err) } func (g grpcClient) GetTaskListsByDomain(ctx context.Context, request *types.GetTaskListsByDomainRequest, opts ...yarpc.CallOption) (*types.GetTaskListsByDomainResponse, error) { response, err := g.c.GetTaskListsByDomain(ctx, proto.FromMatchingGetTaskListsByDomainRequest(request), opts...) 
return proto.ToMatchingGetTaskListsByDomainResponse(response), proto.ToError(err) } func (g grpcClient) PollForActivityTask(ctx context.Context, request *types.MatchingPollForActivityTaskRequest, opts ...yarpc.CallOption) (*types.PollForActivityTaskResponse, error) { response, err := g.c.PollForActivityTask(ctx, proto.FromMatchingPollForActivityTaskRequest(request), opts...) return proto.ToMatchingPollForActivityTaskResponse(response), proto.ToError(err) }
func (g grpcClient) PollForDecisionTask(ctx context.Context, request *types.MatchingPollForDecisionTaskRequest, opts ...yarpc.CallOption) (*types.MatchingPollForDecisionTaskResponse, error) { response, err := g.c.PollForDecisionTask(ctx, proto.FromMatchingPollForDecisionTaskRequest(request), opts...) return proto.ToMatchingPollForDecisionTaskResponse(response), proto.ToError(err) } func (g grpcClient) QueryWorkflow(ctx context.Context, request *types.MatchingQueryWorkflowRequest, opts ...yarpc.CallOption) (*types.QueryWorkflowResponse, error) { response, err := g.c.QueryWorkflow(ctx, proto.FromMatchingQueryWorkflowRequest(request), opts...) return proto.ToMatchingQueryWorkflowResponse(response), proto.ToError(err) } func (g grpcClient) RespondQueryTaskCompleted(ctx context.Context, request *types.MatchingRespondQueryTaskCompletedRequest, opts ...yarpc.CallOption) error { _, err := g.c.RespondQueryTaskCompleted(ctx, proto.FromMatchingRespondQueryTaskCompletedRequest(request), opts...) return proto.ToError(err) }
out_val_p1.rs
#[doc = "Register `OUT_VAL_P1` reader"] pub struct R(crate::R<OUT_VAL_P1_SPEC>); impl core::ops::Deref for R { type Target = crate::R<OUT_VAL_P1_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<OUT_VAL_P1_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<OUT_VAL_P1_SPEC>) -> Self { R(reader) } } #[doc = "Register `OUT_VAL_P1` writer"] pub struct W(crate::W<OUT_VAL_P1_SPEC>); impl core::ops::Deref for W { type Target = crate::W<OUT_VAL_P1_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<OUT_VAL_P1_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<OUT_VAL_P1_SPEC>) -> Self { W(writer) } } #[doc = "Field `pin0` reader - P1.0 GPIO Output Drive Value"] pub struct PIN0_R(crate::FieldReader<bool>); impl PIN0_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PIN0_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PIN0_R { type Target = crate::FieldReader<bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `pin0` writer - P1.0 GPIO Output Drive Value"] pub struct PIN0_W<'a> { w: &'a mut W, } impl<'a> PIN0_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !1) | (value as u32 & 1); self.w } } #[doc = "Field `pin1` reader - P1.1 GPIO Output Drive Value"] pub struct PIN1_R(crate::FieldReader<bool>); impl PIN1_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PIN1_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PIN1_R { type Target = crate::FieldReader<bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `pin1` writer - P1.1 GPIO Output Drive Value"] pub struct PIN1_W<'a> { w: &'a mut W, } impl<'a> PIN1_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(1 << 1)) | ((value as u32 & 1) << 1); self.w } } #[doc = "Field `pin2` reader - P1.2 GPIO Output Drive Value"] pub struct PIN2_R(crate::FieldReader<bool>); impl PIN2_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PIN2_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PIN2_R { type Target = crate::FieldReader<bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `pin2` writer - P1.2 GPIO Output Drive Value"] pub struct PIN2_W<'a> { w: &'a mut W, } impl<'a> PIN2_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(1 << 2)) | ((value as u32 & 1) << 2); self.w } } #[doc = "Field `pin3` reader - P1.3 GPIO Output Drive Value"] pub struct
(crate::FieldReader<bool>); impl PIN3_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PIN3_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PIN3_R { type Target = crate::FieldReader<bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `pin3` writer - P1.3 GPIO Output Drive Value"] pub struct PIN3_W<'a> { w: &'a mut W, } impl<'a> PIN3_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(1 << 3)) | ((value as u32 & 1) << 3); self.w } } #[doc = "Field `pin4` reader - P1.4 GPIO Output Drive Value"] pub struct PIN4_R(crate::FieldReader<bool>); impl PIN4_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PIN4_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PIN4_R { type Target = crate::FieldReader<bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `pin4` writer - P1.4 GPIO Output Drive Value"] pub struct PIN4_W<'a> { w: &'a mut W, } impl<'a> PIN4_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(1 << 4)) | ((value as u32 & 1) << 4); self.w } } #[doc = "Field `pin5` reader - P1.5 GPIO Output Drive Value"] pub struct PIN5_R(crate::FieldReader<bool>); impl PIN5_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PIN5_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PIN5_R { type Target = crate::FieldReader<bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `pin5` writer - P1.5 GPIO Output Drive Value"] pub struct PIN5_W<'a> { w: &'a mut W, } impl<'a> PIN5_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(1 << 5)) | ((value as u32 & 1) << 5); self.w } } #[doc = "Field `pin6` reader - P1.6 GPIO Output Drive Value"] pub struct PIN6_R(crate::FieldReader<bool>); impl PIN6_R { #[inline(always)] pub(crate) fn new(bits: bool) -> Self { PIN6_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PIN6_R { type Target = crate::FieldReader<bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `pin6` writer - P1.6 GPIO Output Drive Value"] pub struct PIN6_W<'a> { w: &'a mut W, } impl<'a> PIN6_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(1 << 6)) | ((value as u32 & 1) << 6); self.w } } #[doc = "Field `pin7` reader - P1.7 GPIO Output Drive Value"] pub struct PIN7_R(crate::FieldReader<bool>); impl PIN7_R { 
#[inline(always)] pub(crate) fn new(bits: bool) -> Self { PIN7_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PIN7_R { type Target = crate::FieldReader<bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `pin7` writer - P1.7 GPIO Output Drive Value"] pub struct PIN7_W<'a> { w: &'a mut W, } impl<'a> PIN7_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(1 << 7)) | ((value as u32 & 1) << 7); self.w } } impl R { #[doc = "Bit 0 - P1.0 GPIO Output Drive Value"] #[inline(always)] pub fn pin0(&self) -> PIN0_R { PIN0_R::new((self.bits & 1) != 0) } #[doc = "Bit 1 - P1.1 GPIO Output Drive Value"] #[inline(always)] pub fn pin1(&self) -> PIN1_R { PIN1_R::new(((self.bits >> 1) & 1) != 0) } #[doc = "Bit 2 - P1.2 GPIO Output Drive Value"] #[inline(always)] pub fn pin2(&self) -> PIN2_R { PIN2_R::new(((self.bits >> 2) & 1) != 0) } #[doc = "Bit 3 - P1.3 GPIO Output Drive Value"] #[inline(always)] pub fn pin3(&self) -> PIN3_R { PIN3_R::new(((self.bits >> 3) & 1) != 0) } #[doc = "Bit 4 - P1.4 GPIO Output Drive Value"] #[inline(always)] pub fn pin4(&self) -> PIN4_R { PIN4_R::new(((self.bits >> 4) & 1) != 0) } #[doc = "Bit 5 - P1.5 GPIO Output Drive Value"] #[inline(always)] pub fn pin5(&self) -> PIN5_R { PIN5_R::new(((self.bits >> 5) & 1) != 0) } #[doc = "Bit 6 - P1.6 GPIO Output Drive Value"] #[inline(always)] pub fn pin6(&self) -> PIN6_R { PIN6_R::new(((self.bits >> 6) & 1) != 0) } #[doc = "Bit 7 - P1.7 GPIO Output Drive Value"] #[inline(always)] pub fn pin7(&self) -> PIN7_R { PIN7_R::new(((self.bits >> 7) & 1) != 0) } } impl W { #[doc = "Bit 0 - P1.0 GPIO Output Drive Value"] #[inline(always)] pub fn pin0(&mut self) -> PIN0_W { PIN0_W { w: self } } #[doc = "Bit 1 - P1.1 GPIO Output Drive Value"] #[inline(always)] pub fn pin1(&mut self) -> PIN1_W { PIN1_W { w: self } } #[doc = "Bit 2 - P1.2 GPIO Output Drive Value"] #[inline(always)] pub fn pin2(&mut self) -> PIN2_W { PIN2_W { w: self } } #[doc = "Bit 3 - P1.3 GPIO Output Drive Value"] #[inline(always)] pub fn pin3(&mut self) -> PIN3_W { PIN3_W { w: self } } #[doc = "Bit 4 - P1.4 GPIO Output Drive Value"] #[inline(always)] pub fn pin4(&mut self) -> PIN4_W { PIN4_W { w: self } } #[doc = "Bit 5 - P1.5 GPIO Output Drive Value"] #[inline(always)] pub fn pin5(&mut self) -> PIN5_W { PIN5_W { w: self } } #[doc = "Bit 6 - P1.6 GPIO Output Drive Value"] #[inline(always)] pub fn pin6(&mut self) -> PIN6_W { PIN6_W { w: self } } #[doc = "Bit 7 - P1.7 GPIO Output Drive Value"] #[inline(always)] pub fn pin7(&mut self) -> PIN7_W { PIN7_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Port P1 GPIO Output Value\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). 
See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [out_val_p1](index.html) module"] pub struct OUT_VAL_P1_SPEC; impl crate::RegisterSpec for OUT_VAL_P1_SPEC { type Ux = u32; } #[doc = "`read()` method returns [out_val_p1::R](R) reader structure"] impl crate::Readable for OUT_VAL_P1_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [out_val_p1::W](W) writer structure"] impl crate::Writable for OUT_VAL_P1_SPEC { type Writer = W; } #[doc = "`reset()` method sets OUT_VAL_P1 to value 0"] impl crate::Resettable for OUT_VAL_P1_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
PIN3_R
polygon.py
"""Polygons and their linear ring components """ from ctypes import c_double, c_void_p, cast, POINTER from ctypes import ArgumentError import weakref from shapely.algorithms.cga import signed_area from shapely.coords import required from shapely.geos import lgeos from shapely.geometry.base import BaseGeometry from shapely.geometry.linestring import LineString, LineStringAdapter from shapely.geometry.proxy import PolygonProxy __all__ = ['Polygon', 'asPolygon', 'LinearRing', 'asLinearRing'] class LinearRing(LineString): """ A closed one-dimensional feature comprising one or more line segments A LinearRing that crosses itself or touches itself at a single point is invalid and operations on it may fail. """ def __init__(self, coordinates=None): """ Parameters ---------- coordinates : sequence A sequence of (x, y [,z]) numeric coordinate pairs or triples Rings are implicitly closed. There is no need to specific a final coordinate pair identical to the first. Example ------- Construct a square ring. >>> ring = LinearRing( ((0, 0), (0, 1), (1 ,1 ), (1 , 0)) ) >>> ring.is_closed True >>> ring.length 4.0 """ BaseGeometry.__init__(self) if coordinates is not None: self._set_coords(coordinates) @property def __geo_interface__(self): return { 'type': 'LinearRing', 'coordinates': tuple(self.coords) } # Coordinate access _get_coords = BaseGeometry._get_coords def _set_coords(self, coordinates): self.empty() self._geom, self._ndim = geos_linearring_from_py(coordinates) coords = property(_get_coords, _set_coords) @property def is_ccw(self): """True is the ring is oriented counter clock-wise""" return bool(self.impl['is_ccw'](self)) @property def is_simple(self): """True if the geometry is simple, meaning that any self-intersections are only at boundary points, else False""" return LineString(self).is_simple class LinearRingAdapter(LineStringAdapter): __p__ = None def __init__(self, context): self.context = context self.factory = geos_linearring_from_py @property def __geo_interface__(self): return { 'type': 'LinearRing', 'coordinates': tuple(self.coords) } coords = property(BaseGeometry._get_coords) def asLinearRing(context): """Adapt an object to the LinearRing interface""" return LinearRingAdapter(context) class InteriorRingSequence(object): _factory = None _geom = None __p__ = None _ndim = None _index = 0 _length = 0 __rings__ = None _gtag = None def __init__(self, parent): self.__p__ = parent self._geom = parent._geom self._ndim = parent._ndim def __iter__(self): self._index = 0 self._length = self.__len__() return self def next(self): if self._index < self._length: ring = self._get_ring(self._index) self._index += 1 return ring else: raise StopIteration def __len__(self): return lgeos.GEOSGetNumInteriorRings(self._geom) def __getitem__(self, key): m = self.__len__() if isinstance(key, int): if key + m < 0 or key >= m: raise IndexError("index out of range") if key < 0: i = m + key else: i = key return self._get_ring(i) elif isinstance(key, slice): res = [] start, stop, stride = key.indices(m) for i in xrange(start, stop, stride): res.append(self._get_ring(i)) return res else: raise TypeError("key must be an index or slice") @property def _longest(self): max = 0 for g in iter(self): l = len(g.coords) if l > max: max = l def gtag(self): return hash(repr(self.__p__)) def _get_ring(self, i): gtag = self.gtag() if gtag != self._gtag: self.__rings__ = {} if i not in self.__rings__: g = lgeos.GEOSGetInteriorRingN(self._geom, i) ring = LinearRing() ring.__geom__ = g ring.__p__ = self ring._owned = True 
ring._ndim = self._ndim self.__rings__[i] = weakref.ref(ring) return self.__rings__[i]() class Polygon(BaseGeometry): """ A two-dimensional figure bounded by a linear ring A polygon has a non-zero area. It may have one or more negative-space "holes" which are also bounded by linear rings. If any rings cross each other, the feature is invalid and operations on it may fail. Attributes ---------- exterior : LinearRing The ring which bounds the positive space of the polygon. interiors : sequence A sequence of rings which bound all existing holes. """ _exterior = None _interiors = [] _ndim = 2 def __init__(self, shell=None, holes=None): """ Parameters ---------- shell : sequence A sequence of (x, y [,z]) numeric coordinate pairs or triples holes : sequence A sequence of objects which satisfy the same requirements as the shell parameters above Example ------- Create a square polygon with no holes >>> coords = ((0., 0.), (0., 1.), (1., 1.), (1., 0.), (0., 0.)) >>> polygon = Polygon(coords) >>> polygon.area 1.0 """ BaseGeometry.__init__(self) if shell is not None: self._geom, self._ndim = geos_polygon_from_py(shell, holes) @property def exterior(self): if self.is_empty: return None elif self._exterior is None or self._exterior() is None: g = lgeos.GEOSGetExteriorRing(self._geom) ring = LinearRing() ring.__geom__ = g ring.__p__ = self ring._owned = True ring._ndim = self._ndim self._exterior = weakref.ref(ring) return self._exterior() @property def interiors(self): if self.is_empty: return [] return InteriorRingSequence(self) @property def ctypes(self): if not self._ctypes_data: self._ctypes_data = self.exterior.ctypes return self._ctypes_data @property def __array_interface__(self): raise NotImplementedError( "A polygon does not itself provide the array interface. Its rings do.") def _get_coords(self): raise NotImplementedError( "Component rings have coordinate sequences, but the polygon does not") def _set_coords(self, ob): raise NotImplementedError( "Component rings have coordinate sequences, but the polygon does not") @property def coords(self): raise NotImplementedError( "Component rings have coordinate sequences, but the polygon does not") @property def __geo_interface__(self): coords = [tuple(self.exterior.coords)] for hole in self.interiors: coords.append(tuple(hole.coords)) return { 'type': 'Polygon', 'coordinates': tuple(coords) } class PolygonAdapter(PolygonProxy, Polygon): def __init__(self, shell, holes=None): self.shell = shell self.holes = holes self.context = (shell, holes) self.factory = geos_polygon_from_py @property def _ndim(self): try: # From array protocol array = self.shell.__array_interface__ n = array['shape'][1] assert n == 2 or n == 3 return n except AttributeError: # Fall back on list return len(self.shell[0]) def asPolygon(shell, holes=None): """Adapt objects to the Polygon interface""" return PolygonAdapter(shell, holes) def orient(polygon, sign=1.0): s = float(sign) rings = [] ring = polygon.exterior if signed_area(ring)/s >= 0.0: rings.append(ring) else: rings.append(list(ring.coords)[::-1]) for ring in polygon.interiors: if signed_area(ring)/s <= 0.0: rings.append(ring) else: rings.append(list(ring.coords)[::-1]) return Polygon(rings[0], rings[1:]) def geos_linearring_from_py(ob, update_geom=None, update_ndim=0): # If numpy is present, we use numpy.require to ensure that we have a # C-continguous array that owns its data. View data will be copied. 
ob = required(ob) try: # From array protocol array = ob.__array_interface__ assert len(array['shape']) == 2 m = array['shape'][0] n = array['shape'][1] if m < 3: raise ValueError( "A LinearRing must have at least 3 coordinate tuples") assert n == 2 or n == 3 # Make pointer to the coordinate array if isinstance(array['data'], tuple): # numpy tuple (addr, read-only) cp = cast(array['data'][0], POINTER(c_double)) else: cp = array['data'] # Add closing coordinates to sequence? if cp[0] != cp[m*n-n] or cp[1] != cp[m*n-n+1]: M = m + 1 else: M = m # Create a coordinate sequence if update_geom is not None: cs = lgeos.GEOSGeom_getCoordSeq(update_geom) if n != update_ndim: raise ValueError( "Wrong coordinate dimensions; this geometry has dimensions: %d" \ % update_ndim) else:
for i in xrange(m): # Because of a bug in the GEOS C API, # always set X before Y lgeos.GEOSCoordSeq_setX(cs, i, cp[n*i]) lgeos.GEOSCoordSeq_setY(cs, i, cp[n*i+1]) if n == 3: lgeos.GEOSCoordSeq_setZ(cs, i, cp[n*i+2]) # Add closing coordinates to sequence? if M > m: # Because of a bug in the GEOS C API, # always set X before Y lgeos.GEOSCoordSeq_setX(cs, M-1, cp[0]) lgeos.GEOSCoordSeq_setY(cs, M-1, cp[1]) if n == 3: lgeos.GEOSCoordSeq_setZ(cs, M-1, cp[2]) except AttributeError: # Fall back on list m = len(ob) n = len(ob[0]) if m < 3: raise ValueError( "A LinearRing must have at least 3 coordinate tuples") assert (n == 2 or n == 3) # Add closing coordinates if not provided if m == 3 or ob[0][0] != ob[-1][0] or ob[0][1] != ob[-1][1]: M = m + 1 else: M = m # Create a coordinate sequence if update_geom is not None: cs = lgeos.GEOSGeom_getCoordSeq(update_geom) if n != update_ndim: raise ValueError( "Wrong coordinate dimensions; this geometry has dimensions: %d" \ % update_ndim) else: cs = lgeos.GEOSCoordSeq_create(M, n) # add to coordinate sequence for i in xrange(m): coords = ob[i] # Because of a bug in the GEOS C API, # always set X before Y lgeos.GEOSCoordSeq_setX(cs, i, coords[0]) lgeos.GEOSCoordSeq_setY(cs, i, coords[1]) if n == 3: try: lgeos.GEOSCoordSeq_setZ(cs, i, coords[2]) except IndexError: raise ValueError("Inconsistent coordinate dimensionality") # Add closing coordinates to sequence? if M > m: coords = ob[0] # Because of a bug in the GEOS C API, # always set X before Y lgeos.GEOSCoordSeq_setX(cs, M-1, coords[0]) lgeos.GEOSCoordSeq_setY(cs, M-1, coords[1]) if n == 3: lgeos.GEOSCoordSeq_setZ(cs, M-1, coords[2]) if update_geom is not None: return None else: return lgeos.GEOSGeom_createLinearRing(cs), n def update_linearring_from_py(geom, ob): geos_linearring_from_py(ob, geom._geom, geom._ndim) def geos_polygon_from_py(shell, holes=None): if shell is not None: geos_shell, ndim = geos_linearring_from_py(shell) if holes: ob = holes L = len(ob) exemplar = ob[0] try: N = len(exemplar[0]) except TypeError: N = exemplar._ndim assert L >= 1 assert N == 2 or N == 3 # Array of pointers to ring geometries geos_holes = (c_void_p * L)() # add to coordinate sequence for l in xrange(L): geom, ndim = geos_linearring_from_py(ob[l]) geos_holes[l] = cast(geom, c_void_p) else: geos_holes = POINTER(c_void_p)() L = 0 return ( lgeos.GEOSGeom_createPolygon( c_void_p(geos_shell), geos_holes, L ), ndim ) # Test runner def _test(): import doctest doctest.testmod() if __name__ == "__main__": _test()
cs = lgeos.GEOSCoordSeq_create(M, n) # add to coordinate sequence
query-order.dto.ts
/* * @Author: xuanyu * @LastEditors: xuanyu * @email: [email protected] * @github: https://github.com/z-xuanyu * @Date: 2022-03-30 14:03:22 * @LastEditTime: 2022-03-30 16:03:40 * @Description: Modify here please */ import { ApiProperty } from '@nestjs/swagger'; import { PaginationParametersDto } from 'libs/common/PaginationParametersDto'; export class
extends PaginationParametersDto { @ApiProperty({ required: false, name: '用户名称' }) userName?: string; @ApiProperty({ required: false, name: '商品名称' }) productName?: string; @ApiProperty({ type: Number, required: false, name: '订单类型', }) type?: number; @ApiProperty({ type: Number, required: false, name: '订单状态', }) status?: number; @ApiProperty({ type: Number, required: false, name: '支付方式', }) paymentType?: number; @ApiProperty({ required: false, name: '订单来源' }) source?: string; }
QueryOrderDto
AOC_06_1.rs
use std::collections::HashMap; use std::io; use std::io::BufRead; type Satellite = String; type Orbits = HashMap<Satellite, Satellite>; type Distance = usize; type Distances = HashMap<Satellite, Distance>; fn main() -> io::Result<()>
fn count(orbiter: &Satellite, orbits: &Orbits, distances: &mut Distances) -> Distance { if !distances.contains_key(orbiter) { let distance = match orbits.get(orbiter) { Some(orbitee) => 1 + count(orbitee, orbits, distances), None => 0, }; distances.insert(orbiter.clone(), distance); } distances[orbiter] }
{ let orbits: Orbits = io::BufReader::new(io::stdin()) .lines() .map(|input| { input.map(|line| { let splits = line.split(")").collect::<Vec<&str>>(); (splits[1].to_string(), splits[0].to_string()) }) }) .collect::<io::Result<_>>()?; let mut distances: Distances = HashMap::new(); orbits.keys().for_each(|orbiter| { count(orbiter, &orbits, &mut distances); }); println!("{}", distances.values().sum::<Distance>()); Ok(()) }
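The count function in this sample memoizes each body's orbit distance: a body that orbits nothing is at distance 0, and any other body is at 1 plus its orbitee's distance; main then sums the distances of every body parsed from "orbitee)orbiter" lines. A Python transcription of the same memoized recursion, offered only to illustrate the algorithm:

def count(orbiter, orbits, distances):
    # orbits maps each orbiter to the body it orbits; distances memoizes results
    if orbiter not in distances:
        orbitee = orbits.get(orbiter)
        distances[orbiter] = 0 if orbitee is None else 1 + count(orbitee, orbits, distances)
    return distances[orbiter]

orbits = {"A": "COM", "B": "A"}   # parsed from lines "COM)A" and "A)B"
distances = {}
for orbiter in orbits:
    count(orbiter, orbits, distances)
print(sum(distances.values()))    # 3: A is 1 hop from COM, B is 2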
add-impl.d.ts
import { SchematicCommand } from '../models/schematic-command'; export declare class AddCommand extends SchematicCommand { readonly allowPrivateSchematics: boolean;
validate(options: any): boolean; run(options: any): Promise<number | void>; private isPackageInstalled; private executeSchematic; private findProjectVersion; private hasMismatchedPeer; }
readonly packageManager: string; private _parseSchematicOptions;
test_cli_mgmt_cdn.py
# coding: utf-8 #------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. #-------------------------------------------------------------------------- # TEST SCENARIO COVERAGE # ---------------------- # Methods Total : 41 # Methods Covered : 41 # Examples Total : 42 # Examples Tested : 42 # Coverage % : 100 # ---------------------- import os import unittest import azure.mgmt.cdn from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer AZURE_LOCATION = 'eastus' class MgmtCdnTest(AzureMgmtTestCase): def setUp(self): super(MgmtCdnTest, self).setUp() self.mgmt_client = self.create_mgmt_client( azure.mgmt.cdn.CdnManagementClient ) @ResourceGroupPreparer(location=AZURE_LOCATION) def test_cdn(self, resource_group): SUBSCRIPTION_ID = None if self.is_live: SUBSCRIPTION_ID = os.environ.get("AZURE_SUBSCRIPTION_ID", None) if not SUBSCRIPTION_ID: SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID RESOURCE_GROUP = resource_group.name PROFILE_NAME = "profilename" CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME = "policyname" ENDPOINT_NAME = "endpoint9527x" CUSTOM_DOMAIN_NAME = "someDomain" ORIGIN_NAME = "origin1" # Profiles_Create[put] BODY = { "location": "WestUs", "sku": { "name": "Standard_Verizon" } } result = self.mgmt_client.profiles.begin_create(resource_group.name, PROFILE_NAME, BODY) result = result.result() """ # Creates specific policy[put] BODY = { "location": "global", "sku": { "name": "Standard_Microsoft" }, "policy_settings": { "default_redirect_url": "http://www.bing.com", "default_custom_block_response_status_code": "499", "default_custom_block_response_body": "PGh0bWw+CjxoZWFkZXI+PHRpdGxlPkhlbGxvPC90aXRsZT48L2hlYWRlcj4KPGJvZHk+CkhlbGxvIHdvcmxkCjwvYm9keT4KPC9odG1sPg==" }, "rate_limit_rules": { "rules": [ { "name": "RateLimitRule1", "priority": "1", "enabled_state": "Enabled", "rate_limit_duration_in_minutes": "0", "rate_limit_threshold": "1000", "match_conditions": [ { "match_variable": "RemoteAddr", "operator": "IPMatch", "negate_condition": False, "transforms": [], "match_value": [ "192.168.1.0/24", "10.0.0.0/24" ] } ], "action": "Block" } ] }, "custom_rules": { "rules": [ { "name": "CustomRule1", "priority": "2", "enabled_state": "Enabled", "match_conditions": [ { "match_variable": "RemoteAddr", "operator": "GeoMatch", "negate_condition": False, "transforms": [], "match_value": [ "CH" ] }, { "match_variable": "RequestHeader", "selector": "UserAgent", "operator": "Contains", "negate_condition": False, "transforms": [], "match_value": [ "windows" ] }, { "match_variable": "QueryString", "selector": "search", "operator": "Contains", "negate_condition": False, "transforms": [ "UrlDecode", "Lowercase" ], "match_value": [ "<?php", "?>" ] } ], "action": "Block" } ] }, "managed_rules": { "managed_rule_sets": [ { "rule_set_type": "DefaultRuleSet", "rule_set_version": "preview-1.0", "rule_group_overrides": [ { "rule_group_name": "Group1", "rules": [ { "rule_id": "GROUP1-0001", "enabled_state": "Enabled", "action": "Redirect" }, { "rule_id": "GROUP1-0002", "enabled_state": "Disabled" } ] } ] } ] } } result = self.mgmt_client.policies.create_or_update(resource_group.name, CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME, BODY) result = result.result() """ # Endpoints_Create[put] BODY = { "origin_host_header": "www.bing.com", "origin_path": "/image", "content_types_to_compress": [ "text/html", "application/octet-stream" ], 
"is_compression_enabled": True, "is_http_allowed": True, "is_https_allowed": True, "query_string_caching_behavior": "BypassCaching", # "delivery_policy": { # "description": "Test description for a policy.", # "rules": [ # { # "name": "rule1", # "order": "1", # "conditions": [ # { # "name": "RemoteAddress", # "parameters": { # "operator": "IPMatch", # "negate_condition": True, # "match_values": [ # "192.168.1.0/24", # "10.0.0.0/24" # ], # "@odata.type": "#Microsoft.Azure.Cdn.Models.DeliveryRuleRemoteAddressConditionParameters" # } # } # ], # "actions": [ # { # "name": "CacheExpiration", # "parameters": { # "cache_behavior": "Override", # "cache_duration": "10:10:09", # "@odata.type": "#Microsoft.Azure.Cdn.Models.DeliveryRuleCacheExpirationActionParameters", # "cache_type": "All" # } # }, # { # "name": "ModifyResponseHeader", # "parameters": { # "header_action": "Overwrite", # "header_name": "Access-Control-Allow-Origin", # "value": "*", # "@odata.type": "#Microsoft.Azure.Cdn.Models.DeliveryRuleHeaderActionParameters" # } # }, # { # "name": "ModifyRequestHeader", # "parameters": { # "header_action": "Overwrite", # "header_name": "Accept-Encoding", # "value": "gzip", # "@odata.type": "#Microsoft.Azure.Cdn.Models.DeliveryRuleHeaderActionParameters" # } # } # ] # } # ] # }, "origins": [ { "name": "origin1", "host_name": "host1.hello.com" } ], # "web_application_firewall_policy_link": { # "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Cdn/CdnWebApplicationFirewallPolicies/" + CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME + "" # }, "location": "WestUs", "tags": { "kay1": "value1" } } result = self.mgmt_client.endpoints.begin_create(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, BODY) result = result.result() """ # CustomDomains_Create[put] # BODY = { # "host_name": "www.someDomain.net" # } HOST_NAME = "www.someDomain.net" result = self.mgmt_client.custom_domains.create(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, CUSTOM_DOMAIN_NAME, HOST_NAME) result = result.result() # CustomDomains_Get[get] result = self.mgmt_client.custom_domains.get(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, CUSTOM_DOMAIN_NAME) """ # Origins_Get[get] result = self.mgmt_client.origins.get(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, ORIGIN_NAME) """ # Get Policy[get] result = self.mgmt_client.policies.get(resource_group.name, CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME) """ # CustomDomains_ListByEndpoint[get] result = self.mgmt_client.custom_domains.list_by_endpoint(resource_group.name, PROFILE_NAME, ENDPOINT_NAME) # Origins_ListByEndpoint[get] result = self.mgmt_client.origins.list_by_endpoint(resource_group.name, PROFILE_NAME, ENDPOINT_NAME) # Endpoints_Get[get] result = self.mgmt_client.endpoints.get(resource_group.name, PROFILE_NAME, ENDPOINT_NAME) # Endpoints_ListByProfile[get] result = self.mgmt_client.endpoints.list_by_profile(resource_group.name, PROFILE_NAME) # List Policies in a Resource Group[get] result = self.mgmt_client.policies.list(resource_group.name) # Profiles_Get[get] result = self.mgmt_client.profiles.get(resource_group.name, PROFILE_NAME) # Profiles_ListByResourceGroup[get] result = self.mgmt_client.profiles.list_by_resource_group(resource_group.name) # List Policies in a Resource Group[get] result = self.mgmt_client.policies.list(resource_group.name) # Profiles_List[get] result = self.mgmt_client.profiles.list() # Operations_List[get] result = self.mgmt_client.operations.list() # EdgeNodes_List[get] result = 
self.mgmt_client.edge_nodes.list() """ # CustomDomains_DisableCustomHttps[post] result = self.mgmt_client.custom_domains.disable_custom_https(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, CUSTOM_DOMAIN_NAME) # CustomDomains_EnableCustomHttpsUsingYourOwnCertificate[post] BODY = { "certificate_source": "AzureKeyVault", "protocol_type": "ServerNameIndication", "certificate_source_parameters": { "odata.type": "#Microsoft.Azure.Cdn.Models.KeyVaultCertificateSourceParameters", "subscription_id": "subid", "resource_group_name": "RG", "vault_name": "kv", "secret_name": "secret1", "secret_version": "00000000-0000-0000-0000-000000000000", "update_rule": "NoAction", "delete_rule": "NoAction" } } result = self.mgmt_client.custom_domains.enable_custom_https(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, CUSTOM_DOMAIN_NAME, BODY) # CustomDomains_EnableCustomHttpsUsingCDNManagedCertificate[post] BODY = { "certificate_source": "Cdn", "protocol_type": "ServerNameIndication", "certificate_source_parameters": { "odata.type": "#Microsoft.Azure.Cdn.Models.CdnCertificateSourceParameters", "certificate_type": "Shared" } } result = self.mgmt_client.custom_domains.enable_custom_https(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, CUSTOM_DOMAIN_NAME, BODY) """ # Origins_Update[patch] BODY = { "http_port": "42", "https_port": "43" } result = self.mgmt_client.origins.begin_update(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, ORIGIN_NAME, BODY) result = result.result() """ # Creates specific policy[put] BODY = { "location": "WestUs", "sku": { "name": "Standard_Microsoft" }, "policy_settings": { "default_redirect_url": "http://www.bing.com", "default_custom_block_response_status_code": "499", "default_custom_block_response_body": "PGh0bWw+CjxoZWFkZXI+PHRpdGxlPkhlbGxvPC90aXRsZT48L2hlYWRlcj4KPGJvZHk+CkhlbGxvIHdvcmxkCjwvYm9keT4KPC9odG1sPg==" }, "rate_limit_rules": { "rules": [ { "name": "RateLimitRule1", "priority": "1", "enabled_state": "Enabled", "rate_limit_duration_in_minutes": "0", "rate_limit_threshold": "1000", "match_conditions": [ { "match_variable": "RemoteAddr", "operator": "IPMatch", "negate_condition": False, "transforms": [], "match_value": [ "192.168.1.0/24", "10.0.0.0/24" ] } ], "action": "Block" } ] }, "custom_rules": { "rules": [ { "name": "CustomRule1", "priority": "2", "enabled_state": "Enabled", "match_conditions": [ { "match_variable": "RemoteAddr", "operator": "GeoMatch", "negate_condition": False, "transforms": [], "match_value": [ "CH" ] }, { "match_variable": "RequestHeader", "selector": "UserAgent", "operator": "Contains", "negate_condition": False, "transforms": [], "match_value": [ "windows" ] }, { "match_variable": "QueryString", "selector": "search", "operator": "Contains", "negate_condition": False, "transforms": [ "UrlDecode", "Lowercase" ], "match_value": [ "<?php", "?>" ] }
"action": "Block" } ] }, "managed_rules": { "managed_rule_sets": [ { "rule_set_type": "DefaultRuleSet", "rule_set_version": "preview-1.0", "rule_group_overrides": [ { "rule_group_name": "Group1", "rules": [ { "rule_id": "GROUP1-0001", "enabled_state": "Enabled", "action": "Redirect" }, { "rule_id": "GROUP1-0002", "enabled_state": "Disabled" } ] } ] } ] } } result = self.mgmt_client.policies.create_or_update(resource_group.name, CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME, BODY) result = result.result() """ # Endpoints_ValidateCustomDomain[post] BODY = { "host_name": "www.someDomain.com" } # HOST_NAME = "www.someDomain.com" result = self.mgmt_client.endpoints.validate_custom_domain(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, BODY) # Endpoints_ListResourceUsage[post] result = self.mgmt_client.endpoints.list_resource_usage(resource_group.name, PROFILE_NAME, ENDPOINT_NAME) # Endpoints_PurgeContent[post] BODY = { "content_paths": [ "/folder1" ] } # CONTENT_PATHS = ["/folder1"] result = self.mgmt_client.endpoints.begin_purge_content(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, BODY) result = result.result() # Endpoints_Stop[post] result = self.mgmt_client.endpoints.begin_stop(resource_group.name, PROFILE_NAME, ENDPOINT_NAME) result = result.result() # Endpoints_Start[post] result = self.mgmt_client.endpoints.begin_start(resource_group.name, PROFILE_NAME, ENDPOINT_NAME) result = result.result() # Endpoints_LoadContent[post] BODY = { "content_paths": [ "/folder1" ] } # CONTENT_PATHS = ["/folder1"] result = self.mgmt_client.endpoints.begin_load_content(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, BODY) result = result.result() # Profiles_ListSupportedOptimizationTypes[post] result = self.mgmt_client.profiles.list_supported_optimization_types(resource_group.name, PROFILE_NAME) # Endpoints_Update[patch] BODY = { "tags": { "additional_properties": "Tag1" }, # "web_application_firewall_policy_link": { # "id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Cdn/CdnWebApplicationFirewallPolicies/" + CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME + "" # } } result = self.mgmt_client.endpoints.begin_update(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, BODY) result = result.result() # Profiles_ListResourceUsage[post] result = self.mgmt_client.profiles.list_resource_usage(resource_group.name, PROFILE_NAME) # Profiles_GenerateSsoUri[post] result = self.mgmt_client.profiles.generate_sso_uri(resource_group.name, PROFILE_NAME) # Profiles_Update[patch] BODY = { "tags": { "additional_properties": "Tag1" } } result = self.mgmt_client.profiles.begin_update(resource_group.name, PROFILE_NAME, BODY) result = result.result() # CheckNameAvailabilityWithSubscription[post] BODY = { "name": "sampleName", "type": "Microsoft.Cdn/Profiles/Endpoints" } # CHECK_NAME = "sampleName" result = self.mgmt_client.check_name_availability_with_subscription(BODY) # ResourceUsage_List[post] result = self.mgmt_client.resource_usage.list() # ValidateProbe[post] BODY = { "probe_url": "https://www.bing.com/image" } # PROBEURL = "https://www.bing.com/image" result = self.mgmt_client.validate_probe(BODY) # CheckNameAvailability[post] BODY = { "name": "sampleName", "type": "Microsoft.Cdn/Profiles/Endpoints" } # CHECKNAME = "sampleName" result = self.mgmt_client.check_name_availability(BODY) # CustomDomains_Delete[delete] result = self.mgmt_client.custom_domains.begin_delete(resource_group.name, PROFILE_NAME, ENDPOINT_NAME, CUSTOM_DOMAIN_NAME) result = result.result() """ # Delete 
protection policy[delete] result = self.mgmt_client.policies.delete(resource_group.name, CDN_WEB_APPLICATION_FIREWALL_POLICY_NAME) """ # Endpoints_Delete[delete] result = self.mgmt_client.endpoints.begin_delete(resource_group.name, PROFILE_NAME, ENDPOINT_NAME) result = result.result() # Profiles_Delete[delete] result = self.mgmt_client.profiles.begin_delete(resource_group.name, PROFILE_NAME) result = result.result() #------------------------------------------------------------------------------ if __name__ == '__main__': unittest.main()
],
lib.ts
// THIS CODE WAS AUTOMATICALLY GENERATED // DO NOT EDIT THIS CODE BY HAND // YOU CAN REGENERATE IT USING yarn generate:lib type Lib =
| 'es5' | 'es6' | 'es2015' | 'es7' | 'es2016' | 'es2017' | 'es2018' | 'es2019' | 'es2020' | 'esnext' | 'dom' | 'dom.iterable' | 'webworker' | 'webworker.importscripts' | 'scripthost' | 'es2015.core' | 'es2015.collection' | 'es2015.generator' | 'es2015.iterable' | 'es2015.promise' | 'es2015.proxy' | 'es2015.reflect' | 'es2015.symbol' | 'es2015.symbol.wellknown' | 'es2016.array.include' | 'es2017.object' | 'es2017.sharedmemory' | 'es2017.string' | 'es2017.intl' | 'es2017.typedarrays' | 'es2018.asyncgenerator' | 'es2018.asynciterable' | 'es2018.intl' | 'es2018.promise' | 'es2018.regexp' | 'es2019.array' | 'es2019.object' | 'es2019.string' | 'es2019.symbol' | 'es2020.bigint' | 'es2020.promise' | 'es2020.string' | 'es2020.symbol.wellknown' | 'es2020.intl' | 'esnext.array' | 'esnext.symbol' | 'esnext.asynciterable' | 'esnext.intl' | 'esnext.bigint' | 'esnext.string' | 'esnext.promise' | 'es2016.full' | 'es2017.full' | 'es2018.full' | 'es2019.full' | 'es2020.full' | 'esnext.full' | 'lib'; export { Lib };
field.py
import re from .field_renderable import FieldRenderable __all__ = ("Field", ) default_error_messages = { "required": "This field is required.", "type": "Invalid type.", "min_num": "You need at least {num} values.", "max_num": "You can have at most {num} values.", } HARD_MAX_NUM = 1000 class Field(FieldRenderable):
r""" Arguments are: *validators, name=None, required=False, strict=True, error_messages=None, prepare=None, clean=None, collection (bool): This field takes an open number of values of the same kind. For example, a list of comma separated tags or email addresses. sep (str): If `collection` is True, string to separate each value (default is ","). Ignored otherwise multiple=False, min_num=None, max_num=None, **extra """ __slots__ = ( "validators", "name", "required", "strict", "error_messages", "multiple", "min_num", "max_num", "collection", "sep", "extra", ) object_value = None input_values = None input_type = "text" error = None error_value = None updated = False def __init__( self, *validators, name=None, required=False, strict=True, error_messages=None, multiple=False, min_num=None, max_num=None, collection=False, sep=",", prepare=None, clean=None, **extra ): self.validators = validators self.name = name or "" self.required = required self.strict = strict self.min_num = min_num if max_num is not None: max_num = min(max_num, HARD_MAX_NUM) self.max_num = max_num self.error_messages = error_messages or {} self.collection = collection if collection: self.sep = sep multiple = False self.multiple = multiple self.custom_prepare = prepare self.custom_clean = clean self.extra = extra def load_data(self, input_values=None, object_value=None): self.input_values = input_values self.object_value = object_value @property def values(self): if self.input_values: return self.input_values if self.object_value: return (self.custom_prepare or self.prepare)(self.object_value) return [] @property def value(self): return self.values[0] if self.values else "" def get_value(self, index=0): if self.values and index < len(self.values): return self.values[index] return "" def prepare(self, object_value): return [object_value] def validate(self): self._reset() values = [str(value).strip() for value in self.input_values or []] if not values: if self.required: self._set_error("required") return None values = self._pre(values) pyvalues = self._typecast_values(values) if self.error: return None # Typecasting with `strict=False` could've emptied the values without erroring. 
# An empty string is only an error if the field is required if (not pyvalues or pyvalues[0] == "") and self.required: self._set_error("required") return None self._validate_values(pyvalues) if self.error: return None pyvalue = self._post(pyvalues) if self.custom_clean: pyvalue = self.custom_clean(pyvalue) self.updated = pyvalue != self.object_value return pyvalue def type(self, value, **kwargs): return str(value) # Private def _reset(self): self.error = None self.error_value = None self.updated = False def _pre(self, values): if self.collection: rxsep = r"\s*%s\s*" % re.escape(self.sep.strip()) all_values = [] for value in values: all_values += re.split(rxsep, value) return all_values return values def _post(self, values): if self.collection: return self.sep.join(values) elif self.multiple: return values else: return values[0] if values else None def _typecast_values(self, values): pyvalues = [] for value in values: try: pyvalue = self.type(value, **self.extra) except (ValueError, TypeError, IndexError): pyvalue = None if pyvalue is None: if self.strict: self._set_error("type") self.error_value = value return continue # pragma: no cover pyvalues.append(pyvalue) return pyvalues def _validate_values(self, pyvalues): num_values = len(pyvalues) if self.min_num is not None and self.min_num > num_values: self._set_error("min_num", num=self.min_num) return if self.max_num is not None and self.max_num < num_values: self._set_error("max_num", num=self.max_num) return for validator in self.validators: message = "Invalid value" valid = validator(pyvalues) if valid not in (True, False): valid, message = valid if not valid: self.error = message return def _set_error(self, name, **kwargs): msg = self.error_messages.get(name) or default_error_messages.get(name, "") for key, repl in kwargs.items(): msg = msg.replace("{" + key + "}", str(repl)) self.error = msg or name
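The docstring's collection flag means a single input string carries several separator-delimited values, which validate() splits on the separator, strips, type-casts, and re-joins. A minimal usage sketch under that reading, assuming the Field class above with its default str type and whatever FieldRenderable provides at import time:

tags = Field(collection=True, sep=",")
tags.load_data(input_values=["python, web , api"])
print(tags.validate())   # "python,web,api" - split on the separator, then re-joined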
numpy.rs
use pyo3::prelude::*; use pyo3::{PyResult}; use pyo3::types::PyDict; pub fn
(input: Vec<u32>) -> PyResult<PyObject> { let len = input.len(); //import numpy let gil = Python::acquire_gil(); let py = gil.python(); let locals = PyDict::new(py); locals.set_item("numpy", py.import("numpy")?)?; //create the array let code = format!("numpy.zeros(({},), numpy.uint32)", len); let rr: PyObject = py.eval(&code, None, Some(&locals))?.extract()?; locals.set_item("arr", rr)?; //figure out where the pointer is and turn it into pointer let code = "arr.ctypes.data"; let ptr_py: PyObject = py.eval(code, None, Some(&locals))?.extract()?; let ptr_us: usize = ptr_py.extract(py)?; let ptr: *mut u32 = ptr_us as *mut u32; //write the data let da_array = unsafe { std::slice::from_raw_parts_mut(ptr, len) }; da_array[..len].clone_from_slice(&input[..len]); //return the python object let result: PyObject = py.eval("arr", None, Some(&locals))?.extract()?; Ok(result) } /* pub fn numpy_from_vec_u64(input: Vec<u64>) -> PyResult<PyObject> { let len = input.len(); //import numpy let gil = Python::acquire_gil(); let py = gil.python(); let locals = PyDict::new(py); locals.set_item("numpy", py.import("numpy")?)?; //create the array let code = format!("numpy.zeros(({},), numpy.uint64)", len); let rr: PyObject = py.eval(&code, None, Some(&locals))?.extract()?; locals.set_item("arr", rr)?; //figure out where the pointer is and turn it into pointer let code = "arr.ctypes.data"; let ptr_py: PyObject = py.eval(code, None, Some(&locals))?.extract()?; let ptr_us: usize = ptr_py.extract(py)?; let ptr: *mut u64 = ptr_us as *mut u64; //write the data let da_array = unsafe { std::slice::from_raw_parts_mut(ptr, len) }; da_array[..len].clone_from_slice(&input[..len]); //return the python object let result: PyObject = py.eval("arr", None, Some(&locals))?.extract()?; Ok(result) } pub fn numpy_from_vec_i8(input: Vec<i8>) -> PyResult<PyObject> { let len = input.len(); //import numpy let gil = Python::acquire_gil(); let py = gil.python(); let locals = PyDict::new(py); locals.set_item("numpy", py.import("numpy")?)?; //create the array let code = format!("numpy.zeros(({},), numpy.int8)", len); let rr: PyObject = py.eval(&code, None, Some(&locals))?.extract()?; locals.set_item("arr", rr)?; //figure out where the pointer is and turn it into pointer let code = "arr.ctypes.data"; let ptr_py: PyObject = py.eval(code, None, Some(&locals))?.extract()?; let ptr_us: usize = ptr_py.extract(py)?; let ptr: *mut i8 = ptr_us as *mut i8; //write the data let da_array = unsafe { std::slice::from_raw_parts_mut(ptr, len) }; da_array[..len].clone_from_slice(&input[..len]); //return the python object let result: PyObject = py.eval("arr", None, Some(&locals))?.extract()?; Ok(result) } */
numpy_from_vec_u32
cmpluginmanager.py
# Copyright 2019 Nokia # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 import re import logging from cmframework.apis import cmerror class CMPluginManager(object):
def __init__(self, plugins_path): self.pluginlist = {} self.filterdict = {} self.plugins_path = plugins_path # pylint: disable=no-self-use def load_plugin(self): raise cmerror.CMError('Not implemented') # pylint: disable=no-self-use def build_input(self, indata, filtername): search_re = re.compile(filtername) if isinstance(indata, dict): filter_data = {} for key, value in indata.iteritems(): logging.debug('Matching %s against %s', key, filtername) if search_re.match(key): filter_data[key] = value else: filter_data = [] for key in indata: logging.debug('Matching %s against %s', key, filtername) if search_re.match(key): filter_data.append(key) return filter_data
network.d.ts
import { Observable } from 'rxjs/Observable'; /** * @name Network * @description * Requires Cordova plugin: cordova-plugin-network-information. For more info, please see the [Network plugin docs](https://github.com/apache/cordova-plugin-network-information). * * @usage * ```typescript * import { Network } from 'ionic-native'; * * // watch network for a disconnect * let disconnectSubscription = Network.onDisconnect().subscribe(() => { * console.log('network was disconnected :-('); * }); * * // stop disconnect watch * disconnectSubscription.unsubscribe(); * * * // watch network for a connection * let connectSubscription = Network.onConnect().subscribe(() => { * console.log('network connected!'); * // We just got a connection but we need to wait briefly * // before we determine the connection type. Might need to wait * // prior to doing any api requests as well. * setTimeout(() => { * if (Network.connection === 'wifi') { * console.log('we got a wifi connection, woohoo!'); * } * }, 3000); * }); * * // stop connect watch * connectSubscription.unsubscribe(); * * ```
/** * Return the network connection type */ static readonly connection: string; /** * Get notified when the device goes offline * @returns {Observable<any>} Returns an observable. */ static onDisconnect(): Observable<any>; /** * Get notified when the device goes online * @returns {Observable<any>} Returns an observable. */ static onConnect(): Observable<any>; }
* @advanced * The `connection` property will return one of the following connection types: `unknown`, `ethernet`, `wifi`, `2g`, `3g`, `4g`, `cellular`, `none` */ export declare class Network {
body_stream.rs
use std::{ error::Error as StdError, pin::Pin, task::{Context, Poll}, }; use bytes::Bytes; use futures_core::{ready, Stream}; use pin_project_lite::pin_project; use super::{BodySize, MessageBody}; pin_project! { /// Streaming response wrapper. /// /// Response does not contain `Content-Length` header and appropriate transfer encoding is used. pub struct BodyStream<S> { #[pin]
impl<S, E> BodyStream<S> where S: Stream<Item = Result<Bytes, E>>, E: Into<Box<dyn StdError>> + 'static, { pub fn new(stream: S) -> Self { BodyStream { stream } } } impl<S, E> MessageBody for BodyStream<S> where S: Stream<Item = Result<Bytes, E>>, E: Into<Box<dyn StdError>> + 'static, { type Error = E; fn size(&self) -> BodySize { BodySize::Stream } /// Attempts to pull out the next value of the underlying [`Stream`]. /// /// Empty values are skipped to prevent [`BodyStream`]'s transmission being /// ended on a zero-length chunk, but rather proceed until the underlying /// [`Stream`] ends. fn poll_next( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<Option<Result<Bytes, Self::Error>>> { loop { let stream = self.as_mut().project().stream; let chunk = match ready!(stream.poll_next(cx)) { Some(Ok(ref bytes)) if bytes.is_empty() => continue, opt => opt, }; return Poll::Ready(chunk); } } } #[cfg(test)] mod tests { use std::{convert::Infallible, time::Duration}; use actix_rt::{ pin, time::{sleep, Sleep}, }; use actix_utils::future::poll_fn; use derive_more::{Display, Error}; use futures_core::ready; use futures_util::{stream, FutureExt as _}; use super::*; use crate::body::to_bytes; #[actix_rt::test] async fn skips_empty_chunks() { let body = BodyStream::new(stream::iter( ["1", "", "2"] .iter() .map(|&v| Ok::<_, Infallible>(Bytes::from(v))), )); pin!(body); assert_eq!( poll_fn(|cx| body.as_mut().poll_next(cx)) .await .unwrap() .ok(), Some(Bytes::from("1")), ); assert_eq!( poll_fn(|cx| body.as_mut().poll_next(cx)) .await .unwrap() .ok(), Some(Bytes::from("2")), ); } #[actix_rt::test] async fn read_to_bytes() { let body = BodyStream::new(stream::iter( ["1", "", "2"] .iter() .map(|&v| Ok::<_, Infallible>(Bytes::from(v))), )); assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12"))); } #[derive(Debug, Display, Error)] #[display(fmt = "stream error")] struct StreamErr; #[actix_rt::test] async fn stream_immediate_error() { let body = BodyStream::new(stream::once(async { Err(StreamErr) })); assert!(matches!(to_bytes(body).await, Err(StreamErr))); } #[actix_rt::test] async fn stream_delayed_error() { let body = BodyStream::new(stream::iter(vec![Ok(Bytes::from("1")), Err(StreamErr)])); assert!(matches!(to_bytes(body).await, Err(StreamErr))); #[pin_project::pin_project(project = TimeDelayStreamProj)] #[derive(Debug)] enum TimeDelayStream { Start, Sleep(Pin<Box<Sleep>>), Done, } impl Stream for TimeDelayStream { type Item = Result<Bytes, StreamErr>; fn poll_next( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll<Option<Self::Item>> { match self.as_mut().get_mut() { TimeDelayStream::Start => { let sleep = sleep(Duration::from_millis(1)); self.as_mut().set(TimeDelayStream::Sleep(Box::pin(sleep))); cx.waker().wake_by_ref(); Poll::Pending } TimeDelayStream::Sleep(ref mut delay) => { ready!(delay.poll_unpin(cx)); self.set(TimeDelayStream::Done); cx.waker().wake_by_ref(); Poll::Pending } TimeDelayStream::Done => Poll::Ready(Some(Err(StreamErr))), } } } let body = BodyStream::new(TimeDelayStream::Start); assert!(matches!(to_bytes(body).await, Err(StreamErr))); } }
        stream: S,
    }
}
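
Why `poll_next` above loops past empty chunks: in chunked transfer encoding a zero-length chunk terminates the body, so forwarding one from the source stream would cut the response short. The same skip-empty behavior can be sketched as a standalone adapter; this is an illustrative sketch, not part of the crate, and assumes the `bytes`, `futures-util`, and `futures-executor` crates:

use bytes::Bytes;
use futures_core::Stream;
use futures_util::StreamExt as _;

// Drop zero-length chunks so the consumer never sees a spurious
// end-of-body marker; errors are always passed through.
fn skip_empty<S, E>(source: S) -> impl Stream<Item = Result<Bytes, E>>
where
    S: Stream<Item = Result<Bytes, E>>,
{
    source.filter(|chunk| {
        futures_util::future::ready(match chunk {
            Ok(bytes) => !bytes.is_empty(),
            Err(_) => true,
        })
    })
}

fn main() {
    let chunks = futures_util::stream::iter(["1", "", "2"].map(|s| Ok::<_, ()>(Bytes::from(s))));
    let collected: Vec<_> = futures_executor::block_on(skip_empty(chunks).collect::<Vec<_>>());
    assert_eq!(collected, vec![Ok(Bytes::from("1")), Ok(Bytes::from("2"))]);
}
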
grid_dataset.py
# Copyright 2020 ponai Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from typing import Union

import torch
from torch.utils.data import IterableDataset

from ponai.data.utils import iter_patch
from ponai.utils import NumpyPadMode


class GridPatchDataset(IterableDataset):
    """
    Yields patches from arrays read from an input dataset. The patches are chosen in a contiguous grid sampling scheme.
    """

    def __init__(
        self, dataset, patch_size, start_pos=(), mode: Union[NumpyPadMode, str] = NumpyPadMode.WRAP, **pad_opts
    ):
        """
        Initializes this dataset in terms of the input dataset and patch size. The `patch_size` is the size of the
        patch to sample from the input arrays. It is assumed the array's first dimension is the channel dimension,
        which will be yielded in its entirety, so this should not be specified in `patch_size`. For example, for an
        input 3D array with 1 channel of size (1, 20, 20, 20) a regular grid sampling of eight patches (1, 10, 10, 10)
        would be specified by a `patch_size` of (10, 10, 10).

        Args:
            dataset (Dataset): the dataset to read array data from
            patch_size (tuple of int or None): size of patches to generate slices for, 0/None selects whole dimension
            start_pos (tuple of int, optional): starting position in the array, default is 0 for each dimension
            mode: {``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``, ``"mean"``, ``"median"``,
                ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
                One of the listed string values or a user supplied function. Defaults to ``"wrap"``.
                See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
            pad_opts (dict, optional): padding options, see numpy.pad
        """
        self.dataset = dataset
        self.patch_size = (None,) + tuple(patch_size)
        self.start_pos = start_pos
        self.mode: NumpyPadMode = NumpyPadMode(mode)
        self.pad_opts = pad_opts

    def
(self):
        worker_info = torch.utils.data.get_worker_info()
        iter_start = 0
        iter_end = len(self.dataset)

        if worker_info is not None:
            # split workload
            per_worker = int(math.ceil((iter_end - iter_start) / float(worker_info.num_workers)))
            worker_id = worker_info.id
            iter_start = iter_start + worker_id * per_worker
            iter_end = min(iter_start + per_worker, iter_end)

        for index in range(iter_start, iter_end):
            arrays = self.dataset[index]

            iters = [iter_patch(a, self.patch_size, self.start_pos, False, self.mode, **self.pad_opts) for a in arrays]

            yield from zip(*iters)
__iter__
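
The worker split in `__iter__` above is plain ceil-division sharding: each of `num_workers` workers claims a contiguous slice of dataset indices, and the `min` clamp keeps the final slice in bounds. The same arithmetic, sketched in Rust with a hypothetical helper for illustration:

// [start, end) index range owned by `worker_id`, using ceil division
// so that all `len` indices are covered exactly once across workers.
fn worker_range(len: usize, worker_id: usize, num_workers: usize) -> (usize, usize) {
    let per_worker = (len + num_workers - 1) / num_workers;
    let start = (worker_id * per_worker).min(len);
    let end = (start + per_worker).min(len);
    (start, end)
}

fn main() {
    // 10 samples over 3 workers -> (0, 4), (4, 8), (8, 10)
    for w in 0..3 {
        println!("worker {w}: {:?}", worker_range(10, w, 3));
    }
}
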
DotProductClassifier.py
# Imports
import torch
import torch.nn.functional as F


class DotProduct_Classifier(nn.Module):
    def __init__(self, num_classes=1000, feat_dim=2048, *args):
        super(DotProduct_Classifier, self).__init__()
        self.fc = nn.Linear(feat_dim, num_classes)

    def forward(self, x, *args):
        x = self.fc(x)
        return x


def create_model(feat_dim, num_classes=1000, pretrain=False, pretrain_dir=None, *args):
    """Initialize the model

    Args:
        feat_dim (int): output dimension of the previous feature extractor
        num_classes (int, optional): Number of classes. Defaults to 1000.

    Returns:
        Class: Model
    """
    print("Loading Dot Product Classifier.")
    clf = DotProduct_Classifier(num_classes, feat_dim)

    if pretrain:
        if path.exists(pretrain_dir):
            print("===> Load Pretrain Initialization for DotProductClassifier")
            weights = torch.load(pretrain_dir)["state_dict_best"]["classifier"]
            weights = {
                k: weights["module." + k] if "module." + k in weights else clf.state_dict()[k]
                for k in clf.state_dict()
            }
            clf.load_state_dict(weights)
        else:
            raise Exception(f"Pretrain path doesn't exist: {pretrain_dir}")
    else:
        print("===> Train classifier from scratch")

    return clf
import torch.nn as nn
from os import path
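
The dict comprehension above exists because checkpoints saved from a `DataParallel`-wrapped model prefix every weight name with `module.`; when a prefixed key is missing, the classifier keeps its own freshly initialized tensor. The lookup logic in isolation, as a Rust sketch over plain maps (names are illustrative):

use std::collections::HashMap;

// For each key the model expects, prefer the checkpoint value stored
// under "module.<key>"; otherwise keep the freshly initialized value.
fn remap(expected: &HashMap<String, f32>, checkpoint: &HashMap<String, f32>) -> HashMap<String, f32> {
    expected
        .iter()
        .map(|(k, init)| {
            let v = *checkpoint.get(&format!("module.{k}")).unwrap_or(init);
            (k.clone(), v)
        })
        .collect()
}

fn main() {
    let expected = HashMap::from([("fc.weight".to_string(), 0.0)]);
    let checkpoint = HashMap::from([("module.fc.weight".to_string(), 1.5)]);
    assert_eq!(remap(&expected, &checkpoint)["fc.weight"], 1.5);
}
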
mod.rs
// Copyright 2019, The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use std::{collections::HashMap, hash::Hash}; #[allow(dead_code)] #[allow(clippy::mutable_key_type)] // Note: Clippy Breaks with Interior Mutability Error pub fn get_item_counts<I>(items: I) -> HashMap<I::Item, usize> where I: IntoIterator, I::Item: Hash + Eq, { items.into_iter().fold(HashMap::new(), |mut counts, item| { let entry = counts.entry(item).or_insert(0); *entry += 1; counts }) } /// Collect $take items from a stream or timeout for Duration $timeout. /// /// Requires the `tokio` runtime and should be used in an async context. /// /// ```edition2018 /// # use tokio::runtime::Runtime; /// # use futures::stream; /// # use std::time::Duration; /// # use tari_test_utils::collect_stream; /// /// let mut rt = Runtime::new().unwrap(); /// let mut stream = stream::iter(1..10); /// assert_eq!(rt.block_on(async { collect_stream!(stream, take=3, timeout=Duration::from_secs(1)) }), vec![1,2,3]); /// ``` #[macro_export] macro_rules! collect_stream { ($stream:expr, take=$take:expr, timeout=$timeout:expr $(,)?) => {{ use futures::{Stream, StreamExt}; use tokio::time;
let mut stream = &mut $stream; let mut items = Vec::new(); loop { if let Some(item) = time::timeout($timeout, stream.next()).await.expect( format!( "Timeout before stream could collect {} item(s). Got {} item(s).", $take, items.len() ) .as_str(), ) { items.push(item); if items.len() == $take { break items; } } else { break items; } } }}; ($stream:expr, timeout=$timeout:expr $(,)?) => {{ use futures::StreamExt; use tokio::time; let mut items = Vec::new(); while let Some(item) = time::timeout($timeout, $stream.next()) .await .expect(format!("Timeout before stream was closed. Got {} items.", items.len()).as_str()) { items.push(item); } items }}; } /// Returns a HashMap of the number of occurrences of a particular item in a stream. /// /// ```edition2018 /// # use tokio::runtime::Runtime; /// # use futures::stream; /// # use std::time::Duration; /// # use tari_test_utils::collect_stream_count; /// /// let mut rt = Runtime::new().unwrap(); /// let mut stream = stream::iter(vec![1,2,2,3,2]); /// assert_eq!(rt.block_on(async { collect_stream_count!(stream, timeout=Duration::from_secs(1)) }).get(&2), Some(&3)); /// ``` #[macro_export] macro_rules! collect_stream_count { ($stream:expr, take=$take:expr, timeout=$timeout:expr$(,)?) => {{ use std::collections::HashMap; let items = $crate::collect_stream!($stream, take = $take, timeout = $timeout); $crate::streams::get_item_counts(items) }}; ($stream:expr, timeout=$timeout:expr $(,)?) => {{ use std::collections::HashMap; let items = $crate::collect_stream!($stream, timeout = $timeout); $crate::streams::get_item_counts(items) }}; }
// Evaluate $stream once and bind it, so the expression is not re-evaluated on every loop iteration.
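
That comment is the whole point of the `let mut stream = &mut $stream;` line in the expansion: `$stream` is an arbitrary expression, and binding it once means it is evaluated once, with every later `next()` polling the same value. The pitfall it avoids, reduced to a plain iterator and a hypothetical macro (illustration only):

macro_rules! next_two {
    ($stream:expr) => {{
        // Evaluate the expression a single time; both calls below
        // advance the same iterator.
        let s = &mut $stream;
        (s.next(), s.next())
    }};
}

fn make_iter() -> std::vec::IntoIter<i32> {
    vec![1, 2, 3].into_iter()
}

fn main() {
    // Had the macro expanded `$stream.next()` twice, `make_iter()` would
    // run twice and this would be (Some(1), Some(1)).
    assert_eq!(next_two!(make_iter()), (Some(1), Some(2)));
}
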
readOnlyFloatingFilter.ts
import { IFloatingFilterComp, IFloatingFilterParams } from "../floatingFilter";
import { Component } from "../../../widgets/component";
import { RefSelector } from "../../../widgets/componentAnnotations";

// optional floating filter for user-provided filters: rather than supplying a floating filter
// component, the filter can expose a getModelAsString() method, and this class just displays
// the string that method returns
export class ReadOnlyFloatingFilter extends Component implements IFloatingFilterComp {

    @RefSelector('eFloatingFilterText') private eFloatingFilterText: HTMLInputElement;

    private params: IFloatingFilterParams;

    constructor() {
public init(params: IFloatingFilterParams): void {
        this.params = params;
        this.eFloatingFilterText.disabled = true;
    }

    public onParentModelChanged(parentModel: any): void {
        if (!parentModel) {
            this.eFloatingFilterText.value = '';
            return;
        }

        this.params.parentFilterInstance(filterInstance => {
            // getModelAsString should be present, as we check this in floatingFilterWrapper
            if (filterInstance.getModelAsString) {
                const modelAsString = filterInstance.getModelAsString(parentModel);
                this.eFloatingFilterText.value = modelAsString;
            }
        });
    }
}
super(`<div class="ag-input-wrapper" role="presentation"><input ref="eFloatingFilterText" class="ag-floating-filter-input"></div>`);
    }
check_cloud_n_f_s_export_remove_status.go
// Code generated by go-swagger; DO NOT EDIT. package staging // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the generate command import ( "net/http" "github.com/go-openapi/errors" "github.com/go-openapi/runtime/middleware" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" "github.com/go-openapi/validate" ) // CheckCloudNFSExportRemoveStatusHandlerFunc turns a function with the right signature into a check cloud n f s export remove status handler type CheckCloudNFSExportRemoveStatusHandlerFunc func(CheckCloudNFSExportRemoveStatusParams) middleware.Responder // Handle executing the request and returning a response func (fn CheckCloudNFSExportRemoveStatusHandlerFunc) Handle(params CheckCloudNFSExportRemoveStatusParams) middleware.Responder { return fn(params) } // CheckCloudNFSExportRemoveStatusHandler interface for that can handle valid check cloud n f s export remove status params type CheckCloudNFSExportRemoveStatusHandler interface { Handle(CheckCloudNFSExportRemoveStatusParams) middleware.Responder } // NewCheckCloudNFSExportRemoveStatus creates a new http.Handler for the check cloud n f s export remove status operation func
(ctx *middleware.Context, handler CheckCloudNFSExportRemoveStatusHandler) *CheckCloudNFSExportRemoveStatus { return &CheckCloudNFSExportRemoveStatus{Context: ctx, Handler: handler} } /*CheckCloudNFSExportRemoveStatus swagger:route GET /cloud/remove/{param} staging checkCloudNFSExportRemoveStatus Check the status of a nfs export remove request for the cloud Check the status of a nfs export remove request for the cloud */ type CheckCloudNFSExportRemoveStatus struct { Context *middleware.Context Handler CheckCloudNFSExportRemoveStatusHandler } func (o *CheckCloudNFSExportRemoveStatus) ServeHTTP(rw http.ResponseWriter, r *http.Request) { route, rCtx, _ := o.Context.RouteInfo(r) if rCtx != nil { r = rCtx } var Params = NewCheckCloudNFSExportRemoveStatusParams() if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params o.Context.Respond(rw, r, route.Produces, route, err) return } res := o.Handler.Handle(Params) // actually handle the request o.Context.Respond(rw, r, route.Produces, route, res) } // CheckCloudNFSExportRemoveStatusOKBody check cloud n f s export remove status o k body // // swagger:model CheckCloudNFSExportRemoveStatusOKBody type CheckCloudNFSExportRemoveStatusOKBody struct { // status // Required: true Status *string `json:"status"` } // Validate validates this check cloud n f s export remove status o k body func (o *CheckCloudNFSExportRemoveStatusOKBody) Validate(formats strfmt.Registry) error { var res []error if err := o.validateStatus(formats); err != nil { res = append(res, err) } if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil } func (o *CheckCloudNFSExportRemoveStatusOKBody) validateStatus(formats strfmt.Registry) error { if err := validate.Required("checkCloudNFSExportRemoveStatusOK"+"."+"status", "body", o.Status); err != nil { return err } return nil } // MarshalBinary interface implementation func (o *CheckCloudNFSExportRemoveStatusOKBody) MarshalBinary() ([]byte, error) { if o == nil { return nil, nil } return swag.WriteJSON(o) } // UnmarshalBinary interface implementation func (o *CheckCloudNFSExportRemoveStatusOKBody) UnmarshalBinary(b []byte) error { var res CheckCloudNFSExportRemoveStatusOKBody if err := swag.ReadJSON(b, &res); err != nil { return err } *o = res return nil }
NewCheckCloudNFSExportRemoveStatus
standalone.rs
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use ballista_core::serde::protobuf::{LogicalPlanNode, PhysicalPlanNode};
use ballista_core::serde::BallistaCodec;
use ballista_core::{
    error::Result, serde::protobuf::scheduler_grpc_server::SchedulerGrpcServer,
    BALLISTA_VERSION,
};
use datafusion::prelude::ExecutionContext;
use log::info;
use std::{net::SocketAddr, sync::Arc};
use tokio::net::TcpListener;
use tokio::sync::RwLock;
use tonic::transport::Server;

use crate::{state::StandaloneClient, SchedulerServer};

pub async fn new_standalone_scheduler() -> Result<SocketAddr>
{
    let client = StandaloneClient::try_new_temporary()?;
    let scheduler_server: SchedulerServer<LogicalPlanNode, PhysicalPlanNode> =
        SchedulerServer::new(
            Arc::new(client),
            "ballista".to_string(),
            Arc::new(RwLock::new(ExecutionContext::new())),
            BallistaCodec::default(),
        );
    scheduler_server.init().await?;
    let server = SchedulerGrpcServer::new(scheduler_server);
    // Let the OS assign a random, free port
    let listener = TcpListener::bind("localhost:0").await?;
    let addr = listener.local_addr()?;
    info!(
        "Ballista v{} Rust Scheduler listening on {:?}",
        BALLISTA_VERSION, addr
    );
    tokio::spawn(
        Server::builder().add_service(server).serve_with_incoming(
            tokio_stream::wrappers::TcpListenerStream::new(listener),
        ),
    );
    Ok(addr)
}
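
Binding to port 0 is how the scheduler obtains an OS-assigned free port; `local_addr()` then reports which port was chosen before the listener is handed to the gRPC server. The pattern on its own, assuming the `tokio` crate with its `net`, `rt`, and `macros` features:

use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Port 0 asks the OS for any free port...
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    // ...and local_addr() reveals the one that was actually assigned.
    println!("listening on {}", listener.local_addr()?);
    Ok(())
}
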
hub.go
package node import ( "encoding/json" "sync" "github.com/anycable/anycable-go/common" "github.com/apex/log" ) // HubSubscription contains information about session-channel(-stream) subscription type HubSubscription struct { event string session string stream string identifier string } // HubRegistration represents registration event ("add" or "remove") type HubRegistration struct { event string session *Session } // Hub stores all the sessions and the corresponding subscriptions info type Hub struct { // Registered sessions sessions map[string]*Session // Identifiers to session identifiers map[string]map[string]bool // Maps streams to sessions with identifiers // stream -> sid -> identifier -> true streams map[string]map[string]map[string]bool // Maps sessions to identifiers to streams // sid -> identifier -> [stream] sessionsStreams map[string]map[string][]string // Messages for specified stream broadcast chan *common.StreamMessage // Remote disconnect commands disconnect chan *common.RemoteDisconnectMessage // Register requests from the sessions register chan HubRegistration // Subscribe requests to streams subscribe chan HubSubscription // Control channel to shutdown hub shutdown chan struct{} // Synchronization group to wait for gracefully disconnect of all sessions done sync.WaitGroup // Log context log *log.Entry } // NewHub builds new hub instance func NewHub() *Hub { return &Hub{ broadcast: make(chan *common.StreamMessage, 256), disconnect: make(chan *common.RemoteDisconnectMessage, 128), register: make(chan HubRegistration, 2048), subscribe: make(chan HubSubscription, 128), sessions: make(map[string]*Session), identifiers: make(map[string]map[string]bool), streams: make(map[string]map[string]map[string]bool), sessionsStreams: make(map[string]map[string][]string), shutdown: make(chan struct{}), log: log.WithFields(log.Fields{"context": "hub"}), } } // Run makes hub active func (h *Hub) Run() { h.done.Add(1) for { select { case r := <-h.register: if r.event == "add" { h.addSession(r.session) } else { h.removeSession(r.session) } case subinfo := <-h.subscribe: if subinfo.event == "add" { h.subscribeSession(subinfo.session, subinfo.stream, subinfo.identifier) } else if subinfo.event == "removeAll" { h.unsubscribeSessionFromChannel(subinfo.session, subinfo.identifier) } else { h.unsubscribeSession(subinfo.session, subinfo.stream, subinfo.identifier) } case message := <-h.broadcast: h.broadcastToStream(message.Stream, message.Data) case command := <-h.disconnect: h.disconnectSessions(command.Identifier, command.Reconnect) case <-h.shutdown: h.done.Done() return } } } // AddSession enqueues sessions registration func (h *Hub) AddSession(s *Session) { h.register <- HubRegistration{event: "add", session: s} } // RemoveSession enqueues session un-registration func (h *Hub) RemoveSession(s *Session) { h.register <- HubRegistration{event: "remove", session: s} } // AddSubscription enqueues adding a subscription for session-identifier pair to the hub func (h *Hub) AddSubscription(sid string, identifier string, stream string) { h.subscribe <- HubSubscription{event: "add", session: sid, identifier: identifier, stream: stream} } // RemoveSubscription enqueues removing a subscription for session-identifier pair from the hub func (h *Hub) RemoveSubscription(sid string, identifier string, stream string) { h.subscribe <- HubSubscription{event: "remove", session: sid, identifier: identifier, stream: stream} } // RemoveAllSubscriptions enqueues removing all subscription for session-identifier pair from 
the hub func (h *Hub) RemoveAllSubscriptions(sid string, identifier string) { h.subscribe <- HubSubscription{event: "removeAll", session: sid, identifier: identifier} } // Broadcast enqueues data broadcasting to a stream func (h *Hub) Broadcast(stream string, data string) { h.broadcast <- &common.StreamMessage{Stream: stream, Data: data} } // BroadcastMessage enqueues broadcasting a pre-built StreamMessage func (h *Hub) BroadcastMessage(msg *common.StreamMessage) { h.broadcast <- msg } // RemoteDisconnect enqueues remote disconnect command func (h *Hub) RemoteDisconnect(msg *common.RemoteDisconnectMessage) { h.disconnect <- msg } // Shutdown sends shutdown command to hub func (h *Hub) Shutdown() { h.shutdown <- struct{}{} // Wait for stop listening channels h.done.Wait() } // Size returns a number of active sessions func (h *Hub) Size() int { return len(h.sessions) } // UniqSize returns a number of uniq identifiers func (h *Hub) UniqSize() int { return len(h.identifiers) } // StreamsSize returns a number of uniq streams func (h *Hub) StreamsSize() int { return len(h.streams) } func (h *Hub) addSession(session *Session) { h.sessions[session.UID] = session if _, ok := h.identifiers[session.Identifiers]; !ok { h.identifiers[session.Identifiers] = make(map[string]bool) } h.identifiers[session.Identifiers][session.UID] = true h.log.WithField("sid", session.UID).Debugf( "Registered with identifiers: %s", session.Identifiers, ) } func (h *Hub) removeSession(session *Session) { if _, ok := h.sessions[session.UID]; !ok { h.log.WithField("sid", session.UID).Warn("Session hasn't been registered") return } h.unsubscribeSessionFromAllChannels(session.UID) delete(h.sessions, session.UID) delete(h.identifiers[session.Identifiers], session.UID) if len(h.identifiers[session.Identifiers]) == 0 { delete(h.identifiers, session.Identifiers) } h.log.WithField("sid", session.UID).Debug("Unregistered") } func (h *Hub) unsubscribeSessionFromAllChannels(sid string) { for channel := range h.sessionsStreams[sid] { h.unsubscribeSessionFromChannel(sid, channel) } delete(h.sessionsStreams, sid) } func (h *Hub) unsubscribeSessionFromChannel(sid string, identifier string) { if _, ok := h.sessionsStreams[sid]; !ok { return } for _, stream := range h.sessionsStreams[sid][identifier] { delete(h.streams[stream][sid], identifier) if len(h.streams[stream][sid]) == 0 { delete(h.streams[stream], sid) } if len(h.streams[stream]) == 0 { delete(h.streams, stream) } } h.log.WithFields(log.Fields{ "sid": sid, "channel": identifier, }).Debug("Unsubscribed") } func (h *Hub) subscribeSession(sid string, stream string, identifier string) { if _, ok := h.streams[stream]; !ok { h.streams[stream] = make(map[string]map[string]bool) } if _, ok := h.streams[stream][sid]; !ok { h.streams[stream][sid] = make(map[string]bool) } h.streams[stream][sid][identifier] = true if _, ok := h.sessionsStreams[sid]; !ok { h.sessionsStreams[sid] = make(map[string][]string) } h.sessionsStreams[sid][identifier] = append( h.sessionsStreams[sid][identifier], stream, ) h.log.WithFields(log.Fields{ "sid": sid, "channel": identifier, "stream": stream, }).Debug("Subscribed") } func (h *Hub) unsubscribeSession(sid string, stream string, identifier string) { if _, ok := h.streams[stream]; !ok { return } if _, ok := h.streams[stream][sid]; !ok { return } if _, ok := h.streams[stream][sid][identifier]; !ok { return } delete(h.streams[stream][sid], identifier) h.log.WithFields(log.Fields{ "sid": sid, "channel": identifier, "stream": stream, }).Debug("Unsubscribed") } func 
(h *Hub) broadcastToStream(stream string, data string) { ctx := h.log.WithField("stream", stream) ctx.Debugf("Broadcast message: %s", data) if _, ok := h.streams[stream]; !ok { ctx.Debug("No sessions") return } buf := make(map[string][]byte) var bdata []byte for sid, ids := range h.streams[stream] { session, ok := h.sessions[sid] if !ok { continue } for id := range ids { if msg, ok := buf[id]; ok { bdata = msg } else { bdata = buildMessage(data, id) buf[id] = bdata } session.Send(bdata) } } } func (h *Hub) disconnectSessions(identifier string, reconnect bool) { ids, ok := h.identifiers[identifier] if !ok { h.log.Debugf("Can not disconnect sessions: unknown identifier %s", identifier) return } disconnectMessage := newDisconnectMessage(remoteDisconnectReason, reconnect) for id := range ids { if ses, ok := h.sessions[id]; ok { ses.Send(disconnectMessage) ses.Disconnect("Closed remotely", CloseNormalClosure) } } } func
(data string, identifier string) []byte {
	var msg interface{}

	// We ignore JSON deserialization failures and consider the message to be a string
	json.Unmarshal([]byte(data), &msg) // nolint:errcheck

	return (&common.Reply{Identifier: identifier, Message: msg}).ToJSON()
}
buildMessage
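
A detail worth noting in `broadcastToStream` above: the `buf` map means the payload is serialized at most once per identifier, however many sessions subscribe through that identifier. The memoization pattern by itself, as a Rust sketch in which `build_message` stands in for the real serializer:

use std::collections::HashMap;

fn build_message(data: &str, identifier: &str) -> Vec<u8> {
    format!("{{\"identifier\":{identifier:?},\"message\":{data}}}").into_bytes()
}

fn broadcast(data: &str, subscriptions: &[(&str, &str)]) {
    // identifier -> serialized frame, built lazily and then reused
    let mut buf: HashMap<&str, Vec<u8>> = HashMap::new();
    for &(session, identifier) in subscriptions {
        let frame = buf
            .entry(identifier)
            .or_insert_with(|| build_message(data, identifier));
        println!("send {} bytes to session {}", frame.len(), session);
    }
}

fn main() {
    // Two sessions share one identifier: the frame is built only once.
    broadcast("{\"x\":1}", &[("s1", "chat_1"), ("s2", "chat_1")]);
}
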
main.rs
struct Solution;

impl Solution {
    pub fn search(nums: Vec<i32>, target: i32) -> i32 {
        if nums.is_empty() {
            return -1;
        }
        let mut left = 0;
        let mut right = nums.len() - 1;
        while left <= right {
            let mid = left + (right - left) / 2;
            if nums[mid] == target {
                return mid as i32;
            } else if nums[left] <= nums[mid] {
                // Left half [left, mid] is sorted: search it only when the
                // target lies inside its value range.
                if target >= nums[left] && target < nums[mid] {
                    right = mid - 1;
                } else
            } else {
                // Right half [mid, right] is sorted: mirror of the branch above.
                if target > nums[mid] && target <= nums[right] {
                    left = mid + 1;
                } else {
                    right = mid - 1;
                }
            }
        }
        return -1;
    }
}

fn main() {
    let nums = vec![4, 5, 6, 7, 0, 1, 2];
    let target = 1;
    println!("{}", Solution::search(nums, target));
}
{ left = mid + 1; }
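
The invariant the loop above relies on: whichever side of `mid` holds the rotation point, the other side is fully sorted, so a single range check decides which half to discard. A quick cross-check against a linear scan, assuming the `Solution` above is in scope:

#[cfg(test)]
mod tests {
    use super::Solution;

    fn linear(nums: &[i32], target: i32) -> i32 {
        nums.iter().position(|&x| x == target).map_or(-1, |i| i as i32)
    }

    #[test]
    fn agrees_with_linear_scan() {
        for (nums, target) in vec![
            (vec![4, 5, 6, 7, 0, 1, 2], 0),
            (vec![4, 5, 6, 7, 0, 1, 2], 3),
            (vec![1], 1),
            (vec![], 5),
        ] {
            assert_eq!(Solution::search(nums.clone(), target), linear(&nums, target));
        }
    }
}
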
in_mem_accounts_index.rs
use crate::accounts_index::{ AccountMapEntry, AccountMapEntryInner, AccountMapEntryMeta, IndexValue, PreAllocatedAccountMapEntry, RefCount, SlotList, SlotSlice, }; use crate::bucket_map_holder::{Age, BucketMapHolder}; use crate::bucket_map_holder_stats::BucketMapHolderStats; use rand::thread_rng; use rand::Rng; use solana_bucket_map::bucket_api::BucketApi; use solana_measure::measure::Measure; use solana_sdk::{clock::Slot, pubkey::Pubkey}; use std::collections::{hash_map::Entry, HashMap}; use std::ops::{Bound, RangeBounds, RangeInclusive}; use std::sync::atomic::{AtomicBool, AtomicU64, AtomicU8, Ordering}; use std::sync::{Arc, RwLock, RwLockWriteGuard}; use std::fmt::Debug; type K = Pubkey; type CacheRangesHeld = RwLock<Vec<Option<RangeInclusive<Pubkey>>>>; pub type SlotT<T> = (Slot, T); #[allow(dead_code)] // temporary during staging // one instance of this represents one bin of the accounts index. pub struct InMemAccountsIndex<T: IndexValue> { last_age_flushed: AtomicU8, // backing store map_internal: RwLock<HashMap<Pubkey, AccountMapEntry<T>>>, storage: Arc<BucketMapHolder<T>>, bin: usize, bucket: Option<Arc<BucketApi<SlotT<T>>>>, // pubkey ranges that this bin must hold in the cache while the range is present in this vec pub(crate) cache_ranges_held: CacheRangesHeld, // true while ranges are being manipulated. Used to keep an async flush from removing things while a range is being held. stop_flush: AtomicU64, // set to true when any entry in this bin is marked dirty bin_dirty: AtomicBool, // set to true while this bin is being actively flushed flushing_active: AtomicBool, } impl<T: IndexValue> Debug for InMemAccountsIndex<T> { fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { Ok(()) } } pub enum InsertNewEntryResults { DidNotExist, ExistedNewEntryZeroLamports, ExistedNewEntryNonZeroLamports, } #[allow(dead_code)] // temporary during staging impl<T: IndexValue> InMemAccountsIndex<T> { pub fn new(storage: &Arc<BucketMapHolder<T>>, bin: usize) -> Self { Self { map_internal: RwLock::default(), storage: Arc::clone(storage), bin, bucket: storage .disk .as_ref() .map(|disk| disk.get_bucket_from_index(bin)) .map(Arc::clone), cache_ranges_held: CacheRangesHeld::default(), stop_flush: AtomicU64::default(), bin_dirty: AtomicBool::default(), flushing_active: AtomicBool::default(), // initialize this to max, to make it clear we have not flushed at age 0, the starting age last_age_flushed: AtomicU8::new(Age::MAX), } } /// true if this bucket needs to call flush for the current age /// we need to scan each bucket once per value of age fn get_should_age(&self, age: Age) -> bool { let last_age_flushed = self.last_age_flushed(); last_age_flushed != age } /// called after flush scans this bucket at the current age fn set_has_aged(&self, age: Age) { self.last_age_flushed.store(age, Ordering::Relaxed); self.storage.bucket_flushed_at_current_age(); } fn last_age_flushed(&self) -> Age { self.last_age_flushed.load(Ordering::Relaxed) } fn map(&self) -> &RwLock<HashMap<Pubkey, AccountMapEntry<T>>> { &self.map_internal } pub fn items<R>(&self, range: &Option<&R>) -> Vec<(K, AccountMapEntry<T>)> where R: RangeBounds<Pubkey> + std::fmt::Debug, { self.start_stop_flush(true); self.put_range_in_cache(range); // check range here to see if our items are already held in the cache Self::update_stat(&self.stats().items, 1); let map = self.map().read().unwrap(); let mut result = Vec::with_capacity(map.len()); map.iter().for_each(|(k, v)| { if range.map(|range| range.contains(k)).unwrap_or(true) { 
result.push((*k, Arc::clone(v))); } }); self.start_stop_flush(false); result } // only called in debug code paths pub fn keys(&self) -> Vec<Pubkey> { Self::update_stat(&self.stats().keys, 1); // easiest implementation is to load evrything from disk into cache and return the keys self.start_stop_flush(true); self.put_range_in_cache(&None::<&RangeInclusive<Pubkey>>); let keys = self.map().read().unwrap().keys().cloned().collect(); self.start_stop_flush(false); keys } fn load_from_disk(&self, pubkey: &Pubkey) -> Option<(SlotList<T>, RefCount)> { self.bucket.as_ref().and_then(|disk| { let m = Measure::start("load_disk_found_count"); let entry_disk = disk.read_value(pubkey); match &entry_disk { Some(_) => { Self::update_time_stat(&self.stats().load_disk_found_us, m); Self::update_stat(&self.stats().load_disk_found_count, 1); } None => { Self::update_time_stat(&self.stats().load_disk_missing_us, m); Self::update_stat(&self.stats().load_disk_missing_count, 1); } } entry_disk }) } fn load_account_entry_from_disk(&self, pubkey: &Pubkey) -> Option<AccountMapEntry<T>> { let entry_disk = self.load_from_disk(pubkey)?; // returns None if not on disk
Some(self.disk_to_cache_entry(entry_disk.0, entry_disk.1)) } /// lookup 'pubkey' by only looking in memory. Does not look on disk. /// callback is called whether pubkey is found or not fn get_only_in_mem<RT>( &self, pubkey: &K, callback: impl for<'a> FnOnce(Option<&'a Arc<AccountMapEntryInner<T>>>) -> RT, ) -> RT { let m = Measure::start("get"); let map = self.map().read().unwrap(); let result = map.get(pubkey); let stats = self.stats(); let (count, time) = if result.is_some() { (&stats.gets_from_mem, &stats.get_mem_us) } else { (&stats.gets_missing, &stats.get_missing_us) }; Self::update_time_stat(time, m); Self::update_stat(count, 1); callback(if let Some(entry) = result { entry.set_age(self.storage.future_age_to_flush()); Some(entry) } else { drop(map); None }) } /// lookup 'pubkey' in index (in mem or on disk) pub fn get(&self, pubkey: &K) -> Option<AccountMapEntry<T>> { self.get_internal(pubkey, |entry| (true, entry.map(Arc::clone))) } /// lookup 'pubkey' in index (in_mem or disk). /// call 'callback' whether found or not pub(crate) fn get_internal<RT>( &self, pubkey: &K, // return true if item should be added to in_mem cache callback: impl for<'a> FnOnce(Option<&Arc<AccountMapEntryInner<T>>>) -> (bool, RT), ) -> RT { self.get_only_in_mem(pubkey, |entry| { if let Some(entry) = entry { entry.set_age(self.storage.future_age_to_flush()); callback(Some(entry)).1 } else { // not in cache, look on disk let stats = &self.stats(); let disk_entry = self.load_account_entry_from_disk(pubkey); if disk_entry.is_none() { return callback(None).1; } let disk_entry = disk_entry.unwrap(); let mut map = self.map().write().unwrap(); let entry = map.entry(*pubkey); match entry { Entry::Occupied(occupied) => callback(Some(occupied.get())).1, Entry::Vacant(vacant) => { let (add_to_cache, rt) = callback(Some(&disk_entry)); if add_to_cache { stats.insert_or_delete_mem(true, self.bin); vacant.insert(disk_entry); } rt } } } }) } fn remove_if_slot_list_empty_value(&self, slot_list: SlotSlice<T>) -> bool { if slot_list.is_empty() { self.stats().insert_or_delete(false, self.bin); true } else { false } } fn delete_disk_key(&self, pubkey: &Pubkey) { if let Some(disk) = self.bucket.as_ref() { disk.delete_key(pubkey) } } fn remove_if_slot_list_empty_entry(&self, entry: Entry<K, AccountMapEntry<T>>) -> bool { match entry { Entry::Occupied(occupied) => { let result = self.remove_if_slot_list_empty_value(&occupied.get().slot_list.read().unwrap()); if result { // note there is a potential race here that has existed. // if someone else holds the arc, // then they think the item is still in the index and can make modifications. // We have to have a write lock to the map here, which means nobody else can get // the arc, but someone may already have retreived a clone of it. // account index in_mem flushing is one such possibility self.delete_disk_key(occupied.key()); self.stats().insert_or_delete_mem(false, self.bin); occupied.remove(); } result } Entry::Vacant(vacant) => { // not in cache, look on disk let entry_disk = self.load_from_disk(vacant.key()); match entry_disk { Some(entry_disk) => { // on disk if self.remove_if_slot_list_empty_value(&entry_disk.0) { // not in cache, but on disk, so just delete from disk self.delete_disk_key(vacant.key()); true } else { // could insert into cache here, but not required for correctness and value is unclear false } } None => false, // not in cache or on disk } } } } // If the slot list for pubkey exists in the index and is empty, remove the index entry for pubkey and return true. 
// Return false otherwise. pub fn remove_if_slot_list_empty(&self, pubkey: Pubkey) -> bool { let mut m = Measure::start("entry"); let mut map = self.map().write().unwrap(); let entry = map.entry(pubkey); m.stop(); let found = matches!(entry, Entry::Occupied(_)); let result = self.remove_if_slot_list_empty_entry(entry); drop(map); self.update_entry_stats(m, found); result } pub fn slot_list_mut<RT>( &self, pubkey: &Pubkey, user: impl for<'a> FnOnce(&mut RwLockWriteGuard<'a, SlotList<T>>) -> RT, ) -> Option<RT> { self.get_internal(pubkey, |entry| { ( true, entry.map(|entry| { let result = user(&mut entry.slot_list.write().unwrap()); entry.set_dirty(true); result }), ) }) } pub fn unref(&self, pubkey: &Pubkey) { self.get_internal(pubkey, |entry| { if let Some(entry) = entry { entry.add_un_ref(false) } (true, ()) }) } pub fn upsert( &self, pubkey: &Pubkey, new_value: PreAllocatedAccountMapEntry<T>, reclaims: &mut SlotList<T>, previous_slot_entry_was_cached: bool, ) { // try to get it just from memory first using only a read lock self.get_only_in_mem(pubkey, |entry| { if let Some(entry) = entry { Self::lock_and_update_slot_list( entry, new_value.into(), reclaims, previous_slot_entry_was_cached, ); Self::update_stat(&self.stats().updates_in_mem, 1); } else { let mut m = Measure::start("entry"); let mut map = self.map().write().unwrap(); let entry = map.entry(*pubkey); m.stop(); let found = matches!(entry, Entry::Occupied(_)); match entry { Entry::Occupied(mut occupied) => { let current = occupied.get_mut(); Self::lock_and_update_slot_list( current, new_value.into(), reclaims, previous_slot_entry_was_cached, ); current.set_age(self.storage.future_age_to_flush()); Self::update_stat(&self.stats().updates_in_mem, 1); } Entry::Vacant(vacant) => { // not in cache, look on disk let disk_entry = self.load_account_entry_from_disk(vacant.key()); let new_value = if let Some(disk_entry) = disk_entry { // on disk, so merge new_value with what was on disk Self::lock_and_update_slot_list( &disk_entry, new_value.into(), reclaims, previous_slot_entry_was_cached, ); disk_entry } else { // not on disk, so insert new thing self.stats().insert_or_delete(true, self.bin); new_value.into_account_map_entry(&self.storage) }; assert!(new_value.dirty()); vacant.insert(new_value); self.stats().insert_or_delete_mem(true, self.bin); } } drop(map); self.update_entry_stats(m, found); }; }) } fn update_entry_stats(&self, stopped_measure: Measure, found: bool) { let stats = &self.stats(); let (count, time) = if found { (&stats.entries_from_mem, &stats.entry_mem_us) } else { (&stats.entries_missing, &stats.entry_missing_us) }; Self::update_stat(time, stopped_measure.as_us()); Self::update_stat(count, 1); } // Try to update an item in the slot list the given `slot` If an item for the slot // already exists in the list, remove the older item, add it to `reclaims`, and insert // the new item. 
pub fn lock_and_update_slot_list( current: &AccountMapEntryInner<T>, new_value: (Slot, T), reclaims: &mut SlotList<T>, previous_slot_entry_was_cached: bool, ) { let mut slot_list = current.slot_list.write().unwrap(); let (slot, new_entry) = new_value; let addref = Self::update_slot_list( &mut slot_list, slot, new_entry, reclaims, previous_slot_entry_was_cached, ); if addref { current.add_un_ref(true); } current.set_dirty(true); } // modifies slot_list // returns true if caller should addref fn update_slot_list( list: &mut SlotList<T>, slot: Slot, account_info: T, reclaims: &mut SlotList<T>, previous_slot_entry_was_cached: bool, ) -> bool { let mut addref = !account_info.is_cached(); // find other dirty entries from the same slot for list_index in 0..list.len() { let (s, previous_update_value) = &list[list_index]; if *s == slot { let previous_was_cached = previous_update_value.is_cached(); addref = addref && previous_was_cached; let mut new_item = (slot, account_info); std::mem::swap(&mut new_item, &mut list[list_index]); if previous_slot_entry_was_cached { assert!(previous_was_cached); } else { reclaims.push(new_item); } list[(list_index + 1)..] .iter() .for_each(|item| assert!(item.0 != slot)); return addref; } } // if we make it here, we did not find the slot in the list list.push((slot, account_info)); addref } // convert from raw data on disk to AccountMapEntry, set to age in future fn disk_to_cache_entry( &self, slot_list: SlotList<T>, ref_count: RefCount, ) -> AccountMapEntry<T> { Arc::new(AccountMapEntryInner::new( slot_list, ref_count, AccountMapEntryMeta::new_dirty(&self.storage), )) } pub fn len(&self) -> usize { self.map().read().unwrap().len() } pub fn is_empty(&self) -> bool { self.len() == 0 } fn insert_returner( existing: &AccountMapEntry<T>, pubkey: &Pubkey, new_entry: PreAllocatedAccountMapEntry<T>, ) -> (AccountMapEntry<T>, T, Pubkey) { let (_slot, info): (Slot, T) = new_entry.into(); ( Arc::clone(existing), // extract the new account_info from the unused 'new_entry' info, *pubkey, ) } pub fn insert_new_entry_if_missing_with_lock( &self, pubkey: Pubkey, new_entry: PreAllocatedAccountMapEntry<T>, ) -> InsertNewEntryResults { let mut m = Measure::start("entry"); let mut map = self.map().write().unwrap(); let entry = map.entry(pubkey); m.stop(); let mut new_entry_zero_lamports = false; let (found_in_mem, already_existed) = match entry { Entry::Occupied(occupied) => { // in cache, so merge into cache let (slot, account_info) = new_entry.into(); new_entry_zero_lamports = account_info.is_zero_lamport(); InMemAccountsIndex::lock_and_update_slot_list( occupied.get(), (slot, account_info), &mut Vec::default(), false, ); ( true, /* found in mem */ true, /* already existed */ ) } Entry::Vacant(vacant) => { // not in cache, look on disk let mut existed = false; if let Some(disk) = self.bucket.as_ref() { let (slot, account_info) = new_entry.into(); new_entry_zero_lamports = account_info.is_zero_lamport(); disk.update(vacant.key(), |current| { if let Some((slot_list, mut ref_count)) = current { // on disk, so merge and update disk let mut slot_list = slot_list.to_vec(); let addref = Self::update_slot_list( &mut slot_list, slot, account_info, &mut Vec::default(), false, ); if addref { ref_count += 1 }; existed = true; Some((slot_list, ref_count)) } else { // doesn't exist on disk yet, so insert it let ref_count = if account_info.is_cached() { 0 } else { 1 }; Some((vec![(slot, account_info)], ref_count)) } }); } else { // not using disk, so insert into mem 
self.stats().insert_or_delete_mem(true, self.bin); let new_entry: AccountMapEntry<T> = new_entry.into_account_map_entry(&self.storage); assert!(new_entry.dirty()); vacant.insert(new_entry); } (false, existed) } }; drop(map); self.update_entry_stats(m, found_in_mem); let stats = self.stats(); if !already_existed { stats.insert_or_delete(true, self.bin); } else { Self::update_stat(&stats.updates_in_mem, 1); } if !already_existed { InsertNewEntryResults::DidNotExist } else if new_entry_zero_lamports { InsertNewEntryResults::ExistedNewEntryZeroLamports } else { InsertNewEntryResults::ExistedNewEntryNonZeroLamports } } pub fn just_set_hold_range_in_memory<R>(&self, range: &R, start_holding: bool) where R: RangeBounds<Pubkey>, { let start = match range.start_bound() { Bound::Included(bound) | Bound::Excluded(bound) => *bound, Bound::Unbounded => Pubkey::new(&[0; 32]), }; let end = match range.end_bound() { Bound::Included(bound) | Bound::Excluded(bound) => *bound, Bound::Unbounded => Pubkey::new(&[0xff; 32]), }; // this becomes inclusive - that is ok - we are just roughly holding a range of items. // inclusive is bigger than exclusive so we may hold 1 extra item worst case let inclusive_range = Some(start..=end); let mut ranges = self.cache_ranges_held.write().unwrap(); if start_holding { ranges.push(inclusive_range); } else { // find the matching range and delete it since we don't want to hold it anymore let none = inclusive_range.is_none(); for (i, r) in ranges.iter().enumerate() { if r.is_none() != none { continue; } if !none { // neither are none, so check values if let (Bound::Included(start_found), Bound::Included(end_found)) = r .as_ref() .map(|r| (r.start_bound(), r.end_bound())) .unwrap() { if start_found != &start || end_found != &end { continue; } } } // found a match. There may be dups, that's ok, we expect another call to remove the dup. ranges.remove(i); break; } } } fn start_stop_flush(&self, stop: bool) { if stop { self.stop_flush.fetch_add(1, Ordering::Release); } else if 1 == self.stop_flush.fetch_sub(1, Ordering::Release) { // stop_flush went to 0, so this bucket could now be ready to be aged self.storage.wait_dirty_or_aged.notify_one(); } } pub fn hold_range_in_memory<R>(&self, range: &R, start_holding: bool) where R: RangeBounds<Pubkey> + Debug, { self.start_stop_flush(true); if start_holding { // put everything in the cache and it will be held there self.put_range_in_cache(&Some(range)); } // do this AFTER items have been put in cache - that way anyone who finds this range can know that the items are already in the cache self.just_set_hold_range_in_memory(range, start_holding); self.start_stop_flush(false); } fn put_range_in_cache<R>(&self, range: &Option<&R>) where R: RangeBounds<Pubkey>, { assert!(self.get_stop_flush()); // caller should be controlling the lifetime of how long this needs to be present let m = Measure::start("range"); // load from disk if let Some(disk) = self.bucket.as_ref() { let items = disk.items_in_range(range); let mut map = self.map().write().unwrap(); let future_age = self.storage.future_age_to_flush(); for item in items { let entry = map.entry(item.pubkey); match entry { Entry::Occupied(occupied) => { // item already in cache, bump age to future. This helps the current age flush to succeed. 
occupied.get().set_age(future_age); } Entry::Vacant(vacant) => { vacant.insert(self.disk_to_cache_entry(item.slot_list, item.ref_count)); self.stats().insert_or_delete_mem(true, self.bin); } } } } Self::update_time_stat(&self.stats().get_range_us, m); } fn get_stop_flush(&self) -> bool { self.stop_flush.load(Ordering::Relaxed) > 0 } pub(crate) fn flush(&self) { let flushing = self.flushing_active.swap(true, Ordering::Acquire); if flushing { // already flushing in another thread return; } self.flush_internal(); self.flushing_active.store(false, Ordering::Release); } pub fn set_bin_dirty(&self) { self.bin_dirty.store(true, Ordering::Release); // 1 bin dirty, so only need 1 thread to wake up if many could be waiting self.storage.wait_dirty_or_aged.notify_one(); } fn random_chance_of_eviction() -> bool { // random eviction const N: usize = 1000; // 1/N chance of eviction thread_rng().gen_range(0, N) == 0 } /// return true if 'entry' should be removed from the in-mem index fn should_remove_from_mem( &self, current_age: Age, entry: &AccountMapEntry<T>, startup: bool, update_stats: bool, ) -> bool { // this could be tunable dynamically based on memory pressure // we could look at more ages or we could throw out more items we are choosing to keep in the cache if startup || (current_age == entry.age()) { // only read the slot list if we are planning to throw the item out let slot_list = entry.slot_list.read().unwrap(); if slot_list.len() != 1 { if update_stats { Self::update_stat(&self.stats().held_in_mem_slot_list_len, 1); } false // keep 0 and > 1 slot lists in mem. They will be cleaned or shrunk soon. } else { // keep items with slot lists that contained cached items let remove = !slot_list.iter().any(|(_, info)| info.is_cached()); if !remove && update_stats { Self::update_stat(&self.stats().held_in_mem_slot_list_cached, 1); } remove } } else { false } } fn flush_internal(&self) { let was_dirty = self.bin_dirty.swap(false, Ordering::Acquire); let current_age = self.storage.current_age(); let mut iterate_for_age = self.get_should_age(current_age); let startup = self.storage.get_startup(); if !was_dirty && !iterate_for_age && !startup { // wasn't dirty and no need to age, so no need to flush this bucket // but, at startup we want to remove from buckets as fast as possible if any items exist return; } // may have to loop if disk has to grow and we have to restart loop { let mut removes; let mut removes_random = Vec::default(); let disk = self.bucket.as_ref().unwrap(); let mut flush_entries_updated_on_disk = 0; let mut disk_resize = Ok(()); // scan and update loop // holds read lock { let map = self.map().read().unwrap(); removes = Vec::with_capacity(map.len()); let m = Measure::start("flush_scan_and_update"); // we don't care about lock time in this metric - bg threads can wait for (k, v) in map.iter() { if self.should_remove_from_mem(current_age, v, startup, true) { removes.push(*k); } else if Self::random_chance_of_eviction() { removes_random.push(*k); } else { // not planning to remove this item from memory now, so don't write it to disk yet continue; } // if we are removing it, then we need to update disk if we're dirty if v.clear_dirty() { // step 1: clear the dirty flag // step 2: perform the update on disk based on the fields in the entry // If a parallel operation dirties the item again - even while this flush is occurring, // the last thing the writer will do, after updating contents, is set_dirty(true) // That prevents dropping an item from cache before disk is updated to latest in mem. 
// happens inside of lock on in-mem cache. This is because of deleting items // it is possible that the item in the cache is marked as dirty while these updates are happening. That is ok. disk_resize = disk.try_write(k, (&v.slot_list.read().unwrap(), v.ref_count())); if disk_resize.is_ok() { flush_entries_updated_on_disk += 1; } else { // disk needs to resize, so mark all unprocessed items as dirty again so we pick them up after the resize v.set_dirty(true); break; } } } Self::update_time_stat(&self.stats().flush_scan_update_us, m); } Self::update_stat( &self.stats().flush_entries_updated_on_disk, flush_entries_updated_on_disk, ); let m = Measure::start("flush_remove_or_grow"); match disk_resize { Ok(_) => { if !self.flush_remove_from_cache(removes, current_age, startup, false) || !self.flush_remove_from_cache(removes_random, current_age, startup, true) { iterate_for_age = false; // did not make it all the way through this bucket, so didn't handle age completely } Self::update_time_stat(&self.stats().flush_remove_us, m); if iterate_for_age { // completed iteration of the buckets at the current age assert_eq!(current_age, self.storage.current_age()); self.set_has_aged(current_age); } return; } Err(err) => { // grow the bucket, outside of all in-mem locks. // then, loop to try again disk.grow(err); Self::update_time_stat(&self.stats().flush_grow_us, m); } } } } // remove keys in 'removes' from in-mem cache due to age // return true if the removal was completed fn flush_remove_from_cache( &self, removes: Vec<Pubkey>, current_age: Age, startup: bool, randomly_evicted: bool, ) -> bool { let mut completed_scan = true; if removes.is_empty() { return completed_scan; // completed, don't need to get lock or do other work } let ranges = self.cache_ranges_held.read().unwrap().clone(); if ranges.iter().any(|range| range.is_none()) { return false; // range said to hold 'all', so not completed } let mut removed = 0; // consider chunking these so we don't hold the write lock too long let mut map = self.map().write().unwrap(); for k in removes { if let Entry::Occupied(occupied) = map.entry(k) { let v = occupied.get(); if Arc::strong_count(v) > 1 { // someone is holding the value arc's ref count and could modify it, so do not remove this from in-mem cache completed_scan = false; continue; } if v.dirty() || (!randomly_evicted && !self.should_remove_from_mem(current_age, v, startup, false)) { // marked dirty or bumped in age after we looked above // these will be handled in later passes // but, at startup, everything is ready to age out if it isn't dirty continue; } if ranges.iter().any(|range| { range .as_ref() .map(|range| range.contains(&k)) .unwrap_or(true) // None means 'full range', so true }) { // this item is held in mem by range, so don't remove completed_scan = false; continue; } if self.get_stop_flush() { return false; // did NOT complete, told to stop } // all conditions for removing succeeded, so really remove item from in-mem cache removed += 1; occupied.remove(); } } self.stats() .insert_or_delete_mem_count(false, self.bin, removed); Self::update_stat(&self.stats().flush_entries_removed_from_mem, removed); completed_scan } fn stats(&self) -> &BucketMapHolderStats { &self.storage.stats } fn update_stat(stat: &AtomicU64, value: u64) { if value != 0 { stat.fetch_add(value, Ordering::Relaxed); } } pub fn update_time_stat(stat: &AtomicU64, mut m: Measure) { m.stop(); let value = m.as_us(); Self::update_stat(stat, value); } } #[cfg(test)] mod tests { use super::*; use 
crate::accounts_index::{AccountsIndexConfig, BINS_FOR_TESTING}; fn new_for_test<T: IndexValue>() -> InMemAccountsIndex<T> { let holder = Arc::new(BucketMapHolder::new( BINS_FOR_TESTING, &Some(AccountsIndexConfig::default()), 1, )); let bin = 0; InMemAccountsIndex::new(&holder, bin) } #[test] fn test_should_remove_from_mem() { solana_logger::setup(); let bucket = new_for_test::<u64>(); let mut startup = false; let mut current_age = 0; let ref_count = 0; let one_element_slot_list = vec![(0, 0)]; let one_element_slot_list_entry = Arc::new(AccountMapEntryInner::new( one_element_slot_list, ref_count, AccountMapEntryMeta::default(), )); // empty slot list assert!(!bucket.should_remove_from_mem( current_age, &Arc::new(AccountMapEntryInner::new( vec![], ref_count, AccountMapEntryMeta::default() )), startup, false, )); // 1 element slot list assert!(bucket.should_remove_from_mem( current_age, &one_element_slot_list_entry, startup, false, )); // 2 element slot list assert!(!bucket.should_remove_from_mem( current_age, &Arc::new(AccountMapEntryInner::new( vec![(0, 0), (1, 1)], ref_count, AccountMapEntryMeta::default() )), startup, false, )); { let bucket = new_for_test::<f64>(); // 1 element slot list with a CACHED item - f64 acts like cached assert!(!bucket.should_remove_from_mem( current_age, &Arc::new(AccountMapEntryInner::new( vec![(0, 0.0)], ref_count, AccountMapEntryMeta::default() )), startup, false, )); } // 1 element slot list, age is now assert!(bucket.should_remove_from_mem( current_age, &one_element_slot_list_entry, startup, false, )); // 1 element slot list, but not current age current_age = 1; assert!(!bucket.should_remove_from_mem( current_age, &one_element_slot_list_entry, startup, false, )); // 1 element slot list, but at startup and age not current startup = true; assert!(bucket.should_remove_from_mem( current_age, &one_element_slot_list_entry, startup, false, )); } #[test] fn test_hold_range_in_memory() { let bucket = new_for_test::<u64>(); // 0x81 is just some other range let ranges = [ Pubkey::new(&[0; 32])..=Pubkey::new(&[0xff; 32]), Pubkey::new(&[0x81; 32])..=Pubkey::new(&[0xff; 32]), ]; for range in ranges.clone() { assert!(bucket.cache_ranges_held.read().unwrap().is_empty()); bucket.hold_range_in_memory(&range, true); assert_eq!( bucket.cache_ranges_held.read().unwrap().to_vec(), vec![Some(range.clone())] ); bucket.hold_range_in_memory(&range, false); assert!(bucket.cache_ranges_held.read().unwrap().is_empty()); bucket.hold_range_in_memory(&range, true); assert_eq!( bucket.cache_ranges_held.read().unwrap().to_vec(), vec![Some(range.clone())] ); bucket.hold_range_in_memory(&range, true); assert_eq!( bucket.cache_ranges_held.read().unwrap().to_vec(), vec![Some(range.clone()), Some(range.clone())] ); bucket.hold_range_in_memory(&ranges[0], true); assert_eq!( bucket.cache_ranges_held.read().unwrap().to_vec(), vec![ Some(range.clone()), Some(range.clone()), Some(ranges[0].clone()) ] ); bucket.hold_range_in_memory(&range, false); assert_eq!( bucket.cache_ranges_held.read().unwrap().to_vec(), vec![Some(range.clone()), Some(ranges[0].clone())] ); bucket.hold_range_in_memory(&range, false); assert_eq!( bucket.cache_ranges_held.read().unwrap().to_vec(), vec![Some(ranges[0].clone())] ); bucket.hold_range_in_memory(&ranges[0].clone(), false); assert!(bucket.cache_ranges_held.read().unwrap().is_empty()); } } #[test] fn test_age() { solana_logger::setup(); let test = new_for_test::<u64>(); assert!(test.get_should_age(test.storage.current_age())); 
assert_eq!(test.storage.count_ages_flushed(), 0); test.set_has_aged(0); assert!(!test.get_should_age(test.storage.current_age())); assert_eq!(test.storage.count_ages_flushed(), 1); // simulate rest of buckets aging for _ in 1..BINS_FOR_TESTING { assert!(!test.storage.all_buckets_flushed_at_current_age()); test.storage.bucket_flushed_at_current_age(); } assert!(test.storage.all_buckets_flushed_at_current_age()); // advance age test.storage.increment_age(); assert_eq!(test.storage.current_age(), 1); assert!(!test.storage.all_buckets_flushed_at_current_age()); assert!(test.get_should_age(test.storage.current_age())); assert_eq!(test.storage.count_ages_flushed(), 0); } }
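
`random_chance_of_eviction` above gives each cached entry an independent 1-in-1000 chance of being written back and evicted on every flush pass, which spreads eviction work over many cycles instead of bursting it. The probability mechanism in isolation (rand 0.8-style range syntax; the source above uses the older `gen_range(0, N)` form):

use rand::Rng;

// True roughly once per `n` calls: a cheap way for a background
// flusher to evict a small random sample of entries on each pass.
fn one_in(n: u32) -> bool {
    rand::thread_rng().gen_range(0..n) == 0
}

fn main() {
    let trials = 100_000;
    let hits = (0..trials).filter(|_| one_in(1_000)).count();
    // The expected count is ~100 hits for 100_000 trials.
    println!("{hits} evictions in {trials} trials");
}
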
workers.py
import logging

from tornado import web
from tornado import gen

from ..views import BaseHandler
from ..api.workers import ListWorkers

logger = logging.getLogger(__name__)


class WorkerView(BaseHandler):
    @web.authenticated
    @gen.coroutine
    def
(self, name):
        try:
            yield ListWorkers.update_workers(app=self.application, workername=name)
        except Exception as e:
            logger.error(e)

        worker = ListWorkers.worker_cache.get(name)
        if worker is None:
            raise web.HTTPError(404, "Unknown worker '%s'" % name)
        if 'stats' not in worker:
            raise web.HTTPError(
                404, "Unable to get stats for '%s' worker" % name
            )

        self.render("worker.html", worker=dict(worker, name=name))
get
mod.rs
use system::instr::*; use std::collections::{HashMap, HashSet}; mod test; struct AsmError { line_no: usize, line: String, msg: String, char_no: usize, len: usize, } impl AsmError { fn new(line_no: usize, line: String, msg: String, char_no: usize, len: usize) -> AsmError { AsmError { line_no, line, msg, char_no, len} } } #[cfg(test)] pub fn parse_asm_str(asm: &str) -> Result<Vec<Box<dyn Instr>>, String> { let mut warnings: Vec<String> = vec![]; parse_asm(asm, &"<str>".to_string(), &mut warnings) } #[cfg(test)] fn parse_asm_str_with_warnings(asm: &str, warnings: &mut Vec<String>) -> Result<Vec<Box<dyn Instr>>, String> { parse_asm(asm, &"<str>".to_string(), warnings) } pub fn parse_asm(asm: &str, filename: &str, warnings: &mut Vec<String>) -> Result<Vec<Box<dyn Instr>>, String> { let mut instrs: Vec<Box<dyn Instr>> = vec![]; let mut symbols: HashMap<String, u16> = HashMap::new(); let mut addr: u16 = 0x0200; let mut errs: Vec<AsmError> = vec![]; for (line_no, line) in asm.lines().enumerate() { match parse_line(&line, &mut symbols, addr) { Err(err) => errs.push(AsmError::new( line_no, line.to_string(), err.msg, err.pos, err.len)), Ok(mut i) => { addr += 2*(i.len() as u16); instrs.append(&mut i); }, } } // Patch up symbol addresses let mut resolved_syms = HashSet::new(); for ins in &mut instrs { if let Some(sym) = ins.get_symbol() { match symbols.get(&sym) { Some(addr) => { ins.resolve_symbol(*addr); let _ = resolved_syms.insert(sym); } None => { errs.push(AsmError::new( //TODO: line info for these 0, "".to_string(), format!("Could not resolve symbol \"{}\"", sym), 0, 1)); }, } } } // Check for unused labels for (sym, _) in symbols { if !resolved_syms.contains(&sym) { warnings.push(format!("{}: warning: Unused label \"{}\"", filename, sym)); } } if !errs.is_empty() { let mut err_msg = String::from(""); for (i, err) in errs.iter().enumerate() { if i != 0 { err_msg.push('\n'); } err_msg += &format!("{}:{}:{}: error: {}", filename, err.line_no, err.char_no, err.msg); let pointer = String::from(" ").repeat(err.char_no); let len = match err.len { // Mark whole line 0 => err.line.len()-err.char_no, // Only section _ => err.len, }; // -1 because the '^' is there too let extent = String::from("~").repeat(len-1); err_msg += &format!("\n{}\n{}^{}", err.line, pointer, extent); } // In future we might want to keep these seperate return Err(err_msg); } Ok(instrs) } #[derive(PartialEq, Debug)] struct AsmArg { s: String, upper: String, pos: usize, } impl AsmArg { fn new(s: String, pos: usize) -> AsmArg { AsmArg{ upper: s.to_uppercase(), s, pos, } } fn str_cmp(&self, other: &str) -> bool { self.s == other } fn len(&self) -> usize { self.s.len() } } fn split_asm_line(line: &str) -> Vec<AsmArg> { let mut start = 0; let mut part = String::from(""); let mut parts: Vec<AsmArg> = vec![]; let terminators = [' ', '\t', ',']; for (idx, c) in line.chars().enumerate() { let is_terminator = terminators.contains(&c); let is_last = idx == line.len()-1; if !is_terminator { if part.is_empty() { start = idx; } part.push(c); } if (is_terminator || is_last) && !part.is_empty() { parts.push(AsmArg::new(part.to_owned(), start)); part.clear(); } } parts } #[derive(Debug)] struct ErrInfo { msg: String, pos: usize, len: usize, } impl ErrInfo { fn new(msg: String, pos: usize, len: usize) -> ErrInfo { ErrInfo { msg, pos, len } } } fn parse_line(line: &str, symbols: &mut HashMap<String, u16>, current_addr: u16) -> Result<Vec<Box<dyn Instr>>, ErrInfo> { // This function will add new symbols to the map and return an // instruction object 
if one was required. // That object may have an unresolved symbol in it, parse_asm // will take care of that. let mut instrs: Vec<Box<dyn Instr>> = vec![]; let comment_chars = "//"; let mut no_comments_line = line; if let Some(idx) = no_comments_line.find(comment_chars) { no_comments_line = no_comments_line.split_at(idx).0; } let mut args = split_asm_line(no_comments_line); // Lines consisting of only whitespace if args.is_empty() { return Ok(instrs); } let mnemonic = args.remove(0); // Check for labels if args.is_empty() && mnemonic.s.ends_with(':') { // Add a symbol for this address let sym_name = mnemonic.s[..mnemonic.len()-1].to_string(); if symbols.insert(sym_name, current_addr).is_some() { return Err(ErrInfo::new( "Label repeated".to_string(), mnemonic.pos, mnemonic.len())); }; return Ok(instrs); } if mnemonic.upper == "JP" { // JP can have one or two args if args.is_empty() || (args.len() > 2) { return Err(ErrInfo::new( format!("Expected 1 or 2 args for JP instruction, got {}", args.len()), mnemonic.pos, 0)); } } else { match check_num_args(&mnemonic, args.len()) { Ok(_) => {}, Err(e) => return Err(e), } } match get_args_type(&mnemonic) { ArgsType::Custom => { match mnemonic.upper.as_str() { // No arguments "CLS" => instrs.push(Box::new(ClearDisplayInstr::create())), "RET" => instrs.push(Box::new(RetInstr::create())), "BRK" => instrs.push(Box::new(SysInstr::create(0xFFF))), // Single argument ".WORD" => instrs.push(Box::new(WordInstr::create( parse_extended_addr(&args[0]).unwrap()))), "SYS" => { match parse_nnn_or_symbol(&args[0]) { AddressOrSymbol::Symbol(s) => { instrs.push(Box::new(SysInstr::create_with_symbol(s))); } AddressOrSymbol::Address(a) => { instrs.push(Box::new(SysInstr::create(a))); } } } "JP" => { if args.len() == 2 { // Use the parser here to allow different formatting if parse_vx(&args[0]).unwrap() != 0 { return Err(ErrInfo::new( "Jump plus instruction can only use V0!".to_string(), args[0].pos, args[0].len())); } // JP V0, addr so use the 2nd arg match parse_nnn_or_symbol(&args[1]) { AddressOrSymbol::Symbol(s) => { instrs.push(Box::new(JumpPlusVZeroInstr::create_with_symbol(s))); } AddressOrSymbol::Address(a) => { instrs.push(Box::new(JumpPlusVZeroInstr::create(a))); } } } else { //Usual JP addr match parse_nnn_or_symbol(&args[0]) { AddressOrSymbol::Symbol(s) => { instrs.push(Box::new(JumpInstr::create_with_symbol(s))); } AddressOrSymbol::Address(a) => { instrs.push(Box::new(JumpInstr::create(a))); } } } } "CALL" => { match parse_nnn_or_symbol(&args[0]) { AddressOrSymbol::Symbol(s) => { instrs.push(Box::new(CallInstr::create_with_symbol(s))); } AddressOrSymbol::Address(a) => { instrs.push(Box::new(CallInstr::create(a))); } } } // Two arguments "RND" => { match parse_vx(&args[0]) { Err(e) => return Err(e), Ok(v) => match parse_xx(&args[1]) { Err(e) => return Err(e), Ok(b) => instrs.push(Box::new(RandomInstr::create(v, b))), } } } "SE" => { let vx = parse_vx(&args[0]).unwrap(); // Byte or register versions if let Ok(a) = parse_vx(&args[1]) { instrs.push(Box::new(SkipIfRegsEqualInstr::create(vx, a))) } else if let Ok(a) = parse_xx(&args[1]) { instrs.push(Box::new(SkipEqualInstr::create(vx, a))) } else { return Err(ErrInfo::new( "Invalid argument 2 for SE instruction".to_string(), args[1].pos, args[1].len())); } }, "SNE" => { let vx = parse_vx(&args[0]).unwrap(); // Byte or register versions if let Ok(a) = parse_vx(&args[1]) { instrs.push(Box::new(SkipIfRegsNotEqualInstr::create(vx, a))) } else if let Ok(a) = parse_xx(&args[1]) { 
instrs.push(Box::new(SkipNotEqualInstr::create(vx, a))) } else { return Err(ErrInfo::new( "Invalid argument 2 for SNE instruction".to_string(), args[1].pos, args[1].len())); } }
"ADD" => { if let Ok(a) = parse_vx(&args[0]) { // Vx, byte if let Ok(b) = parse_vx(&args[1]) { instrs.push(Box::new(AddRegInstr::create(a, b))); // Vx, Vy } else if let Ok(b) = parse_xx(&args[1]) { instrs.push(Box::new(AddByteInstr::create(a, b))); } else { return Err(ErrInfo::new( "Invalid arguments for ADD instruction".to_string(), args[1].pos, 0)); } // I, Vx } else if args[0].str_cmp("I") { match parse_vx(&args[1]) { Err(e) => return Err(e), Ok(v) => instrs.push(Box::new(AddIVInstr::create(v))), }; } else { return Err(ErrInfo::new( "Invalid args for ADD instruction".to_string(), args[0].pos, 0)); } } "LD" => { if let Ok(a) = parse_vx(&args[0]) { if let Ok(b) = parse_xx(&args[1]) { // LD V, byte instrs.push(Box::new(LoadByteInstr::create(a, b))); } else if let Ok(b) = parse_vx(&args[1]) { // LD V, V instrs.push(Box::new(MovRegInstr::create(a, b))); } else if args[1].str_cmp("DT") { // LD V, DT instrs.push(Box::new(GetDelayTimerInstr::create(a))); } else if args[1].str_cmp("K") { // LD V, K instrs.push(Box::new(WaitForKeyInstr::create(a))); } else if args[1].str_cmp("[I]") { // LD V, [I] instrs.push(Box::new(ReadRegsFromMemInstr::create(a))); } else { return Err(ErrInfo::new( "Invalid args to LD instruction".to_string(), args[0].pos, 0)); } } else if args[0].str_cmp("I") { // Special 16 bit address sequence if let Ok(addr) = parse_extended_addr(&args[1]) { emit_extended_load(&mut instrs, addr); } else { // LD I, nnn // Using the *2nd* argument! match parse_nnn_or_symbol(&args[1]) { AddressOrSymbol::Symbol(s) => { instrs.push(Box::new(LoadIInstr::create_with_symbol(s))); } AddressOrSymbol::Address(a) => { instrs.push(Box::new(LoadIInstr::create(a))); } } } } else if args[0].str_cmp("DT") { // LD DT, V instrs.push(Box::new(SetDelayTimerInstr::create(parse_vx(&args[1]).unwrap()))); } else if args[0].str_cmp("ST") { // LD ST, V instrs.push(Box::new(SetSoundTimerInstr::create(parse_vx(&args[1]).unwrap()))); } else if args[0].str_cmp("F") { // LD F, V instrs.push(Box::new(GetDigitAddrInstr::create(parse_vx(&args[1]).unwrap()))); } else if args[0].str_cmp("B") { // LD B, V instrs.push(Box::new(StoreBCDInstr::create(parse_vx(&args[1]).unwrap()))); } else if args[0].str_cmp("[I]") { // LD [I], V instrs.push(Box::new(WriteRegsToMemInstr::create(parse_vx(&args[1]).unwrap()))); } else { return Err(ErrInfo::new( "Invalid args to LD instruction".to_string(), args[0].pos, 0)); } } // Only draw has 3 "DRW" => instrs.push(Box::new(DrawSpriteInstr::create( parse_vx(&args[0]).unwrap(), parse_vx(&args[1]).unwrap(), parse_n(&args[2]).unwrap()))), _ => return Err(ErrInfo::new( format!("Unrecognised mnemonic: {}", mnemonic.s), mnemonic.pos, mnemonic.len())), } } ArgsType::VX => { if let Err(e) = handle_vx_mnemonic(&mut instrs, &mnemonic, &args) { return Err(e); } } ArgsType::VXVY => { if let Err(e) = handle_vxvy_mnemonic(&mut instrs, &mnemonic, &args) { return Err(e); } } } Ok(instrs) } fn emit_extended_load(instrs: &mut Vec<Box<dyn Instr>>, addr: u16) { if addr <= 0xFFF { instrs.push(Box::new(LoadIInstr::create(addr))); } else { // We're going to change I anyway so we can trash it let rest_of_addr = addr - 0xFFF; instrs.push(Box::new(LoadIInstr::create(0xFFF))); // Number of ADD I, Vx we have to do with 0xFF // Can't think of another way other than reserving a register here let regnum: u8 = 14; let max_regval: u16 = 0xFF; let num_adds = (rest_of_addr / max_regval) as u8; // Remainder value for the last ADD let remainder = (rest_of_addr % max_regval) as u8; if num_adds != 0 { 
instrs.push(Box::new(LoadByteInstr::create(regnum, max_regval as u8))); for _ in 0..num_adds { instrs.push(Box::new(AddIVInstr::create(regnum))); } } if remainder != 0 { instrs.push(Box::new(LoadByteInstr::create(regnum, remainder))); instrs.push(Box::new(AddIVInstr::create(regnum))); } /* TADA! You just loaded a 16 bit address into I but gave up a register temporarily to do it. The reason you can't save/restore is as follows: - Set I to some location (font memory/high addr?) - Save V0 to memory - Do stuff with it to get I to the high address - Then set I back to the saved V0 location .... Which defeats the point of this whole silly exercise. Also restoring the memory you save to is tricky. */ } } fn handle_vx_mnemonic(instrs: &mut Vec<Box<dyn Instr>>, mnemonic: &AsmArg, args: &[AsmArg]) -> Result<(), ErrInfo> { let x = match parse_vx(&args[0]) { Err(e) => return Err(e), Ok(v) => v, }; match mnemonic.upper.as_str() { "SHR" => instrs.push(Box::new(ShrRegInstr::create(x))), "SHL" => instrs.push(Box::new(ShlRegInstr::create(x))), "SKP" => instrs.push(Box::new(SkipKeyIfPressedInstr::create(x))), "SKNP" => instrs.push(Box::new(SkipKeyIfNotPressedInstr::create(x))), _ => panic!("Unknown mnemonic {} with VX args", mnemonic.s), }; Ok(()) } fn handle_vxvy_mnemonic(instrs: &mut Vec<Box<dyn Instr>>, mnemonic: &AsmArg, args: &[AsmArg]) -> Result<(), ErrInfo> { let x = match parse_vx(&args[0]) { Err(e) => return Err(e), Ok(v) => v, }; let y = match parse_vx(&args[1]) { Err(e) => return Err(e), Ok(v) => v, }; match mnemonic.upper.as_str() { "OR" => instrs.push(Box::new(OrRegInstr::create(x, y))), "XOR" => instrs.push(Box::new(XORRegInstr::create(x, y))), "AND" => instrs.push(Box::new(AndRegInstr::create(x, y))), "SUB" => instrs.push(Box::new(SubRegInstr::create(x, y))), "SUBN" => instrs.push(Box::new(SubNRegInstr::create(x, y))), _ => panic!("Unknown mnemonic {} with VXVY args", mnemonic.s), }; Ok(()) } enum ArgsType { Custom, VX, VXVY, } fn get_args_type(mnemonic: &AsmArg) -> ArgsType { match mnemonic.upper.as_str() { "SHR" | "SHL" | "SKP" | "SKNP" => ArgsType::VX, "OR" | "XOR" | "AND" | "SUB" | "SUBN" => ArgsType::VXVY, _ => ArgsType::Custom, } } fn check_num_args(mnemonic: &AsmArg, num: usize) -> Result<usize, ErrInfo> { let expected: usize = match &mnemonic.upper[..] 
{ "CLS" | "RET" | "BRK" => 0, "SYS" | "CALL" | "SHR" | "SHL" | "SKP" | "SKNP" | ".WORD" => 1, // Some variants of LD only have 1 variable arg, but for asm // purposes they all have two "LD" | "ADD" | "SE" | "SNE" | "OR" | "AND" | "XOR" | "SUB" | "SUBN" | "RND" => 2, "DRW" => 3, _ => return Err(ErrInfo::new( format!("Unrecognised mnemonic: {}", mnemonic.s), mnemonic.pos, mnemonic.len())), }; if expected != num { return Err(ErrInfo::new( format!("Expected {} args for {}, got {}", expected, mnemonic.s, num), mnemonic.pos, mnemonic.len())); } Ok(num) } fn parse_vx(arg: &AsmArg) -> Result<u8, ErrInfo> { let c1 = arg.s.chars().next().unwrap(); if (c1 != 'V') && (c1 != 'v') { return Err(ErrInfo::new( "VX arg does not begin with \"V\"".to_string(), arg.pos, arg.len())); } let num = &arg.s[1..]; let idx: u8; match num.parse::<u8>() { Err(_) => { match u8::from_str_radix(&arg.s[1..], 16) { Err(_) => return Err( ErrInfo::new(format!("Invalid V register: \"{}\"", arg.s), arg.pos, arg.len())), Ok(v) => idx = v, } } Ok(v) => idx = v, } if idx > 0xF { return Err(ErrInfo::new( "V register index cannot be > 0xF".to_string(), arg.pos, arg.len())); } Ok(idx) } fn parse_hex(arg: &AsmArg) -> Result<u16, ErrInfo> { if arg.len() < 2 { return Err(ErrInfo::new("Arg too short to be a hex number".to_string(), arg.pos, arg.len())); } if &arg.s[..2] != "0x" { return Err(ErrInfo::new("Hex number must start with \"0x\"".to_string(), arg.pos, arg.len())); } match u16::from_str_radix(&arg.s[2..], 16) { Err(e) => Err(ErrInfo::new(format!("Invalid hex number: {}", e.to_string()), arg.pos, arg.len())), Ok(v) => Ok(v), } } fn parse_xx(arg: &AsmArg) -> Result<u8, ErrInfo> { let v = match parse_hex(arg) { Err(_) => match arg.s.parse::<u16>() { Err(_) => return Err(ErrInfo::new( "Invalid byte argument".to_string(), arg.pos, arg.len())), Ok(v) => v, }, Ok(v) => v, }; // TODO: some out of range error, as opposed to not recognised if v > 0xff { return Err(ErrInfo::new("Byte argument larger than 0xFF".to_string(), arg.pos, arg.len())); } Ok(v as u8) } fn parse_nnn(arg: &AsmArg) -> Result<u16, ErrInfo> { match parse_hex(arg) { Err(e) => Err(e), Ok(v) => { if v > 0xfff { return Err(ErrInfo::new( "Address argument larger than 0xFFF".to_string(), arg.pos, arg.len())); } Ok(v) } } } fn parse_extended_addr(arg: &AsmArg) -> Result<u16, ErrInfo> { match parse_hex(arg) { Err(e) => Err(e), Ok(v) => Ok(v), } } fn parse_nnn_or_symbol(arg: &AsmArg) -> AddressOrSymbol { match parse_nnn(arg) { Ok(v) => AddressOrSymbol::Address(v), // Try to look up anything else as a symbol Err(_) => AddressOrSymbol::Symbol(arg.s.to_owned()), } } fn parse_n(arg: &AsmArg) -> Result<u8, ErrInfo> { match arg.s.parse::<u8>() { Err(msg) => Err(ErrInfo::new(msg.to_string(), arg.pos, arg.len())), Ok(v) => { if v > 15 { Err(ErrInfo::new("Nibble must be < 16".to_string(), arg.pos, arg.len())) } else { Ok(v) } } } }
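The block comment inside emit_extended_load above explains the trick in prose; the arithmetic itself is easy to sanity-check. Below is a standalone sketch (plain Python, not part of mod.rs; extended_load_plan is a hypothetical helper name) that mirrors the num_adds/remainder split, with the 0xFFF base and 0xFF step taken straight from the Rust code:

def extended_load_plan(addr):
    # Mirrors emit_extended_load: LD I, 0xFFF, then repeated ADD I, Vx steps.
    assert addr > 0xFFF              # the <= 0xFFF case is a plain LD I, nnn
    rest = addr - 0xFFF              # amount still to add after LD I, 0xFFF
    num_adds = rest // 0xFF          # full ADD I, Vx steps with Vx = 0xFF
    remainder = rest % 0xFF          # one final ADD I, Vx with Vx = remainder
    assert 0xFFF + num_adds * 0xFF + remainder == addr
    return num_adds, remainder

print(extended_load_plan(0x1234))    # (2, 55): LD I,0xFFF; 2x ADD 0xFF; ADD 0x37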
common.rs
use bracket_color::prelude::*; use bracket_pathfinding::prelude::*; use bracket_random::prelude::RandomNumberGenerator; use crossterm::queue; use crossterm::style::{Color::Rgb, Print, SetForegroundColor}; use std::io::{stdout, Write}; // Console Support pub fn print_color(color: RGB, text: &str) { queue!( stdout(), SetForegroundColor(Rgb { r: (color.r * 255.0) as u8, g: (color.g * 255.0) as u8, b: (color.b * 255.0) as u8, }) ) .expect("Command Fail"); queue!(stdout(), Print(text)).expect("Command fail"); } pub fn flush_console() { stdout().flush().expect("Flush Fail"); } // Map pub const MAP_WIDTH: usize = 80; pub const MAP_HEIGHT: usize = 20; pub const MAP_TILES: usize = MAP_WIDTH * MAP_HEIGHT; pub const START_POINT: Point = Point::constant(2, MAP_HEIGHT as i32 / 2); pub const END_POINT: Point = Point::constant(MAP_WIDTH as i32 - 2, MAP_HEIGHT as i32 / 2); pub struct Map { pub tiles: Vec<char>, } impl Map { pub fn new() -> Self { let mut tiles = Self { tiles: vec!['.'; MAP_TILES], }; // Add random walls let n_walls = 200; let mut rng = RandomNumberGenerator::new(); for _ in 0..n_walls { let target = Point::new( rng.roll_dice(1, MAP_WIDTH as i32 - 1), rng.roll_dice(1, MAP_HEIGHT as i32 - 1), ); if target != START_POINT && target != END_POINT { let idx = tiles.point2d_to_index(target); tiles.tiles[idx] = '#'; } } tiles } fn valid_exit(&self, loc: Point, delta: Point) -> Option<usize> { let destination = loc + delta; let idx = self.point2d_to_index(destination); if self.in_bounds(destination) && self.tiles[idx] == '.' { Some(idx) } else { None } } } impl BaseMap for Map { fn is_opaque(&self, idx: usize) -> bool
fn get_available_exits(&self, idx: usize) -> SmallVec<[(usize, f32); 10]> { let mut exits = SmallVec::new(); let location = self.index_to_point2d(idx); if let Some(idx) = self.valid_exit(location, Point::new(-1, 0)) { exits.push((idx, 1.0)) } if let Some(idx) = self.valid_exit(location, Point::new(1, 0)) { exits.push((idx, 1.0)) } if let Some(idx) = self.valid_exit(location, Point::new(0, -1)) { exits.push((idx, 1.0)) } if let Some(idx) = self.valid_exit(location, Point::new(0, 1)) { exits.push((idx, 1.0)) } if let Some(idx) = self.valid_exit(location, Point::new(-1, -1)) { exits.push((idx, 1.4)) } if let Some(idx) = self.valid_exit(location, Point::new(1, -1)) { exits.push((idx, 1.4)) } if let Some(idx) = self.valid_exit(location, Point::new(-1, 1)) { exits.push((idx, 1.4)) } if let Some(idx) = self.valid_exit(location, Point::new(1, 1)) { exits.push((idx, 1.4)) } exits } fn get_pathing_distance(&self, idx1: usize, idx2: usize) -> f32 { DistanceAlg::Pythagoras .distance2d(self.index_to_point2d(idx1), self.index_to_point2d(idx2)) } } impl Algorithm2D for Map { fn dimensions(&self) -> Point { Point::new(MAP_WIDTH, MAP_HEIGHT) } }
{ self.tiles[idx as usize] == '#' }
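One detail worth noting in get_available_exits above: cardinal steps cost 1.0 and diagonal steps 1.4, a cheap stand-in for the exact diagonal step length of sqrt(2) ~= 1.4142, which keeps the edge costs consistent with the Pythagoras distance used in get_pathing_distance. A one-line check (plain Python, illustration only):

import math
print(math.hypot(1, 1))  # 1.4142...; the map rounds this to 1.4 per diagonal step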
test_line2d.py
import pytest from src.geometry.line2d import standard_line def test_standard_line_0(): # One point is not enough to form a line for i in range(-10, 11): for j in range(-10, 11): A, B, C = standard_line(i, j, i, j) assert A == 0 and B == 0 and C == 0 @pytest.mark.parametrize("points", [ [(0, 0), (1, 1)], [(1, 1), (0, 0)], [(1, 1), (2, 2)], [(2, 2), (1, 1)], [(100, 100), (200, 200)], [(200, 200), (100, 100)] ]) def test_standard_line_1(points): point0, point1 = points A, B, C = standard_line( point0[0], point0[1], point1[0], point1[1] ) for i in range(-10, 10): assert i * A + i * B + C == 0 @pytest.mark.parametrize("points", [ [(0, 0), (1, -1)], [(1, -1), (0, 0)], [(1, -1), (2, -2)], [(2, -2), (1, -1)], [(100, -100), (200, -200)], [(200, -200), (100, -100)] ]) def test_standard_line_2(points): point0, point1 = points A, B, C = standard_line( point0[0], point0[1], point1[0], point1[1] ) for i in range(-10, 10): assert i * A + (-i) * B + C == 0 def test_standard_line_3():
def test_standard_line_4(): A, B, C = standard_line(0, 0, 1, 0) assert A * 0 + B * 0 + C == 0 assert A * 101 + B * 0 + C == 0 assert A * (-101) + B * 0 + C == 0 def test_standard_line_5(): A, B, C = standard_line(0, 2, 10, 2) assert A * 101 + B * 2 + C == 0 assert A * (-101) + B * 2 + C == 0
A, B, C = standard_line(0, 0, 0, 1) assert A * 0 + B * 2 + C == 0 assert A * 0 + B * 101 + C == 0 assert A * 0 + B * (-101) + C == 0
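src.geometry.line2d itself is not shown here, but the assertions above pin down what standard_line must return: coefficients (A, B, C) of the standard form A*x + B*y + C = 0, degenerating to (0, 0, 0) when the two points coincide. A plausible implementation consistent with every test in this file (an assumption, not the actual module):

def standard_line(x0, y0, x1, y1):
    # Line through (x0, y0) and (x1, y1) in standard form A*x + B*y + C = 0.
    a = y1 - y0
    b = x0 - x1
    c = -(a * x0 + b * y0)
    return a, b, c  # all zeros when the points coincide (no unique line)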
bilevel_test.go
// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package processing import ( "math/rand" "reflect" "testing" "time" edf_testing "github.com/google/edf/testing" ) func randFloat32(mean float64) float64 { return rand.Float64() + mean - 0.5 } func TestBiLevelBasics(t *testing.T) { t1 := time.Now() t2 := t1.Add(time.Duration(10) * time.Second) records := []float64{randFloat32(10), randFloat32(10), randFloat32(5), randFloat32(5), randFloat32(10), randFloat32(10), randFloat32(7), randFloat32(5), randFloat32(10), randFloat32(10), randFloat32(5), randFloat32(5), randFloat32(10), randFloat32(10), randFloat32(5), randFloat32(5), randFloat32(10), randFloat32(10), randFloat32(5), randFloat32(5)} baseSignal := edf_testing.NewTestingSignal(t1, t2, records) biLevel := NewBiLevelSignal(baseSignal, 5, 10, 1) if biLevel.StartTime() != t1 { t.Errorf("%v should be equal to %v", biLevel.StartTime(), t1) } if biLevel.EndTime() != t2
expectedSignal := []Level{HIGH, HIGH, LOW, LOW, HIGH, HIGH, TRANSITION, LOW, HIGH, HIGH, LOW, LOW, HIGH, HIGH, LOW, LOW, HIGH, HIGH, LOW, LOW} actualSignal, err := biLevel.BiLevelRecording(t1, t2) if err != nil { t.Error(err) } if !reflect.DeepEqual(actualSignal, expectedSignal) { t.Errorf("%v should be equal to %v", actualSignal, expectedSignal) } }
{ t.Errorf("%v should be equal to %v", biLevel.EndTime(), t2) }
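NewBiLevelSignal's implementation is not part of this file, but the expectedSignal above implies the classification rule the test depends on: a sample within the tolerance (the final constructor argument, 1 here) of the high level maps to HIGH, within tolerance of the low level to LOW, and anything else, like the randFloat32(7) sample at index 6, to TRANSITION. A hypothetical sketch of that rule in Python:

def classify(sample, low=5.0, high=10.0, tol=1.0):
    # Assumed rule, reverse-engineered from expectedSignal above.
    if abs(sample - high) <= tol:
        return "HIGH"
    if abs(sample - low) <= tol:
        return "LOW"
    return "TRANSITION"

print([classify(x) for x in (10.2, 5.3, 7.0)])  # ['HIGH', 'LOW', 'TRANSITION']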
resources_js_Pages_Home_vue.js
"use strict"; (self["webpackChunk"] = self["webpackChunk"] || []).push([["resources_js_Pages_Home_vue"],{ /***/ "./node_modules/babel-loader/lib/index.js??clonedRuleSet-5.use[0]!./node_modules/vue-loader/dist/templateLoader.js??ruleSet[1].rules[2]!./node_modules/vue-loader/dist/index.js??ruleSet[0].use[0]!./resources/js/Pages/Home.vue?vue&type=template&id=6a63e488": /*!*********************************************************************************************************************************************************************************************************************************************************************!*\ !*** ./node_modules/babel-loader/lib/index.js??clonedRuleSet-5.use[0]!./node_modules/vue-loader/dist/templateLoader.js??ruleSet[1].rules[2]!./node_modules/vue-loader/dist/index.js??ruleSet[0].use[0]!./resources/js/Pages/Home.vue?vue&type=template&id=6a63e488 ***! \*********************************************************************************************************************************************************************************************************************************************************************/ /***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { __webpack_require__.r(__webpack_exports__); /* harmony export */ __webpack_require__.d(__webpack_exports__, { /* harmony export */ "render": () => (/* binding */ render) /* harmony export */ }); /* harmony import */ var vue__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! vue */ "./node_modules/vue/dist/vue.esm-bundler.js"); var _hoisted_1 = /*#__PURE__*/(0,vue__WEBPACK_IMPORTED_MODULE_0__.createElementVNode)("title", null, "Home", -1 /* HOISTED */ ); var _hoisted_2 = /*#__PURE__*/(0,vue__WEBPACK_IMPORTED_MODULE_0__.createElementVNode)("meta", { type: "description", "head-key": "description" }, null, -1 /* HOISTED */ ); var _hoisted_3 = /*#__PURE__*/(0,vue__WEBPACK_IMPORTED_MODULE_0__.createElementVNode)("h1", null, "Hello", -1 /* HOISTED */
function render(_ctx, _cache) { var _component_Head = (0,vue__WEBPACK_IMPORTED_MODULE_0__.resolveComponent)("Head"); return (0,vue__WEBPACK_IMPORTED_MODULE_0__.openBlock)(), (0,vue__WEBPACK_IMPORTED_MODULE_0__.createElementBlock)(vue__WEBPACK_IMPORTED_MODULE_0__.Fragment, null, [(0,vue__WEBPACK_IMPORTED_MODULE_0__.createVNode)(_component_Head, { title: "test" }, { "default": (0,vue__WEBPACK_IMPORTED_MODULE_0__.withCtx)(function () { return [_hoisted_1, _hoisted_2]; }), _: 1 /* STABLE */ }), _hoisted_3], 64 /* STABLE_FRAGMENT */ ); } /***/ }), /***/ "./resources/js/Pages/Home.vue": /*!*************************************!*\ !*** ./resources/js/Pages/Home.vue ***! \*************************************/ /***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { __webpack_require__.r(__webpack_exports__); /* harmony export */ __webpack_require__.d(__webpack_exports__, { /* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__) /* harmony export */ }); /* harmony import */ var _Home_vue_vue_type_template_id_6a63e488__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./Home.vue?vue&type=template&id=6a63e488 */ "./resources/js/Pages/Home.vue?vue&type=template&id=6a63e488"); /* harmony import */ var C_laragon_www_inertia_demo_node_modules_vue_loader_dist_exportHelper_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./node_modules/vue-loader/dist/exportHelper.js */ "./node_modules/vue-loader/dist/exportHelper.js"); const script = {} ; const __exports__ = /*#__PURE__*/(0,C_laragon_www_inertia_demo_node_modules_vue_loader_dist_exportHelper_js__WEBPACK_IMPORTED_MODULE_1__["default"])(script, [['render',_Home_vue_vue_type_template_id_6a63e488__WEBPACK_IMPORTED_MODULE_0__.render],['__file',"resources/js/Pages/Home.vue"]]) /* hot reload */ if (false) {} /* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (__exports__); /***/ }), /***/ "./resources/js/Pages/Home.vue?vue&type=template&id=6a63e488": /*!*******************************************************************!*\ !*** ./resources/js/Pages/Home.vue?vue&type=template&id=6a63e488 ***! \*******************************************************************/ /***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { __webpack_require__.r(__webpack_exports__); /* harmony export */ __webpack_require__.d(__webpack_exports__, { /* harmony export */ "render": () => (/* reexport safe */ _node_modules_babel_loader_lib_index_js_clonedRuleSet_5_use_0_node_modules_vue_loader_dist_templateLoader_js_ruleSet_1_rules_2_node_modules_vue_loader_dist_index_js_ruleSet_0_use_0_Home_vue_vue_type_template_id_6a63e488__WEBPACK_IMPORTED_MODULE_0__.render) /* harmony export */ }); /* harmony import */ var _node_modules_babel_loader_lib_index_js_clonedRuleSet_5_use_0_node_modules_vue_loader_dist_templateLoader_js_ruleSet_1_rules_2_node_modules_vue_loader_dist_index_js_ruleSet_0_use_0_Home_vue_vue_type_template_id_6a63e488__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! -!../../../node_modules/babel-loader/lib/index.js??clonedRuleSet-5.use[0]!../../../node_modules/vue-loader/dist/templateLoader.js??ruleSet[1].rules[2]!../../../node_modules/vue-loader/dist/index.js??ruleSet[0].use[0]!./Home.vue?vue&type=template&id=6a63e488 */ "./node_modules/babel-loader/lib/index.js??clonedRuleSet-5.use[0]!./node_modules/vue-loader/dist/templateLoader.js??ruleSet[1].rules[2]!./node_modules/vue-loader/dist/index.js??ruleSet[0].use[0]!./resources/js/Pages/Home.vue?vue&type=template&id=6a63e488"); /***/ }) }]);
);
main.py
import sys import os import torch import random import numpy as np from tqdm import tqdm import torch.nn as nn import torch.optim as optim import math from network import GUNet from mlp_dropout import MLPClassifier from sklearn import metrics from util import cmd_args, load_data sys.path.append( '%s/pytorch_structure2vec-master/s2v_lib' % os.path.dirname( os.path.realpath(__file__))) class Classifier(nn.Module): def __init__(self): super(Classifier, self).__init__() model = GUNet self.s2v = model( latent_dim=cmd_args.latent_dim, output_dim=cmd_args.out_dim, num_node_feats=cmd_args.feat_dim+cmd_args.attr_dim, num_edge_feats=0, k=cmd_args.sortpooling_k) out_dim = cmd_args.out_dim if out_dim == 0: out_dim = self.s2v.dense_dim self.mlp = MLPClassifier( input_size=out_dim, hidden_size=cmd_args.hidden, num_class=cmd_args.num_class, with_dropout=cmd_args.dropout) def PrepareFeatureLabel(self, batch_graph): labels = torch.LongTensor(len(batch_graph)) n_nodes = 0 if batch_graph[0].node_tags is not None: node_tag_flag = True concat_tag = [] else: node_tag_flag = False if batch_graph[0].node_features is not None: node_feat_flag = True concat_feat = [] else: node_feat_flag = False for i in range(len(batch_graph)): labels[i] = batch_graph[i].label n_nodes += batch_graph[i].num_nodes if node_tag_flag: concat_tag += batch_graph[i].node_tags if node_feat_flag: tmp = torch.from_numpy( batch_graph[i].node_features).type('torch.FloatTensor') concat_feat.append(tmp) if node_tag_flag: concat_tag = torch.LongTensor(concat_tag).view(-1, 1) node_tag = torch.zeros(n_nodes, cmd_args.feat_dim) node_tag.scatter_(1, concat_tag, 1) if node_feat_flag: node_feat = torch.cat(concat_feat, 0) if node_feat_flag and node_tag_flag: # concatenate one-hot embedding of node tags (node labels) # with continuous node features node_feat = torch.cat([node_tag.type_as(node_feat), node_feat], 1) elif node_feat_flag is False and node_tag_flag: node_feat = node_tag elif node_feat_flag and node_tag_flag is False: pass else: node_feat = torch.ones(n_nodes, 1) if cmd_args.mode == 'gpu': node_feat = node_feat.cuda() labels = labels.cuda() return node_feat, labels def forward(self, batch_graph): node_feat, labels = self.PrepareFeatureLabel(batch_graph) embed = self.s2v(batch_graph, node_feat, None) return self.mlp(embed, labels) def output_features(self, batch_graph): node_feat, labels = self.PrepareFeatureLabel(batch_graph) embed = self.s2v(batch_graph, node_feat, None) return embed, labels def
(g_list, classifier, sample_idxes, optimizer=None, bsize=cmd_args.batch_size): total_loss = [] total_iters = (len(sample_idxes) + (bsize - 1) * (optimizer is None)) // bsize # noqa pbar = tqdm(range(total_iters), unit='batch') all_targets = [] all_scores = [] n_samples = 0 for pos in pbar: selected_idx = sample_idxes[pos * bsize: (pos + 1) * bsize] batch_graph = [g_list[idx] for idx in selected_idx] targets = [g_list[idx].label for idx in selected_idx] all_targets += targets logits, loss, acc = classifier(batch_graph) all_scores.append(logits[:, 1].detach()) # for binary classification if optimizer is not None: optimizer.zero_grad() loss.backward() optimizer.step() loss = loss.data.cpu().numpy() pbar.set_description('loss: %0.5f acc: %0.5f' % (loss, acc)) total_loss.append(np.array([loss, acc]) * len(selected_idx)) n_samples += len(selected_idx) if optimizer is None: assert n_samples == len(sample_idxes) total_loss = np.array(total_loss) avg_loss = np.sum(total_loss, 0) / n_samples all_scores = torch.cat(all_scores).cpu().numpy() # np.savetxt('test_scores.txt', all_scores) # output test predictions all_targets = np.array(all_targets) fpr, tpr, _ = metrics.roc_curve(all_targets, all_scores, pos_label=1) auc = metrics.auc(fpr, tpr) avg_loss = np.concatenate((avg_loss, [auc])) return avg_loss if __name__ == '__main__': print(cmd_args) random.seed(cmd_args.seed) np.random.seed(cmd_args.seed) torch.manual_seed(cmd_args.seed) train_graphs, test_graphs = load_data() print('# train: %d, # test: %d' % (len(train_graphs), len(test_graphs))) if cmd_args.sortpooling_k <= 1: num_nodes_list = sorted([ g.num_nodes for g in train_graphs + test_graphs]) cmd_args.sortpooling_k = num_nodes_list[ int(math.ceil(cmd_args.sortpooling_k * len(num_nodes_list))) - 1] cmd_args.sortpooling_k = max(10, cmd_args.sortpooling_k) print('k used in SortPooling is: ' + str(cmd_args.sortpooling_k)) classifier = Classifier() if cmd_args.mode == 'gpu': classifier = classifier.cuda() optimizer = optim.Adam( classifier.parameters(), lr=cmd_args.learning_rate, amsgrad=True, weight_decay=0.0008) train_idxes = list(range(len(train_graphs))) best_loss = None max_acc = 0.0 for epoch in range(cmd_args.num_epochs): random.shuffle(train_idxes) classifier.train() avg_loss = loop_dataset( train_graphs, classifier, train_idxes, optimizer=optimizer) if not cmd_args.printAUC: avg_loss[2] = 0.0 print('\033[92maverage training of epoch %d: loss %.5f acc %.5f auc %.5f\033[0m' % (epoch, avg_loss[0], avg_loss[1], avg_loss[2])) # noqa classifier.eval() test_loss = loop_dataset( test_graphs, classifier, list(range(len(test_graphs)))) if not cmd_args.printAUC: test_loss[2] = 0.0 print('\033[93maverage test of epoch %d: loss %.5f acc %.5f auc %.5f\033[0m' % (epoch, test_loss[0], test_loss[1], test_loss[2])) # noqa max_acc = max(max_acc, test_loss[1]) with open('acc_result_%s.txt' % cmd_args.data, 'a+') as f: # f.write(str(test_loss[1]) + '\n') f.write(str(max_acc) + '\n') if cmd_args.printAUC: with open('auc_results.txt', 'a+') as f: f.write(str(test_loss[2]) + '\n') if cmd_args.extract_features: features, labels = classifier.output_features(train_graphs) labels = labels.type('torch.FloatTensor') np.savetxt('extracted_features_train.txt', torch.cat( [labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(), '%.4f') features, labels = classifier.output_features(test_graphs) labels = labels.type('torch.FloatTensor') np.savetxt('extracted_features_test.txt', torch.cat( [labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(), '%.4f')
loop_dataset
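One subtle line in loop_dataset above is the total_iters expression: with an optimizer (training) it floors, silently dropping a ragged final batch; with optimizer=None (evaluation) it rounds up so every sample is scored, which is exactly what the n_samples assertion checks. The same formula in isolation:

def total_iters(n_samples, bsize, training):
    # training: n // bsize        -> ragged last batch dropped
    # eval:     ceil(n / bsize)   -> every sample kept
    return (n_samples + (bsize - 1) * (not training)) // bsize

print(total_iters(100, 16, training=True))   # 6: last 4 samples are skipped
print(total_iters(100, 16, training=False))  # 7: all 100 samples are scored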
arguments.rs
use clap::{App, SubCommand, ArgMatches}; pub fn parse_args<'a>() -> ArgMatches<'a> {
.about("parse git submodules to json file") ) .subcommand(SubCommand::with_name("clone_repos") .about("clone any missing repos") ) .get_matches(); matches }
let matches = App::new("git_submodules") .author(crate_authors!()) .version(crate_version!()) .subcommand(SubCommand::with_name("generate_json_file")
test_monitor.py
# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ DP-Monitor test. """ import pytest import numpy as np import mindspore.nn as nn import mindspore.dataset as ds from mindspore.train import Model import mindspore.context as context from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory from mindarmour.utils.logger import LogUtil from tests.ut.python.utils.mock_net import Net LOGGER = LogUtil.get_instance() TAG = 'DP-Monitor Test' def dataset_generator(): batch_size = 16 batches = 128 data = np.random.random((batches * batch_size, 1, 32, 32)).astype( np.float32) label = np.random.randint(0, 10, batches * batch_size).astype(np.int32) for i in range(batches): yield data[i * batch_size: (i + 1) * batch_size], \ label[i * batch_size: (i + 1) * batch_size] @pytest.mark.level0 @pytest.mark.platform_arm_ascend_training @pytest.mark.platform_x86_ascend_training @pytest.mark.env_card @pytest.mark.component_mindarmour def test_dp_monitor(): context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") batch_size = 16 epochs = 1 rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=60000, batch_size=batch_size, initial_noise_multiplier=0.4, noise_decay_rate=6e-3) suggest_epoch = rdp.max_epoch_suggest() LOGGER.info(TAG, 'The recommended maximum training epochs is: %s', suggest_epoch) network = Net() net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) model = Model(network, net_loss, net_opt) LOGGER.info(TAG, "============== Starting Training ==============") ds1 = ds.GeneratorDataset(dataset_generator, ["data", "label"]) model.train(epochs, ds1, callbacks=[rdp], dataset_sink_mode=False) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_card @pytest.mark.component_mindarmour def
(): context.set_context(mode=context.GRAPH_MODE, device_target="GPU") batch_size = 16 epochs = 1 rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=60000, batch_size=batch_size, initial_noise_multiplier=0.4, noise_decay_rate=6e-3) suggest_epoch = rdp.max_epoch_suggest() LOGGER.info(TAG, 'The recommended maximum training epochs is: %s', suggest_epoch) network = Net() net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) model = Model(network, net_loss, net_opt) LOGGER.info(TAG, "============== Starting Training ==============") ds1 = ds.GeneratorDataset(dataset_generator, ["data", "label"]) model.train(epochs, ds1, callbacks=[rdp], dataset_sink_mode=False) @pytest.mark.level0 @pytest.mark.platform_x86_cpu @pytest.mark.env_card @pytest.mark.component_mindarmour def test_dp_monitor_cpu(): context.set_context(mode=context.GRAPH_MODE, device_target="CPU") batch_size = 16 epochs = 1 rdp = PrivacyMonitorFactory.create(policy='rdp', num_samples=60000, batch_size=batch_size, initial_noise_multiplier=0.4, noise_decay_rate=6e-3) suggest_epoch = rdp.max_epoch_suggest() LOGGER.info(TAG, 'The recommended maximum training epochs is: %s', suggest_epoch) network = Net() net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) model = Model(network, net_loss, net_opt) LOGGER.info(TAG, "============== Starting Training ==============") ds1 = ds.GeneratorDataset(dataset_generator, ["data", "label"]) model.train(epochs, ds1, callbacks=[rdp], dataset_sink_mode=False) @pytest.mark.level0 @pytest.mark.platform_arm_ascend_training @pytest.mark.platform_x86_ascend_training @pytest.mark.env_card @pytest.mark.component_mindarmour def test_dp_monitor_zcdp(): context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") batch_size = 16 epochs = 1 zcdp = PrivacyMonitorFactory.create(policy='zcdp', num_samples=60000, batch_size=batch_size, initial_noise_multiplier=0.4, noise_decay_rate=6e-3) suggest_epoch = zcdp.max_epoch_suggest() LOGGER.info(TAG, 'The recommended maximum training epochs is: %s', suggest_epoch) network = Net() net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) model = Model(network, net_loss, net_opt) LOGGER.info(TAG, "============== Starting Training ==============") ds1 = ds.GeneratorDataset(dataset_generator, ["data", "label"]) model.train(epochs, ds1, callbacks=[zcdp], dataset_sink_mode=False) @pytest.mark.level0 @pytest.mark.platform_x86_gpu_training @pytest.mark.env_card @pytest.mark.component_mindarmour def test_dp_monitor_zcdp_gpu(): context.set_context(mode=context.GRAPH_MODE, device_target="GPU") batch_size = 16 epochs = 1 zcdp = PrivacyMonitorFactory.create(policy='zcdp', num_samples=60000, batch_size=batch_size, initial_noise_multiplier=0.4, noise_decay_rate=6e-3) suggest_epoch = zcdp.max_epoch_suggest() LOGGER.info(TAG, 'The recommended maximum training epochs is: %s', suggest_epoch) network = Net() net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) model = Model(network, net_loss, net_opt) LOGGER.info(TAG, "============== Starting Training ==============") ds1 = ds.GeneratorDataset(dataset_generator, ["data", "label"]) model.train(epochs, ds1, callbacks=[zcdp], dataset_sink_mode=False) @pytest.mark.level0 @pytest.mark.platform_x86_cpu 
@pytest.mark.env_card @pytest.mark.component_mindarmour def test_dp_monitor_zcdp_cpu(): context.set_context(mode=context.GRAPH_MODE, device_target="CPU") batch_size = 16 epochs = 1 zcdp = PrivacyMonitorFactory.create(policy='zcdp', num_samples=60000, batch_size=batch_size, initial_noise_multiplier=0.4, noise_decay_rate=6e-3) suggest_epoch = zcdp.max_epoch_suggest() LOGGER.info(TAG, 'The recommended maximum training epochs is: %s', suggest_epoch) network = Net() net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean") net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9) model = Model(network, net_loss, net_opt) LOGGER.info(TAG, "============== Starting Training ==============") ds1 = ds.GeneratorDataset(dataset_generator, ["data", "label"]) model.train(epochs, ds1, callbacks=[zcdp], dataset_sink_mode=False)
test_dp_monitor_gpu
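The six monitor tests in this file differ only in the privacy policy ('rdp' vs 'zcdp') and device_target. The per-platform pytest markers are why they are written out separately; if a single marker set were acceptable, a parametrized version (same imports and Net as above, training body elided) could look like this sketch:

@pytest.mark.parametrize("policy", ["rdp", "zcdp"])
@pytest.mark.parametrize("device", ["Ascend", "GPU", "CPU"])
def test_dp_monitor_all(policy, device):
    context.set_context(mode=context.GRAPH_MODE, device_target=device)
    monitor = PrivacyMonitorFactory.create(policy=policy, num_samples=60000,
                                           batch_size=16,
                                           initial_noise_multiplier=0.4,
                                           noise_decay_rate=6e-3)
    LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                monitor.max_epoch_suggest())
    # ...build the network/Model and call model.train exactly as in the tests above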
call.go
/* * * Copyright 2014 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package grpc import ( "golang.org/x/net/context" ) // Invoke sends the RPC request on the wire and returns after response is // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) if cc.dopts.unaryInt != nil { return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) } return invoke(ctx, method, args, reply, cc, opts...) } func combine(o1 []CallOption, o2 []CallOption) []CallOption { // we don't use append because o1 could have extra capacity whose // elements would be overwritten, which could cause inadvertent // sharing (and race conditions) between concurrent calls if len(o1) == 0 { return o2 } else if len(o2) == 0 { return o1 } ret := make([]CallOption, len(o1)+len(o2)) copy(ret, o1) copy(ret[len(o1):], o2) return ret } // Invoke sends the RPC request on the wire and returns after response is // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. func
(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) } var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { // TODO: implement retries in clientStream and make this simply // newClientStream, SendMsg, RecvMsg. firstAttempt := true for { csInt, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) if err != nil { return err } cs := csInt.(*clientStream) if err := cs.SendMsg(req); err != nil { if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt { // TODO: Add a field to header for grpc-transparent-retry-attempts firstAttempt = false continue } return err } if err := cs.RecvMsg(reply); err != nil { if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt { // TODO: Add a field to header for grpc-transparent-retry-attempts firstAttempt = false continue } return err } return nil } }
Invoke
utils.py
import os from PIL import Image import numpy as np def get_files(folder, name_filter=None, extension_filter=None): """Helper function that returns the list of files in a specified folder with a specified extension. Keyword arguments: - folder (``string``): The path to a folder. - name_filter (``string``, optional): The returned files must contain this substring in their filename. Default: None; files are not filtered. - extension_filter (``string``, optional): The desired file extension. Default: None; files are not filtered. """ if not os.path.isdir(folder): raise RuntimeError("\"{0}\" is not a folder.".format(folder)) # Filename filter: if not specified don't filter (condition always true); # otherwise, use a lambda expression to filter out files that do not # contain "name_filter" if name_filter is None: # This looks hackish...there is probably a better way name_cond = lambda filename: True else: name_cond = lambda filename: name_filter in filename # Extension filter: if not specified don't filter (condition always true); # otherwise, use a lambda expression to filter out files whose extension # is not "extension_filter" if extension_filter is None: # This looks hackish...there is probably a better way ext_cond = lambda filename: True else: ext_cond = lambda filename: filename.endswith(extension_filter) filtered_files = [] # Explore the directory tree to get files that contain "name_filter" and # with extension "extension_filter" for path, _, files in os.walk(folder): files.sort() for file in files: if name_cond(file) and ext_cond(file): full_path = os.path.join(path, file) filtered_files.append(full_path) return filtered_files def pil_loader(data_path, label_path): """Loads a sample and label image given their path as PIL images. Keyword arguments: - data_path (``string``): The filepath to the image. - label_path (``string``): The filepath to the ground-truth image. Returns the image and the label as PIL images. """ data = Image.open(data_path) label = Image.open(label_path) return data, label def remap(image, old_values, new_values): assert isinstance(image, Image.Image) or isinstance( image, np.ndarray), "image must be of type PIL.Image or numpy.ndarray" assert type(new_values) is tuple, "new_values must be of type tuple" assert type(old_values) is tuple, "old_values must be of type tuple" assert len(new_values) == len( old_values), "new_values and old_values must have the same length" # If image is a PIL.Image convert it to a numpy array if isinstance(image, Image.Image): image = np.array(image) # Replace old values by the new ones tmp = np.zeros_like(image) for old, new in zip(old_values, new_values): # Since tmp is already initialized as zeros we can skip new values # equal to 0 if new != 0: tmp[image == old] = new return Image.fromarray(tmp) def enet_weighing(dataloader, num_classes, c=1.02): """Computes class weights as described in the ENet paper: w_class = 1 / (ln(c + p_class)), where c is usually 1.02 and p_class is the propensity score of that class: propensity_score = freq_class / total_pixels. References: https://arxiv.org/abs/1606.02147 Keyword arguments: - dataloader (``data.Dataloader``): A data loader to iterate over the dataset. - num_classes (``int``): The number of classes. - c (``float``, optional): An additional hyper-parameter which restricts the interval of values for the weights. Default: 1.02.
""" class_count = 0 total = 0 for _, label in dataloader: label = label.cpu().numpy() # Flatten label flat_label = label.flatten() # Sum up the number of pixels of each class and the total pixel # counts for each label class_count += np.bincount(flat_label, minlength=num_classes) total += flat_label.size # Compute propensity score and then the weights for each class propensity_score = class_count / total class_weights = 1 / (np.log(c + propensity_score)) return class_weights def median_freq_balancing(dataloader, num_classes): """Computes class weights using median frequency balancing as described in https://arxiv.org/abs/1411.4734: w_class = median_freq / freq_class, where freq_class is the number of pixels of a given class divided by the total number of pixels in images where that class is present, and median_freq is the median of freq_class. Keyword arguments: - dataloader (``data.Dataloader``): A data loader to iterate over the dataset. whose weights are going to be computed. - num_classes (``int``): The number of classes """ class_count = 0 total = 0 for _, label in dataloader: label = label.cpu().numpy() # Flatten label flat_label = label.flatten() # Sum up the class frequencies bincount = np.bincount(flat_label, minlength=num_classes)
# does not exist in the label) or equal to the pixel count (if # the class exists in the label) total += mask * flat_label.size # Sum up the number of pixels found for each class class_count += bincount # Compute the frequency and its median freq = class_count / total med = np.median(freq) return med / freq
# Create a mask of classes that exist in the label mask = bincount > 0 # Multiply the mask by the pixel count. The resulting array has # one element for each class. The value is either 0 (if the class
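To make the two weighting schemes above concrete, here is a toy calculation (standalone Python, not part of utils.py) on a four-pixel label map where class 0 covers three pixels and class 1 covers one; the rarer class gets the larger weight under both formulas:

import numpy as np

labels = np.array([0, 0, 0, 1])
p = np.bincount(labels, minlength=2) / labels.size   # [0.75, 0.25]

# ENet: w_class = 1 / ln(c + p_class), with c = 1.02
print(1 / np.log(1.02 + p))          # ~[1.75, 4.18]: rare class weighted up

# Median frequency balancing: w_class = median(freq) / freq_class
# (freq collapses to p here because both classes appear in the single image)
print(np.median(p) / p)              # [0.667, 2.0]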
bindings_x86_64.rs
/* automatically generated by rust-bindgen */ pub const MSDFGEN_CUBIC_SEARCH_STARTS: u32 = 4; pub const MSDFGEN_CUBIC_SEARCH_STEPS: u32 = 4; pub const MSDFGEN_VERSION: &'static [u8; 4usize] = b"1.6\0"; pub type std_size_t = ::std::os::raw::c_ulong; pub type std_integral_constant_value_type<_Tp> = _Tp; pub type std_integral_constant_type = u8; pub type std_true_type = u8;
pub struct std___and_ { pub _address: u8, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_is_empty { pub _address: u8, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_make_unsigned { pub _address: u8, } pub type std_make_unsigned_type = u8; #[repr(C)] #[derive(Copy, Clone)] pub union std_aligned_storage_type { pub __data: *mut ::std::os::raw::c_uchar, pub __align: std_aligned_storage_type__bindgen_ty_1, _bindgen_union_align: u64, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_aligned_storage_type__bindgen_ty_1 { pub _address: u8, } #[test] fn bindgen_test_layout_std_aligned_storage_type() { assert_eq!( ::std::mem::size_of::<std_aligned_storage_type>(), 8usize, concat!("Size of: ", stringify!(std_aligned_storage_type)) ); assert_eq!( ::std::mem::align_of::<std_aligned_storage_type>(), 8usize, concat!("Alignment of ", stringify!(std_aligned_storage_type)) ); } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std___detector { pub _address: u8, } pub type std___detector_value_t = std_false_type; pub type std___detector_type<_Default> = _Default; pub type std___detected_or = std___detector; pub type std___detected_or_t = std___detected_or; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_iterator { pub _address: u8, } pub type std_iterator_iterator_category<_Category> = _Category; pub type std_iterator_value_type<_Tp> = _Tp; pub type std_iterator_difference_type<_Distance> = _Distance; pub type std_iterator_pointer<_Pointer> = _Pointer; pub type std_iterator_reference<_Reference> = _Reference; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std___iterator_traits { pub _address: u8, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_iterator_traits { pub _address: u8, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std___undefined { _unused: [u8; 0], } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std___get_first_arg { pub _address: u8, } pub type std___get_first_arg_type = std___undefined; pub type std___get_first_arg_t = std___get_first_arg; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std___replace_first_arg { pub _address: u8, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_pointer_traits { pub _address: u8, } pub type std_pointer_traits___element_type = [u8; 0usize]; pub type std_pointer_traits___difference_type = [u8; 0usize]; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_pointer_traits___rebind { pub _address: u8, } pub type std_pointer_traits_pointer<_Ptr> = _Ptr; pub type std_pointer_traits_element_type = std___detected_or_t; pub type std_pointer_traits_difference_type = std___detected_or_t; pub type std_pointer_traits_rebind = std_pointer_traits___rebind; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_reverse_iterator<_Iterator> { pub current: _Iterator, pub _phantom_0: ::std::marker::PhantomData<::std::cell::UnsafeCell<_Iterator>>, } pub type std_reverse_iterator___traits_type = std_iterator_traits; pub type std_reverse_iterator_iterator_type<_Iterator> = _Iterator; pub type std_reverse_iterator_difference_type = std_reverse_iterator___traits_type; pub type std_reverse_iterator_pointer = std_reverse_iterator___traits_type; pub type std_reverse_iterator_reference = std_reverse_iterator___traits_type; pub type std___allocator_base = __gnu_cxx_new_allocator; #[repr(C)] #[derive(Debug)] pub struct std_allocator { pub _address: u8, } pub type std_allocator_size_type = std_size_t; pub type std_allocator_difference_type = isize; pub type std_allocator_pointer<_Tp> = *mut _Tp; pub type 
std_allocator_const_pointer<_Tp> = *const _Tp; pub type std_allocator_reference<_Tp> = *mut _Tp; pub type std_allocator_const_reference<_Tp> = *const _Tp; pub type std_allocator_value_type<_Tp> = _Tp; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_allocator_rebind { pub _address: u8, } pub type std_allocator_rebind_other = std_allocator; pub type std_allocator_propagate_on_container_move_assignment = std_true_type; pub type std_allocator_is_always_equal = std_true_type; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std___allocator_traits_base { pub _address: u8, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std___allocator_traits_base___rebind { pub _address: u8, } pub type std___allocator_traits_base___pointer = [u8; 0usize]; pub type std___allocator_traits_base___c_pointer = [u8; 0usize]; pub type std___allocator_traits_base___v_pointer = [u8; 0usize]; pub type std___allocator_traits_base___cv_pointer = [u8; 0usize]; pub type std___allocator_traits_base___pocca = [u8; 0usize]; pub type std___allocator_traits_base___pocma = [u8; 0usize]; pub type std___allocator_traits_base___pocs = [u8; 0usize]; pub type std___allocator_traits_base___equal = [u8; 0usize]; #[test] fn bindgen_test_layout_std___allocator_traits_base() { assert_eq!( ::std::mem::size_of::<std___allocator_traits_base>(), 1usize, concat!("Size of: ", stringify!(std___allocator_traits_base)) ); assert_eq!( ::std::mem::align_of::<std___allocator_traits_base>(), 1usize, concat!("Alignment of ", stringify!(std___allocator_traits_base)) ); } pub type std___alloc_rebind = std___allocator_traits_base; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_allocator_traits { pub _address: u8, } pub type std_allocator_traits_allocator_type<_Alloc> = _Alloc; pub type std_allocator_traits_value_type = [u8; 0usize]; pub type std_allocator_traits_pointer = std___detected_or_t; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_allocator_traits__Ptr { pub _address: u8, } pub type std_allocator_traits__Ptr_type = [u8; 0usize]; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_allocator_traits__Diff { pub _address: u8, } pub type std_allocator_traits__Diff_type = std_pointer_traits; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_allocator_traits__Size { pub _address: u8, } pub type std_allocator_traits_const_pointer = [u8; 0usize]; pub type std_allocator_traits_void_pointer = std_allocator_traits__Ptr; pub type std_allocator_traits_const_void_pointer = std_allocator_traits__Ptr; pub type std_allocator_traits_difference_type = [u8; 0usize]; pub type std_allocator_traits_size_type = [u8; 0usize]; pub type std_allocator_traits_propagate_on_container_copy_assignment = std___detected_or_t; pub type std_allocator_traits_propagate_on_container_move_assignment = std___detected_or_t; pub type std_allocator_traits_propagate_on_container_swap = std___detected_or_t; pub type std_allocator_traits_is_always_equal = std___detected_or_t; pub type std_allocator_traits_rebind_alloc = std___alloc_rebind; pub type std_allocator_traits_rebind_traits = std_allocator_traits; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_allocator_traits___construct_helper { pub _address: u8, } pub type std_allocator_traits___construct_helper_type<_Alloc> = _Alloc; pub type std_allocator_traits___has_construct = std_allocator_traits___construct_helper; #[repr(C)] pub struct std__Vector_base { pub _M_impl: std__Vector_base__Vector_impl, } pub type std__Vector_base__Tp_alloc_type = [u8; 0usize]; pub type std__Vector_base_pointer = 
[u8; 0usize]; #[repr(C)] pub struct std__Vector_base__Vector_impl { pub _M_start: std__Vector_base_pointer, pub _M_finish: std__Vector_base_pointer, pub _M_end_of_storage: std__Vector_base_pointer, } pub type std__Vector_base_allocator_type<_Alloc> = _Alloc; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct std_vector { pub _address: u8, } pub type std_vector__Base = std__Vector_base; pub type std_vector__Tp_alloc_type = std_vector__Base; pub type std_vector__Alloc_traits = __gnu_cxx___alloc_traits; pub type std_vector_value_type<_Tp> = _Tp; pub type std_vector_pointer = std_vector__Base; pub type std_vector_const_pointer = std_vector__Alloc_traits; pub type std_vector_reference = std_vector__Alloc_traits; pub type std_vector_const_reference = std_vector__Alloc_traits; pub type std_vector_iterator = __gnu_cxx___normal_iterator<std_vector_pointer>; pub type std_vector_const_iterator = __gnu_cxx___normal_iterator<std_vector_const_pointer>; pub type std_vector_const_reverse_iterator = std_reverse_iterator<std_vector_const_iterator>; pub type std_vector_reverse_iterator = std_reverse_iterator<std_vector_iterator>; pub type std_vector_size_type = std_size_t; pub type std_vector_difference_type = isize; pub type std_vector_allocator_type<_Alloc> = _Alloc; #[repr(C)] #[derive(Debug)] pub struct std_vector__Temporary_value { pub _M_this: *mut u8, pub __buf: u8, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct __gnu_cxx___normal_iterator<_Iterator> { pub _M_current: _Iterator, pub _phantom_0: ::std::marker::PhantomData<::std::cell::UnsafeCell<_Iterator>>, } pub type __gnu_cxx___normal_iterator___traits_type = std_iterator_traits; pub type __gnu_cxx___normal_iterator_iterator_type<_Iterator> = _Iterator; pub type __gnu_cxx___normal_iterator_iterator_category = __gnu_cxx___normal_iterator___traits_type; pub type __gnu_cxx___normal_iterator_value_type = __gnu_cxx___normal_iterator___traits_type; pub type __gnu_cxx___normal_iterator_difference_type = __gnu_cxx___normal_iterator___traits_type; pub type __gnu_cxx___normal_iterator_reference = __gnu_cxx___normal_iterator___traits_type; pub type __gnu_cxx___normal_iterator_pointer = __gnu_cxx___normal_iterator___traits_type; #[repr(C)] #[derive(Debug)] pub struct __gnu_cxx_new_allocator { pub _address: u8, } pub type __gnu_cxx_new_allocator_size_type = std_size_t; pub type __gnu_cxx_new_allocator_difference_type = isize; pub type __gnu_cxx_new_allocator_pointer<_Tp> = *mut _Tp; pub type __gnu_cxx_new_allocator_const_pointer<_Tp> = *const _Tp; pub type __gnu_cxx_new_allocator_reference<_Tp> = *mut _Tp; pub type __gnu_cxx_new_allocator_const_reference<_Tp> = *const _Tp; pub type __gnu_cxx_new_allocator_value_type<_Tp> = _Tp; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct __gnu_cxx_new_allocator_rebind { pub _address: u8, } pub type __gnu_cxx_new_allocator_rebind_other = __gnu_cxx_new_allocator; pub type __gnu_cxx_new_allocator_propagate_on_container_move_assignment = std_true_type; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct __gnu_cxx___alloc_traits { pub _address: u8, } pub type __gnu_cxx___alloc_traits_allocator_type<_Alloc> = _Alloc; pub type __gnu_cxx___alloc_traits__Base_type = std_allocator_traits; pub type __gnu_cxx___alloc_traits_value_type = __gnu_cxx___alloc_traits__Base_type; pub type __gnu_cxx___alloc_traits_pointer = __gnu_cxx___alloc_traits__Base_type; pub type __gnu_cxx___alloc_traits_const_pointer = __gnu_cxx___alloc_traits__Base_type; pub type __gnu_cxx___alloc_traits_size_type = __gnu_cxx___alloc_traits__Base_type; pub 
type __gnu_cxx___alloc_traits_difference_type = __gnu_cxx___alloc_traits__Base_type; pub type __gnu_cxx___alloc_traits_reference = *mut __gnu_cxx___alloc_traits_value_type; pub type __gnu_cxx___alloc_traits_const_reference = *const __gnu_cxx___alloc_traits_value_type; pub type __gnu_cxx___alloc_traits___is_custom_pointer = std___and_; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct __gnu_cxx___alloc_traits_rebind { pub _address: u8, } pub type __gnu_cxx___alloc_traits_rebind_other = __gnu_cxx___alloc_traits__Base_type; pub type size_t = ::std::os::raw::c_ulong; pub type __off_t = ::std::os::raw::c_long; pub type __off64_t = ::std::os::raw::c_long; #[doc = " A 2-dimensional euclidean vector with double precision."] #[doc = " Implementation based on the Vector2 template from Artery Engine."] #[doc = " @author Viktor Chlumsky"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct msdfgen_Vector2 { pub x: f64, pub y: f64, } #[test] fn bindgen_test_layout_msdfgen_Vector2() { assert_eq!( ::std::mem::size_of::<msdfgen_Vector2>(), 16usize, concat!("Size of: ", stringify!(msdfgen_Vector2)) ); assert_eq!( ::std::mem::align_of::<msdfgen_Vector2>(), 8usize, concat!("Alignment of ", stringify!(msdfgen_Vector2)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_Vector2>())).x as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(msdfgen_Vector2), "::", stringify!(x) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_Vector2>())).y as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(msdfgen_Vector2), "::", stringify!(y) ) ); } extern "C" { #[doc = " Sets the vector to zero."] #[link_name = "\u{1}_ZN7msdfgen7Vector25resetEv"] pub fn msdfgen_Vector2_reset(this: *mut msdfgen_Vector2); } extern "C" { #[doc = " Sets individual elements of the vector."] #[link_name = "\u{1}_ZN7msdfgen7Vector23setEdd"] pub fn msdfgen_Vector2_set(this: *mut msdfgen_Vector2, x: f64, y: f64); } extern "C" { #[doc = " Returns the vector's length."] #[link_name = "\u{1}_ZNK7msdfgen7Vector26lengthEv"] pub fn msdfgen_Vector2_length(this: *const msdfgen_Vector2) -> f64; } extern "C" { #[doc = " Returns the angle of the vector in radians (atan2)."] #[link_name = "\u{1}_ZNK7msdfgen7Vector29directionEv"] pub fn msdfgen_Vector2_direction(this: *const msdfgen_Vector2) -> f64; } extern "C" { #[doc = " Returns the normalized vector - one that has the same direction but unit length."] #[link_name = "\u{1}_ZNK7msdfgen7Vector29normalizeEb"] pub fn msdfgen_Vector2_normalize( this: *const msdfgen_Vector2, allowZero: bool, ) -> msdfgen_Vector2; } extern "C" { #[doc = " Returns a vector with the same length that is orthogonal to this one."] #[link_name = "\u{1}_ZNK7msdfgen7Vector213getOrthogonalEb"] pub fn msdfgen_Vector2_getOrthogonal( this: *const msdfgen_Vector2, polarity: bool, ) -> msdfgen_Vector2; } extern "C" { #[doc = " Returns a vector with unit length that is orthogonal to this one."] #[link_name = "\u{1}_ZNK7msdfgen7Vector214getOrthonormalEbb"] pub fn msdfgen_Vector2_getOrthonormal( this: *const msdfgen_Vector2, polarity: bool, allowZero: bool, ) -> msdfgen_Vector2; } extern "C" { #[doc = " Returns a vector projected along this one."] #[link_name = "\u{1}_ZNK7msdfgen7Vector27projectERKS0_b"] pub fn msdfgen_Vector2_project( this: *const msdfgen_Vector2, vector: *const msdfgen_Vector2, positive: bool, ) -> msdfgen_Vector2; } extern "C" { #[link_name = "\u{1}_ZN7msdfgen7Vector2C1Ed"] pub fn msdfgen_Vector2_Vector2(this: *mut msdfgen_Vector2, val: f64); } extern "C" { #[link_name = 
"\u{1}_ZN7msdfgen7Vector2C1Edd"] pub fn msdfgen_Vector2_Vector21(this: *mut msdfgen_Vector2, x: f64, y: f64); } impl msdfgen_Vector2 { #[inline] pub unsafe fn reset(&mut self) { msdfgen_Vector2_reset(self) } #[inline] pub unsafe fn set(&mut self, x: f64, y: f64) { msdfgen_Vector2_set(self, x, y) } #[inline] pub unsafe fn length(&self) -> f64 { msdfgen_Vector2_length(self) } #[inline] pub unsafe fn direction(&self) -> f64 { msdfgen_Vector2_direction(self) } #[inline] pub unsafe fn normalize(&self, allowZero: bool) -> msdfgen_Vector2 { msdfgen_Vector2_normalize(self, allowZero) } #[inline] pub unsafe fn getOrthogonal(&self, polarity: bool) -> msdfgen_Vector2 { msdfgen_Vector2_getOrthogonal(self, polarity) } #[inline] pub unsafe fn getOrthonormal(&self, polarity: bool, allowZero: bool) -> msdfgen_Vector2 { msdfgen_Vector2_getOrthonormal(self, polarity, allowZero) } #[inline] pub unsafe fn project( &self, vector: *const msdfgen_Vector2, positive: bool, ) -> msdfgen_Vector2 { msdfgen_Vector2_project(self, vector, positive) } #[inline] pub unsafe fn new(val: f64) -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_Vector2_Vector2(__bindgen_tmp.as_mut_ptr(), val); __bindgen_tmp.assume_init() } #[inline] pub unsafe fn new1(x: f64, y: f64) -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_Vector2_Vector21(__bindgen_tmp.as_mut_ptr(), x, y); __bindgen_tmp.assume_init() } } #[doc = " A vector may also represent a point, which shall be differentiated semantically using the alias Point2."] pub type msdfgen_Point2 = msdfgen_Vector2; pub const msdfgen_FillRule_FILL_NONZERO: msdfgen_FillRule = 0; pub const msdfgen_FillRule_FILL_ODD: msdfgen_FillRule = 1; pub const msdfgen_FillRule_FILL_POSITIVE: msdfgen_FillRule = 2; pub const msdfgen_FillRule_FILL_NEGATIVE: msdfgen_FillRule = 3; #[doc = " Fill rule dictates how intersection total is interpreted during rasterization."] pub type msdfgen_FillRule = u32; extern "C" { #[doc = " Resolves the number of intersection into a binary fill value based on fill rule."] #[link_name = "\u{1}_ZN7msdfgen17interpretFillRuleEiNS_8FillRuleE"] pub fn msdfgen_interpretFillRule( intersections: ::std::os::raw::c_int, fillRule: msdfgen_FillRule, ) -> bool; } #[doc = " Represents a horizontal scanline intersecting a shape."] #[repr(C)] #[derive(Debug)] pub struct msdfgen_Scanline { pub intersections: [u64; 3usize], pub lastIndex: ::std::os::raw::c_int, } #[doc = " An intersection with the scanline."] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct msdfgen_Scanline_Intersection { #[doc = " X coordinate."] pub x: f64, #[doc = " Normalized Y direction of the oriented edge at the point of intersection."] pub direction: ::std::os::raw::c_int, } #[test] fn bindgen_test_layout_msdfgen_Scanline_Intersection() { assert_eq!( ::std::mem::size_of::<msdfgen_Scanline_Intersection>(), 16usize, concat!("Size of: ", stringify!(msdfgen_Scanline_Intersection)) ); assert_eq!( ::std::mem::align_of::<msdfgen_Scanline_Intersection>(), 8usize, concat!("Alignment of ", stringify!(msdfgen_Scanline_Intersection)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_Scanline_Intersection>())).x as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(msdfgen_Scanline_Intersection), "::", stringify!(x) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_Scanline_Intersection>())).direction as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(msdfgen_Scanline_Intersection), "::", stringify!(direction) ) ); } 
#[test] fn bindgen_test_layout_msdfgen_Scanline() { assert_eq!( ::std::mem::size_of::<msdfgen_Scanline>(), 32usize, concat!("Size of: ", stringify!(msdfgen_Scanline)) ); assert_eq!( ::std::mem::align_of::<msdfgen_Scanline>(), 8usize, concat!("Alignment of ", stringify!(msdfgen_Scanline)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_Scanline>())).intersections as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(msdfgen_Scanline), "::", stringify!(intersections) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_Scanline>())).lastIndex as *const _ as usize }, 24usize, concat!( "Offset of field: ", stringify!(msdfgen_Scanline), "::", stringify!(lastIndex) ) ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen8Scanline7overlapERKS0_S2_ddNS_8FillRuleE"] pub fn msdfgen_Scanline_overlap( a: *const msdfgen_Scanline, b: *const msdfgen_Scanline, xFrom: f64, xTo: f64, fillRule: msdfgen_FillRule, ) -> f64; } extern "C" { #[doc = " Populates the intersection list."] #[link_name = "\u{1}_ZN7msdfgen8Scanline16setIntersectionsERKSt6vectorINS0_12IntersectionESaIS2_EE"] pub fn msdfgen_Scanline_setIntersections( this: *mut msdfgen_Scanline, intersections: *const [u64; 3usize], ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen8Scanline16setIntersectionsEOSt6vectorINS0_12IntersectionESaIS2_EE"] pub fn msdfgen_Scanline_setIntersections1( this: *mut msdfgen_Scanline, intersections: *mut [u64; 3usize], ); } extern "C" { #[doc = " Returns the number of intersections left of x."] #[link_name = "\u{1}_ZNK7msdfgen8Scanline18countIntersectionsEd"] pub fn msdfgen_Scanline_countIntersections( this: *const msdfgen_Scanline, x: f64, ) -> ::std::os::raw::c_int; } extern "C" { #[doc = " Returns the total sign of intersections left of x."] #[link_name = "\u{1}_ZNK7msdfgen8Scanline16sumIntersectionsEd"] pub fn msdfgen_Scanline_sumIntersections( this: *const msdfgen_Scanline, x: f64, ) -> ::std::os::raw::c_int; } extern "C" { #[doc = " Decides whether the scanline is filled at x based on fill rule."] #[link_name = "\u{1}_ZNK7msdfgen8Scanline6filledEdNS_8FillRuleE"] pub fn msdfgen_Scanline_filled( this: *const msdfgen_Scanline, x: f64, fillRule: msdfgen_FillRule, ) -> bool; } extern "C" { #[link_name = "\u{1}_ZN7msdfgen8ScanlineC1Ev"] pub fn msdfgen_Scanline_Scanline(this: *mut msdfgen_Scanline); } impl msdfgen_Scanline { #[inline] pub unsafe fn overlap( a: *const msdfgen_Scanline, b: *const msdfgen_Scanline, xFrom: f64, xTo: f64, fillRule: msdfgen_FillRule, ) -> f64 { msdfgen_Scanline_overlap(a, b, xFrom, xTo, fillRule) } #[inline] pub unsafe fn setIntersections(&mut self, intersections: *const [u64; 3usize]) { msdfgen_Scanline_setIntersections(self, intersections) } #[inline] pub unsafe fn setIntersections1(&mut self, intersections: *mut [u64; 3usize]) { msdfgen_Scanline_setIntersections1(self, intersections) } #[inline] pub unsafe fn countIntersections(&self, x: f64) -> ::std::os::raw::c_int { msdfgen_Scanline_countIntersections(self, x) } #[inline] pub unsafe fn sumIntersections(&self, x: f64) -> ::std::os::raw::c_int { msdfgen_Scanline_sumIntersections(self, x) } #[inline] pub unsafe fn filled(&self, x: f64, fillRule: msdfgen_FillRule) -> bool { msdfgen_Scanline_filled(self, x, fillRule) } #[inline] pub unsafe fn new() -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_Scanline_Scanline(__bindgen_tmp.as_mut_ptr()); __bindgen_tmp.assume_init() } } #[doc = " Represents a signed distance and alignment, which together can be compared to uniquely determine the closest 
edge segment."] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct msdfgen_SignedDistance { pub distance: f64, pub dot: f64, } extern "C" { #[link_name = "\u{1}_ZN7msdfgen14SignedDistance8INFINITEE"] pub static msdfgen_SignedDistance_INFINITE: msdfgen_SignedDistance; } #[test] fn bindgen_test_layout_msdfgen_SignedDistance() { assert_eq!( ::std::mem::size_of::<msdfgen_SignedDistance>(), 16usize, concat!("Size of: ", stringify!(msdfgen_SignedDistance)) ); assert_eq!( ::std::mem::align_of::<msdfgen_SignedDistance>(), 8usize, concat!("Alignment of ", stringify!(msdfgen_SignedDistance)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_SignedDistance>())).distance as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(msdfgen_SignedDistance), "::", stringify!(distance) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_SignedDistance>())).dot as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(msdfgen_SignedDistance), "::", stringify!(dot) ) ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen14SignedDistanceC1Ev"] pub fn msdfgen_SignedDistance_SignedDistance(this: *mut msdfgen_SignedDistance); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen14SignedDistanceC1Edd"] pub fn msdfgen_SignedDistance_SignedDistance1( this: *mut msdfgen_SignedDistance, dist: f64, d: f64, ); } impl msdfgen_SignedDistance { #[inline] pub unsafe fn new() -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_SignedDistance_SignedDistance(__bindgen_tmp.as_mut_ptr()); __bindgen_tmp.assume_init() } #[inline] pub unsafe fn new1(dist: f64, d: f64) -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_SignedDistance_SignedDistance1(__bindgen_tmp.as_mut_ptr(), dist, d); __bindgen_tmp.assume_init() } } pub const msdfgen_EdgeColor_BLACK: msdfgen_EdgeColor = 0; pub const msdfgen_EdgeColor_RED: msdfgen_EdgeColor = 1; pub const msdfgen_EdgeColor_GREEN: msdfgen_EdgeColor = 2; pub const msdfgen_EdgeColor_YELLOW: msdfgen_EdgeColor = 3; pub const msdfgen_EdgeColor_BLUE: msdfgen_EdgeColor = 4; pub const msdfgen_EdgeColor_MAGENTA: msdfgen_EdgeColor = 5; pub const msdfgen_EdgeColor_CYAN: msdfgen_EdgeColor = 6; pub const msdfgen_EdgeColor_WHITE: msdfgen_EdgeColor = 7; #[doc = " Edge color specifies which color channels an edge belongs to."] pub type msdfgen_EdgeColor = u32; #[repr(C)] pub struct msdfgen_EdgeSegment__bindgen_vtable(::std::os::raw::c_void); #[doc = " An abstract edge segment."] #[repr(C)] #[derive(Debug)] pub struct msdfgen_EdgeSegment { pub vtable_: *const msdfgen_EdgeSegment__bindgen_vtable, pub color: msdfgen_EdgeColor, } #[test] fn bindgen_test_layout_msdfgen_EdgeSegment() { assert_eq!( ::std::mem::size_of::<msdfgen_EdgeSegment>(), 16usize, concat!("Size of: ", stringify!(msdfgen_EdgeSegment)) ); assert_eq!( ::std::mem::align_of::<msdfgen_EdgeSegment>(), 8usize, concat!("Alignment of ", stringify!(msdfgen_EdgeSegment)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_EdgeSegment>())).color as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(msdfgen_EdgeSegment), "::", stringify!(color) ) ); } extern "C" { #[doc = " Converts a previously retrieved signed distance from origin to pseudo-distance."] #[link_name = "\u{1}_ZNK7msdfgen11EdgeSegment24distanceToPseudoDistanceERNS_14SignedDistanceENS_7Vector2Ed"] pub fn msdfgen_EdgeSegment_distanceToPseudoDistance( this: *mut ::std::os::raw::c_void, distance: *mut msdfgen_SignedDistance, origin: msdfgen_Point2, param: f64, ); } #[doc = " A line segment."] #[repr(C)] 
#[derive(Debug)] pub struct msdfgen_LinearSegment { pub _base: msdfgen_EdgeSegment, pub p: [msdfgen_Point2; 2usize], } #[test] fn bindgen_test_layout_msdfgen_LinearSegment() { assert_eq!( ::std::mem::size_of::<msdfgen_LinearSegment>(), 48usize, concat!("Size of: ", stringify!(msdfgen_LinearSegment)) ); assert_eq!( ::std::mem::align_of::<msdfgen_LinearSegment>(), 8usize, concat!("Alignment of ", stringify!(msdfgen_LinearSegment)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_LinearSegment>())).p as *const _ as usize }, 16usize, concat!( "Offset of field: ", stringify!(msdfgen_LinearSegment), "::", stringify!(p) ) ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen13LinearSegmentC1ENS_7Vector2ES1_NS_9EdgeColorE"] pub fn msdfgen_LinearSegment_LinearSegment( this: *mut msdfgen_LinearSegment, p0: msdfgen_Point2, p1: msdfgen_Point2, edgeColor: msdfgen_EdgeColor, ); } impl msdfgen_LinearSegment { #[inline] pub unsafe fn new( p0: msdfgen_Point2, p1: msdfgen_Point2, edgeColor: msdfgen_EdgeColor, ) -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_LinearSegment_LinearSegment(__bindgen_tmp.as_mut_ptr(), p0, p1, edgeColor); __bindgen_tmp.assume_init() } } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen13LinearSegment5cloneEv"] pub fn msdfgen_LinearSegment_clone( this: *mut ::std::os::raw::c_void, ) -> *mut msdfgen_LinearSegment; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen13LinearSegment5pointEd"] pub fn msdfgen_LinearSegment_point( this: *mut ::std::os::raw::c_void, param: f64, ) -> msdfgen_Point2; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen13LinearSegment9directionEd"] pub fn msdfgen_LinearSegment_direction( this: *mut ::std::os::raw::c_void, param: f64, ) -> msdfgen_Vector2; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen13LinearSegment14signedDistanceENS_7Vector2ERd"] pub fn msdfgen_LinearSegment_signedDistance( this: *mut ::std::os::raw::c_void, origin: msdfgen_Point2, param: *mut f64, ) -> msdfgen_SignedDistance; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen13LinearSegment21scanlineIntersectionsEPdPid"] pub fn msdfgen_LinearSegment_scanlineIntersections( this: *mut ::std::os::raw::c_void, x: *mut f64, dy: *mut ::std::os::raw::c_int, y: f64, ) -> ::std::os::raw::c_int; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen13LinearSegment6boundsERdS1_S1_S1_"] pub fn msdfgen_LinearSegment_bounds( this: *mut ::std::os::raw::c_void, l: *mut f64, b: *mut f64, r: *mut f64, t: *mut f64, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen13LinearSegment14moveStartPointENS_7Vector2E"] pub fn msdfgen_LinearSegment_moveStartPoint( this: *mut ::std::os::raw::c_void, to: msdfgen_Point2, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen13LinearSegment12moveEndPointENS_7Vector2E"] pub fn msdfgen_LinearSegment_moveEndPoint( this: *mut ::std::os::raw::c_void, to: msdfgen_Point2, ); } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen13LinearSegment13splitInThirdsERPNS_11EdgeSegmentES3_S3_"] pub fn msdfgen_LinearSegment_splitInThirds( this: *mut ::std::os::raw::c_void, part1: *mut *mut msdfgen_EdgeSegment, part2: *mut *mut msdfgen_EdgeSegment, part3: *mut *mut msdfgen_EdgeSegment, ); } #[doc = " A quadratic Bezier curve."] #[repr(C)] #[derive(Debug)] pub struct msdfgen_QuadraticSegment { pub _base: msdfgen_EdgeSegment, pub p: [msdfgen_Point2; 3usize], } #[test] fn bindgen_test_layout_msdfgen_QuadraticSegment() { assert_eq!( ::std::mem::size_of::<msdfgen_QuadraticSegment>(), 64usize, concat!("Size of: ", stringify!(msdfgen_QuadraticSegment)) ); assert_eq!( 
::std::mem::align_of::<msdfgen_QuadraticSegment>(), 8usize, concat!("Alignment of ", stringify!(msdfgen_QuadraticSegment)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_QuadraticSegment>())).p as *const _ as usize }, 16usize, concat!( "Offset of field: ", stringify!(msdfgen_QuadraticSegment), "::", stringify!(p) ) ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen16QuadraticSegmentC1ENS_7Vector2ES1_S1_NS_9EdgeColorE"] pub fn msdfgen_QuadraticSegment_QuadraticSegment( this: *mut msdfgen_QuadraticSegment, p0: msdfgen_Point2, p1: msdfgen_Point2, p2: msdfgen_Point2, edgeColor: msdfgen_EdgeColor, ); } impl msdfgen_QuadraticSegment { #[inline] pub unsafe fn new( p0: msdfgen_Point2, p1: msdfgen_Point2, p2: msdfgen_Point2, edgeColor: msdfgen_EdgeColor, ) -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_QuadraticSegment_QuadraticSegment( __bindgen_tmp.as_mut_ptr(), p0, p1, p2, edgeColor, ); __bindgen_tmp.assume_init() } } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen16QuadraticSegment5cloneEv"] pub fn msdfgen_QuadraticSegment_clone( this: *mut ::std::os::raw::c_void, ) -> *mut msdfgen_QuadraticSegment; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen16QuadraticSegment5pointEd"] pub fn msdfgen_QuadraticSegment_point( this: *mut ::std::os::raw::c_void, param: f64, ) -> msdfgen_Point2; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen16QuadraticSegment9directionEd"] pub fn msdfgen_QuadraticSegment_direction( this: *mut ::std::os::raw::c_void, param: f64, ) -> msdfgen_Vector2; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen16QuadraticSegment14signedDistanceENS_7Vector2ERd"] pub fn msdfgen_QuadraticSegment_signedDistance( this: *mut ::std::os::raw::c_void, origin: msdfgen_Point2, param: *mut f64, ) -> msdfgen_SignedDistance; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen16QuadraticSegment21scanlineIntersectionsEPdPid"] pub fn msdfgen_QuadraticSegment_scanlineIntersections( this: *mut ::std::os::raw::c_void, x: *mut f64, dy: *mut ::std::os::raw::c_int, y: f64, ) -> ::std::os::raw::c_int; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen16QuadraticSegment6boundsERdS1_S1_S1_"] pub fn msdfgen_QuadraticSegment_bounds( this: *mut ::std::os::raw::c_void, l: *mut f64, b: *mut f64, r: *mut f64, t: *mut f64, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen16QuadraticSegment14moveStartPointENS_7Vector2E"] pub fn msdfgen_QuadraticSegment_moveStartPoint( this: *mut ::std::os::raw::c_void, to: msdfgen_Point2, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen16QuadraticSegment12moveEndPointENS_7Vector2E"] pub fn msdfgen_QuadraticSegment_moveEndPoint( this: *mut ::std::os::raw::c_void, to: msdfgen_Point2, ); } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen16QuadraticSegment13splitInThirdsERPNS_11EdgeSegmentES3_S3_"] pub fn msdfgen_QuadraticSegment_splitInThirds( this: *mut ::std::os::raw::c_void, part1: *mut *mut msdfgen_EdgeSegment, part2: *mut *mut msdfgen_EdgeSegment, part3: *mut *mut msdfgen_EdgeSegment, ); } #[doc = " A cubic Bezier curve."] #[repr(C)] #[derive(Debug)] pub struct msdfgen_CubicSegment { pub _base: msdfgen_EdgeSegment, pub p: [msdfgen_Point2; 4usize], } #[test] fn bindgen_test_layout_msdfgen_CubicSegment() { assert_eq!( ::std::mem::size_of::<msdfgen_CubicSegment>(), 80usize, concat!("Size of: ", stringify!(msdfgen_CubicSegment)) ); assert_eq!( ::std::mem::align_of::<msdfgen_CubicSegment>(), 8usize, concat!("Alignment of ", stringify!(msdfgen_CubicSegment)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_CubicSegment>())).p as *const _ as usize }, 16usize, 
concat!( "Offset of field: ", stringify!(msdfgen_CubicSegment), "::", stringify!(p) ) ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen12CubicSegmentC1ENS_7Vector2ES1_S1_S1_NS_9EdgeColorE"] pub fn msdfgen_CubicSegment_CubicSegment( this: *mut msdfgen_CubicSegment, p0: msdfgen_Point2, p1: msdfgen_Point2, p2: msdfgen_Point2, p3: msdfgen_Point2, edgeColor: msdfgen_EdgeColor, ); } impl msdfgen_CubicSegment { #[inline] pub unsafe fn new( p0: msdfgen_Point2, p1: msdfgen_Point2, p2: msdfgen_Point2, p3: msdfgen_Point2, edgeColor: msdfgen_EdgeColor, ) -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_CubicSegment_CubicSegment(__bindgen_tmp.as_mut_ptr(), p0, p1, p2, p3, edgeColor); __bindgen_tmp.assume_init() } } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen12CubicSegment5cloneEv"] pub fn msdfgen_CubicSegment_clone( this: *mut ::std::os::raw::c_void, ) -> *mut msdfgen_CubicSegment; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen12CubicSegment5pointEd"] pub fn msdfgen_CubicSegment_point( this: *mut ::std::os::raw::c_void, param: f64, ) -> msdfgen_Point2; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen12CubicSegment9directionEd"] pub fn msdfgen_CubicSegment_direction( this: *mut ::std::os::raw::c_void, param: f64, ) -> msdfgen_Vector2; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen12CubicSegment14signedDistanceENS_7Vector2ERd"] pub fn msdfgen_CubicSegment_signedDistance( this: *mut ::std::os::raw::c_void, origin: msdfgen_Point2, param: *mut f64, ) -> msdfgen_SignedDistance; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen12CubicSegment21scanlineIntersectionsEPdPid"] pub fn msdfgen_CubicSegment_scanlineIntersections( this: *mut ::std::os::raw::c_void, x: *mut f64, dy: *mut ::std::os::raw::c_int, y: f64, ) -> ::std::os::raw::c_int; } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen12CubicSegment6boundsERdS1_S1_S1_"] pub fn msdfgen_CubicSegment_bounds( this: *mut ::std::os::raw::c_void, l: *mut f64, b: *mut f64, r: *mut f64, t: *mut f64, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen12CubicSegment14moveStartPointENS_7Vector2E"] pub fn msdfgen_CubicSegment_moveStartPoint( this: *mut ::std::os::raw::c_void, to: msdfgen_Point2, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen12CubicSegment12moveEndPointENS_7Vector2E"] pub fn msdfgen_CubicSegment_moveEndPoint(this: *mut ::std::os::raw::c_void, to: msdfgen_Point2); } extern "C" { #[link_name = "\u{1}_ZNK7msdfgen12CubicSegment13splitInThirdsERPNS_11EdgeSegmentES3_S3_"] pub fn msdfgen_CubicSegment_splitInThirds( this: *mut ::std::os::raw::c_void, part1: *mut *mut msdfgen_EdgeSegment, part2: *mut *mut msdfgen_EdgeSegment, part3: *mut *mut msdfgen_EdgeSegment, ); } #[doc = " Container for a single edge of dynamic type."] #[repr(C)] #[derive(Debug)] pub struct msdfgen_EdgeHolder { pub edgeSegment: *mut msdfgen_EdgeSegment, } #[test] fn bindgen_test_layout_msdfgen_EdgeHolder() { assert_eq!( ::std::mem::size_of::<msdfgen_EdgeHolder>(), 8usize, concat!("Size of: ", stringify!(msdfgen_EdgeHolder)) ); assert_eq!( ::std::mem::align_of::<msdfgen_EdgeHolder>(), 8usize, concat!("Alignment of ", stringify!(msdfgen_EdgeHolder)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_EdgeHolder>())).edgeSegment as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(msdfgen_EdgeHolder), "::", stringify!(edgeSegment) ) ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen10EdgeHolderC1Ev"] pub fn msdfgen_EdgeHolder_EdgeHolder(this: *mut msdfgen_EdgeHolder); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen10EdgeHolderC1EPNS_11EdgeSegmentE"] 
pub fn msdfgen_EdgeHolder_EdgeHolder1( this: *mut msdfgen_EdgeHolder, segment: *mut msdfgen_EdgeSegment, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen10EdgeHolderC1ENS_7Vector2ES1_NS_9EdgeColorE"] pub fn msdfgen_EdgeHolder_EdgeHolder2( this: *mut msdfgen_EdgeHolder, p0: msdfgen_Point2, p1: msdfgen_Point2, edgeColor: msdfgen_EdgeColor, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen10EdgeHolderC1ENS_7Vector2ES1_S1_NS_9EdgeColorE"] pub fn msdfgen_EdgeHolder_EdgeHolder3( this: *mut msdfgen_EdgeHolder, p0: msdfgen_Point2, p1: msdfgen_Point2, p2: msdfgen_Point2, edgeColor: msdfgen_EdgeColor, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen10EdgeHolderC1ENS_7Vector2ES1_S1_S1_NS_9EdgeColorE"] pub fn msdfgen_EdgeHolder_EdgeHolder4( this: *mut msdfgen_EdgeHolder, p0: msdfgen_Point2, p1: msdfgen_Point2, p2: msdfgen_Point2, p3: msdfgen_Point2, edgeColor: msdfgen_EdgeColor, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen10EdgeHolderC1ERKS0_"] pub fn msdfgen_EdgeHolder_EdgeHolder5( this: *mut msdfgen_EdgeHolder, orig: *const msdfgen_EdgeHolder, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen10EdgeHolderC1EOS0_"] pub fn msdfgen_EdgeHolder_EdgeHolder6( this: *mut msdfgen_EdgeHolder, orig: *mut msdfgen_EdgeHolder, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen10EdgeHolderD1Ev"] pub fn msdfgen_EdgeHolder_EdgeHolder_destructor(this: *mut msdfgen_EdgeHolder); } impl msdfgen_EdgeHolder { #[inline] pub unsafe fn new() -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_EdgeHolder_EdgeHolder(__bindgen_tmp.as_mut_ptr()); __bindgen_tmp.assume_init() } #[inline] pub unsafe fn new1(segment: *mut msdfgen_EdgeSegment) -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_EdgeHolder_EdgeHolder1(__bindgen_tmp.as_mut_ptr(), segment); __bindgen_tmp.assume_init() } #[inline] pub unsafe fn new2( p0: msdfgen_Point2, p1: msdfgen_Point2, edgeColor: msdfgen_EdgeColor, ) -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_EdgeHolder_EdgeHolder2(__bindgen_tmp.as_mut_ptr(), p0, p1, edgeColor); __bindgen_tmp.assume_init() } #[inline] pub unsafe fn new3( p0: msdfgen_Point2, p1: msdfgen_Point2, p2: msdfgen_Point2, edgeColor: msdfgen_EdgeColor, ) -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_EdgeHolder_EdgeHolder3(__bindgen_tmp.as_mut_ptr(), p0, p1, p2, edgeColor); __bindgen_tmp.assume_init() } #[inline] pub unsafe fn new4( p0: msdfgen_Point2, p1: msdfgen_Point2, p2: msdfgen_Point2, p3: msdfgen_Point2, edgeColor: msdfgen_EdgeColor, ) -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_EdgeHolder_EdgeHolder4(__bindgen_tmp.as_mut_ptr(), p0, p1, p2, p3, edgeColor); __bindgen_tmp.assume_init() } #[inline] pub unsafe fn new5(orig: *const msdfgen_EdgeHolder) -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_EdgeHolder_EdgeHolder5(__bindgen_tmp.as_mut_ptr(), orig); __bindgen_tmp.assume_init() } #[inline] pub unsafe fn new6(orig: *mut msdfgen_EdgeHolder) -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_EdgeHolder_EdgeHolder6(__bindgen_tmp.as_mut_ptr(), orig); __bindgen_tmp.assume_init() } #[inline] pub unsafe fn destruct(&mut self) { msdfgen_EdgeHolder_EdgeHolder_destructor(self) } } #[doc = " A single closed contour of a shape."] #[repr(C)] #[derive(Debug)] pub struct msdfgen_Contour { #[doc = " The sequence of edges that make up the contour."] pub edges: [u64; 3usize], } #[test] fn bindgen_test_layout_msdfgen_Contour() { assert_eq!( 
::std::mem::size_of::<msdfgen_Contour>(), 24usize, concat!("Size of: ", stringify!(msdfgen_Contour)) ); assert_eq!( ::std::mem::align_of::<msdfgen_Contour>(), 8usize, concat!("Alignment of ", stringify!(msdfgen_Contour)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_Contour>())).edges as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(msdfgen_Contour), "::", stringify!(edges) ) ); } extern "C" { #[doc = " Adds an edge to the contour."] #[link_name = "\u{1}_ZN7msdfgen7Contour7addEdgeERKNS_10EdgeHolderE"] pub fn msdfgen_Contour_addEdge(this: *mut msdfgen_Contour, edge: *const msdfgen_EdgeHolder); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen7Contour7addEdgeEONS_10EdgeHolderE"] pub fn msdfgen_Contour_addEdge1(this: *mut msdfgen_Contour, edge: *mut msdfgen_EdgeHolder); } extern "C" { #[doc = " Creates a new edge in the contour and returns its reference."] #[link_name = "\u{1}_ZN7msdfgen7Contour7addEdgeEv"] pub fn msdfgen_Contour_addEdge2(this: *mut msdfgen_Contour) -> *mut msdfgen_EdgeHolder; } extern "C" { #[doc = " Adjusts the bounding box to fit the contour."] #[link_name = "\u{1}_ZNK7msdfgen7Contour6boundsERdS1_S1_S1_"] pub fn msdfgen_Contour_bounds( this: *const msdfgen_Contour, l: *mut f64, b: *mut f64, r: *mut f64, t: *mut f64, ); } extern "C" { #[doc = " Adjusts the bounding box to fit the contour border's mitered corners."] #[link_name = "\u{1}_ZNK7msdfgen7Contour11miterBoundsERdS1_S1_S1_dd"] pub fn msdfgen_Contour_miterBounds( this: *const msdfgen_Contour, l: *mut f64, b: *mut f64, r: *mut f64, t: *mut f64, border: f64, miterLimit: f64, ); } extern "C" { #[doc = " Computes the winding of the contour. Returns 1 if positive, -1 if negative."] #[link_name = "\u{1}_ZNK7msdfgen7Contour7windingEv"] pub fn msdfgen_Contour_winding(this: *const msdfgen_Contour) -> ::std::os::raw::c_int; } impl msdfgen_Contour { #[inline] pub unsafe fn addEdge(&mut self, edge: *const msdfgen_EdgeHolder) { msdfgen_Contour_addEdge(self, edge) } #[inline] pub unsafe fn addEdge1(&mut self, edge: *mut msdfgen_EdgeHolder) { msdfgen_Contour_addEdge1(self, edge) } #[inline] pub unsafe fn addEdge2(&mut self) -> *mut msdfgen_EdgeHolder { msdfgen_Contour_addEdge2(self) } #[inline] pub unsafe fn bounds(&self, l: *mut f64, b: *mut f64, r: *mut f64, t: *mut f64) { msdfgen_Contour_bounds(self, l, b, r, t) } #[inline] pub unsafe fn miterBounds( &self, l: *mut f64, b: *mut f64, r: *mut f64, t: *mut f64, border: f64, miterLimit: f64, ) { msdfgen_Contour_miterBounds(self, l, b, r, t, border, miterLimit) } #[inline] pub unsafe fn winding(&self) -> ::std::os::raw::c_int { msdfgen_Contour_winding(self) } } #[doc = " Vector shape representation."] #[repr(C)] #[derive(Debug)] pub struct msdfgen_Shape { #[doc = " The list of contours the shape consists of."] pub contours: [u64; 3usize], #[doc = " Specifies whether the shape uses bottom-to-top (false) or top-to-bottom (true) Y coordinates."] pub inverseYAxis: bool, } #[test] fn bindgen_test_layout_msdfgen_Shape() { assert_eq!( ::std::mem::size_of::<msdfgen_Shape>(), 32usize, concat!("Size of: ", stringify!(msdfgen_Shape)) ); assert_eq!( ::std::mem::align_of::<msdfgen_Shape>(), 8usize, concat!("Alignment of ", stringify!(msdfgen_Shape)) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_Shape>())).contours as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(msdfgen_Shape), "::", stringify!(contours) ) ); assert_eq!( unsafe { &(*(::std::ptr::null::<msdfgen_Shape>())).inverseYAxis as *const _ as usize }, 24usize, concat!( "Offset of 
field: ", stringify!(msdfgen_Shape), "::", stringify!(inverseYAxis) ) ); } extern "C" { #[doc = " Adds a contour."] #[link_name = "\u{1}_ZN7msdfgen5Shape10addContourERKNS_7ContourE"] pub fn msdfgen_Shape_addContour(this: *mut msdfgen_Shape, contour: *const msdfgen_Contour); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen5Shape10addContourEONS_7ContourE"] pub fn msdfgen_Shape_addContour1(this: *mut msdfgen_Shape, contour: *mut msdfgen_Contour); } extern "C" { #[doc = " Adds a blank contour and returns its reference."] #[link_name = "\u{1}_ZN7msdfgen5Shape10addContourEv"] pub fn msdfgen_Shape_addContour2(this: *mut msdfgen_Shape) -> *mut msdfgen_Contour; } extern "C" { #[doc = " Normalizes the shape geometry for distance field generation."] #[link_name = "\u{1}_ZN7msdfgen5Shape9normalizeEv"] pub fn msdfgen_Shape_normalize(this: *mut msdfgen_Shape); } extern "C" { #[doc = " Performs basic checks to determine if the object represents a valid shape."] #[link_name = "\u{1}_ZNK7msdfgen5Shape8validateEv"] pub fn msdfgen_Shape_validate(this: *const msdfgen_Shape) -> bool; } extern "C" { #[doc = " Adjusts the bounding box to fit the shape."] #[link_name = "\u{1}_ZNK7msdfgen5Shape6boundsERdS1_S1_S1_"] pub fn msdfgen_Shape_bounds( this: *const msdfgen_Shape, l: *mut f64, b: *mut f64, r: *mut f64, t: *mut f64, ); } extern "C" { #[doc = " Adjusts the bounding box to fit the shape border's mitered corners."] #[link_name = "\u{1}_ZNK7msdfgen5Shape11miterBoundsERdS1_S1_S1_dd"] pub fn msdfgen_Shape_miterBounds( this: *const msdfgen_Shape, l: *mut f64, b: *mut f64, r: *mut f64, t: *mut f64, border: f64, miterLimit: f64, ); } extern "C" { #[doc = " Outputs the scanline that intersects the shape at y."] #[link_name = "\u{1}_ZNK7msdfgen5Shape8scanlineERNS_8ScanlineEd"] pub fn msdfgen_Shape_scanline(this: *const msdfgen_Shape, line: *mut msdfgen_Scanline, y: f64); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen5ShapeC1Ev"] pub fn msdfgen_Shape_Shape(this: *mut msdfgen_Shape); } impl msdfgen_Shape { #[inline] pub unsafe fn addContour(&mut self, contour: *const msdfgen_Contour) { msdfgen_Shape_addContour(self, contour) } #[inline] pub unsafe fn addContour1(&mut self, contour: *mut msdfgen_Contour) { msdfgen_Shape_addContour1(self, contour) } #[inline] pub unsafe fn addContour2(&mut self) -> *mut msdfgen_Contour { msdfgen_Shape_addContour2(self) } #[inline] pub unsafe fn normalize(&mut self) { msdfgen_Shape_normalize(self) } #[inline] pub unsafe fn validate(&self) -> bool { msdfgen_Shape_validate(self) } #[inline] pub unsafe fn bounds(&self, l: *mut f64, b: *mut f64, r: *mut f64, t: *mut f64) { msdfgen_Shape_bounds(self, l, b, r, t) } #[inline] pub unsafe fn miterBounds( &self, l: *mut f64, b: *mut f64, r: *mut f64, t: *mut f64, border: f64, miterLimit: f64, ) { msdfgen_Shape_miterBounds(self, l, b, r, t, border, miterLimit) } #[inline] pub unsafe fn scanline(&self, line: *mut msdfgen_Scanline, y: f64) { msdfgen_Shape_scanline(self, line, y) } #[inline] pub unsafe fn new() -> Self { let mut __bindgen_tmp = ::std::mem::MaybeUninit::uninit(); msdfgen_Shape_Shape(__bindgen_tmp.as_mut_ptr()); __bindgen_tmp.assume_init() } } pub type msdfgen_byte = ::std::os::raw::c_uchar; extern "C" { #[doc = " Assigns colors to edges of the shape in accordance to the multi-channel distance field technique."] #[doc = " May split some edges if necessary."] #[doc = " angleThreshold specifies the maximum angle (in radians) to be considered a corner, for example 3 (~172 degrees)."] #[doc = " Values below 1/2 PI will be treated as the 
external angle."] #[link_name = "\u{1}_ZN7msdfgen18edgeColoringSimpleERNS_5ShapeEdy"] pub fn msdfgen_edgeColoringSimple( shape: *mut msdfgen_Shape, angleThreshold: f64, seed: ::std::os::raw::c_ulonglong, ); } extern "C" { #[doc = " Reconstructs the shape's appearance into output from the distance field sdf."] #[link_name = "\u{1}_ZN7msdfgen9renderSDFERKNS_9BitmapRefIfLi1EEERKNS_14BitmapConstRefIfLi1EEEd"] pub fn msdfgen_renderSDF(output: *const u8, sdf: *const u8, pxRange: f64); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen9renderSDFERKNS_9BitmapRefIfLi3EEERKNS_14BitmapConstRefIfLi1EEEd"] pub fn msdfgen_renderSDF1(output: *const u8, sdf: *const u8, pxRange: f64); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen9renderSDFERKNS_9BitmapRefIfLi1EEERKNS_14BitmapConstRefIfLi3EEEd"] pub fn msdfgen_renderSDF2(output: *const u8, sdf: *const u8, pxRange: f64); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen9renderSDFERKNS_9BitmapRefIfLi3EEERKNS_14BitmapConstRefIfLi3EEEd"] pub fn msdfgen_renderSDF3(output: *const u8, sdf: *const u8, pxRange: f64); } extern "C" { #[doc = " Snaps the values of the floating-point bitmaps into one of the 256 values representable in a standard 8-bit bitmap."] #[link_name = "\u{1}_ZN7msdfgen12simulate8bitERKNS_9BitmapRefIfLi1EEE"] pub fn msdfgen_simulate8bit(bitmap: *const u8); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen12simulate8bitERKNS_9BitmapRefIfLi3EEE"] pub fn msdfgen_simulate8bit1(bitmap: *const u8); } extern "C" { #[doc = " Rasterizes the shape into a monochrome bitmap."] #[link_name = "\u{1}_ZN7msdfgen9rasterizeERKNS_9BitmapRefIfLi1EEERKNS_5ShapeERKNS_7Vector2ES9_NS_8FillRuleE"] pub fn msdfgen_rasterize( output: *const u8, shape: *const msdfgen_Shape, scale: *const msdfgen_Vector2, translate: *const msdfgen_Vector2, fillRule: msdfgen_FillRule, ); } extern "C" { #[doc = " Fixes the sign of the input signed distance field, so that it matches the shape's rasterized fill."] #[link_name = "\u{1}_ZN7msdfgen22distanceSignCorrectionERKNS_9BitmapRefIfLi1EEERKNS_5ShapeERKNS_7Vector2ES9_NS_8FillRuleE"] pub fn msdfgen_distanceSignCorrection( sdf: *const u8, shape: *const msdfgen_Shape, scale: *const msdfgen_Vector2, translate: *const msdfgen_Vector2, fillRule: msdfgen_FillRule, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen22distanceSignCorrectionERKNS_9BitmapRefIfLi3EEERKNS_5ShapeERKNS_7Vector2ES9_NS_8FillRuleE"] pub fn msdfgen_distanceSignCorrection1( sdf: *const u8, shape: *const msdfgen_Shape, scale: *const msdfgen_Vector2, translate: *const msdfgen_Vector2, fillRule: msdfgen_FillRule, ); } extern "C" { #[doc = " Analytically constructs a scanline at y evaluating fill by linear interpolation of the SDF."] #[link_name = "\u{1}_ZN7msdfgen11scanlineSDFERNS_8ScanlineERKNS_14BitmapConstRefIfLi1EEERKNS_7Vector2ES8_bd"] pub fn msdfgen_scanlineSDF( line: *mut msdfgen_Scanline, sdf: *const u8, scale: *const msdfgen_Vector2, translate: *const msdfgen_Vector2, inverseYAxis: bool, y: f64, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen11scanlineSDFERNS_8ScanlineERKNS_14BitmapConstRefIfLi3EEERKNS_7Vector2ES8_bd"] pub fn msdfgen_scanlineSDF1( line: *mut msdfgen_Scanline, sdf: *const u8, scale: *const msdfgen_Vector2, translate: *const msdfgen_Vector2, inverseYAxis: bool, y: f64, ); } extern "C" { #[doc = " Estimates the portion of the area that will be filled incorrectly when rendering using the SDF."] #[link_name = "\u{1}_ZN7msdfgen16estimateSDFErrorERKNS_14BitmapConstRefIfLi1EEERKNS_5ShapeERKNS_7Vector2ES9_iNS_8FillRuleE"] pub fn msdfgen_estimateSDFError( sdf: *const u8, 
shape: *const msdfgen_Shape, scale: *const msdfgen_Vector2, translate: *const msdfgen_Vector2, scanlinesPerRow: ::std::os::raw::c_int, fillRule: msdfgen_FillRule, ) -> f64; } extern "C" { #[link_name = "\u{1}_ZN7msdfgen16estimateSDFErrorERKNS_14BitmapConstRefIfLi3EEERKNS_5ShapeERKNS_7Vector2ES9_iNS_8FillRuleE"] pub fn msdfgen_estimateSDFError1( sdf: *const u8, shape: *const msdfgen_Shape, scale: *const msdfgen_Vector2, translate: *const msdfgen_Vector2, scanlinesPerRow: ::std::os::raw::c_int, fillRule: msdfgen_FillRule, ) -> f64; } extern "C" { #[doc = " Generates a conventional single-channel signed distance field."] #[link_name = "\u{1}_ZN7msdfgen11generateSDFERKNS_9BitmapRefIfLi1EEERKNS_5ShapeEdRKNS_7Vector2ES9_b"] pub fn msdfgen_generateSDF( output: *const u8, shape: *const msdfgen_Shape, range: f64, scale: *const msdfgen_Vector2, translate: *const msdfgen_Vector2, overlapSupport: bool, ); } extern "C" { #[doc = " Generates a single-channel signed pseudo-distance field."] #[link_name = "\u{1}_ZN7msdfgen17generatePseudoSDFERKNS_9BitmapRefIfLi1EEERKNS_5ShapeEdRKNS_7Vector2ES9_b"] pub fn msdfgen_generatePseudoSDF( output: *const u8, shape: *const msdfgen_Shape, range: f64, scale: *const msdfgen_Vector2, translate: *const msdfgen_Vector2, overlapSupport: bool, ); } extern "C" { #[doc = " Generates a multi-channel signed distance field. Edge colors must be assigned first! (See edgeColoringSimple)"] #[link_name = "\u{1}_ZN7msdfgen12generateMSDFERKNS_9BitmapRefIfLi3EEERKNS_5ShapeEdRKNS_7Vector2ES9_db"] pub fn msdfgen_generateMSDF( output: *const u8, shape: *const msdfgen_Shape, range: f64, scale: *const msdfgen_Vector2, translate: *const msdfgen_Vector2, edgeThreshold: f64, overlapSupport: bool, ); } extern "C" { #[doc = " Resolves multi-channel signed distance field values that may cause interpolation artifacts. 
(Already called by generateMSDF)"] #[link_name = "\u{1}_ZN7msdfgen19msdfErrorCorrectionERKNS_9BitmapRefIfLi3EEERKNS_7Vector2E"] pub fn msdfgen_msdfErrorCorrection(output: *const u8, threshold: *const msdfgen_Vector2); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen18generateSDF_legacyERKNS_9BitmapRefIfLi1EEERKNS_5ShapeEdRKNS_7Vector2ES9_"] pub fn msdfgen_generateSDF_legacy( output: *const u8, shape: *const msdfgen_Shape, range: f64, scale: *const msdfgen_Vector2, translate: *const msdfgen_Vector2, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen24generatePseudoSDF_legacyERKNS_9BitmapRefIfLi1EEERKNS_5ShapeEdRKNS_7Vector2ES9_"] pub fn msdfgen_generatePseudoSDF_legacy( output: *const u8, shape: *const msdfgen_Shape, range: f64, scale: *const msdfgen_Vector2, translate: *const msdfgen_Vector2, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen19generateMSDF_legacyERKNS_9BitmapRefIfLi3EEERKNS_5ShapeEdRKNS_7Vector2ES9_d"] pub fn msdfgen_generateMSDF_legacy( output: *const u8, shape: *const msdfgen_Shape, range: f64, scale: *const msdfgen_Vector2, translate: *const msdfgen_Vector2, edgeThreshold: f64, ); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen19Contour_constructorEv"] pub fn msdfgen_Contour_constructor() -> msdfgen_Contour; } extern "C" { #[link_name = "\u{1}_ZN7msdfgen18Contour_destructorERNS_7ContourE"] pub fn msdfgen_Contour_destructor(self_: *mut msdfgen_Contour); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen16Shape_destructorERNS_5ShapeE"] pub fn msdfgen_Shape_destructor(self_: *mut msdfgen_Shape); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen19Scanline_destructorERNS_8ScanlineE"] pub fn msdfgen_Scanline_destructor(self_: *mut msdfgen_Scanline); } extern "C" { #[link_name = "\u{1}_ZN7msdfgen21EdgeHolder_setSegmentERNS_10EdgeHolderERKNS_11EdgeSegmentE"] pub fn msdfgen_EdgeHolder_setSegment( self_: *mut msdfgen_EdgeHolder, segment: *const msdfgen_EdgeSegment, ); } pub const msdfgen_SegmentKind_LINEAR: msdfgen_SegmentKind = 0; pub const msdfgen_SegmentKind_QUADRATIC: msdfgen_SegmentKind = 1; pub const msdfgen_SegmentKind_CUBIC: msdfgen_SegmentKind = 2; pub type msdfgen_SegmentKind = u32; extern "C" { #[link_name = "\u{1}_ZN7msdfgen19EdgeSegment_getKindERKNS_11EdgeSegmentE"] pub fn msdfgen_EdgeSegment_getKind(self_: *const msdfgen_EdgeSegment) -> msdfgen_SegmentKind; } #[test] fn __bindgen_test_layout_std_allocator_open0_msdfgen_Scanline_Intersection_close0_instantiation() { assert_eq!( ::std::mem::size_of::<std_allocator>(), 1usize, concat!( "Size of template specialization: ", stringify!(std_allocator) ) ); assert_eq!( ::std::mem::align_of::<std_allocator>(), 1usize, concat!( "Alignment of template specialization: ", stringify!(std_allocator) ) ); } #[test] fn __bindgen_test_layout_std_allocator_open0_msdfgen_Scanline_Intersection_close0_instantiation_1() { assert_eq!( ::std::mem::size_of::<std_allocator>(), 1usize, concat!( "Size of template specialization: ", stringify!(std_allocator) ) ); assert_eq!( ::std::mem::align_of::<std_allocator>(), 1usize, concat!( "Alignment of template specialization: ", stringify!(std_allocator) ) ); } #[test] fn __bindgen_test_layout_std_allocator_open0_msdfgen_Scanline_Intersection_close0_instantiation_2() { assert_eq!( ::std::mem::size_of::<std_allocator>(), 1usize, concat!( "Size of template specialization: ", stringify!(std_allocator) ) ); assert_eq!( ::std::mem::align_of::<std_allocator>(), 1usize, concat!( "Alignment of template specialization: ", stringify!(std_allocator) ) ); } #[test] fn 
__bindgen_test_layout_std_allocator_open0_msdfgen_EdgeHolder_close0_instantiation() { assert_eq!( ::std::mem::size_of::<std_allocator>(), 1usize, concat!( "Size of template specialization: ", stringify!(std_allocator) ) ); assert_eq!( ::std::mem::align_of::<std_allocator>(), 1usize, concat!( "Alignment of template specialization: ", stringify!(std_allocator) ) ); } #[test] fn __bindgen_test_layout_std_allocator_open0_msdfgen_Contour_close0_instantiation() { assert_eq!( ::std::mem::size_of::<std_allocator>(), 1usize, concat!( "Size of template specialization: ", stringify!(std_allocator) ) ); assert_eq!( ::std::mem::align_of::<std_allocator>(), 1usize, concat!( "Alignment of template specialization: ", stringify!(std_allocator) ) ); }
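// --- Hedged usage sketch (not bindgen output): exercises the fill-rule helper
// bound above. Assumes the msdfgen C++ library is linked; the expected results
// follow the usual nonzero / even-odd winding conventions, which is how msdfgen
// documents FillRule, but they are stated here as an assumption.
#[cfg(test)]
mod fill_rule_usage_sketch {
    use super::*;

    #[test]
    fn winding_totals_resolve_to_fill() {
        unsafe {
            // Nonzero rule: any non-zero winding total counts as filled.
            assert!(msdfgen_interpretFillRule(1, msdfgen_FillRule_FILL_NONZERO));
            assert!(!msdfgen_interpretFillRule(0, msdfgen_FillRule_FILL_NONZERO));
            // Odd (even-odd) rule: only odd totals count as filled.
            assert!(msdfgen_interpretFillRule(3, msdfgen_FillRule_FILL_ODD));
            assert!(!msdfgen_interpretFillRule(2, msdfgen_FillRule_FILL_ODD));
        }
    }
}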
pub type std_false_type = u8;
#[repr(C)]
#[derive(Debug, Copy, Clone)]
api_op_UpdateTeamMember.go
// Code generated by smithy-go-codegen DO NOT EDIT. package codestar import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Updates a team member's attributes in an AWS CodeStar project. For example, you // can change a team member's role in the project, or change whether they have // remote access to project resources. func (c *Client) UpdateTeamMember(ctx context.Context, params *UpdateTeamMemberInput, optFns ...func(*Options)) (*UpdateTeamMemberOutput, error) { if params == nil { params = &UpdateTeamMemberInput{} } result, metadata, err := c.invokeOperation(ctx, "UpdateTeamMember", params, optFns, c.addOperationUpdateTeamMemberMiddlewares) if err != nil { return nil, err } out := result.(*UpdateTeamMemberOutput) out.ResultMetadata = metadata return out, nil } type UpdateTeamMemberInput struct { // The ID of the project. // // This member is required. ProjectId *string // The Amazon Resource Name (ARN) of the user for whom you want to change team // membership attributes. // // This member is required. UserArn *string // The role assigned to the user in the project. Project roles have different // levels of access. For more information, see Working with Teams // (http://docs.aws.amazon.com/codestar/latest/userguide/working-with-teams.html) // in the AWS CodeStar User Guide. ProjectRole *string // Whether a team member is allowed to remotely access project resources using the // SSH public key associated with the user's profile. Even if this is set to True, // the user must associate a public key with their profile before the user can // access resources. RemoteAccessAllowed bool } type UpdateTeamMemberOutput struct { // The project role granted to the user. ProjectRole *string // Whether a team member is allowed to remotely access project resources using the
// SSH public key associated with the user's profile. RemoteAccessAllowed bool // The Amazon Resource Name (ARN) of the user whose team membership attributes were // updated. UserArn *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata } func (c *Client) addOperationUpdateTeamMemberMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsAwsjson11_serializeOpUpdateTeamMember{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpUpdateTeamMember{}, middleware.After) if err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addOpUpdateTeamMemberValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateTeamMember(options.Region), middleware.Before); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, options); err != nil { return err } return nil } func newServiceMetadataMiddleware_opUpdateTeamMember(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "codestar", OperationName: "UpdateTeamMember", } }
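// demoteToViewer is a hedged usage sketch (not generated code): it shows a
// minimal call to UpdateTeamMember. Client construction is assumed to happen
// elsewhere (e.g. codestar.NewFromConfig); the project ID, user ARN, and role
// below are illustrative placeholders, not values from this package.
func demoteToViewer(ctx context.Context, c *Client) error {
	projectID := "my-project-id"
	userArn := "arn:aws:iam::123456789012:user/example"
	role := "Viewer"
	out, err := c.UpdateTeamMember(ctx, &UpdateTeamMemberInput{
		ProjectId:           &projectID, // required
		UserArn:             &userArn,   // required
		ProjectRole:         &role,
		RemoteAccessAllowed: false, // revoke SSH remote access
	})
	if err != nil {
		return err
	}
	_ = out.ProjectRole // echoes the newly granted role
	return nil
}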
style.js
'use strict';

export const MAIN_COLOR = '#00BFA5';
export const SECOND_COLOR = '#FFD600';
export const CHART_HEIGHT = 35;
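// Hedged usage sketch (not part of the original file): one way these theme
// constants might be consumed by a chart component. The import path and the
// options shape are illustrative assumptions, not an API of this project.
//
// import { MAIN_COLOR, SECOND_COLOR, CHART_HEIGHT } from './style';
//
// const chartOptions = {
//   height: CHART_HEIGHT,
//   series: [{ color: MAIN_COLOR }, { color: SECOND_COLOR }],
// };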
TwoDimRandomValue.py
from __future__ import division


def get_x_distribution_series(x_values, probabilities):
    distr_series = {}
    for i, x in enumerate(x_values):
        prob_sum = 0
        for prob in probabilities[i]:
            prob_sum += prob
        distr_series[x] = prob_sum
    return distr_series


def get_x_distribution_function(distribution_series):
    """
    :type distribution_series: dict
    """
    distrib_func = {}
    prob = 0
    for x in sorted(distribution_series.keys()):
        distrib_func[x] = prob
        prob += distribution_series[x]
    return distrib_func


def get_conditional_distribution_series(probabilities, x_values, y_values, x_distr_series):
    series = {}
    for j, y in enumerate(y_values):
        series[y] = {}
        for i, x in enumerate(x_values):
            series[y][x] = probabilities[i][j] / x_distr_series[x]
    return series


def get_y_distribution_function(x_values, y_values, conditional_distribution_series):
    distrib_func = {}
    for x in x_values:
        distrib_func[x] = {}
        prob = 0
        for y in sorted(y_values):
            distrib_func[x][y] = prob
            prob += conditional_distribution_series[y][x]
    return distrib_func


def two_dim_value_generator(x_values, y_values, x_distribution_func, y_distrib_func, uniform_generator):
    """
    :type x_distribution_func: dict
    :type y_distrib_func: dict
    """
    while True:
        xi = next(uniform_generator)
        for i in range(len(x_values) - 1):
            if x_distribution_func[x_values[i]] < xi <= x_distribution_func[x_values[i + 1]]:
                x = x_values[i]
                break
        else:
            x = x_values[-1]
        xi = next(uniform_generator)
        for i in range(len(y_values) - 1):
            if y_distrib_func[x][y_values[i]] < xi <= y_distrib_func[x][y_values[i + 1]]:
                y = y_values[i]
                break
        else:
            y = y_values[-1]
        yield x, y


def get_expected_value_x(probabilities, x_values):
    exp_value_x = 0
    for i, x in enumerate(x_values):
        for prob in probabilities[i]:
            exp_value_x += x * prob
    return exp_value_x


def get_expected_value_y(probabilities, y_values):
    exp_value_y = 0
    for j, y in enumerate(y_values):
        for i in range(len(probabilities)):
            exp_value_y += y * probabilities[i][j]
    return exp_value_y


def get_dispersion_x(probabilities, x_values, exp_value_x):
    disp_x = 0
    for i, x in enumerate(x_values):
    return disp_x


def get_dispersion_y(probabilities, y_values, exp_value_y):
    disp_y = 0
    # probabilities is indexed [x][y] (see get_expected_value_y),
    # so the y index must go second here.
    for j, y in enumerate(y_values):
        for i in range(len(probabilities)):
            disp_y += probabilities[i][j] * (y - exp_value_y) ** 2
    return disp_y


def get_correlation_coefficient(probabilities, x_values, y_values):
    exp_value_x = get_expected_value_x(probabilities, x_values)
    exp_value_y = get_expected_value_y(probabilities, y_values)
    disp_x = get_dispersion_x(probabilities, x_values, exp_value_x)
    disp_y = get_dispersion_y(probabilities, y_values, exp_value_y)
    covariance_coefficient = 0
    for i, x in enumerate(x_values):
        for j, y in enumerate(y_values):
            covariance_coefficient += (x - exp_value_x) * (y - exp_value_y) * probabilities[i][j]
    return covariance_coefficient / (disp_x ** (1 / 2) * disp_y ** (1 / 2))


def get_bar_chart(k, a, b):
    """
    :type a, b: float
    :type k: int
    """
    def func(values):
        """
        :type values: list
        """
        h = (b - a) / k
        b_i = a
        values.sort()
        barchart = []
        value_quantity = len(values)
        for i in range(k):
            a_i = b_i
            b_i = a_i + h
            v = 0
            for x in values:
                if x > b_i:
                    break
                elif a_i < x <= b_i:
                    v += 1
            barchart.append((a_i, b_i, v / value_quantity))
        return barchart

    return func


def cleared_values(values):
    clr_values = []
    for x in values:
        if x not in clr_values:
            clr_values.append(x)
    clr_values.sort()
    return clr_values


def get_conditional_distribution(x_values_inp, y_values_inp, gener_values):
    cond_distrib = {}
    pair_quantity = len(gener_values)
    for x, y in gener_values:
        if x not in cond_distrib:
            cond_distrib[x] = {}
            for y_in in y_values_inp:
                cond_distrib[x][y_in] = 0
        cond_distrib[x][y] += 1 / pair_quantity
    distrib_list = []
    for x in x_values_inp:
        distrib_list.append([])
        for y in y_values_inp:
            distrib_list[-1].append(cond_distrib[x][y])
    return distrib_list
        for prob in probabilities[i]:
            disp_x += prob * (x - exp_value_x) ** 2
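# Hedged usage sketch (not part of the original module): wiring the pieces above
# together for a tiny 2x2 joint distribution. The uniform generator is a
# stand-in assumption; any iterator yielding U(0, 1) samples would do, and the
# probability table below is illustrative.
def _demo():
    import random

    def uniform_gen():
        while True:
            yield random.random()

    x_values = [0, 1]
    y_values = [0, 1]
    probabilities = [[0.1, 0.2],  # P(X=0, Y=0), P(X=0, Y=1)
                     [0.3, 0.4]]  # P(X=1, Y=0), P(X=1, Y=1)
    x_series = get_x_distribution_series(x_values, probabilities)
    x_func = get_x_distribution_function(x_series)
    cond_series = get_conditional_distribution_series(probabilities, x_values, y_values, x_series)
    y_func = get_y_distribution_function(x_values, y_values, cond_series)
    gen = two_dim_value_generator(x_values, y_values, x_func, y_func, uniform_gen())
    # With this many samples every x value appears with near certainty.
    pairs = [next(gen) for _ in range(10000)]
    print(get_correlation_coefficient(probabilities, x_values, y_values))
    print(get_conditional_distribution(x_values, y_values, pairs))


if __name__ == '__main__':
    _demo()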
colors.go
package transform

import (
	"crypto/sha1"
	"io"
	"math/rand"

	"github.com/prymitive/karma/internal/config"
	"github.com/prymitive/karma/internal/models"
	"github.com/prymitive/karma/internal/slices"

	"github.com/hansrodtang/randomcolor"
	plcolors "gopkg.in/go-playground/colors.v1"

	log "github.com/sirupsen/logrus"
)

func labelToSeed(key string, val string) int64 {
	h := sha1.New()
	_, err := io.WriteString(h, key)
	if err != nil {
		log.Errorf("Failed to write label key '%s' to the seed sha1: %s", key, err)
	}
	_, err = io.WriteString(h, val)
	if err != nil {
		log.Errorf("Failed to write label value '%s' to the seed sha1: %s", val, err)
	}
	var seed int64
	for _, i := range h.Sum(nil) {
		seed += int64(i)
	}
	return seed
}

func rgbToBrightness(r, g, b uint8) int32 {
	return ((int32(r) * 299) + (int32(g) * 587) + (int32(b) * 114)) / 1000
}

func parseCustomColor(colorStore models.LabelsColorMap, key, val, customColor string) {
		return
	}
	rgb := color.ToRGB()
	bc := models.Color{
		Red:   rgb.R,
		Green: rgb.G,
		Blue:  rgb.B,
		Alpha: 255,
	}
	brightness := rgbToBrightness(bc.Red, bc.Green, bc.Blue)
	if _, found := colorStore[key]; !found {
		colorStore[key] = make(map[string]models.LabelColors)
	}
	colorStore[key][val] = models.LabelColors{
		Brightness: brightness,
		Background: bc,
	}
}

// ColorLabel updates the karmaColorMap object with a color object generated
// from the label key and value passed here.
// It's used to generate unique colors for configured labels.
func ColorLabel(colorStore models.LabelsColorMap, key string, val string) {
	// first handle custom colors
	_, ok := config.Config.Labels.Color.Custom[key]
	if ok {
		for _, colorRule := range config.Config.Labels.Color.Custom[key] {
			if colorRule.Value == val {
				parseCustomColor(colorStore, key, val, colorRule.Color)
				return
			}
			if colorRule.CompiledRegex != nil && colorRule.CompiledRegex.MatchString(val) {
				parseCustomColor(colorStore, key, val, colorRule.Color)
				return
			}
		}
	}
	// if no custom color is found then generate unique colors if needed
	if slices.StringInSlice(config.Config.Labels.Color.Unique, key) {
		if _, found := colorStore[key]; !found {
			colorStore[key] = make(map[string]models.LabelColors)
		}
		if _, found := colorStore[key][val]; !found {
			rand.Seed(labelToSeed(key, val))
			color := randomcolor.New(randomcolor.Random, randomcolor.LIGHT)
			red, green, blue, alpha := color.RGBA()
			bc := models.Color{
				Red:   uint8(red >> 8),
				Green: uint8(green >> 8),
				Blue:  uint8(blue >> 8),
				Alpha: uint8(alpha >> 8),
			}
			// check if color is bright or dark and pick the right background
			// uses https://www.w3.org/WAI/ER/WD-AERT/#color-contrast method
			brightness := rgbToBrightness(bc.Red, bc.Green, bc.Blue)
			colorStore[key][val] = models.LabelColors{
				Brightness: brightness,
				Background: bc,
			}
		}
	}
}
	color, err := plcolors.Parse(customColor)
	if err != nil {
		log.Warningf("Failed to parse custom color for %s=%s: %s", key, val, err)
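// Hedged usage sketch (not part of the original file): how a caller might
// populate a color map with ColorLabel. It assumes config.Config has been
// loaded elsewhere with "cluster" listed under Labels.Color.Unique; the label
// values below are placeholders.
//
// colorStore := models.LabelsColorMap{}
// for _, cluster := range []string{"prod", "staging"} {
// 	ColorLabel(colorStore, "cluster", cluster)
// }
// // colorStore["cluster"]["prod"].Background now holds a deterministic color:
// // the rand seed is derived from sha1(key + value), so repeated runs agree.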
avx.rs
//! Advanced Vector Extensions (AVX)
//!
//! The references are:
//!
//! - [Intel 64 and IA-32 Architectures Software Developer's Manual Volume 2:
//!   Instruction Set Reference, A-Z][intel64_ref].
//! - [AMD64 Architecture Programmer's Manual, Volume 3: General-Purpose and
//!   System Instructions][amd64_ref].
//!
//! [Wikipedia][wiki] provides a quick overview of the instructions available.
//!
//! [intel64_ref]: http://www.intel.de/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
//! [amd64_ref]: http://support.amd.com/TechDocs/24594.pdf
//! [wiki]: https://en.wikipedia.org/wiki/Advanced_Vector_Extensions

use crate::{
    core_arch::{simd::*, simd_llvm::*, x86::*},
    intrinsics,
    mem::{self, transmute},
    ptr,
};

#[cfg(test)]
use stdarch_test::assert_instr;

/// Adds packed double-precision (64-bit) floating-point elements
/// in `a` and `b`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_add_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vaddpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_add_pd(a: __m256d, b: __m256d) -> __m256d {
    simd_add(a, b)
}

/// Adds packed single-precision (32-bit) floating-point elements in `a` and
/// `b`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_add_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vaddps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_add_ps(a: __m256, b: __m256) -> __m256 {
    simd_add(a, b)
}

/// Computes the bitwise AND of packed double-precision (64-bit)
/// floating-point elements in `a` and `b`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_and_pd)
#[inline]
#[target_feature(enable = "avx")]
// FIXME: Should be 'vandpd' instruction.
// See https://github.com/rust-lang/stdarch/issues/71
#[cfg_attr(test, assert_instr(vandps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_and_pd(a: __m256d, b: __m256d) -> __m256d {
    let a: u64x4 = transmute(a);
    let b: u64x4 = transmute(b);
    transmute(simd_and(a, b))
}

/// Computes the bitwise AND of packed single-precision (32-bit) floating-point
/// elements in `a` and `b`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_and_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vandps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_and_ps(a: __m256, b: __m256) -> __m256 {
    let a: u32x8 = transmute(a);
    let b: u32x8 = transmute(b);
    transmute(simd_and(a, b))
}

/// Computes the bitwise OR of packed double-precision (64-bit) floating-point
/// elements in `a` and `b`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_or_pd)
#[inline]
#[target_feature(enable = "avx")]
// FIXME: should be `vorpd` instruction.
// See <https://github.com/rust-lang/stdarch/issues/71>.
#[cfg_attr(test, assert_instr(vorps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_or_pd(a: __m256d, b: __m256d) -> __m256d {
    let a: u64x4 = transmute(a);
    let b: u64x4 = transmute(b);
    transmute(simd_or(a, b))
}

/// Computes the bitwise OR of packed single-precision (32-bit) floating-point
/// elements in `a` and `b`.
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_or_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vorps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_or_ps(a: __m256, b: __m256) -> __m256 { let a: u32x8 = transmute(a); let b: u32x8 = transmute(b); transmute(simd_or(a, b)) } /// Shuffles double-precision (64-bit) floating-point elements within 128-bit /// lanes using the control in `imm8`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shuffle_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vshufpd, MASK = 3))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_shuffle_pd<const MASK: i32>(a: __m256d, b: __m256d) -> __m256d { static_assert_imm8!(MASK); simd_shuffle4( a, b, [ MASK as u32 & 0b1, ((MASK as u32 >> 1) & 0b1) + 4, ((MASK as u32 >> 2) & 0b1) + 2, ((MASK as u32 >> 3) & 0b1) + 6, ], ) } /// Shuffles single-precision (32-bit) floating-point elements in `a` within /// 128-bit lanes using the control in `imm8`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_shuffle_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vshufps, MASK = 3))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_shuffle_ps<const MASK: i32>(a: __m256, b: __m256) -> __m256 { static_assert_imm8!(MASK); simd_shuffle8( a, b, [ MASK as u32 & 0b11, (MASK as u32 >> 2) & 0b11, ((MASK as u32 >> 4) & 0b11) + 8, ((MASK as u32 >> 6) & 0b11) + 8, (MASK as u32 & 0b11) + 4, ((MASK as u32 >> 2) & 0b11) + 4, ((MASK as u32 >> 4) & 0b11) + 12, ((MASK as u32 >> 6) & 0b11) + 12, ], ) } /// Computes the bitwise NOT of packed double-precision (64-bit) floating-point /// elements in `a`, and then AND with `b`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_andnot_pd) #[inline] #[target_feature(enable = "avx")] // FIXME: should be `vandnpd` instruction. #[cfg_attr(test, assert_instr(vandnps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_andnot_pd(a: __m256d, b: __m256d) -> __m256d { let a: u64x4 = transmute(a); let b: u64x4 = transmute(b); transmute(simd_and(simd_xor(u64x4::splat(!(0_u64)), a), b)) } /// Computes the bitwise NOT of packed single-precision (32-bit) floating-point /// elements in `a` /// and then AND with `b`. 
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_andnot_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vandnps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_andnot_ps(a: __m256, b: __m256) -> __m256 {
    let a: u32x8 = transmute(a);
    let b: u32x8 = transmute(b);
    transmute(simd_and(simd_xor(u32x8::splat(!(0_u32)), a), b))
}

/// Compares packed double-precision (64-bit) floating-point elements
/// in `a` and `b`, and returns packed maximum values.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaxpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_max_pd(a: __m256d, b: __m256d) -> __m256d {
    simd_fmax(a, b)
}

/// Compares packed single-precision (32-bit) floating-point elements in `a`
/// and `b`, and returns packed maximum values.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_max_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaxps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_max_ps(a: __m256, b: __m256) -> __m256 {
    simd_fmax(a, b)
}

/// Compares packed double-precision (64-bit) floating-point elements
/// in `a` and `b`, and returns packed minimum values.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vminpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_min_pd(a: __m256d, b: __m256d) -> __m256d {
    simd_fmin(a, b)
}

/// Compares packed single-precision (32-bit) floating-point elements in `a`
/// and `b`, and returns packed minimum values.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_min_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vminps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_min_ps(a: __m256, b: __m256) -> __m256 {
    simd_fmin(a, b)
}

/// Multiplies packed double-precision (64-bit) floating-point elements
/// in `a` and `b`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mul_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmulpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_mul_pd(a: __m256d, b: __m256d) -> __m256d {
    simd_mul(a, b)
}

/// Multiplies packed single-precision (32-bit) floating-point elements in `a` and
/// `b`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_mul_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmulps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_mul_ps(a: __m256, b: __m256) -> __m256 {
    simd_mul(a, b)
}

/// Alternately adds and subtracts packed double-precision (64-bit)
/// floating-point elements in `a` to/from packed elements in `b`.
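// Example (illustrative sketch): `andnot` with a sign-bit mask is the usual
// vectorized `abs` for floating-point data (runtime AVX assumed):
//
//     let sign = _mm256_set1_pd(-0.0); // only the sign bit set in each lane
//     let x = _mm256_setr_pd(-1.5, 2.0, -0.0, -8.25);
//     let abs = _mm256_andnot_pd(sign, x); // (!sign) & x = [1.5, 2.0, 0.0, 8.25]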
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_addsub_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vaddsubpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_addsub_pd(a: __m256d, b: __m256d) -> __m256d {
    addsubpd256(a, b)
}

/// Alternately adds and subtracts packed single-precision (32-bit)
/// floating-point elements in `a` to/from packed elements in `b`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_addsub_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vaddsubps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_addsub_ps(a: __m256, b: __m256) -> __m256 {
    addsubps256(a, b)
}

/// Subtracts packed double-precision (64-bit) floating-point elements in `b`
/// from packed elements in `a`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sub_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vsubpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_sub_pd(a: __m256d, b: __m256d) -> __m256d {
    simd_sub(a, b)
}

/// Subtracts packed single-precision (32-bit) floating-point elements in `b`
/// from packed elements in `a`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sub_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vsubps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_sub_ps(a: __m256, b: __m256) -> __m256 {
    simd_sub(a, b)
}

/// Computes the division of each of the 8 packed 32-bit floating-point elements
/// in `a` by the corresponding packed elements in `b`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_div_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vdivps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_div_ps(a: __m256, b: __m256) -> __m256 {
    simd_div(a, b)
}

/// Computes the division of each of the 4 packed 64-bit floating-point elements
/// in `a` by the corresponding packed elements in `b`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_div_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vdivpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_div_pd(a: __m256d, b: __m256d) -> __m256d {
    simd_div(a, b)
}

/// Rounds packed double-precision (64-bit) floating-point elements in `a`
/// according to the flag `ROUNDING`. The value of `ROUNDING` may be as follows:
///
/// - `0x00`: Round to the nearest whole number.
/// - `0x01`: Round down, toward negative infinity.
/// - `0x02`: Round up, toward positive infinity.
/// - `0x03`: Truncate the values.
///
/// For a complete list of options, check [the LLVM docs][llvm_docs].
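// Example (illustrative sketch): `addsub` subtracts in the even-indexed
// lanes and adds in the odd-indexed ones, the shape needed for interleaved
// complex arithmetic (runtime AVX assumed):
//
//     let a = _mm256_setr_pd(10.0, 20.0, 30.0, 40.0);
//     let b = _mm256_set1_pd(1.0);
//     let r = _mm256_addsub_pd(a, b); // [9.0, 21.0, 29.0, 41.0]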
///
/// [llvm_docs]: https://github.com/llvm-mirror/clang/blob/dcd8d797b20291f1a6b3e0ddda085aa2bbb382a8/lib/Headers/avxintrin.h#L382
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_round_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vroundpd, ROUNDING = 0x3))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_round_pd<const ROUNDING: i32>(a: __m256d) -> __m256d {
    static_assert_imm4!(ROUNDING);
    roundpd256(a, ROUNDING)
}

/// Rounds packed double-precision (64-bit) floating-point elements in `a`
/// toward positive infinity.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_ceil_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vroundpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_ceil_pd(a: __m256d) -> __m256d {
    simd_ceil(a)
}

/// Rounds packed double-precision (64-bit) floating-point elements in `a`
/// toward negative infinity.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_floor_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vroundpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_floor_pd(a: __m256d) -> __m256d {
    simd_floor(a)
}

/// Rounds packed single-precision (32-bit) floating-point elements in `a`
/// according to the flag `ROUNDING`. The value of `ROUNDING` may be as follows:
///
/// - `0x00`: Round to the nearest whole number.
/// - `0x01`: Round down, toward negative infinity.
/// - `0x02`: Round up, toward positive infinity.
/// - `0x03`: Truncate the values.
///
/// For a complete list of options, check [the LLVM docs][llvm_docs].
///
/// [llvm_docs]: https://github.com/llvm-mirror/clang/blob/dcd8d797b20291f1a6b3e0ddda085aa2bbb382a8/lib/Headers/avxintrin.h#L382
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_round_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vroundps, ROUNDING = 0x00))]
#[rustc_legacy_const_generics(1)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_round_ps<const ROUNDING: i32>(a: __m256) -> __m256 {
    static_assert_imm4!(ROUNDING);
    roundps256(a, ROUNDING)
}

/// Rounds packed single-precision (32-bit) floating-point elements in `a`
/// toward positive infinity.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_ceil_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vroundps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_ceil_ps(a: __m256) -> __m256 {
    simd_ceil(a)
}

/// Rounds packed single-precision (32-bit) floating-point elements in `a`
/// toward negative infinity.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_floor_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vroundps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_floor_ps(a: __m256) -> __m256 {
    simd_floor(a)
}

/// Returns the square root of packed single-precision (32-bit) floating-point
/// elements in `a`.
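// Example (illustrative sketch): the four basic `ROUNDING` values listed
// above, applied to the same input (runtime AVX assumed):
//
//     let x = _mm256_setr_pd(1.5, 2.5, -1.5, -2.5);
//     let nearest = _mm256_round_pd::<0x00>(x); // [2.0, 2.0, -2.0, -2.0] (ties to even)
//     let down    = _mm256_round_pd::<0x01>(x); // [1.0, 2.0, -2.0, -3.0]
//     let up      = _mm256_round_pd::<0x02>(x); // [2.0, 3.0, -1.0, -2.0]
//     let trunc   = _mm256_round_pd::<0x03>(x); // [1.0, 2.0, -1.0, -2.0]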
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sqrt_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vsqrtps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_sqrt_ps(a: __m256) -> __m256 {
    sqrtps256(a)
}

/// Returns the square root of packed double-precision (64-bit) floating-point
/// elements in `a`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_sqrt_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vsqrtpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_sqrt_pd(a: __m256d) -> __m256d {
    simd_fsqrt(a)
}

/// Blends packed double-precision (64-bit) floating-point elements from
/// `a` and `b` using control mask `imm8`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_blend_pd)
#[inline]
#[target_feature(enable = "avx")]
// Note: LLVM7 prefers single-precision blend instructions when
// possible, see: https://bugs.llvm.org/show_bug.cgi?id=38194
// #[cfg_attr(test, assert_instr(vblendpd, imm8 = 9))]
#[cfg_attr(test, assert_instr(vblendps, IMM4 = 9))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_blend_pd<const IMM4: i32>(a: __m256d, b: __m256d) -> __m256d {
    static_assert_imm4!(IMM4);
    simd_shuffle4(
        a,
        b,
        [
            ((IMM4 as u32 >> 0) & 1) * 4 + 0,
            ((IMM4 as u32 >> 1) & 1) * 4 + 1,
            ((IMM4 as u32 >> 2) & 1) * 4 + 2,
            ((IMM4 as u32 >> 3) & 1) * 4 + 3,
        ],
    )
}

/// Blends packed single-precision (32-bit) floating-point elements from
/// `a` and `b` using control mask `imm8`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_blend_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vblendps, IMM8 = 9))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_blend_ps<const IMM8: i32>(a: __m256, b: __m256) -> __m256 {
    static_assert_imm8!(IMM8);
    simd_shuffle8(
        a,
        b,
        [
            ((IMM8 as u32 >> 0) & 1) * 8 + 0,
            ((IMM8 as u32 >> 1) & 1) * 8 + 1,
            ((IMM8 as u32 >> 2) & 1) * 8 + 2,
            ((IMM8 as u32 >> 3) & 1) * 8 + 3,
            ((IMM8 as u32 >> 4) & 1) * 8 + 4,
            ((IMM8 as u32 >> 5) & 1) * 8 + 5,
            ((IMM8 as u32 >> 6) & 1) * 8 + 6,
            ((IMM8 as u32 >> 7) & 1) * 8 + 7,
        ],
    )
}

/// Blends packed double-precision (64-bit) floating-point elements from
/// `a` and `b` using `c` as a mask.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_blendv_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vblendvpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_blendv_pd(a: __m256d, b: __m256d, c: __m256d) -> __m256d {
    vblendvpd(a, b, c)
}

/// Blends packed single-precision (32-bit) floating-point elements from
/// `a` and `b` using `c` as a mask.
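// Example (illustrative sketch): each immediate bit selects the matching
// lane from `b` (bit set) or `a` (bit clear) (runtime AVX assumed):
//
//     let a = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);
//     let b = _mm256_setr_pd(5.0, 6.0, 7.0, 8.0);
//     let r = _mm256_blend_pd::<0b0101>(a, b); // [5.0, 2.0, 7.0, 4.0]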
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_blendv_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vblendvps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_blendv_ps(a: __m256, b: __m256, c: __m256) -> __m256 {
    vblendvps(a, b, c)
}

/// Conditionally multiplies the packed single-precision (32-bit) floating-point
/// elements in `a` and `b` using the high 4 bits in `imm8`,
/// sums the four products, and conditionally returns the sum
/// using the low 4 bits of `imm8`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_dp_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vdpps, IMM8 = 0x0))]
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_dp_ps<const IMM8: i32>(a: __m256, b: __m256) -> __m256 {
    static_assert_imm8!(IMM8);
    vdpps(a, b, IMM8)
}

/// Horizontal addition of adjacent pairs in the two packed vectors
/// of 4 64-bit floating-point elements `a` and `b`.
/// In the result, sums of elements from `a` are returned in even locations,
/// while sums of elements from `b` are returned in odd locations.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hadd_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vhaddpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_hadd_pd(a: __m256d, b: __m256d) -> __m256d {
    vhaddpd(a, b)
}

/// Horizontal addition of adjacent pairs in the two packed vectors
/// of 8 32-bit floating-point elements `a` and `b`.
/// In the result, sums of elements from `a` are returned in locations of
/// indices 0, 1, 4, 5; while sums of elements from `b` are returned in
/// locations 2, 3, 6, 7.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hadd_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vhaddps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_hadd_ps(a: __m256, b: __m256) -> __m256 {
    vhaddps(a, b)
}

/// Horizontal subtraction of adjacent pairs in the two packed vectors
/// of 4 64-bit floating-point elements `a` and `b`.
/// In the result, differences of elements from `a` are returned in even
/// locations, while differences of elements from `b` are returned in odd
/// locations.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hsub_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vhsubpd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_hsub_pd(a: __m256d, b: __m256d) -> __m256d {
    vhsubpd(a, b)
}

/// Horizontal subtraction of adjacent pairs in the two packed vectors
/// of 8 32-bit floating-point elements `a` and `b`.
/// In the result, differences of elements from `a` are returned in locations
/// of indices 0, 1, 4, 5; while differences of elements from `b` are returned
/// in locations 2, 3, 6, 7.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_hsub_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vhsubps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_hsub_ps(a: __m256, b: __m256) -> __m256 {
    vhsubps(a, b)
}

/// Computes the bitwise XOR of packed double-precision (64-bit) floating-point
/// elements in `a` and `b`.
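// Example (illustrative sketch): with `IMM8 = 0xF1`, `_mm256_dp_ps`
// multiplies all four element pairs of each 128-bit lane (high nibble 0xF),
// sums them, and writes the sum only into slot 0 of each lane (low nibble
// 0x1), zeroing the rest (runtime AVX assumed):
//
//     let a = _mm256_setr_ps(1.0, 2.0, 3.0, 4.0, 1.0, 1.0, 1.0, 1.0);
//     let b = _mm256_set1_ps(1.0);
//     let r = _mm256_dp_ps::<0xF1>(a, b);
//     // r = [10.0, 0.0, 0.0, 0.0, 4.0, 0.0, 0.0, 0.0]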
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_xor_pd) #[inline] #[target_feature(enable = "avx")] // FIXME Should be 'vxorpd' instruction. #[cfg_attr(test, assert_instr(vxorps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_xor_pd(a: __m256d, b: __m256d) -> __m256d { let a: u64x4 = transmute(a); let b: u64x4 = transmute(b); transmute(simd_xor(a, b)) } /// Computes the bitwise XOR of packed single-precision (32-bit) floating-point /// elements in `a` and `b`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_xor_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vxorps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_xor_ps(a: __m256, b: __m256) -> __m256 { let a: u32x8 = transmute(a); let b: u32x8 = transmute(b); transmute(simd_xor(a, b)) } /// Equal (ordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_EQ_OQ: i32 = 0x00; /// Less-than (ordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_LT_OS: i32 = 0x01; /// Less-than-or-equal (ordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_LE_OS: i32 = 0x02; /// Unordered (non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_UNORD_Q: i32 = 0x03; /// Not-equal (unordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_NEQ_UQ: i32 = 0x04; /// Not-less-than (unordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_NLT_US: i32 = 0x05; /// Not-less-than-or-equal (unordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_NLE_US: i32 = 0x06; /// Ordered (non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_ORD_Q: i32 = 0x07; /// Equal (unordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_EQ_UQ: i32 = 0x08; /// Not-greater-than-or-equal (unordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_NGE_US: i32 = 0x09; /// Not-greater-than (unordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_NGT_US: i32 = 0x0a; /// False (ordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_FALSE_OQ: i32 = 0x0b; /// Not-equal (ordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_NEQ_OQ: i32 = 0x0c; /// Greater-than-or-equal (ordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_GE_OS: i32 = 0x0d; /// Greater-than (ordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_GT_OS: i32 = 0x0e; /// True (unordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_TRUE_UQ: i32 = 0x0f; /// Equal (ordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_EQ_OS: i32 = 0x10; /// Less-than (ordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_LT_OQ: i32 = 0x11; /// Less-than-or-equal (ordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_LE_OQ: i32 = 0x12; /// Unordered (signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_UNORD_S: i32 = 0x13; /// Not-equal (unordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_NEQ_US: i32 = 0x14; /// 
Not-less-than (unordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_NLT_UQ: i32 = 0x15; /// Not-less-than-or-equal (unordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_NLE_UQ: i32 = 0x16; /// Ordered (signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_ORD_S: i32 = 0x17; /// Equal (unordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_EQ_US: i32 = 0x18; /// Not-greater-than-or-equal (unordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_NGE_UQ: i32 = 0x19; /// Not-greater-than (unordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_NGT_UQ: i32 = 0x1a; /// False (ordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_FALSE_OS: i32 = 0x1b; /// Not-equal (ordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_NEQ_OS: i32 = 0x1c; /// Greater-than-or-equal (ordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_GE_OQ: i32 = 0x1d; /// Greater-than (ordered, non-signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_GT_OQ: i32 = 0x1e; /// True (unordered, signaling) #[stable(feature = "simd_x86", since = "1.27.0")] pub const _CMP_TRUE_US: i32 = 0x1f; /// Compares packed double-precision (64-bit) floating-point /// elements in `a` and `b` based on the comparison operand /// specified by `IMM5`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_pd) #[inline] #[target_feature(enable = "avx,sse2")] #[cfg_attr(test, assert_instr(vcmpeqpd, IMM5 = 0))] // TODO Validate vcmppd #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmp_pd<const IMM5: i32>(a: __m128d, b: __m128d) -> __m128d { static_assert_imm5!(IMM5); vcmppd(a, b, IMM5 as i8) } /// Compares packed double-precision (64-bit) floating-point /// elements in `a` and `b` based on the comparison operand /// specified by `IMM5`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vcmpeqpd, IMM5 = 0))] // TODO Validate vcmppd #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_cmp_pd<const IMM5: i32>(a: __m256d, b: __m256d) -> __m256d { static_assert_imm5!(IMM5); vcmppd256(a, b, IMM5 as u8) } /// Compares packed single-precision (32-bit) floating-point /// elements in `a` and `b` based on the comparison operand /// specified by `IMM5`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ps) #[inline] #[target_feature(enable = "avx,sse")] #[cfg_attr(test, assert_instr(vcmpeqps, IMM5 = 0))] // TODO Validate vcmpps #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cmp_ps<const IMM5: i32>(a: __m128, b: __m128) -> __m128 { static_assert_imm5!(IMM5); vcmpps(a, b, IMM5 as i8) } /// Compares packed single-precision (32-bit) floating-point /// elements in `a` and `b` based on the comparison operand /// specified by `IMM5`. 
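// Example (illustrative sketch): the usual compare-then-mask pattern. The
// compare yields all-ones lanes where the predicate holds, and
// `_mm256_movemask_pd` (provided elsewhere in this module) packs the lane
// sign bits into an integer (runtime AVX assumed):
//
//     let x = _mm256_setr_pd(1.0, -2.0, 3.0, f64::NAN);
//     let lt = _mm256_cmp_pd::<_CMP_LT_OQ>(x, _mm256_setzero_pd());
//     let bits = _mm256_movemask_pd(lt); // 0b0010: only lane 1 is < 0.0
//     // `OQ` predicates are ordered and quiet: NaN lanes compare false.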
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cmp_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcmpeqps, IMM5 = 0))] // TODO Validate vcmpps
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cmp_ps<const IMM5: i32>(a: __m256, b: __m256) -> __m256 {
    static_assert_imm5!(IMM5);
    vcmpps256(a, b, IMM5 as u8)
}

/// Compares the lower double-precision (64-bit) floating-point element in
/// `a` and `b` based on the comparison operand specified by `IMM5`,
/// stores the result in the lower element of the returned vector,
/// and copies the upper element from `a` to the upper element of the
/// returned vector.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_sd)
#[inline]
#[target_feature(enable = "avx,sse2")]
#[cfg_attr(test, assert_instr(vcmpeqsd, IMM5 = 0))] // TODO Validate vcmpsd
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmp_sd<const IMM5: i32>(a: __m128d, b: __m128d) -> __m128d {
    static_assert_imm5!(IMM5);
    vcmpsd(a, b, IMM5 as i8)
}

/// Compares the lower single-precision (32-bit) floating-point element in
/// `a` and `b` based on the comparison operand specified by `IMM5`,
/// stores the result in the lower element of the returned vector,
/// and copies the upper 3 packed elements from `a` to the upper elements of
/// the returned vector.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_cmp_ss)
#[inline]
#[target_feature(enable = "avx,sse")]
#[cfg_attr(test, assert_instr(vcmpeqss, IMM5 = 0))] // TODO Validate vcmpss
#[rustc_legacy_const_generics(2)]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_cmp_ss<const IMM5: i32>(a: __m128, b: __m128) -> __m128 {
    static_assert_imm5!(IMM5);
    vcmpss(a, b, IMM5 as i8)
}

/// Converts packed 32-bit integers in `a` to packed double-precision (64-bit)
/// floating-point elements.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi32_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcvtdq2pd))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cvtepi32_pd(a: __m128i) -> __m256d {
    simd_cast(a.as_i32x4())
}

/// Converts packed 32-bit integers in `a` to packed single-precision (32-bit)
/// floating-point elements.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtepi32_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcvtdq2ps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cvtepi32_ps(a: __m256i) -> __m256 {
    vcvtdq2ps(a.as_i32x8())
}

/// Converts packed double-precision (64-bit) floating-point elements in `a`
/// to packed single-precision (32-bit) floating-point elements.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtpd_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vcvtpd2ps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_cvtpd_ps(a: __m256d) -> __m128 {
    vcvtpd2ps(a)
}

/// Converts packed single-precision (32-bit) floating-point elements in `a`
/// to packed 32-bit integers.
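// Example (illustrative sketch): widening 32-bit integers to doubles and
// narrowing back down to single precision (runtime AVX assumed):
//
//     let ints = _mm_setr_epi32(1, 2, 3, 4);
//     let wide = _mm256_cvtepi32_pd(ints); // [1.0, 2.0, 3.0, 4.0] as f64
//     let narrow = _mm256_cvtpd_ps(wide);  // four f32 values in a __m128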
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtps_epi32) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vcvtps2dq))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_cvtps_epi32(a: __m256) -> __m256i { transmute(vcvtps2dq(a)) } /// Converts packed single-precision (32-bit) floating-point elements in `a` /// to packed double-precision (64-bit) floating-point elements. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtps_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vcvtps2pd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_cvtps_pd(a: __m128) -> __m256d { simd_cast(a) } /// Converts packed double-precision (64-bit) floating-point elements in `a` /// to packed 32-bit integers with truncation. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttpd_epi32) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vcvttpd2dq))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_cvttpd_epi32(a: __m256d) -> __m128i { transmute(vcvttpd2dq(a)) } /// Converts packed double-precision (64-bit) floating-point elements in `a` /// to packed 32-bit integers. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtpd_epi32) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vcvtpd2dq))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_cvtpd_epi32(a: __m256d) -> __m128i { transmute(vcvtpd2dq(a)) } /// Converts packed single-precision (32-bit) floating-point elements in `a` /// to packed 32-bit integers with truncation. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvttps_epi32) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vcvttps2dq))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_cvttps_epi32(a: __m256) -> __m256i { transmute(vcvttps2dq(a)) } /// Extracts 128 bits (composed of 4 packed single-precision (32-bit) /// floating-point elements) from `a`, selected with `imm8`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_extractf128_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextractf128, IMM1 = 1) )] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_extractf128_ps<const IMM1: i32>(a: __m256) -> __m128 { static_assert_imm1!(IMM1); simd_shuffle4( a, _mm256_undefined_ps(), [[0, 1, 2, 3], [4, 5, 6, 7]][IMM1 as usize], ) } /// Extracts 128 bits (composed of 2 packed double-precision (64-bit) /// floating-point elements) from `a`, selected with `imm8`. 
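// Example (illustrative sketch): splitting a 256-bit vector into its two
// 128-bit halves (runtime AVX assumed):
//
//     let v = _mm256_setr_ps(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
//     let lo = _mm256_extractf128_ps::<0>(v); // [0.0, 1.0, 2.0, 3.0]
//     let hi = _mm256_extractf128_ps::<1>(v); // [4.0, 5.0, 6.0, 7.0]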
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_extractf128_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextractf128, IMM1 = 1) )] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_extractf128_pd<const IMM1: i32>(a: __m256d) -> __m128d { static_assert_imm1!(IMM1); simd_shuffle2(a, _mm256_undefined_pd(), [[0, 1], [2, 3]][IMM1 as usize]) } /// Extracts 128 bits (composed of integer data) from `a`, selected with `imm8`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_extractf128_si256) #[inline] #[target_feature(enable = "avx")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vextractf128, IMM1 = 1) )] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_extractf128_si256<const IMM1: i32>(a: __m256i) -> __m128i { static_assert_imm1!(IMM1); let dst: i64x2 = simd_shuffle2( a.as_i64x4(), _mm256_undefined_si256().as_i64x4(), [[0, 1], [2, 3]][IMM1 as usize], ); transmute(dst) } /// Zeroes the contents of all XMM or YMM registers. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_zeroall) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vzeroall))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_zeroall() { vzeroall() } /// Zeroes the upper 128 bits of all YMM registers; /// the lower 128-bits of the registers are unmodified. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_zeroupper) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vzeroupper))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_zeroupper() { vzeroupper() } /// Shuffles single-precision (32-bit) floating-point elements in `a` /// within 128-bit lanes using the control in `b`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutevar_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vpermilps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permutevar_ps(a: __m256, b: __m256i) -> __m256 { vpermilps256(a, b.as_i32x8()) } /// Shuffles single-precision (32-bit) floating-point elements in `a` /// using the control in `b`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutevar_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vpermilps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_permutevar_ps(a: __m128, b: __m128i) -> __m128 { vpermilps(a, b.as_i32x4()) } /// Shuffles single-precision (32-bit) floating-point elements in `a` /// within 128-bit lanes using the control in `imm8`. 
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permute_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vpermilps, IMM8 = 9))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permute_ps<const IMM8: i32>(a: __m256) -> __m256 { static_assert_imm8!(IMM8); simd_shuffle8( a, _mm256_undefined_ps(), [ (IMM8 as u32 >> 0) & 0b11, (IMM8 as u32 >> 2) & 0b11, (IMM8 as u32 >> 4) & 0b11, (IMM8 as u32 >> 6) & 0b11, ((IMM8 as u32 >> 0) & 0b11) + 4, ((IMM8 as u32 >> 2) & 0b11) + 4, ((IMM8 as u32 >> 4) & 0b11) + 4, ((IMM8 as u32 >> 6) & 0b11) + 4, ], ) } /// Shuffles single-precision (32-bit) floating-point elements in `a` /// using the control in `imm8`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permute_ps) #[inline] #[target_feature(enable = "avx,sse")] #[cfg_attr(test, assert_instr(vpermilps, IMM8 = 9))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_permute_ps<const IMM8: i32>(a: __m128) -> __m128 { static_assert_imm8!(IMM8); simd_shuffle4( a, _mm_undefined_ps(), [ (IMM8 as u32 >> 0) & 0b11, (IMM8 as u32 >> 2) & 0b11, (IMM8 as u32 >> 4) & 0b11, (IMM8 as u32 >> 6) & 0b11, ], ) } /// Shuffles double-precision (64-bit) floating-point elements in `a` /// within 256-bit lanes using the control in `b`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permutevar_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vpermilpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permutevar_pd(a: __m256d, b: __m256i) -> __m256d { vpermilpd256(a, b.as_i64x4()) } /// Shuffles double-precision (64-bit) floating-point elements in `a` /// using the control in `b`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permutevar_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vpermilpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_permutevar_pd(a: __m128d, b: __m128i) -> __m128d { vpermilpd(a, b.as_i64x2()) } /// Shuffles double-precision (64-bit) floating-point elements in `a` /// within 128-bit lanes using the control in `imm8`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permute_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vpermilpd, IMM4 = 0x1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permute_pd<const IMM4: i32>(a: __m256d) -> __m256d { static_assert_imm4!(IMM4); simd_shuffle4( a, _mm256_undefined_pd(), [ ((IMM4 as u32 >> 0) & 1), ((IMM4 as u32 >> 1) & 1), ((IMM4 as u32 >> 2) & 1) + 2, ((IMM4 as u32 >> 3) & 1) + 2, ], ) } /// Shuffles double-precision (64-bit) floating-point elements in `a` /// using the control in `imm8`. 
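// Example (illustrative sketch): reversing four lanes with `_mm_permute_ps`;
// the selector uses the same 2-bit-per-lane encoding as `_MM_SHUFFLE`
// (runtime AVX assumed):
//
//     let v = _mm_setr_ps(1.0, 2.0, 3.0, 4.0);
//     let rev = _mm_permute_ps::<0b00_01_10_11>(v); // [4.0, 3.0, 2.0, 1.0]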
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_permute_pd) #[inline] #[target_feature(enable = "avx,sse2")] #[cfg_attr(test, assert_instr(vpermilpd, IMM2 = 0x1))] #[rustc_legacy_const_generics(1)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_permute_pd<const IMM2: i32>(a: __m128d) -> __m128d { static_assert_imm2!(IMM2); simd_shuffle2( a, _mm_undefined_pd(), [(IMM2 as u32) & 1, (IMM2 as u32 >> 1) & 1], ) } /// Shuffles 256 bits (composed of 8 packed single-precision (32-bit) /// floating-point elements) selected by `imm8` from `a` and `b`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permute2f128_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vperm2f128, IMM8 = 0x5))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permute2f128_ps<const IMM8: i32>(a: __m256, b: __m256) -> __m256 { static_assert_imm8!(IMM8); vperm2f128ps256(a, b, IMM8 as i8) } /// Shuffles 256 bits (composed of 4 packed double-precision (64-bit) /// floating-point elements) selected by `imm8` from `a` and `b`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permute2f128_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vperm2f128, IMM8 = 0x31))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permute2f128_pd<const IMM8: i32>(a: __m256d, b: __m256d) -> __m256d { static_assert_imm8!(IMM8); vperm2f128pd256(a, b, IMM8 as i8) } /// Shuffles 128-bits (composed of integer data) selected by `imm8` /// from `a` and `b`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_permute2f128_si256) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vperm2f128, IMM8 = 0x31))] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_permute2f128_si256<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i { static_assert_imm8!(IMM8); transmute(vperm2f128si256(a.as_i32x8(), b.as_i32x8(), IMM8 as i8)) } /// Broadcasts a single-precision (32-bit) floating-point element from memory /// to all elements of the returned vector. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcast_ss) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vbroadcastss))] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::trivially_copy_pass_by_ref)] pub unsafe fn _mm256_broadcast_ss(f: &f32) -> __m256 { _mm256_set1_ps(*f) } /// Broadcasts a single-precision (32-bit) floating-point element from memory /// to all elements of the returned vector. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_broadcast_ss) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vbroadcastss))] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::trivially_copy_pass_by_ref)] pub unsafe fn _mm_broadcast_ss(f: &f32) -> __m128 { _mm_set1_ps(*f) } /// Broadcasts a double-precision (64-bit) floating-point element from memory /// to all elements of the returned vector. 
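// Example (illustrative sketch): the low selector nibble picks the output's
// low 128-bit half and the high nibble its high half (0 = `a` low,
// 1 = `a` high, 2 = `b` low, 3 = `b` high), so `0x01` swaps the halves of
// `a` (runtime AVX assumed):
//
//     let a = _mm256_setr_ps(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
//     let r = _mm256_permute2f128_ps::<0x01>(a, a);
//     // [4.0, 5.0, 6.0, 7.0, 0.0, 1.0, 2.0, 3.0]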
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcast_sd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vbroadcastsd))] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::trivially_copy_pass_by_ref)] pub unsafe fn _mm256_broadcast_sd(f: &f64) -> __m256d { _mm256_set1_pd(*f) } /// Broadcasts 128 bits from memory (composed of 4 packed single-precision /// (32-bit) floating-point elements) to all elements of the returned vector. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcast_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vbroadcastf128))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_broadcast_ps(a: &__m128) -> __m256 { vbroadcastf128ps256(a) } /// Broadcasts 128 bits from memory (composed of 2 packed double-precision /// (64-bit) floating-point elements) to all elements of the returned vector. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_broadcast_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vbroadcastf128))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_broadcast_pd(a: &__m128d) -> __m256d { vbroadcastf128pd256(a) } /// Copies `a` to result, then inserts 128 bits (composed of 4 packed /// single-precision (32-bit) floating-point elements) from `b` into result /// at the location specified by `imm8`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insertf128_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vinsertf128, IMM1 = 1) )] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_insertf128_ps<const IMM1: i32>(a: __m256, b: __m128) -> __m256 { static_assert_imm1!(IMM1); simd_shuffle8( a, _mm256_castps128_ps256(b), [[8, 9, 10, 11, 4, 5, 6, 7], [0, 1, 2, 3, 8, 9, 10, 11]][IMM1 as usize], ) } /// Copies `a` to result, then inserts 128 bits (composed of 2 packed /// double-precision (64-bit) floating-point elements) from `b` into result /// at the location specified by `imm8`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insertf128_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vinsertf128, IMM1 = 1) )] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_insertf128_pd<const IMM1: i32>(a: __m256d, b: __m128d) -> __m256d { static_assert_imm1!(IMM1); simd_shuffle4( a, _mm256_castpd128_pd256(b), [[4, 5, 2, 3], [0, 1, 4, 5]][IMM1 as usize], ) } /// Copies `a` to result, then inserts 128 bits from `b` into result /// at the location specified by `imm8`. 
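// Example (illustrative sketch): assembling a 256-bit vector from two
// 128-bit halves; `_mm256_castps128_ps256` (provided elsewhere in this
// module) reinterprets the low half and leaves the upper half undefined
// until the insert overwrites it (runtime AVX assumed):
//
//     let lo = _mm_set1_ps(1.0);
//     let hi = _mm_set1_ps(2.0);
//     let v = _mm256_insertf128_ps::<1>(_mm256_castps128_ps256(lo), hi);
//     // [1.0, 1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0]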
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insertf128_si256) #[inline] #[target_feature(enable = "avx")] #[cfg_attr( all(test, not(target_os = "windows")), assert_instr(vinsertf128, IMM1 = 1) )] #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_insertf128_si256<const IMM1: i32>(a: __m256i, b: __m128i) -> __m256i { static_assert_imm1!(IMM1); let dst: i64x4 = simd_shuffle4( a.as_i64x4(), _mm256_castsi128_si256(b).as_i64x4(), [[4, 5, 2, 3], [0, 1, 4, 5]][IMM1 as usize], ); transmute(dst) } /// Copies `a` to result, and inserts the 8-bit integer `i` into result /// at the location specified by `index`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insert_epi8) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_insert_epi8<const INDEX: i32>(a: __m256i, i: i8) -> __m256i { static_assert_imm5!(INDEX); transmute(simd_insert(a.as_i8x32(), INDEX as u32, i)) } /// Copies `a` to result, and inserts the 16-bit integer `i` into result /// at the location specified by `index`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insert_epi16) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_insert_epi16<const INDEX: i32>(a: __m256i, i: i16) -> __m256i { static_assert_imm4!(INDEX); transmute(simd_insert(a.as_i16x16(), INDEX as u32, i)) } /// Copies `a` to result, and inserts the 32-bit integer `i` into result /// at the location specified by `index`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_insert_epi32) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[rustc_legacy_const_generics(2)] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_insert_epi32<const INDEX: i32>(a: __m256i, i: i32) -> __m256i { static_assert_imm3!(INDEX); transmute(simd_insert(a.as_i32x8(), INDEX as u32, i)) } /// Loads 256-bits (composed of 4 packed double-precision (64-bit) /// floating-point elements) from memory into result. /// `mem_addr` must be aligned on a 32-byte boundary or a /// general-protection exception may be generated. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_load_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovaps))] // FIXME vmovapd expected #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] pub unsafe fn _mm256_load_pd(mem_addr: *const f64) -> __m256d { *(mem_addr as *const __m256d) } /// Stores 256-bits (composed of 4 packed double-precision (64-bit) /// floating-point elements) from `a` into memory. /// `mem_addr` must be aligned on a 32-byte boundary or a /// general-protection exception may be generated. 
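// Example (illustrative sketch): the insert intrinsics return a copy with
// one lane replaced; the input vector is not modified in place (runtime
// AVX assumed):
//
//     let a = _mm256_setzero_si256();
//     let b = _mm256_insert_epi32::<7>(a, -1); // highest 32-bit lane = -1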
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_store_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovaps))] // FIXME vmovapd expected #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] pub unsafe fn _mm256_store_pd(mem_addr: *mut f64, a: __m256d) { *(mem_addr as *mut __m256d) = a; } /// Loads 256-bits (composed of 8 packed single-precision (32-bit) /// floating-point elements) from memory into result. /// `mem_addr` must be aligned on a 32-byte boundary or a /// general-protection exception may be generated. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_load_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovaps))] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] pub unsafe fn _mm256_load_ps(mem_addr: *const f32) -> __m256 { *(mem_addr as *const __m256) } /// Stores 256-bits (composed of 8 packed single-precision (32-bit) /// floating-point elements) from `a` into memory. /// `mem_addr` must be aligned on a 32-byte boundary or a /// general-protection exception may be generated. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_store_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovaps))] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] pub unsafe fn _mm256_store_ps(mem_addr: *mut f32, a: __m256) { *(mem_addr as *mut __m256) = a; } /// Loads 256-bits (composed of 4 packed double-precision (64-bit) /// floating-point elements) from memory into result. /// `mem_addr` does not need to be aligned on any particular boundary. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovups))] // FIXME vmovupd expected #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_loadu_pd(mem_addr: *const f64) -> __m256d { let mut dst = _mm256_undefined_pd(); ptr::copy_nonoverlapping( mem_addr as *const u8, &mut dst as *mut __m256d as *mut u8, mem::size_of::<__m256d>(), ); dst } /// Stores 256-bits (composed of 4 packed double-precision (64-bit) /// floating-point elements) from `a` into memory. /// `mem_addr` does not need to be aligned on any particular boundary. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovups))] // FIXME vmovupd expected #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_storeu_pd(mem_addr: *mut f64, a: __m256d) { storeupd256(mem_addr, a); } /// Loads 256-bits (composed of 8 packed single-precision (32-bit) /// floating-point elements) from memory into result. /// `mem_addr` does not need to be aligned on any particular boundary. 
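// Example (illustrative sketch): the `load`/`store` family requires 32-byte
// alignment, while `loadu`/`storeu` accepts any address, so plain slices
// (only guaranteed element alignment) should use the unaligned forms
// (runtime AVX assumed):
//
//     let data = [1.0f64, 2.0, 3.0, 4.0, 5.0];
//     let v = _mm256_loadu_pd(data.as_ptr()); // first four elements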
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovups))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_loadu_ps(mem_addr: *const f32) -> __m256 { let mut dst = _mm256_undefined_ps(); ptr::copy_nonoverlapping( mem_addr as *const u8, &mut dst as *mut __m256 as *mut u8, mem::size_of::<__m256>(), ); dst } /// Stores 256-bits (composed of 8 packed single-precision (32-bit) /// floating-point elements) from `a` into memory. /// `mem_addr` does not need to be aligned on any particular boundary. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovups))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_storeu_ps(mem_addr: *mut f32, a: __m256) { storeups256(mem_addr, a); } /// Loads 256-bits of integer data from memory into result. /// `mem_addr` must be aligned on a 32-byte boundary or a /// general-protection exception may be generated. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_load_si256) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovaps))] // FIXME vmovdqa expected #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_load_si256(mem_addr: *const __m256i) -> __m256i { *mem_addr } /// Stores 256-bits of integer data from `a` into memory. /// `mem_addr` must be aligned on a 32-byte boundary or a /// general-protection exception may be generated. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_store_si256) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovaps))] // FIXME vmovdqa expected #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_store_si256(mem_addr: *mut __m256i, a: __m256i) { *mem_addr = a; } /// Loads 256-bits of integer data from memory into result. /// `mem_addr` does not need to be aligned on any particular boundary. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu_si256) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovups))] // FIXME vmovdqu expected #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_loadu_si256(mem_addr: *const __m256i) -> __m256i { let mut dst = _mm256_undefined_si256(); ptr::copy_nonoverlapping( mem_addr as *const u8, &mut dst as *mut __m256i as *mut u8, mem::size_of::<__m256i>(), ); dst } /// Stores 256-bits of integer data from `a` into memory. /// `mem_addr` does not need to be aligned on any particular boundary. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu_si256) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovups))] // FIXME vmovdqu expected #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_storeu_si256(mem_addr: *mut __m256i, a: __m256i) { storeudq256(mem_addr as *mut i8, a.as_i8x32()); } /// Loads packed double-precision (64-bit) floating-point elements from memory /// into result using `mask` (elements are zeroed out when the high bit of the /// corresponding element is not set). 
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskload_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_maskload_pd(mem_addr: *const f64, mask: __m256i) -> __m256d { maskloadpd256(mem_addr as *const i8, mask.as_i64x4()) } /// Stores packed double-precision (64-bit) floating-point elements from `a` /// into memory using `mask`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskstore_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_maskstore_pd(mem_addr: *mut f64, mask: __m256i, a: __m256d) { maskstorepd256(mem_addr as *mut i8, mask.as_i64x4(), a); } /// Loads packed double-precision (64-bit) floating-point elements from memory /// into result using `mask` (elements are zeroed out when the high bit of the /// corresponding element is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskload_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_maskload_pd(mem_addr: *const f64, mask: __m128i) -> __m128d { maskloadpd(mem_addr as *const i8, mask.as_i64x2()) } /// Stores packed double-precision (64-bit) floating-point elements from `a` /// into memory using `mask`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskstore_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_maskstore_pd(mem_addr: *mut f64, mask: __m128i, a: __m128d) { maskstorepd(mem_addr as *mut i8, mask.as_i64x2(), a); } /// Loads packed single-precision (32-bit) floating-point elements from memory /// into result using `mask` (elements are zeroed out when the high bit of the /// corresponding element is not set). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskload_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_maskload_ps(mem_addr: *const f32, mask: __m256i) -> __m256 { maskloadps256(mem_addr as *const i8, mask.as_i32x8()) } /// Stores packed single-precision (32-bit) floating-point elements from `a` /// into memory using `mask`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_maskstore_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmaskmovps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_maskstore_ps(mem_addr: *mut f32, mask: __m256i, a: __m256) { maskstoreps256(mem_addr as *mut i8, mask.as_i32x8(), a); } /// Loads packed single-precision (32-bit) floating-point elements from memory /// into result using `mask` (elements are zeroed out when the high bit of the /// corresponding element is not set). 
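// Example (illustrative sketch): masked loads read only the lanes whose
// mask element has its high bit set, so a two-element tail can be loaded
// without touching memory past the end of the buffer (runtime AVX assumed):
//
//     let tail = [1.0f64, 2.0];
//     let mask = _mm256_setr_epi64x(-1, -1, 0, 0); // lanes 0 and 1 only
//     let v = _mm256_maskload_pd(tail.as_ptr(), mask); // [1.0, 2.0, 0.0, 0.0]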
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskload_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaskmovps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_maskload_ps(mem_addr: *const f32, mask: __m128i) -> __m128 {
    maskloadps(mem_addr as *const i8, mask.as_i32x4())
}

/// Stores packed single-precision (32-bit) floating-point elements from `a`
/// into memory using `mask`.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_maskstore_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmaskmovps))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm_maskstore_ps(mem_addr: *mut f32, mask: __m128i, a: __m128) {
    maskstoreps(mem_addr as *mut i8, mask.as_i32x4(), a);
}

/// Duplicates odd-indexed single-precision (32-bit) floating-point elements
/// from `a`, and returns the results.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_movehdup_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovshdup))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_movehdup_ps(a: __m256) -> __m256 {
    simd_shuffle8(a, a, [1, 1, 3, 3, 5, 5, 7, 7])
}

/// Duplicates even-indexed single-precision (32-bit) floating-point elements
/// from `a`, and returns the results.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_moveldup_ps)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovsldup))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_moveldup_ps(a: __m256) -> __m256 {
    simd_shuffle8(a, a, [0, 0, 2, 2, 4, 4, 6, 6])
}

/// Duplicates even-indexed double-precision (64-bit) floating-point elements
/// from `a`, and returns the results.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_movedup_pd)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovddup))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_movedup_pd(a: __m256d) -> __m256d {
    simd_shuffle4(a, a, [0, 0, 2, 2])
}

/// Loads 256-bits of integer data from unaligned memory into result.
/// This intrinsic may perform better than `_mm256_loadu_si256` when the
/// data crosses a cache line boundary.
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_lddqu_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vlddqu))]
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_lddqu_si256(mem_addr: *const __m256i) -> __m256i {
    transmute(vlddqu(mem_addr as *const i8))
}

/// Moves integer data from a 256-bit integer vector to a 32-byte
/// aligned memory location. To minimize caching, the data is flagged as
/// non-temporal (unlikely to be used again soon).
///
/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_stream_si256)
#[inline]
#[target_feature(enable = "avx")]
#[cfg_attr(test, assert_instr(vmovntps))] // FIXME vmovntdq
#[stable(feature = "simd_x86", since = "1.27.0")]
pub unsafe fn _mm256_stream_si256(mem_addr: *mut __m256i, a: __m256i) {
    intrinsics::nontemporal_store(mem_addr, a);
}

/// Moves double-precision values from a 256-bit vector of `[4 x double]`
/// to a 32-byte aligned memory location.
To minimize caching, the data is /// flagged as non-temporal (unlikely to be used again soon). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_stream_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovntps))] // FIXME vmovntpd #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] pub unsafe fn _mm256_stream_pd(mem_addr: *mut f64, a: __m256d) { intrinsics::nontemporal_store(mem_addr as *mut __m256d, a); } /// Moves single-precision floating-point values from a 256-bit vector /// of `[8 x float]` to a 32-byte aligned memory location. To minimize /// caching, the data is flagged as non-temporal (unlikely to be used again /// soon). /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_stream_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovntps))] #[stable(feature = "simd_x86", since = "1.27.0")] #[allow(clippy::cast_ptr_alignment)] pub unsafe fn _mm256_stream_ps(mem_addr: *mut f32, a: __m256) { intrinsics::nontemporal_store(mem_addr as *mut __m256, a); } /// Computes the approximate reciprocal of packed single-precision (32-bit) /// floating-point elements in `a`, and returns the results. The maximum /// relative error for this approximation is less than 1.5*2^-12. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rcp_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vrcpps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_rcp_ps(a: __m256) -> __m256 { vrcpps(a) } /// Computes the approximate reciprocal square root of packed single-precision /// (32-bit) floating-point elements in `a`, and returns the results. /// The maximum relative error for this approximation is less than 1.5*2^-12. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_rsqrt_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vrsqrtps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_rsqrt_ps(a: __m256) -> __m256 { vrsqrtps(a) } /// Unpacks and interleaves double-precision (64-bit) floating-point elements /// from the high half of each 128-bit lane in `a` and `b`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpackhi_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vunpckhpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_unpackhi_pd(a: __m256d, b: __m256d) -> __m256d { simd_shuffle4(a, b, [1, 5, 3, 7]) } /// Unpacks and interleaves single-precision (32-bit) floating-point elements /// from the high half of each 128-bit lane in `a` and `b`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpackhi_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vunpckhps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_unpackhi_ps(a: __m256, b: __m256) -> __m256 { simd_shuffle8(a, b, [2, 10, 3, 11, 6, 14, 7, 15]) } /// Unpacks and interleaves double-precision (64-bit) floating-point elements /// from the low half of each 128-bit lane in `a` and `b`.
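///
/// For example, with `a = [a0, a1, a2, a3]` and `b = [b0, b1, b2, b3]` the
/// result is `[a0, b0, a2, b2]`.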
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpacklo_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vunpcklpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_unpacklo_pd(a: __m256d, b: __m256d) -> __m256d { simd_shuffle4(a, b, [0, 4, 2, 6]) } /// Unpacks and interleaves single-precision (32-bit) floating-point elements /// from the low half of each 128-bit lane in `a` and `b`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_unpacklo_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vunpcklps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_unpacklo_ps(a: __m256, b: __m256) -> __m256 { simd_shuffle8(a, b, [0, 8, 1, 9, 4, 12, 5, 13]) } /// Computes the bitwise AND of 256 bits (representing integer data) in `a` and /// `b`, and sets `ZF` to 1 if the result is zero, otherwise sets `ZF` to 0. /// Computes the bitwise NOT of `a` and then ANDs it with `b`, and sets `CF` to 1 /// if the result is zero, otherwise sets `CF` to 0. Returns the `ZF` value. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testz_si256) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vptest))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_testz_si256(a: __m256i, b: __m256i) -> i32 { ptestz256(a.as_i64x4(), b.as_i64x4()) } /// Computes the bitwise AND of 256 bits (representing integer data) in `a` and /// `b`, and sets `ZF` to 1 if the result is zero, otherwise sets `ZF` to 0. /// Computes the bitwise NOT of `a` and then ANDs it with `b`, and sets `CF` to 1 /// if the result is zero, otherwise sets `CF` to 0. Returns the `CF` value. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testc_si256) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vptest))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_testc_si256(a: __m256i, b: __m256i) -> i32 { ptestc256(a.as_i64x4(), b.as_i64x4()) } /// Computes the bitwise AND of 256 bits (representing integer data) in `a` and /// `b`, and sets `ZF` to 1 if the result is zero, otherwise sets `ZF` to 0. /// Computes the bitwise NOT of `a` and then ANDs it with `b`, and sets `CF` to 1 /// if the result is zero, otherwise sets `CF` to 0. Returns 1 if both the `ZF` /// and `CF` values are zero, otherwise returns 0. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testnzc_si256) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vptest))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_testnzc_si256(a: __m256i, b: __m256i) -> i32 { ptestnzc256(a.as_i64x4(), b.as_i64x4()) } /// Computes the bitwise AND of 256 bits (representing double-precision (64-bit) /// floating-point elements) in `a` and `b`, producing an intermediate 256-bit /// value, and sets `ZF` to 1 if the sign bit of each 64-bit element in the /// intermediate value is zero, otherwise sets `ZF` to 0. Computes the bitwise /// NOT of `a` and then ANDs it with `b`, producing an intermediate value, and /// sets `CF` to 1 if the sign bit of each 64-bit element in the intermediate /// value is zero, otherwise sets `CF` to 0. Returns the `ZF` value.
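///
/// An illustrative sketch (not run as a doctest; assumes an AVX-capable CPU):
///
/// ```ignore
/// let a = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);
/// // No lane of `a AND a` has its sign bit set, so ZF = 1.
/// assert_eq!(_mm256_testz_pd(a, a), 1);
/// let b = _mm256_setr_pd(-1.0, 2.0, 3.0, 4.0);
/// // Lane 0 of `b AND b` keeps its sign bit, so ZF = 0.
/// assert_eq!(_mm256_testz_pd(b, b), 0);
/// ```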
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testz_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_testz_pd(a: __m256d, b: __m256d) -> i32 { vtestzpd256(a, b) } /// Computes the bitwise AND of 256 bits (representing double-precision (64-bit) /// floating-point elements) in `a` and `b`, producing an intermediate 256-bit /// value, and sets `ZF` to 1 if the sign bit of each 64-bit element in the /// intermediate value is zero, otherwise sets `ZF` to 0. Computes the bitwise /// NOT of `a` and then ANDs it with `b`, producing an intermediate value, and /// sets `CF` to 1 if the sign bit of each 64-bit element in the intermediate /// value is zero, otherwise sets `CF` to 0. Returns the `CF` value. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testc_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_testc_pd(a: __m256d, b: __m256d) -> i32 { vtestcpd256(a, b) } /// Computes the bitwise AND of 256 bits (representing double-precision (64-bit) /// floating-point elements) in `a` and `b`, producing an intermediate 256-bit /// value, and sets `ZF` to 1 if the sign bit of each 64-bit element in the /// intermediate value is zero, otherwise sets `ZF` to 0. Computes the bitwise /// NOT of `a` and then ANDs it with `b`, producing an intermediate value, and /// sets `CF` to 1 if the sign bit of each 64-bit element in the intermediate /// value is zero, otherwise sets `CF` to 0. Returns 1 if both the `ZF` and `CF` /// values are zero, otherwise returns 0. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testnzc_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_testnzc_pd(a: __m256d, b: __m256d) -> i32 { vtestnzcpd256(a, b) } /// Computes the bitwise AND of 128 bits (representing double-precision (64-bit) /// floating-point elements) in `a` and `b`, producing an intermediate 128-bit /// value, and sets `ZF` to 1 if the sign bit of each 64-bit element in the /// intermediate value is zero, otherwise sets `ZF` to 0. Computes the bitwise /// NOT of `a` and then ANDs it with `b`, producing an intermediate value, and /// sets `CF` to 1 if the sign bit of each 64-bit element in the intermediate /// value is zero, otherwise sets `CF` to 0. Returns the `ZF` value. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testz_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_testz_pd(a: __m128d, b: __m128d) -> i32 { vtestzpd(a, b) } /// Computes the bitwise AND of 128 bits (representing double-precision (64-bit) /// floating-point elements) in `a` and `b`, producing an intermediate 128-bit /// value, and sets `ZF` to 1 if the sign bit of each 64-bit element in the /// intermediate value is zero, otherwise sets `ZF` to 0. Computes the bitwise /// NOT of `a` and then ANDs it with `b`, producing an intermediate value, and /// sets `CF` to 1 if the sign bit of each 64-bit element in the intermediate /// value is zero, otherwise sets `CF` to 0. Returns the `CF` value.
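///
/// An illustrative sketch (not run as a doctest; assumes an AVX-capable CPU):
///
/// ```ignore
/// let a = _mm_setr_pd(-1.0, 1.0);
/// let b = _mm_setr_pd(-1.0, 1.0);
/// // Every sign bit set in `b` is also set in `a`, so CF = 1.
/// assert_eq!(_mm_testc_pd(a, b), 1);
/// ```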
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testc_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_testc_pd(a: __m128d, b: __m128d) -> i32 { vtestcpd(a, b) } /// Computes the bitwise AND of 128 bits (representing double-precision (64-bit) /// floating-point elements) in `a` and `b`, producing an intermediate 128-bit /// value, and sets `ZF` to 1 if the sign bit of each 64-bit element in the /// intermediate value is zero, otherwise sets `ZF` to 0. Computes the bitwise /// NOT of `a` and then ANDs it with `b`, producing an intermediate value, and /// sets `CF` to 1 if the sign bit of each 64-bit element in the intermediate /// value is zero, otherwise sets `CF` to 0. Returns 1 if both the `ZF` and `CF` /// values are zero, otherwise returns 0. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testnzc_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_testnzc_pd(a: __m128d, b: __m128d) -> i32 { vtestnzcpd(a, b) } /// Computes the bitwise AND of 256 bits (representing single-precision (32-bit) /// floating-point elements) in `a` and `b`, producing an intermediate 256-bit /// value, and sets `ZF` to 1 if the sign bit of each 32-bit element in the /// intermediate value is zero, otherwise sets `ZF` to 0. Computes the bitwise /// NOT of `a` and then ANDs it with `b`, producing an intermediate value, and /// sets `CF` to 1 if the sign bit of each 32-bit element in the intermediate /// value is zero, otherwise sets `CF` to 0. Returns the `ZF` value. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testz_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_testz_ps(a: __m256, b: __m256) -> i32 { vtestzps256(a, b) } /// Computes the bitwise AND of 256 bits (representing single-precision (32-bit) /// floating-point elements) in `a` and `b`, producing an intermediate 256-bit /// value, and sets `ZF` to 1 if the sign bit of each 32-bit element in the /// intermediate value is zero, otherwise sets `ZF` to 0. Computes the bitwise /// NOT of `a` and then ANDs it with `b`, producing an intermediate value, and /// sets `CF` to 1 if the sign bit of each 32-bit element in the intermediate /// value is zero, otherwise sets `CF` to 0. Returns the `CF` value. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testc_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_testc_ps(a: __m256, b: __m256) -> i32 { vtestcps256(a, b) } /// Computes the bitwise AND of 256 bits (representing single-precision (32-bit) /// floating-point elements) in `a` and `b`, producing an intermediate 256-bit /// value, and sets `ZF` to 1 if the sign bit of each 32-bit element in the /// intermediate value is zero, otherwise sets `ZF` to 0. Computes the bitwise /// NOT of `a` and then ANDs it with `b`, producing an intermediate value, and /// sets `CF` to 1 if the sign bit of each 32-bit element in the intermediate /// value is zero, otherwise sets `CF` to 0. Returns 1 if both the `ZF` and `CF` /// values are zero, otherwise returns 0.
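///
/// An illustrative sketch (not run as a doctest; assumes an AVX-capable CPU):
///
/// ```ignore
/// let a = _mm256_setr_ps(-1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0);
/// let b = _mm256_setr_ps(-1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0);
/// // `a AND b` has a sign bit in lane 0 (ZF = 0) and `NOT a AND b` has one
/// // in lane 1 (CF = 0), so the result is 1.
/// assert_eq!(_mm256_testnzc_ps(a, b), 1);
/// ```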
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_testnzc_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_testnzc_ps(a: __m256, b: __m256) -> i32 { vtestnzcps256(a, b) } /// Computes the bitwise AND of 128 bits (representing single-precision (32-bit) /// floating-point elements) in `a` and `b`, producing an intermediate 128-bit /// value, and sets `ZF` to 1 if the sign bit of each 32-bit element in the /// intermediate value is zero, otherwise sets `ZF` to 0. Computes the bitwise /// NOT of `a` and then ANDs it with `b`, producing an intermediate value, and /// sets `CF` to 1 if the sign bit of each 32-bit element in the intermediate /// value is zero, otherwise sets `CF` to 0. Returns the `ZF` value. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testz_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_testz_ps(a: __m128, b: __m128) -> i32 { vtestzps(a, b) } /// Computes the bitwise AND of 128 bits (representing single-precision (32-bit) /// floating-point elements) in `a` and `b`, producing an intermediate 128-bit /// value, and sets `ZF` to 1 if the sign bit of each 32-bit element in the /// intermediate value is zero, otherwise sets `ZF` to 0. Computes the bitwise /// NOT of `a` and then ANDs it with `b`, producing an intermediate value, and /// sets `CF` to 1 if the sign bit of each 32-bit element in the intermediate /// value is zero, otherwise sets `CF` to 0. Returns the `CF` value. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testc_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_testc_ps(a: __m128, b: __m128) -> i32 { vtestcps(a, b) } /// Computes the bitwise AND of 128 bits (representing single-precision (32-bit) /// floating-point elements) in `a` and `b`, producing an intermediate 128-bit /// value, and sets `ZF` to 1 if the sign bit of each 32-bit element in the /// intermediate value is zero, otherwise sets `ZF` to 0. Computes the bitwise /// NOT of `a` and then ANDs it with `b`, producing an intermediate value, and /// sets `CF` to 1 if the sign bit of each 32-bit element in the intermediate /// value is zero, otherwise sets `CF` to 0. Returns 1 if both the `ZF` and `CF` /// values are zero, otherwise returns 0. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_testnzc_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vtestps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_testnzc_ps(a: __m128, b: __m128) -> i32 { vtestnzcps(a, b) } /// Sets each bit of the returned mask based on the most significant bit of the /// corresponding packed double-precision (64-bit) floating-point element in /// `a`.
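///
/// An illustrative sketch (not run as a doctest; assumes an AVX-capable CPU):
///
/// ```ignore
/// // Lanes 0 and 2 are negative, so bits 0 and 2 of the mask are set.
/// let a = _mm256_setr_pd(-1.0, 1.0, -1.0, 1.0);
/// assert_eq!(_mm256_movemask_pd(a), 0b0101);
/// ```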
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_movemask_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovmskpd))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_movemask_pd(a: __m256d) -> i32 { movmskpd256(a) } /// Sets each bit of the returned mask based on the most significant bit of the /// corresponding packed single-precision (32-bit) floating-point element in /// `a`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_movemask_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vmovmskps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_movemask_ps(a: __m256) -> i32 { movmskps256(a) } /// Returns vector of type __m256d with all elements set to zero. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setzero_pd) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vxorps))] // FIXME vxorpd expected #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setzero_pd() -> __m256d { _mm256_set1_pd(0.0) } /// Returns vector of type __m256 with all elements set to zero. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setzero_ps) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vxorps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setzero_ps() -> __m256 { _mm256_set1_ps(0.0) } /// Returns vector of type __m256i with all elements set to zero. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setzero_si256) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vxor))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setzero_si256() -> __m256i { _mm256_set1_epi8(0) } /// Sets packed double-precision (64-bit) floating-point elements in returned /// vector with the supplied values. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_pd) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[cfg_attr(test, assert_instr(vinsertf128))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d { _mm256_setr_pd(d, c, b, a) } /// Sets packed single-precision (32-bit) floating-point elements in returned /// vector with the supplied values. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_ps) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set_ps( a: f32, b: f32, c: f32, d: f32, e: f32, f: f32, g: f32, h: f32, ) -> __m256 { _mm256_setr_ps(h, g, f, e, d, c, b, a) } /// Sets packed 8-bit integers in returned vector with the supplied values. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_epi8) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction.
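// Note: as with `_mm256_set_epi16`/`_mm256_set_epi32`, the first argument
// (`e00`) ends up in the highest lane; use `_mm256_setr_epi8` for
// low-to-high argument order.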
#[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set_epi8( e00: i8, e01: i8, e02: i8, e03: i8, e04: i8, e05: i8, e06: i8, e07: i8, e08: i8, e09: i8, e10: i8, e11: i8, e12: i8, e13: i8, e14: i8, e15: i8, e16: i8, e17: i8, e18: i8, e19: i8, e20: i8, e21: i8, e22: i8, e23: i8, e24: i8, e25: i8, e26: i8, e27: i8, e28: i8, e29: i8, e30: i8, e31: i8, ) -> __m256i { #[rustfmt::skip] _mm256_setr_epi8( e31, e30, e29, e28, e27, e26, e25, e24, e23, e22, e21, e20, e19, e18, e17, e16, e15, e14, e13, e12, e11, e10, e09, e08, e07, e06, e05, e04, e03, e02, e01, e00, ) } /// Sets packed 16-bit integers in returned vector with the supplied values. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_epi16) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set_epi16( e00: i16, e01: i16, e02: i16, e03: i16, e04: i16, e05: i16, e06: i16, e07: i16, e08: i16, e09: i16, e10: i16, e11: i16, e12: i16, e13: i16, e14: i16, e15: i16, ) -> __m256i { #[rustfmt::skip] _mm256_setr_epi16( e15, e14, e13, e12, e11, e10, e09, e08, e07, e06, e05, e04, e03, e02, e01, e00, ) } /// Sets packed 32-bit integers in returned vector with the supplied values. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_epi32) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set_epi32( e0: i32, e1: i32, e2: i32, e3: i32, e4: i32, e5: i32, e6: i32, e7: i32, ) -> __m256i { _mm256_setr_epi32(e7, e6, e5, e4, e3, e2, e1, e0) } /// Sets packed 64-bit integers in returned vector with the supplied values. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_epi64x) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i { _mm256_setr_epi64x(d, c, b, a) } /// Sets packed double-precision (64-bit) floating-point elements in returned /// vector with the supplied values in reverse order. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_pd) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setr_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d { __m256d(a, b, c, d) } /// Sets packed single-precision (32-bit) floating-point elements in returned /// vector with the supplied values in reverse order. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_ps) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setr_ps( a: f32, b: f32, c: f32, d: f32, e: f32, f: f32, g: f32, h: f32, ) -> __m256 { __m256(a, b, c, d, e, f, g, h) } /// Sets packed 8-bit integers in returned vector with the supplied values in /// reverse order. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_epi8) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. 
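// Note: the arguments are stored in low-to-high lane order, i.e. `e00`
// becomes element 0 of the result.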
#[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setr_epi8( e00: i8, e01: i8, e02: i8, e03: i8, e04: i8, e05: i8, e06: i8, e07: i8, e08: i8, e09: i8, e10: i8, e11: i8, e12: i8, e13: i8, e14: i8, e15: i8, e16: i8, e17: i8, e18: i8, e19: i8, e20: i8, e21: i8, e22: i8, e23: i8, e24: i8, e25: i8, e26: i8, e27: i8, e28: i8, e29: i8, e30: i8, e31: i8, ) -> __m256i { #[rustfmt::skip] transmute(i8x32::new( e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, )) } /// Sets packed 16-bit integers in returned vector with the supplied values in /// reverse order. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_epi16) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setr_epi16( e00: i16, e01: i16, e02: i16, e03: i16, e04: i16, e05: i16, e06: i16, e07: i16, e08: i16, e09: i16, e10: i16, e11: i16, e12: i16, e13: i16, e14: i16, e15: i16, ) -> __m256i { #[rustfmt::skip] transmute(i16x16::new( e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, )) } /// Sets packed 32-bit integers in returned vector with the supplied values in /// reverse order. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_epi32) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setr_epi32( e0: i32, e1: i32, e2: i32, e3: i32, e4: i32, e5: i32, e6: i32, e7: i32, ) -> __m256i { transmute(i32x8::new(e0, e1, e2, e3, e4, e5, e6, e7)) } /// Sets packed 64-bit integers in returned vector with the supplied values in /// reverse order. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_epi64x) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setr_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i { transmute(i64x4::new(a, b, c, d)) } /// Broadcasts double-precision (64-bit) floating-point value `a` to all /// elements of returned vector. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_pd) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set1_pd(a: f64) -> __m256d { _mm256_setr_pd(a, a, a, a) } /// Broadcasts single-precision (32-bit) floating-point value `a` to all /// elements of returned vector. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_ps) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set1_ps(a: f32) -> __m256 { _mm256_setr_ps(a, a, a, a, a, a, a, a) } /// Broadcasts 8-bit integer `a` to all elements of returned vector. /// This intrinsic may generate the `vpbroadcastb`. 
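///
/// For example, `_mm256_set1_epi8(3)` yields a vector whose 32 lanes all
/// equal `3`.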
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_epi8) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vpshufb))] #[cfg_attr(test, assert_instr(vinsertf128))] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set1_epi8(a: i8) -> __m256i { #[rustfmt::skip] _mm256_setr_epi8( a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, ) } /// Broadcasts 16-bit integer `a` to all elements of returned vector. /// This intrinsic may generate the `vpbroadcastw`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_epi16) #[inline] #[target_feature(enable = "avx")] //#[cfg_attr(test, assert_instr(vpshufb))] #[cfg_attr(test, assert_instr(vinsertf128))] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set1_epi16(a: i16) -> __m256i { _mm256_setr_epi16(a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a) } /// Broadcasts 32-bit integer `a` to all elements of returned vector. /// This intrinsic may generate the `vpbroadcastd`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_epi32) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set1_epi32(a: i32) -> __m256i { _mm256_setr_epi32(a, a, a, a, a, a, a, a) } /// Broadcasts 64-bit integer `a` to all elements of returned vector. /// This intrinsic may generate the `vpbroadcastq`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set1_epi64x) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(all(test, target_arch = "x86_64"), assert_instr(vinsertf128))] #[cfg_attr(all(test, target_arch = "x86"), assert_instr(vbroadcastsd))] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set1_epi64x(a: i64) -> __m256i { _mm256_setr_epi64x(a, a, a, a) } /// Cast vector of type __m256d to type __m256. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castpd_ps) #[inline] #[target_feature(enable = "avx")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_castpd_ps(a: __m256d) -> __m256 { transmute(a) } /// Cast vector of type __m256 to type __m256d. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castps_pd) #[inline] #[target_feature(enable = "avx")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_castps_pd(a: __m256) -> __m256d { transmute(a) } /// Casts vector of type __m256 to type __m256i. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castps_si256) #[inline] #[target_feature(enable = "avx")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency.
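// Note: this reinterprets the bit pattern only; it does not convert values.
// Use `_mm256_cvtps_epi32` for a numeric float-to-int conversion.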
#[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_castps_si256(a: __m256) -> __m256i { transmute(a) } /// Casts vector of type __m256i to type __m256. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castsi256_ps) #[inline] #[target_feature(enable = "avx")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_castsi256_ps(a: __m256i) -> __m256 { transmute(a) } /// Casts vector of type __m256d to type __m256i. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castpd_si256) #[inline] #[target_feature(enable = "avx")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_castpd_si256(a: __m256d) -> __m256i { transmute(a) } /// Casts vector of type __m256i to type __m256d. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castsi256_pd) #[inline] #[target_feature(enable = "avx")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_castsi256_pd(a: __m256i) -> __m256d { transmute(a) } /// Casts vector of type __m256 to type __m128. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castps256_ps128) #[inline] #[target_feature(enable = "avx")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_castps256_ps128(a: __m256) -> __m128 { simd_shuffle4(a, a, [0, 1, 2, 3]) } /// Casts vector of type __m256d to type __m128d. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castpd256_pd128) #[inline] #[target_feature(enable = "avx")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_castpd256_pd128(a: __m256d) -> __m128d { simd_shuffle2(a, a, [0, 1]) } /// Casts vector of type __m256i to type __m128i. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castsi256_si128) #[inline] #[target_feature(enable = "avx")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_castsi256_si128(a: __m256i) -> __m128i { let a = a.as_i64x4(); let dst: i64x2 = simd_shuffle2(a, a, [0, 1]); transmute(dst) } /// Casts vector of type __m128 to type __m256; /// the upper 128 bits of the result are undefined. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castps128_ps256) #[inline] #[target_feature(enable = "avx")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. 
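// Note: only the lower 128 bits of the result are specified; use
// `_mm256_zextps128_ps256` when the upper half must be zeroed.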
#[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_castps128_ps256(a: __m128) -> __m256 { // FIXME simd_shuffle8(a, a, [0, 1, 2, 3, -1, -1, -1, -1]) simd_shuffle8(a, a, [0, 1, 2, 3, 0, 0, 0, 0]) } /// Casts vector of type __m128d to type __m256d; /// the upper 128 bits of the result are undefined. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castpd128_pd256) #[inline] #[target_feature(enable = "avx")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_castpd128_pd256(a: __m128d) -> __m256d { // FIXME simd_shuffle4(a, a, [0, 1, -1, -1]) simd_shuffle4(a, a, [0, 1, 0, 0]) } /// Casts vector of type __m128i to type __m256i; /// the upper 128 bits of the result are undefined. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_castsi128_si256) #[inline] #[target_feature(enable = "avx")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_castsi128_si256(a: __m128i) -> __m256i { let a = a.as_i64x2(); // FIXME simd_shuffle4(a, a, [0, 1, -1, -1]) let dst: i64x4 = simd_shuffle4(a, a, [0, 1, 0, 0]); transmute(dst) } /// Constructs a 256-bit floating-point vector of `[8 x float]` from a /// 128-bit floating-point vector of `[4 x float]`. The lower 128 bits contain /// the value of the source vector. The upper 128 bits are set to zero. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_zextps128_ps256) #[inline] #[target_feature(enable = "avx,sse")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_zextps128_ps256(a: __m128) -> __m256 { simd_shuffle8(a, _mm_setzero_ps(), [0, 1, 2, 3, 4, 5, 6, 7]) } /// Constructs a 256-bit integer vector from a 128-bit integer vector. /// The lower 128 bits contain the value of the source vector. The upper /// 128 bits are set to zero. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_zextsi128_si256) #[inline] #[target_feature(enable = "avx,sse2")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_zextsi128_si256(a: __m128i) -> __m256i { let b = _mm_setzero_si128().as_i64x2(); let dst: i64x4 = simd_shuffle4(a.as_i64x2(), b, [0, 1, 2, 3]); transmute(dst) } /// Constructs a 256-bit floating-point vector of `[4 x double]` from a /// 128-bit floating-point vector of `[2 x double]`. The lower 128 bits /// contain the value of the source vector. The upper 128 bits are set /// to zero. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_zextpd128_pd256) #[inline] #[target_feature(enable = "avx,sse2")] // This intrinsic is only used for compilation and does not generate any // instructions, thus it has zero latency. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_zextpd128_pd256(a: __m128d) -> __m256d { simd_shuffle4(a, _mm_setzero_pd(), [0, 1, 2, 3]) } /// Returns vector of type `__m256` with undefined elements. 
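///
/// In this implementation the elements currently happen to be zeroed, but
/// callers must not rely on any particular value.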
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_undefined_ps) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_undefined_ps() -> __m256 { _mm256_set1_ps(0.0) } /// Returns vector of type `__m256d` with undefined elements. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_undefined_pd) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_undefined_pd() -> __m256d { _mm256_set1_pd(0.0) } /// Returns vector of type __m256i with undefined elements. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_undefined_si256) #[inline] #[target_feature(enable = "avx")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_undefined_si256() -> __m256i { // FIXME: this function should return MaybeUninit<__m256i> mem::MaybeUninit::<__m256i>::uninit().assume_init() } /// Sets packed __m256 returned vector with the supplied values. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_m128) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vinsertf128))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set_m128(hi: __m128, lo: __m128) -> __m256 { simd_shuffle8(lo, hi, [0, 1, 2, 3, 4, 5, 6, 7]) } /// Sets packed __m256d returned vector with the supplied values. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_m128d) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vinsertf128))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set_m128d(hi: __m128d, lo: __m128d) -> __m256d { let hi: __m128 = transmute(hi); let lo: __m128 = transmute(lo); transmute(_mm256_set_m128(hi, lo)) } /// Sets packed __m256i returned vector with the supplied values. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_set_m128i) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vinsertf128))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_set_m128i(hi: __m128i, lo: __m128i) -> __m256i { let hi: __m128 = transmute(hi); let lo: __m128 = transmute(lo); transmute(_mm256_set_m128(hi, lo)) } /// Sets packed __m256 returned vector with the supplied values. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_m128) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vinsertf128))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setr_m128(lo: __m128, hi: __m128) -> __m256 { _mm256_set_m128(hi, lo) } /// Sets packed __m256d returned vector with the supplied values. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_m128d) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vinsertf128))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setr_m128d(lo: __m128d, hi: __m128d) -> __m256d { _mm256_set_m128d(hi, lo) } /// Sets packed __m256i returned vector with the supplied values. 
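///
/// This is `_mm256_set_m128i(hi, lo)` with the argument order swapped, so
/// `lo` supplies the lower 128 bits of the result.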
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_setr_m128i) #[inline] #[target_feature(enable = "avx")] #[cfg_attr(test, assert_instr(vinsertf128))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setr_m128i(lo: __m128i, hi: __m128i) -> __m256i { _mm256_set_m128i(hi, lo) } /// Loads two 128-bit values (composed of 4 packed single-precision (32-bit) /// floating-point elements) from memory, and combines them into a 256-bit /// value. /// `hiaddr` and `loaddr` do not need to be aligned on any particular boundary. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu2_m128) #[inline] #[target_feature(enable = "avx,sse")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_loadu2_m128(hiaddr: *const f32, loaddr: *const f32) -> __m256 { let a = _mm256_castps128_ps256(_mm_loadu_ps(loaddr)); _mm256_insertf128_ps::<1>(a, _mm_loadu_ps(hiaddr)) } /// Loads two 128-bit values (composed of 2 packed double-precision (64-bit) /// floating-point elements) from memory, and combines them into a 256-bit /// value. /// `hiaddr` and `loaddr` do not need to be aligned on any particular boundary. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu2_m128d) #[inline] #[target_feature(enable = "avx,sse2")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_loadu2_m128d(hiaddr: *const f64, loaddr: *const f64) -> __m256d { let a = _mm256_castpd128_pd256(_mm_loadu_pd(loaddr)); _mm256_insertf128_pd::<1>(a, _mm_loadu_pd(hiaddr)) } /// Loads two 128-bit values (composed of integer data) from memory, and combines /// them into a 256-bit value. /// `hiaddr` and `loaddr` do not need to be aligned on any particular boundary. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_loadu2_m128i) #[inline] #[target_feature(enable = "avx,sse2")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_loadu2_m128i(hiaddr: *const __m128i, loaddr: *const __m128i) -> __m256i { let a = _mm256_castsi128_si256(_mm_loadu_si128(loaddr)); _mm256_insertf128_si256::<1>(a, _mm_loadu_si128(hiaddr)) } /// Stores the high and low 128-bit halves (each composed of 4 packed /// single-precision (32-bit) floating-point elements) from `a` into two /// different 128-bit memory locations. /// `hiaddr` and `loaddr` do not need to be aligned on any particular boundary. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu2_m128) #[inline] #[target_feature(enable = "avx,sse")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_storeu2_m128(hiaddr: *mut f32, loaddr: *mut f32, a: __m256) { let lo = _mm256_castps256_ps128(a); _mm_storeu_ps(loaddr, lo); let hi = _mm256_extractf128_ps::<1>(a); _mm_storeu_ps(hiaddr, hi); } /// Stores the high and low 128-bit halves (each composed of 2 packed /// double-precision (64-bit) floating-point elements) from `a` into two /// different 128-bit memory locations. /// `hiaddr` and `loaddr` do not need to be aligned on any particular boundary.
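///
/// An illustrative sketch (not run as a doctest; assumes an AVX-capable CPU):
///
/// ```ignore
/// let a = _mm256_setr_pd(1.0, 2.0, 3.0, 4.0);
/// let (mut lo, mut hi) = ([0.0f64; 2], [0.0f64; 2]);
/// _mm256_storeu2_m128d(hi.as_mut_ptr(), lo.as_mut_ptr(), a);
/// assert_eq!(lo, [1.0, 2.0]);
/// assert_eq!(hi, [3.0, 4.0]);
/// ```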
/// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu2_m128d) #[inline] #[target_feature(enable = "avx,sse2")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_storeu2_m128d(hiaddr: *mut f64, loaddr: *mut f64, a: __m256d) { let lo = _mm256_castpd256_pd128(a); _mm_storeu_pd(loaddr, lo); let hi = _mm256_extractf128_pd::<1>(a); _mm_storeu_pd(hiaddr, hi); } /// Stores the high and low 128-bit halves (each composed of integer data) from /// `a` into two different 128-bit memory locations. /// `hiaddr` and `loaddr` do not need to be aligned on any particular boundary. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_storeu2_m128i) #[inline] #[target_feature(enable = "avx,sse2")] // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_storeu2_m128i(hiaddr: *mut __m128i, loaddr: *mut __m128i, a: __m256i) { let lo = _mm256_castsi256_si128(a); _mm_storeu_si128(loaddr, lo); let hi = _mm256_extractf128_si256::<1>(a); _mm_storeu_si128(hiaddr, hi); } /// Returns the first element of the input vector of `[8 x float]`. /// /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_cvtss_f32) #[inline] #[target_feature(enable = "avx")] //#[cfg_attr(test, assert_instr(movss))] FIXME #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_cvtss_f32(a: __m256) -> f32 { simd_extract(a, 0) } /// LLVM intrinsics used in the above functions #[allow(improper_ctypes)] extern "C" { #[link_name = "llvm.x86.avx.addsub.pd.256"] fn addsubpd256(a: __m256d, b: __m256d) -> __m256d; #[link_name = "llvm.x86.avx.addsub.ps.256"] fn addsubps256(a: __m256, b: __m256) -> __m256; #[link_name = "llvm.x86.avx.round.pd.256"] fn roundpd256(a: __m256d, b: i32) -> __m256d; #[link_name = "llvm.x86.avx.round.ps.256"] fn roundps256(a: __m256, b: i32) -> __m256; #[link_name = "llvm.x86.avx.sqrt.ps.256"] fn sqrtps256(a: __m256) -> __m256; #[link_name = "llvm.x86.avx.blendv.pd.256"] fn vblendvpd(a: __m256d, b: __m256d, c: __m256d) -> __m256d; #[link_name = "llvm.x86.avx.blendv.ps.256"] fn vblendvps(a: __m256, b: __m256, c: __m256) -> __m256; #[link_name = "llvm.x86.avx.dp.ps.256"] fn vdpps(a: __m256, b: __m256, imm8: i32) -> __m256; #[link_name = "llvm.x86.avx.hadd.pd.256"] fn vhaddpd(a: __m256d, b: __m256d) -> __m256d; #[link_name = "llvm.x86.avx.hadd.ps.256"] fn vhaddps(a: __m256, b: __m256) -> __m256; #[link_name = "llvm.x86.avx.hsub.pd.256"] fn vhsubpd(a: __m256d, b: __m256d) -> __m256d; #[link_name = "llvm.x86.avx.hsub.ps.256"] fn vhsubps(a: __m256, b: __m256) -> __m256; #[link_name = "llvm.x86.sse2.cmp.pd"] fn vcmppd(a: __m128d, b: __m128d, imm8: i8) -> __m128d; #[link_name = "llvm.x86.avx.cmp.pd.256"] fn vcmppd256(a: __m256d, b: __m256d, imm8: u8) -> __m256d; #[link_name = "llvm.x86.sse.cmp.ps"] fn vcmpps(a: __m128, b: __m128, imm8: i8) -> __m128; #[link_name = "llvm.x86.avx.cmp.ps.256"] fn vcmpps256(a: __m256, b: __m256, imm8: u8) -> __m256; #[link_name = "llvm.x86.sse2.cmp.sd"] fn vcmpsd(a: __m128d, b: __m128d, imm8: i8) -> __m128d; #[link_name = "llvm.x86.sse.cmp.ss"] fn vcmpss(a: __m128, b: __m128, imm8: i8) -> __m128; #[link_name = "llvm.x86.avx.cvtdq2.ps.256"] fn vcvtdq2ps(a: i32x8) -> __m256; #[link_name = "llvm.x86.avx.cvt.pd2.ps.256"] fn vcvtpd2ps(a: __m256d) -> __m128; #[link_name = "llvm.x86.avx.cvt.ps2dq.256"] fn
vcvtps2dq(a: __m256) -> i32x8; #[link_name = "llvm.x86.avx.cvtt.pd2dq.256"] fn vcvttpd2dq(a: __m256d) -> i32x4; #[link_name = "llvm.x86.avx.cvt.pd2dq.256"] fn vcvtpd2dq(a: __m256d) -> i32x4; #[link_name = "llvm.x86.avx.cvtt.ps2dq.256"] fn vcvttps2dq(a: __m256) -> i32x8; #[link_name = "llvm.x86.avx.vzeroall"] fn vzeroall(); #[link_name = "llvm.x86.avx.vzeroupper"] fn vzeroupper(); #[link_name = "llvm.x86.avx.vpermilvar.ps.256"] fn vpermilps256(a: __m256, b: i32x8) -> __m256; #[link_name = "llvm.x86.avx.vpermilvar.ps"] fn vpermilps(a: __m128, b: i32x4) -> __m128; #[link_name = "llvm.x86.avx.vpermilvar.pd.256"] fn vpermilpd256(a: __m256d, b: i64x4) -> __m256d; #[link_name = "llvm.x86.avx.vpermilvar.pd"] fn vpermilpd(a: __m128d, b: i64x2) -> __m128d; #[link_name = "llvm.x86.avx.vperm2f128.ps.256"] fn vperm2f128ps256(a: __m256, b: __m256, imm8: i8) -> __m256; #[link_name = "llvm.x86.avx.vperm2f128.pd.256"] fn vperm2f128pd256(a: __m256d, b: __m256d, imm8: i8) -> __m256d; #[link_name = "llvm.x86.avx.vperm2f128.si.256"] fn vperm2f128si256(a: i32x8, b: i32x8, imm8: i8) -> i32x8; #[link_name = "llvm.x86.avx.vbroadcastf128.ps.256"] fn vbroadcastf128ps256(a: &__m128) -> __m256; #[link_name = "llvm.x86.avx.vbroadcastf128.pd.256"] fn vbroadcastf128pd256(a: &__m128d) -> __m256d; #[link_name = "llvm.x86.avx.storeu.pd.256"] fn storeupd256(mem_addr: *mut f64, a: __m256d); #[link_name = "llvm.x86.avx.storeu.ps.256"] fn storeups256(mem_addr: *mut f32, a: __m256); #[link_name = "llvm.x86.avx.storeu.dq.256"] fn storeudq256(mem_addr: *mut i8, a: i8x32); #[link_name = "llvm.x86.avx.maskload.pd.256"] fn maskloadpd256(mem_addr: *const i8, mask: i64x4) -> __m256d; #[link_name = "llvm.x86.avx.maskstore.pd.256"] fn maskstorepd256(mem_addr: *mut i8, mask: i64x4, a: __m256d); #[link_name = "llvm.x86.avx.maskload.pd"] fn maskloadpd(mem_addr: *const i8, mask: i64x2) -> __m128d; #[link_name = "llvm.x86.avx.maskstore.pd"] fn maskstorepd(mem_addr: *mut i8, mask: i64x2, a: __m128d); #[link_name = "llvm.x86.avx.maskload.ps.256"] fn maskloadps256(mem_addr: *const i8, mask: i32x8) -> __m256; #[link_name = "llvm.x86.avx.maskstore.ps.256"] fn maskstoreps256(mem_addr: *mut i8, mask: i32x8, a: __m256); #[link_name = "llvm.x86.avx.maskload.ps"] fn maskloadps(mem_addr: *const i8, mask: i32x4) -> __m128; #[link_name = "llvm.x86.avx.maskstore.ps"] fn maskstoreps(mem_addr: *mut i8, mask: i32x4, a: __m128); #[link_name = "llvm.x86.avx.ldu.dq.256"] fn vlddqu(mem_addr: *const i8) -> i8x32; #[link_name = "llvm.x86.avx.rcp.ps.256"] fn vrcpps(a: __m256) -> __m256; #[link_name = "llvm.x86.avx.rsqrt.ps.256"] fn vrsqrtps(a: __m256) -> __m256; #[link_name = "llvm.x86.avx.ptestz.256"] fn ptestz256(a: i64x4, b: i64x4) -> i32; #[link_name = "llvm.x86.avx.ptestc.256"] fn ptestc256(a: i64x4, b: i64x4) -> i32; #[link_name = "llvm.x86.avx.ptestnzc.256"] fn ptestnzc256(a: i64x4, b: i64x4) -> i32; #[link_name = "llvm.x86.avx.vtestz.pd.256"] fn vtestzpd256(a: __m256d, b: __m256d) -> i32; #[link_name = "llvm.x86.avx.vtestc.pd.256"] fn vtestcpd256(a: __m256d, b: __m256d) -> i32; #[link_name = "llvm.x86.avx.vtestnzc.pd.256"] fn vtestnzcpd256(a: __m256d, b: __m256d) -> i32; #[link_name = "llvm.x86.avx.vtestz.pd"] fn vtestzpd(a: __m128d, b: __m128d) -> i32; #[link_name = "llvm.x86.avx.vtestc.pd"] fn vtestcpd(a: __m128d, b: __m128d) -> i32; #[link_name = "llvm.x86.avx.vtestnzc.pd"] fn vtestnzcpd(a: __m128d, b: __m128d) -> i32; #[link_name = "llvm.x86.avx.vtestz.ps.256"] fn vtestzps256(a: __m256, b: __m256) -> i32; #[link_name = "llvm.x86.avx.vtestc.ps.256"] fn 
vtestcps256(a: __m256, b: __m256) -> i32; #[link_name = "llvm.x86.avx.vtestnzc.ps.256"] fn vtestnzcps256(a: __m256, b: __m256) -> i32; #[link_name = "llvm.x86.avx.vtestz.ps"] fn vtestzps(a: __m128, b: __m128) -> i32; #[link_name = "llvm.x86.avx.vtestc.ps"] fn vtestcps(a: __m128, b: __m128) -> i32; #[link_name = "llvm.x86.avx.vtestnzc.ps"] fn vtestnzcps(a: __m128, b: __m128) -> i32; #[link_name = "llvm.x86.avx.movmsk.pd.256"] fn movmskpd256(a: __m256d) -> i32; #[link_name = "llvm.x86.avx.movmsk.ps.256"] fn movmskps256(a: __m256) -> i32; } #[cfg(test)] mod tests { use crate::hint::black_box; use stdarch_test::simd_test; use crate::core_arch::x86::*; #[simd_test(enable = "avx")] unsafe fn test_mm256_add_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_add_pd(a, b); let e = _mm256_setr_pd(6., 8., 10., 12.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_add_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_setr_ps(9., 10., 11., 12., 13., 14., 15., 16.); let r = _mm256_add_ps(a, b); let e = _mm256_setr_ps(10., 12., 14., 16., 18., 20., 22., 24.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_and_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set1_pd(0.6); let r = _mm256_and_pd(a, b); let e = _mm256_set1_pd(0.5); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_and_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set1_ps(0.6); let r = _mm256_and_ps(a, b); let e = _mm256_set1_ps(0.5); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_or_pd() { let a = _mm256_set1_pd(1.); let b = _mm256_set1_pd(0.6); let r = _mm256_or_pd(a, b); let e = _mm256_set1_pd(1.2); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_or_ps() { let a = _mm256_set1_ps(1.); let b = _mm256_set1_ps(0.6); let r = _mm256_or_ps(a, b); let e = _mm256_set1_ps(1.2); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_shuffle_pd() { let a = _mm256_setr_pd(1., 4., 5., 8.); let b = _mm256_setr_pd(2., 3., 6., 7.); let r = _mm256_shuffle_pd::<0b11_11_11_11>(a, b); let e = _mm256_setr_pd(4., 3., 8., 7.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_shuffle_ps() { let a = _mm256_setr_ps(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm256_setr_ps(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm256_shuffle_ps::<0b00_00_11_11>(a, b); let e = _mm256_setr_ps(8., 8., 2., 2., 16., 16., 10., 10.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_andnot_pd() { let a = _mm256_set1_pd(0.); let b = _mm256_set1_pd(0.6); let r = _mm256_andnot_pd(a, b); assert_eq_m256d(r, b); } #[simd_test(enable = "avx")] unsafe fn test_mm256_andnot_ps() { let a = _mm256_set1_ps(0.); let b = _mm256_set1_ps(0.6); let r = _mm256_andnot_ps(a, b); assert_eq_m256(r, b); } #[simd_test(enable = "avx")] unsafe fn test_mm256_max_pd() { let a = _mm256_setr_pd(1., 4., 5., 8.); let b = _mm256_setr_pd(2., 3., 6., 7.); let r = _mm256_max_pd(a, b); let e = _mm256_setr_pd(2., 4., 6., 8.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_max_ps() { let a = _mm256_setr_ps(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm256_setr_ps(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm256_max_ps(a, b); let e = _mm256_setr_ps(2., 4., 6., 8., 10., 12., 14., 16.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_min_pd() { let a = _mm256_setr_pd(1., 4., 5., 8.); let 
b = _mm256_setr_pd(2., 3., 6., 7.); let r = _mm256_min_pd(a, b); let e = _mm256_setr_pd(1., 3., 5., 7.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_min_ps() { let a = _mm256_setr_ps(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm256_setr_ps(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm256_min_ps(a, b); let e = _mm256_setr_ps(1., 3., 5., 7., 9., 11., 13., 15.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_mul_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_mul_pd(a, b); let e = _mm256_setr_pd(5., 12., 21., 32.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_mul_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_setr_ps(9., 10., 11., 12., 13., 14., 15., 16.); let r = _mm256_mul_ps(a, b); let e = _mm256_setr_ps(9., 20., 33., 48., 65., 84., 105., 128.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_addsub_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_addsub_pd(a, b); let e = _mm256_setr_pd(-4., 8., -4., 12.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_addsub_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 1., 2., 3., 4.); let b = _mm256_setr_ps(5., 6., 7., 8., 5., 6., 7., 8.); let r = _mm256_addsub_ps(a, b); let e = _mm256_setr_ps(-4., 8., -4., 12., -4., 8., -4., 12.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_sub_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_sub_pd(a, b); let e = _mm256_setr_pd(-4., -4., -4., -4.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_sub_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., -1., -2., -3., -4.); let b = _mm256_setr_ps(5., 6., 7., 8., 3., 2., 1., 0.); let r = _mm256_sub_ps(a, b); let e = _mm256_setr_ps(-4., -4., -4., -4., -4., -4., -4., -4.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_round_pd() { let a = _mm256_setr_pd(1.55, 2.2, 3.99, -1.2); let result_closest = _mm256_round_pd::<0b0000>(a); let result_down = _mm256_round_pd::<0b0001>(a); let result_up = _mm256_round_pd::<0b0010>(a); let expected_closest = _mm256_setr_pd(2., 2., 4., -1.); let expected_down = _mm256_setr_pd(1., 2., 3., -2.); let expected_up = _mm256_setr_pd(2., 3., 4., -1.); assert_eq_m256d(result_closest, expected_closest); assert_eq_m256d(result_down, expected_down); assert_eq_m256d(result_up, expected_up); } #[simd_test(enable = "avx")] unsafe fn test_mm256_floor_pd() { let a = _mm256_setr_pd(1.55, 2.2, 3.99, -1.2); let result_down = _mm256_floor_pd(a); let expected_down = _mm256_setr_pd(1., 2., 3., -2.); assert_eq_m256d(result_down, expected_down); } #[simd_test(enable = "avx")] unsafe fn test_mm256_ceil_pd() { let a = _mm256_setr_pd(1.55, 2.2, 3.99, -1.2); let result_up = _mm256_ceil_pd(a); let expected_up = _mm256_setr_pd(2., 3., 4., -1.); assert_eq_m256d(result_up, expected_up); } #[simd_test(enable = "avx")] unsafe fn test_mm256_round_ps() { let a = _mm256_setr_ps(1.55, 2.2, 3.99, -1.2, 1.55, 2.2, 3.99, -1.2); let result_closest = _mm256_round_ps::<0b0000>(a); let result_down = _mm256_round_ps::<0b0001>(a); let result_up = _mm256_round_ps::<0b0010>(a); let expected_closest = _mm256_setr_ps(2., 2., 4., -1., 2., 2., 4., -1.); let expected_down = _mm256_setr_ps(1., 2., 3., -2., 1., 2., 3., -2.); let expected_up = _mm256_setr_ps(2., 3., 4., -1., 2., 3., 4., 
-1.); assert_eq_m256(result_closest, expected_closest); assert_eq_m256(result_down, expected_down); assert_eq_m256(result_up, expected_up); } #[simd_test(enable = "avx")] unsafe fn test_mm256_floor_ps() { let a = _mm256_setr_ps(1.55, 2.2, 3.99, -1.2, 1.55, 2.2, 3.99, -1.2); let result_down = _mm256_floor_ps(a); let expected_down = _mm256_setr_ps(1., 2., 3., -2., 1., 2., 3., -2.); assert_eq_m256(result_down, expected_down); } #[simd_test(enable = "avx")] unsafe fn test_mm256_ceil_ps() { let a = _mm256_setr_ps(1.55, 2.2, 3.99, -1.2, 1.55, 2.2, 3.99, -1.2); let result_up = _mm256_ceil_ps(a); let expected_up = _mm256_setr_ps(2., 3., 4., -1., 2., 3., 4., -1.); assert_eq_m256(result_up, expected_up); } #[simd_test(enable = "avx")] unsafe fn test_mm256_sqrt_pd() { let a = _mm256_setr_pd(4., 9., 16., 25.); let r = _mm256_sqrt_pd(a); let e = _mm256_setr_pd(2., 3., 4., 5.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_sqrt_ps() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let r = _mm256_sqrt_ps(a); let e = _mm256_setr_ps(2., 3., 4., 5., 2., 3., 4., 5.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_div_ps() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let b = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let r = _mm256_div_ps(a, b); let e = _mm256_setr_ps(1., 3., 8., 5., 0.5, 1., 0.25, 0.5); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_div_pd() { let a = _mm256_setr_pd(4., 9., 16., 25.); let b = _mm256_setr_pd(4., 3., 2., 5.); let r = _mm256_div_pd(a, b); let e = _mm256_setr_pd(1., 3., 8., 5.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_blend_pd() { let a = _mm256_setr_pd(4., 9., 16., 25.); let b = _mm256_setr_pd(4., 3., 2., 5.); let r = _mm256_blend_pd::<0x0>(a, b); assert_eq_m256d(r, _mm256_setr_pd(4., 9., 16., 25.)); let r = _mm256_blend_pd::<0x3>(a, b); assert_eq_m256d(r, _mm256_setr_pd(4., 3., 16., 25.)); let r = _mm256_blend_pd::<0xF>(a, b); assert_eq_m256d(r, _mm256_setr_pd(4., 3., 2., 5.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_blend_ps() { let a = _mm256_setr_ps(1., 4., 5., 8., 9., 12., 13., 16.); let b = _mm256_setr_ps(2., 3., 6., 7., 10., 11., 14., 15.); let r = _mm256_blend_ps::<0x0>(a, b); assert_eq_m256(r, _mm256_setr_ps(1., 4., 5., 8., 9., 12., 13., 16.)); let r = _mm256_blend_ps::<0x3>(a, b); assert_eq_m256(r, _mm256_setr_ps(2., 3., 5., 8., 9., 12., 13., 16.)); let r = _mm256_blend_ps::<0xF>(a, b); assert_eq_m256(r, _mm256_setr_ps(2., 3., 6., 7., 9., 12., 13., 16.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_blendv_pd() { let a = _mm256_setr_pd(4., 9., 16., 25.); let b = _mm256_setr_pd(4., 3., 2., 5.); let c = _mm256_setr_pd(0., 0., !0 as f64, !0 as f64); let r = _mm256_blendv_pd(a, b, c); let e = _mm256_setr_pd(4., 9., 2., 5.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_blendv_ps() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let b = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); #[rustfmt::skip] let c = _mm256_setr_ps( 0., 0., 0., 0., !0 as f32, !0 as f32, !0 as f32, !0 as f32, ); let r = _mm256_blendv_ps(a, b, c); let e = _mm256_setr_ps(4., 9., 16., 25., 8., 9., 64., 50.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_dp_ps() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let b = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let r = _mm256_dp_ps::<0xFF>(a, b); let e = _mm256_setr_ps(200., 200., 200., 200., 
2387., 2387., 2387., 2387.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_hadd_pd() { let a = _mm256_setr_pd(4., 9., 16., 25.); let b = _mm256_setr_pd(4., 3., 2., 5.); let r = _mm256_hadd_pd(a, b); let e = _mm256_setr_pd(13., 7., 41., 7.); assert_eq_m256d(r, e); let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_hadd_pd(a, b); let e = _mm256_setr_pd(3., 11., 7., 15.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_hadd_ps() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let b = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let r = _mm256_hadd_ps(a, b); let e = _mm256_setr_ps(13., 41., 7., 7., 13., 41., 17., 114.); assert_eq_m256(r, e); let a = _mm256_setr_ps(1., 2., 3., 4., 1., 2., 3., 4.); let b = _mm256_setr_ps(5., 6., 7., 8., 5., 6., 7., 8.); let r = _mm256_hadd_ps(a, b); let e = _mm256_setr_ps(3., 7., 11., 15., 3., 7., 11., 15.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_hsub_pd() { let a = _mm256_setr_pd(4., 9., 16., 25.); let b = _mm256_setr_pd(4., 3., 2., 5.); let r = _mm256_hsub_pd(a, b); let e = _mm256_setr_pd(-5., 1., -9., -3.); assert_eq_m256d(r, e); let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_hsub_pd(a, b); let e = _mm256_setr_pd(-1., -1., -1., -1.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_hsub_ps() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let b = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let r = _mm256_hsub_ps(a, b); let e = _mm256_setr_ps(-5., -9., 1., -3., -5., -9., -1., 14.); assert_eq_m256(r, e); let a = _mm256_setr_ps(1., 2., 3., 4., 1., 2., 3., 4.); let b = _mm256_setr_ps(5., 6., 7., 8., 5., 6., 7., 8.); let r = _mm256_hsub_ps(a, b); let e = _mm256_setr_ps(-1., -1., -1., -1., -1., -1., -1., -1.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_xor_pd() { let a = _mm256_setr_pd(4., 9., 16., 25.); let b = _mm256_set1_pd(0.); let r = _mm256_xor_pd(a, b); assert_eq_m256d(r, a); } #[simd_test(enable = "avx")] unsafe fn test_mm256_xor_ps() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let b = _mm256_set1_ps(0.); let r = _mm256_xor_ps(a, b); assert_eq_m256(r, a); } #[simd_test(enable = "avx")] unsafe fn test_mm_cmp_pd() { let a = _mm_setr_pd(4., 9.); let b = _mm_setr_pd(4., 3.); let r = _mm_cmp_pd::<_CMP_GE_OS>(a, b); assert!(get_m128d(r, 0).is_nan()); assert!(get_m128d(r, 1).is_nan()); } #[simd_test(enable = "avx")] unsafe fn test_mm256_cmp_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_cmp_pd::<_CMP_GE_OS>(a, b); let e = _mm256_set1_pd(0.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm_cmp_ps() { let a = _mm_setr_ps(4., 3., 2., 5.); let b = _mm_setr_ps(4., 9., 16., 25.); let r = _mm_cmp_ps::<_CMP_GE_OS>(a, b); assert!(get_m128(r, 0).is_nan()); assert_eq!(get_m128(r, 1), 0.); assert_eq!(get_m128(r, 2), 0.); assert_eq!(get_m128(r, 3), 0.); } #[simd_test(enable = "avx")] unsafe fn test_mm256_cmp_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 1., 2., 3., 4.); let b = _mm256_setr_ps(5., 6., 7., 8., 5., 6., 7., 8.); let r = _mm256_cmp_ps::<_CMP_GE_OS>(a, b); let e = _mm256_set1_ps(0.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm_cmp_sd() { let a = _mm_setr_pd(4., 9.); let b = _mm_setr_pd(4., 3.); let r = _mm_cmp_sd::<_CMP_GE_OS>(a, b); assert!(get_m128d(r, 0).is_nan()); 
assert_eq!(get_m128d(r, 1), 9.); } #[simd_test(enable = "avx")] unsafe fn test_mm_cmp_ss() { let a = _mm_setr_ps(4., 3., 2., 5.); let b = _mm_setr_ps(4., 9., 16., 25.); let r = _mm_cmp_ss::<_CMP_GE_OS>(a, b); assert!(get_m128(r, 0).is_nan()); assert_eq!(get_m128(r, 1), 3.); assert_eq!(get_m128(r, 2), 2.); assert_eq!(get_m128(r, 3), 5.); } #[simd_test(enable = "avx")] unsafe fn test_mm256_cvtepi32_pd() { let a = _mm_setr_epi32(4, 9, 16, 25); let r = _mm256_cvtepi32_pd(a); let e = _mm256_setr_pd(4., 9., 16., 25.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_cvtepi32_ps() { let a = _mm256_setr_epi32(4, 9, 16, 25, 4, 9, 16, 25); let r = _mm256_cvtepi32_ps(a); let e = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_cvtpd_ps() { let a = _mm256_setr_pd(4., 9., 16., 25.); let r = _mm256_cvtpd_ps(a); let e = _mm_setr_ps(4., 9., 16., 25.); assert_eq_m128(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_cvtps_epi32() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let r = _mm256_cvtps_epi32(a); let e = _mm256_setr_epi32(4, 9, 16, 25, 4, 9, 16, 25); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_cvtps_pd() { let a = _mm_setr_ps(4., 9., 16., 25.); let r = _mm256_cvtps_pd(a); let e = _mm256_setr_pd(4., 9., 16., 25.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_cvttpd_epi32() { let a = _mm256_setr_pd(4., 9., 16., 25.); let r = _mm256_cvttpd_epi32(a); let e = _mm_setr_epi32(4, 9, 16, 25); assert_eq_m128i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_cvtpd_epi32() { let a = _mm256_setr_pd(4., 9., 16., 25.); let r = _mm256_cvtpd_epi32(a); let e = _mm_setr_epi32(4, 9, 16, 25); assert_eq_m128i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_cvttps_epi32() { let a = _mm256_setr_ps(4., 9., 16., 25., 4., 9., 16., 25.); let r = _mm256_cvttps_epi32(a); let e = _mm256_setr_epi32(4, 9, 16, 25, 4, 9, 16, 25); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_extractf128_ps() { let a = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let r = _mm256_extractf128_ps::<0>(a); let e = _mm_setr_ps(4., 3., 2., 5.); assert_eq_m128(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_extractf128_pd() { let a = _mm256_setr_pd(4., 3., 2., 5.); let r = _mm256_extractf128_pd::<0>(a); let e = _mm_setr_pd(4., 3.); assert_eq_m128d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_extractf128_si256() { let a = _mm256_setr_epi64x(4, 3, 2, 5); let r = _mm256_extractf128_si256::<0>(a); let e = _mm_setr_epi64x(4, 3); assert_eq_m128i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_zeroall() { _mm256_zeroall(); } #[simd_test(enable = "avx")] unsafe fn test_mm256_zeroupper() { _mm256_zeroupper(); } #[simd_test(enable = "avx")] unsafe fn test_mm256_permutevar_ps() { let a = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let b = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_permutevar_ps(a, b); let e = _mm256_setr_ps(3., 2., 5., 4., 9., 64., 50., 8.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm_permutevar_ps() { let a = _mm_setr_ps(4., 3., 2., 5.); let b = _mm_setr_epi32(1, 2, 3, 4); let r = _mm_permutevar_ps(a, b); let e = _mm_setr_ps(3., 2., 5., 4.); assert_eq_m128(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_permute_ps() { let a = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let r = _mm256_permute_ps::<0x1b>(a); let e 
= _mm256_setr_ps(5., 2., 3., 4., 50., 64., 9., 8.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm_permute_ps() { let a = _mm_setr_ps(4., 3., 2., 5.); let r = _mm_permute_ps::<0x1b>(a); let e = _mm_setr_ps(5., 2., 3., 4.); assert_eq_m128(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_permutevar_pd() { let a = _mm256_setr_pd(4., 3., 2., 5.); let b = _mm256_setr_epi64x(1, 2, 3, 4); let r = _mm256_permutevar_pd(a, b); let e = _mm256_setr_pd(4., 3., 5., 2.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm_permutevar_pd() { let a = _mm_setr_pd(4., 3.); let b = _mm_setr_epi64x(3, 0); let r = _mm_permutevar_pd(a, b); let e = _mm_setr_pd(3., 4.); assert_eq_m128d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_permute_pd() { let a = _mm256_setr_pd(4., 3., 2., 5.); let r = _mm256_permute_pd::<5>(a); let e = _mm256_setr_pd(3., 4., 5., 2.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm_permute_pd() { let a = _mm_setr_pd(4., 3.); let r = _mm_permute_pd::<1>(a); let e = _mm_setr_pd(3., 4.); assert_eq_m128d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_permute2f128_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 1., 2., 3., 4.); let b = _mm256_setr_ps(5., 6., 7., 8., 5., 6., 7., 8.); let r = _mm256_permute2f128_ps::<0x13>(a, b); let e = _mm256_setr_ps(5., 6., 7., 8., 1., 2., 3., 4.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_permute2f128_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_permute2f128_pd::<0x31>(a, b); let e = _mm256_setr_pd(3., 4., 7., 8.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_permute2f128_si256() { let a = _mm256_setr_epi32(1, 2, 3, 4, 1, 2, 3, 4); let b = _mm256_setr_epi32(5, 6, 7, 8, 5, 6, 7, 8); let r = _mm256_permute2f128_si256::<0x20>(a, b); let e = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_broadcast_ss() { let r = _mm256_broadcast_ss(&3.); let e = _mm256_set1_ps(3.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm_broadcast_ss() { let r = _mm_broadcast_ss(&3.); let e = _mm_set1_ps(3.); assert_eq_m128(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_broadcast_sd() { let r = _mm256_broadcast_sd(&3.); let e = _mm256_set1_pd(3.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_broadcast_ps() { let a = _mm_setr_ps(4., 3., 2., 5.); let r = _mm256_broadcast_ps(&a); let e = _mm256_setr_ps(4., 3., 2., 5., 4., 3., 2., 5.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_broadcast_pd() { let a = _mm_setr_pd(4., 3.); let r = _mm256_broadcast_pd(&a); let e = _mm256_setr_pd(4., 3., 4., 3.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_insertf128_ps() { let a = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let b = _mm_setr_ps(4., 9., 16., 25.); let r = _mm256_insertf128_ps::<0>(a, b); let e = _mm256_setr_ps(4., 9., 16., 25., 8., 9., 64., 50.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_insertf128_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm_setr_pd(5., 6.); let r = _mm256_insertf128_pd::<0>(a, b); let e = _mm256_setr_pd(5., 6., 3., 4.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_insertf128_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let b = _mm_setr_epi64x(5, 6); let r = 
_mm256_insertf128_si256::<0>(a, b); let e = _mm256_setr_epi64x(5, 6, 3, 4); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_insert_epi8() { #[rustfmt::skip] let a = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); let r = _mm256_insert_epi8::<31>(a, 0); #[rustfmt::skip] let e = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0, ); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_insert_epi16() { #[rustfmt::skip] let a = _mm256_setr_epi16( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ); let r = _mm256_insert_epi16::<15>(a, 0); #[rustfmt::skip] let e = _mm256_setr_epi16( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0, ); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_insert_epi32() { let a = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8); let r = _mm256_insert_epi32::<7>(a, 0); let e = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 0); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_load_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let p = &a as *const _ as *const f64; let r = _mm256_load_pd(p); let e = _mm256_setr_pd(1., 2., 3., 4.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_store_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let mut r = _mm256_undefined_pd(); _mm256_store_pd(&mut r as *mut _ as *mut f64, a); assert_eq_m256d(r, a); } #[simd_test(enable = "avx")] unsafe fn test_mm256_load_ps() { let a = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let p = &a as *const _ as *const f32; let r = _mm256_load_ps(p); let e = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_store_ps() { let a = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); let mut r = _mm256_undefined_ps(); _mm256_store_ps(&mut r as *mut _ as *mut f32, a); assert_eq_m256(r, a); } #[simd_test(enable = "avx")] unsafe fn test_mm256_loadu_pd() { let a = &[1.0f64, 2., 3., 4.]; let p = a.as_ptr(); let r = _mm256_loadu_pd(black_box(p)); let e = _mm256_setr_pd(1., 2., 3., 4.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_storeu_pd() { let a = _mm256_set1_pd(9.); let mut r = _mm256_undefined_pd(); _mm256_storeu_pd(&mut r as *mut _ as *mut f64, a); assert_eq_m256d(r, a); } #[simd_test(enable = "avx")] unsafe fn test_mm256_loadu_ps() { let a = &[4., 3., 2., 5., 8., 9., 64., 50.]; let p = a.as_ptr(); let r = _mm256_loadu_ps(black_box(p)); let e = _mm256_setr_ps(4., 3., 2., 5., 8., 9., 64., 50.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_storeu_ps() { let a = _mm256_set1_ps(9.); let mut r = _mm256_undefined_ps(); _mm256_storeu_ps(&mut r as *mut _ as *mut f32, a); assert_eq_m256(r, a); } #[simd_test(enable = "avx")] unsafe fn test_mm256_load_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let p = &a as *const _; let r = _mm256_load_si256(p); let e = _mm256_setr_epi64x(1, 2, 3, 4); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_store_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let mut r = _mm256_undefined_si256(); _mm256_store_si256(&mut r as *mut _, a); assert_eq_m256i(r, a); } #[simd_test(enable = "avx")] unsafe fn test_mm256_loadu_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let p = &a as *const _; let r = _mm256_loadu_si256(black_box(p)); let e 
= _mm256_setr_epi64x(1, 2, 3, 4); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_storeu_si256() { let a = _mm256_set1_epi8(9); let mut r = _mm256_undefined_si256(); _mm256_storeu_si256(&mut r as *mut _, a); assert_eq_m256i(r, a); } #[simd_test(enable = "avx")] unsafe fn test_mm256_maskload_pd() { let a = &[1.0f64, 2., 3., 4.]; let p = a.as_ptr(); let mask = _mm256_setr_epi64x(0, !0, 0, !0); let r = _mm256_maskload_pd(black_box(p), mask); let e = _mm256_setr_pd(0., 2., 0., 4.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_maskstore_pd() { let mut r = _mm256_set1_pd(0.); let mask = _mm256_setr_epi64x(0, !0, 0, !0); let a = _mm256_setr_pd(1., 2., 3., 4.); _mm256_maskstore_pd(&mut r as *mut _ as *mut f64, mask, a); let e = _mm256_setr_pd(0., 2., 0., 4.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm_maskload_pd() { let a = &[1.0f64, 2.]; let p = a.as_ptr(); let mask = _mm_setr_epi64x(0, !0); let r = _mm_maskload_pd(black_box(p), mask); let e = _mm_setr_pd(0., 2.); assert_eq_m128d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm_maskstore_pd() { let mut r = _mm_set1_pd(0.); let mask = _mm_setr_epi64x(0, !0); let a = _mm_setr_pd(1., 2.); _mm_maskstore_pd(&mut r as *mut _ as *mut f64, mask, a); let e = _mm_setr_pd(0., 2.); assert_eq_m128d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_maskload_ps() { let a = &[1.0f32, 2., 3., 4., 5., 6., 7., 8.]; let p = a.as_ptr(); let mask = _mm256_setr_epi32(0, !0, 0, !0, 0, !0, 0, !0); let r = _mm256_maskload_ps(black_box(p), mask); let e = _mm256_setr_ps(0., 2., 0., 4., 0., 6., 0., 8.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_maskstore_ps() { let mut r = _mm256_set1_ps(0.); let mask = _mm256_setr_epi32(0, !0, 0, !0, 0, !0, 0, !0); let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); _mm256_maskstore_ps(&mut r as *mut _ as *mut f32, mask, a); let e = _mm256_setr_ps(0., 2., 0., 4., 0., 6., 0., 8.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm_maskload_ps() { let a = &[1.0f32, 2., 3., 4.]; let p = a.as_ptr(); let mask = _mm_setr_epi32(0, !0, 0, !0); let r = _mm_maskload_ps(black_box(p), mask); let e = _mm_setr_ps(0., 2., 0., 4.); assert_eq_m128(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm_maskstore_ps() { let mut r = _mm_set1_ps(0.); let mask = _mm_setr_epi32(0, !0, 0, !0); let a = _mm_setr_ps(1., 2., 3., 4.); _mm_maskstore_ps(&mut r as *mut _ as *mut f32, mask, a); let e = _mm_setr_ps(0., 2., 0., 4.); assert_eq_m128(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_movehdup_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_movehdup_ps(a); let e = _mm256_setr_ps(2., 2., 4., 4., 6., 6., 8., 8.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_moveldup_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_moveldup_ps(a); let e = _mm256_setr_ps(1., 1., 3., 3., 5., 5., 7., 7.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_movedup_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let r = _mm256_movedup_pd(a); let e = _mm256_setr_pd(1., 1., 3., 3.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_lddqu_si256() { #[rustfmt::skip] let a = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); let p = &a as *const _; let r = _mm256_lddqu_si256(black_box(p)); #[rustfmt::skip] 
let e = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_stream_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let mut r = _mm256_undefined_si256(); _mm256_stream_si256(&mut r as *mut _, a); assert_eq_m256i(r, a); } #[simd_test(enable = "avx")] unsafe fn test_mm256_stream_pd() { #[repr(align(32))] struct Memory { pub data: [f64; 4], } let a = _mm256_set1_pd(7.0); let mut mem = Memory { data: [-1.0; 4] }; _mm256_stream_pd(&mut mem.data[0] as *mut f64, a); for i in 0..4 { assert_eq!(mem.data[i], get_m256d(a, i)); } } #[simd_test(enable = "avx")] unsafe fn test_mm256_stream_ps() { #[repr(align(32))] struct Memory { pub data: [f32; 8], } let a = _mm256_set1_ps(7.0); let mut mem = Memory { data: [-1.0; 8] }; _mm256_stream_ps(&mut mem.data[0] as *mut f32, a); for i in 0..8 { assert_eq!(mem.data[i], get_m256(a, i)); } } #[simd_test(enable = "avx")] unsafe fn test_mm256_rcp_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_rcp_ps(a); #[rustfmt::skip] let e = _mm256_setr_ps( 0.99975586, 0.49987793, 0.33325195, 0.24993896, 0.19995117, 0.16662598, 0.14282227, 0.12496948, ); let rel_err = 0.00048828125; for i in 0..8 { assert_approx_eq!(get_m256(r, i), get_m256(e, i), 2. * rel_err); } } #[simd_test(enable = "avx")] unsafe fn test_mm256_rsqrt_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_rsqrt_ps(a); #[rustfmt::skip] let e = _mm256_setr_ps( 0.99975586, 0.7069092, 0.5772705, 0.49987793, 0.44714355, 0.40820313, 0.3779297, 0.3534546, ); let rel_err = 0.00048828125; for i in 0..8 { assert_approx_eq!(get_m256(r, i), get_m256(e, i), 2. * rel_err); } } #[simd_test(enable = "avx")] unsafe fn test_mm256_unpackhi_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_unpackhi_pd(a, b); let e = _mm256_setr_pd(2., 6., 4., 8.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_unpackhi_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_setr_ps(9., 10., 11., 12., 13., 14., 15., 16.); let r = _mm256_unpackhi_ps(a, b); let e = _mm256_setr_ps(3., 11., 4., 12., 7., 15., 8., 16.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_unpacklo_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_unpacklo_pd(a, b); let e = _mm256_setr_pd(1., 5., 3., 7.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_unpacklo_ps() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let b = _mm256_setr_ps(9., 10., 11., 12., 13., 14., 15., 16.); let r = _mm256_unpacklo_ps(a, b); let e = _mm256_setr_ps(1., 9., 2., 10., 5., 13., 6., 14.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_testz_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let b = _mm256_setr_epi64x(5, 6, 7, 8); let r = _mm256_testz_si256(a, b); assert_eq!(r, 0); let b = _mm256_set1_epi64x(0); let r = _mm256_testz_si256(a, b); assert_eq!(r, 1); } #[simd_test(enable = "avx")] unsafe fn test_mm256_testc_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let b = _mm256_setr_epi64x(5, 6, 7, 8); let r = _mm256_testc_si256(a, b); assert_eq!(r, 0); let b = _mm256_set1_epi64x(0); let r = _mm256_testc_si256(a, b); assert_eq!(r, 1); } #[simd_test(enable = "avx")] unsafe fn test_mm256_testnzc_si256() { let a = _mm256_setr_epi64x(1, 2, 3, 
4); let b = _mm256_setr_epi64x(5, 6, 7, 8); let r = _mm256_testnzc_si256(a, b); assert_eq!(r, 1); let a = _mm256_setr_epi64x(0, 0, 0, 0); let b = _mm256_setr_epi64x(0, 0, 0, 0); let r = _mm256_testnzc_si256(a, b); assert_eq!(r, 0); } #[simd_test(enable = "avx")] unsafe fn test_mm256_testz_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_testz_pd(a, b); assert_eq!(r, 1); let a = _mm256_set1_pd(-1.); let r = _mm256_testz_pd(a, a); assert_eq!(r, 0); } #[simd_test(enable = "avx")] unsafe fn test_mm256_testc_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_testc_pd(a, b); assert_eq!(r, 1); let a = _mm256_set1_pd(1.); let b = _mm256_set1_pd(-1.); let r = _mm256_testc_pd(a, b); assert_eq!(r, 0); } #[simd_test(enable = "avx")] unsafe fn test_mm256_testnzc_pd() { let a = _mm256_setr_pd(1., 2., 3., 4.); let b = _mm256_setr_pd(5., 6., 7., 8.); let r = _mm256_testnzc_pd(a, b); assert_eq!(r, 0); let a = _mm256_setr_pd(1., -1., -1., -1.); let b = _mm256_setr_pd(-1., -1., 1., 1.); let r = _mm256_testnzc_pd(a, b); assert_eq!(r, 1); } #[simd_test(enable = "avx")] unsafe fn test_mm_testz_pd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 6.); let r = _mm_testz_pd(a, b); assert_eq!(r, 1); let a = _mm_set1_pd(-1.); let r = _mm_testz_pd(a, a); assert_eq!(r, 0); } #[simd_test(enable = "avx")] unsafe fn test_mm_testc_pd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 6.); let r = _mm_testc_pd(a, b); assert_eq!(r, 1); let a = _mm_set1_pd(1.); let b = _mm_set1_pd(-1.); let r = _mm_testc_pd(a, b); assert_eq!(r, 0); } #[simd_test(enable = "avx")] unsafe fn test_mm_testnzc_pd() { let a = _mm_setr_pd(1., 2.); let b = _mm_setr_pd(5., 6.); let r = _mm_testnzc_pd(a, b); assert_eq!(r, 0); let a = _mm_setr_pd(1., -1.); let b = _mm_setr_pd(-1., -1.); let r = _mm_testnzc_pd(a, b); assert_eq!(r, 1); } #[simd_test(enable = "avx")] unsafe fn test_mm256_testz_ps() { let a = _mm256_set1_ps(1.); let r = _mm256_testz_ps(a, a); assert_eq!(r, 1); let a = _mm256_set1_ps(-1.); let r = _mm256_testz_ps(a, a); assert_eq!(r, 0); } #[simd_test(enable = "avx")] unsafe fn test_mm256_testc_ps() { let a = _mm256_set1_ps(1.); let r = _mm256_testc_ps(a, a); assert_eq!(r, 1); let b = _mm256_set1_ps(-1.); let r = _mm256_testc_ps(a, b); assert_eq!(r, 0); } #[simd_test(enable = "avx")] unsafe fn test_mm256_testnzc_ps() { let a = _mm256_set1_ps(1.); let r = _mm256_testnzc_ps(a, a); assert_eq!(r, 0); let a = _mm256_setr_ps(1., -1., -1., -1., -1., -1., -1., -1.); let b = _mm256_setr_ps(-1., -1., 1., 1., 1., 1., 1., 1.); let r = _mm256_testnzc_ps(a, b); assert_eq!(r, 1); } #[simd_test(enable = "avx")] unsafe fn test_mm_testz_ps() { let a = _mm_set1_ps(1.); let r = _mm_testz_ps(a, a); assert_eq!(r, 1); let a = _mm_set1_ps(-1.); let r = _mm_testz_ps(a, a); assert_eq!(r, 0); } #[simd_test(enable = "avx")] unsafe fn test_mm_testc_ps() { let a = _mm_set1_ps(1.); let r = _mm_testc_ps(a, a); assert_eq!(r, 1); let b = _mm_set1_ps(-1.); let r = _mm_testc_ps(a, b); assert_eq!(r, 0); } #[simd_test(enable = "avx")] unsafe fn test_mm_testnzc_ps() { let a = _mm_set1_ps(1.); let r = _mm_testnzc_ps(a, a); assert_eq!(r, 0); let a = _mm_setr_ps(1., -1., -1., -1.); let b = _mm_setr_ps(-1., -1., 1., 1.); let r = _mm_testnzc_ps(a, b); assert_eq!(r, 1); } #[simd_test(enable = "avx")] unsafe fn test_mm256_movemask_pd() { let a = _mm256_setr_pd(1., -2., 3., -4.); let r = _mm256_movemask_pd(a); assert_eq!(r, 0xA); } #[simd_test(enable = "avx")] unsafe fn 
test_mm256_movemask_ps() { let a = _mm256_setr_ps(1., -2., 3., -4., 1., -2., 3., -4.); let r = _mm256_movemask_ps(a); assert_eq!(r, 0xAA); } #[simd_test(enable = "avx")] unsafe fn test_mm256_setzero_pd() { let r = _mm256_setzero_pd(); assert_eq_m256d(r, _mm256_set1_pd(0.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_setzero_ps() { let r = _mm256_setzero_ps(); assert_eq_m256(r, _mm256_set1_ps(0.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_setzero_si256() { let r = _mm256_setzero_si256(); assert_eq_m256i(r, _mm256_set1_epi8(0)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set_pd() { let r = _mm256_set_pd(1., 2., 3., 4.); assert_eq_m256d(r, _mm256_setr_pd(4., 3., 2., 1.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set_ps() { let r = _mm256_set_ps(1., 2., 3., 4., 5., 6., 7., 8.); assert_eq_m256(r, _mm256_setr_ps(8., 7., 6., 5., 4., 3., 2., 1.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set_epi8() { #[rustfmt::skip] let r = _mm256_set_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); #[rustfmt::skip] let e = _mm256_setr_epi8( 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 ); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set_epi16() { #[rustfmt::skip] let r = _mm256_set_epi16( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, ); #[rustfmt::skip] let e = _mm256_setr_epi16( 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, ); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set_epi32() { let r = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); assert_eq_m256i(r, _mm256_setr_epi32(8, 7, 6, 5, 4, 3, 2, 1)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set_epi64x()
#[simd_test(enable = "avx")] unsafe fn test_mm256_setr_pd() { let r = _mm256_setr_pd(1., 2., 3., 4.); assert_eq_m256d(r, _mm256_setr_pd(1., 2., 3., 4.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_setr_ps() { let r = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); assert_eq_m256(r, _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_setr_epi8() { #[rustfmt::skip] let r = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); #[rustfmt::skip] let e = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32 ); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_setr_epi16() { #[rustfmt::skip] let r = _mm256_setr_epi16( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, ); #[rustfmt::skip] let e = _mm256_setr_epi16( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, ); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_setr_epi32() { let r = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8); assert_eq_m256i(r, _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_setr_epi64x() { let r = _mm256_setr_epi64x(1, 2, 3, 4); assert_eq_m256i(r, _mm256_setr_epi64x(1, 2, 3, 4)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set1_pd() { let r = _mm256_set1_pd(1.); assert_eq_m256d(r, _mm256_set1_pd(1.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set1_ps() { let r = _mm256_set1_ps(1.); assert_eq_m256(r, _mm256_set1_ps(1.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set1_epi8() { let r = _mm256_set1_epi8(1); assert_eq_m256i(r, _mm256_set1_epi8(1)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set1_epi16() { let r = _mm256_set1_epi16(1); assert_eq_m256i(r, _mm256_set1_epi16(1)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set1_epi32() { let r = _mm256_set1_epi32(1); assert_eq_m256i(r, _mm256_set1_epi32(1)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set1_epi64x() { let r = _mm256_set1_epi64x(1); assert_eq_m256i(r, _mm256_set1_epi64x(1)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_castpd_ps() { let a = _mm256_setr_pd(1., 2., 3., 4.); let r = _mm256_castpd_ps(a); let e = _mm256_setr_ps(0., 1.875, 0., 2., 0., 2.125, 0., 2.25); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_castps_pd() { let a = _mm256_setr_ps(0., 1.875, 0., 2., 0., 2.125, 0., 2.25); let r = _mm256_castps_pd(a); let e = _mm256_setr_pd(1., 2., 3., 4.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_castps_si256() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_castps_si256(a); #[rustfmt::skip] let e = _mm256_setr_epi8( 0, 0, -128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, -128, 64, 0, 0, -96, 64, 0, 0, -64, 64, 0, 0, -32, 64, 0, 0, 0, 65, ); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_castsi256_ps() { #[rustfmt::skip] let a = _mm256_setr_epi8( 0, 0, -128, 63, 0, 0, 0, 64, 0, 0, 64, 64, 0, 0, -128, 64, 0, 0, -96, 64, 0, 0, -64, 64, 0, 0, -32, 64, 0, 0, 0, 65, ); let r = _mm256_castsi256_ps(a); let e = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_castpd_si256() { let a = _mm256_setr_pd(1., 2., 3., 4.); let r = _mm256_castpd_si256(a); assert_eq_m256d(transmute(r), a); } #[simd_test(enable = "avx")] unsafe 
fn test_mm256_castsi256_pd() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let r = _mm256_castsi256_pd(a); assert_eq_m256d(r, transmute(a)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_castps256_ps128() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_castps256_ps128(a); assert_eq_m128(r, _mm_setr_ps(1., 2., 3., 4.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_castpd256_pd128() { let a = _mm256_setr_pd(1., 2., 3., 4.); let r = _mm256_castpd256_pd128(a); assert_eq_m128d(r, _mm_setr_pd(1., 2.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_castsi256_si128() { let a = _mm256_setr_epi64x(1, 2, 3, 4); let r = _mm256_castsi256_si128(a); assert_eq_m128i(r, _mm_setr_epi64x(1, 2)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_zextps128_ps256() { let a = _mm_setr_ps(1., 2., 3., 4.); let r = _mm256_zextps128_ps256(a); let e = _mm256_setr_ps(1., 2., 3., 4., 0., 0., 0., 0.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_zextsi128_si256() { let a = _mm_setr_epi64x(1, 2); let r = _mm256_zextsi128_si256(a); let e = _mm256_setr_epi64x(1, 2, 0, 0); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_zextpd128_pd256() { let a = _mm_setr_pd(1., 2.); let r = _mm256_zextpd128_pd256(a); let e = _mm256_setr_pd(1., 2., 0., 0.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set_m128() { let hi = _mm_setr_ps(5., 6., 7., 8.); let lo = _mm_setr_ps(1., 2., 3., 4.); let r = _mm256_set_m128(hi, lo); let e = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set_m128d() { let hi = _mm_setr_pd(3., 4.); let lo = _mm_setr_pd(1., 2.); let r = _mm256_set_m128d(hi, lo); let e = _mm256_setr_pd(1., 2., 3., 4.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_set_m128i() { #[rustfmt::skip] let hi = _mm_setr_epi8( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); #[rustfmt::skip] let lo = _mm_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, ); let r = _mm256_set_m128i(hi, lo); #[rustfmt::skip] let e = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_setr_m128() { let lo = _mm_setr_ps(1., 2., 3., 4.); let hi = _mm_setr_ps(5., 6., 7., 8.); let r = _mm256_setr_m128(lo, hi); let e = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_setr_m128d() { let lo = _mm_setr_pd(1., 2.); let hi = _mm_setr_pd(3., 4.); let r = _mm256_setr_m128d(lo, hi); let e = _mm256_setr_pd(1., 2., 3., 4.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_setr_m128i() { #[rustfmt::skip] let lo = _mm_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, ); #[rustfmt::skip] let hi = _mm_setr_epi8( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); let r = _mm256_setr_m128i(lo, hi); #[rustfmt::skip] let e = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_loadu2_m128() { let hi = &[5., 6., 7., 8.]; let hiaddr = hi.as_ptr(); let lo = &[1., 2., 3., 4.]; let loaddr = lo.as_ptr(); let r = _mm256_loadu2_m128(hiaddr, loaddr); let e = _mm256_setr_ps(1., 2., 3., 4., 
5., 6., 7., 8.); assert_eq_m256(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_loadu2_m128d() { let hi = &[3., 4.]; let hiaddr = hi.as_ptr(); let lo = &[1., 2.]; let loaddr = lo.as_ptr(); let r = _mm256_loadu2_m128d(hiaddr, loaddr); let e = _mm256_setr_pd(1., 2., 3., 4.); assert_eq_m256d(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_loadu2_m128i() { #[rustfmt::skip] let hi = _mm_setr_epi8( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); #[rustfmt::skip] let lo = _mm_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, ); let r = _mm256_loadu2_m128i(&hi as *const _ as *const _, &lo as *const _ as *const _); #[rustfmt::skip] let e = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); assert_eq_m256i(r, e); } #[simd_test(enable = "avx")] unsafe fn test_mm256_storeu2_m128() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let mut hi = _mm_undefined_ps(); let mut lo = _mm_undefined_ps(); _mm256_storeu2_m128( &mut hi as *mut _ as *mut f32, &mut lo as *mut _ as *mut f32, a, ); assert_eq_m128(hi, _mm_setr_ps(5., 6., 7., 8.)); assert_eq_m128(lo, _mm_setr_ps(1., 2., 3., 4.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_storeu2_m128d() { let a = _mm256_setr_pd(1., 2., 3., 4.); let mut hi = _mm_undefined_pd(); let mut lo = _mm_undefined_pd(); _mm256_storeu2_m128d( &mut hi as *mut _ as *mut f64, &mut lo as *mut _ as *mut f64, a, ); assert_eq_m128d(hi, _mm_setr_pd(3., 4.)); assert_eq_m128d(lo, _mm_setr_pd(1., 2.)); } #[simd_test(enable = "avx")] unsafe fn test_mm256_storeu2_m128i() { #[rustfmt::skip] let a = _mm256_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, ); let mut hi = _mm_undefined_si128(); let mut lo = _mm_undefined_si128(); _mm256_storeu2_m128i(&mut hi as *mut _, &mut lo as *mut _, a); #[rustfmt::skip] let e_hi = _mm_setr_epi8( 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32 ); #[rustfmt::skip] let e_lo = _mm_setr_epi8( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ); assert_eq_m128i(hi, e_hi); assert_eq_m128i(lo, e_lo); } #[simd_test(enable = "avx")] unsafe fn test_mm256_cvtss_f32() { let a = _mm256_setr_ps(1., 2., 3., 4., 5., 6., 7., 8.); let r = _mm256_cvtss_f32(a); assert_eq!(r, 1.); } }
{ let r = _mm256_set_epi64x(1, 2, 3, 4); assert_eq_m256i(r, _mm256_setr_epi64x(4, 3, 2, 1)); }
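The middle above hinges on the argument-order convention of the set/setr constructors: _mm256_set_epi64x takes lanes high-to-low while _mm256_setr_epi64x takes them low-to-high, so set(1, 2, 3, 4) and setr(4, 3, 2, 1) build the same vector. A minimal standalone sketch (not part of the stdarch test suite; assumes an x86_64 host and uses only stable std::arch intrinsics) that checks this by storing both vectors to memory:

// Hedged sketch, not from the original file: confirms that
// _mm256_set_epi64x (high-to-low) and _mm256_setr_epi64x (low-to-high)
// agree once the argument order is reversed, mirroring the test above.
#[cfg(target_arch = "x86_64")]
fn main() {
    if is_x86_feature_detected!("avx") {
        use std::arch::x86_64::*;
        unsafe {
            let a = _mm256_set_epi64x(1, 2, 3, 4); // lanes low..high: 4, 3, 2, 1
            let b = _mm256_setr_epi64x(4, 3, 2, 1); // lanes low..high: 4, 3, 2, 1
            let (mut la, mut lb) = ([0i64; 4], [0i64; 4]);
            _mm256_storeu_si256(la.as_mut_ptr().cast(), a);
            _mm256_storeu_si256(lb.as_mut_ptr().cast(), b);
            assert_eq!(la, lb); // both come out as [4, 3, 2, 1]
        }
    }
}
#[cfg(not(target_arch = "x86_64"))]
fn main() {}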
build.py
from __future__ import division, absolute_import, print_function import os import sys from distutils.command.build import build as old_build from distutils.util import get_platform from numpy.distutils.command.config_compiler import show_fortran_compilers class build(old_build): sub_commands = [('config_cc', lambda *args: True), ('config_fc', lambda *args: True), ('build_src', old_build.has_ext_modules), ] + old_build.sub_commands user_options = old_build.user_options + [ ('fcompiler=', None, "specify the Fortran compiler type"), ('jobs=', 'j',
('help-fcompiler', None, "list available Fortran compilers", show_fortran_compilers), ] def initialize_options(self): old_build.initialize_options(self) self.fcompiler = None self.jobs = None def finalize_options(self): if self.jobs: try: self.jobs = int(self.jobs) except ValueError: raise ValueError("--jobs/-j argument must be an integer") build_scripts = self.build_scripts old_build.finalize_options(self) plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3]) if build_scripts is None: self.build_scripts = os.path.join(self.build_base, 'scripts' + plat_specifier) def run(self): old_build.run(self)
"number of parallel jobs"), ] help_options = old_build.help_options + [
support_types.py
# Collection of supporting functions for wrapper functions __author__ = 'AndrewAnnex' from ctypes import c_char_p, c_bool, c_int, c_double, c_char, c_void_p, sizeof, \ POINTER, pointer, Array, create_string_buffer, create_unicode_buffer, cast, Structure, \ CFUNCTYPE, string_at import numpy from numpy import ctypeslib as numpc import six errorformat = """ ================================================================================ Toolkit version: {tkvsn} {short} -- {explain} {long} {traceback} ================================================================================\ """ class SpiceyError(Exception): """ SpiceyError wraps CSPICE errors. :type value: str """ def __init__(self, value): self.value = value def __str__(self): return self.value def toDoubleVector(x): return DoubleArray.from_param(param=x) def toDoubleMatrix(x): return DoubleMatrix.from_param(param=x) def toIntVector(x): return IntArray.from_param(param=x) def toBoolVector(x): return BoolArray.from_param(param=x) def toPythonString(inString): if six.PY2: if isinstance(inString, c_char_p): return toPythonString(inString.value) return string_at(inString) elif six.PY3: if isinstance(inString, c_char_p): return toPythonString(inString.value) return bytes.decode(string_at(inString)) def listtocharvector(x): assert (isinstance(x, list)) return (c_char_p * len(x))(*[stringToCharP(y) for y in x]) def charvector(ndim=1, lenvals=10): return ((c_char * lenvals) * ndim)() def listtodoublematrix(data, x=3, y=3): matrix = ((c_double * x) * y)() for i, row in enumerate(data): matrix[i] = tuple(row) return matrix def emptyCharArray(xLen=None, yLen=None): if not yLen: yLen = 1 if not xLen: xLen = 1 if isinstance(xLen, c_int): xLen = xLen.value if isinstance(yLen, c_int): yLen = yLen.value return ((c_char * xLen) * yLen)() def emptyDoubleMatrix(x=3, y=3): return ((c_double * x) * y)() def emptyDoubleVector(n): if isinstance(n, c_int): n = n.value assert(isinstance(n, int)) return (c_double * n)() def emptyIntVector(n): if isinstance(n, c_int): n = n.value assert (isinstance(n, int)) return (c_int * n)() def vectorToList(x): if isinstance(x[0], bool): return numpy.fromiter(x, numpy.bool, count=len(x)) elif isinstance(x[0], int): return numpy.fromiter(x, numpy.int_, count=len(x)) elif isinstance(x[0], float): return numpy.fromiter(x, numpy.float64, count=len(x)) elif isinstance(x[0].value, bytes): return [toPythonString(y) for y in x] def matrixToList(x): return numpc.as_array(x) def stringToCharP(inobject, inlen=None): """ :param inobject: input string, or an int to build a blank string of that length :param inlen: optional length for the buffer created from a given string :return: a ctypes char buffer or c_char_p wrapping the encoded input """ if inlen and isinstance(inobject, str): return create_string_buffer(inobject.encode(encoding='UTF-8'), inlen) if isinstance(inobject, bytes): return inobject if isinstance(inobject, c_int): return stringToCharP(" " * inobject.value) if isinstance(inobject, int): return stringToCharP(" " * inobject) return c_char_p(inobject.encode(encoding='UTF-8')) def listToCharArray(inList, xLen=None, yLen=None): assert (isinstance(inList, list)) if not yLen: yLen = len(inList) if not xLen: xLen = max(len(s) for s in inList) + 1 if isinstance(xLen, c_int): xLen = xLen.value if isinstance(yLen, c_int): yLen = yLen.value return ((c_char * xLen) * yLen)(*[stringToCharP(l, inlen=xLen) for l in inList]) def listToCharArrayPtr(inList, xLen=None, yLen=None): assert (isinstance(inList, list)) if not yLen: yLen = len(inList) if not xLen: xLen = max(len(s) for s in inList) + 1 if
isinstance(xLen, c_int): xLen = xLen.value if isinstance(yLen, c_int): yLen = yLen.value return cast(((c_char * xLen) * yLen)(*[stringToCharP(l, inlen=xLen) for l in inList]), c_char_p) class DoubleArrayType: # Class type that will handle all double vectors, inspiration from python cookbook 3rd edition def from_param(self, param): typename = type(param).__name__ if hasattr(self, 'from_' + typename): return getattr(self, 'from_' + typename)(param) elif isinstance(param, Array): return param else: raise TypeError("Can't convert %s" % typename) # Cast from lists/tuples def from_list(self, param): val = ((c_double) * len(param))(*param) return val # Cast from Tuple def from_tuple(self, param): val = ((c_double) * len(param))(*param) return val # Cast from a numpy array def from_ndarray(self, param): # return param.data_as(POINTER(c_double)) # the above older method does not work with functions which take vectors of known size return numpy.ctypeslib.as_ctypes(param) # Cast from array.array objects def from_array(self, param): if param.typecode != 'd': raise TypeError('must be an array of doubles') ptr, _ = param.buffer_info() return cast(ptr, POINTER(c_double)) class DoubleMatrixType: # Class type that will handle all double matrices, inspiration from python cookbook 3rd edition def from_param(self, param): typename = type(param).__name__ if hasattr(self, 'from_' + typename): return getattr(self, 'from_' + typename)(param) elif isinstance(param, Array): return param else: raise TypeError("Can't convert %s" % typename) # Cast from lists/tuples def from_list(self, param): val = ((c_double * len(param[0])) * len(param))(*[DoubleArray.from_param(x) for x in param]) return val # Cast from Tuple def from_tuple(self, param): val = ((c_double * len(param[0])) * len(param))(*[DoubleArray.from_param(x) for x in param]) return val # Cast from a numpy array def from_ndarray(self, param): #return param.data_as(POINTER(c_double)) return numpy.ctypeslib.as_ctypes(param) # Cast from a numpy matrix def from_matrix(self, param): #return param.data_as(POINTER(c_double)) return numpy.ctypeslib.as_ctypes(param) class IntArrayType: # Class type that will handle all int vectors, inspiration from python cookbook 3rd edition def from_param(self, param): typename = type(param).__name__ if hasattr(self, 'from_' + typename): return getattr(self, 'from_' + typename)(param) elif isinstance(param, Array): return param else: raise TypeError("Can't convert %s" % typename) # Cast from lists/tuples def from_list(self, param): val = ((c_int) * len(param))(*param) return val # Cast from Tuple def from_tuple(self, param): val = ((c_int) * len(param))(*param) return val # Cast from a numpy array def from_ndarray(self, param): #return param.data_as(POINTER(c_int)) # not sure if long is same as int, it should be..
#return numpy.ctypeslib.as_ctypes(param) return self.from_param(param.tolist()) # Cast from array.array objects def from_array(self, param): if param.typecode != 'i': raise TypeError('must be an array of ints') ptr, _ = param.buffer_info() return cast(ptr, POINTER(c_int)) class BoolArrayType: # Class type that will handle all bool vectors, inspiration from python cookbook 3rd edition def from_param(self, param): typename = type(param).__name__ if hasattr(self, 'from_' + typename): return getattr(self, 'from_' + typename)(param) elif isinstance(param, Array): return param else: raise TypeError("Can't convert %s" % typename) # Cast from lists/tuples def from_list(self, param): val = ((c_bool) * len(param))(*param) return val # Cast from Tuple def from_tuple(self, param): val = ((c_bool) * len(param))(*param) return val # Cast from a numpy array def from_ndarray(self, param): #return param.data_as(POINTER(c_int)) # not sure if long is same as int, it should be.. #return numpy.ctypeslib.as_ctypes(param) return self.from_param(param.tolist()) DoubleArray = DoubleArrayType() IntArray = IntArrayType() BoolArray = BoolArrayType() DoubleMatrix = DoubleMatrixType() class Plane(Structure): _fields_ = [ ('_normal', c_double * 3), ('_constant', c_double) ] @property def normal(self): return vectorToList(self._normal) @property def constant(self): return self._constant def __str__(self): return '<Plane: normal=%s; constant=%s>' % (', '.join([str(x) for x in self._normal]), self._constant) class Ellipse(Structure): _fields_ = [ ('_center', c_double * 3), ('_semi_major', c_double * 3), ('_semi_minor', c_double * 3) ] @property def center(self): return vectorToList(self._center) @property def semi_major(self): return vectorToList(self._semi_major) @property def semi_minor(self): return vectorToList(self._semi_minor) def __str__(self): return '<SpiceEllipse: center = %s, semi_major = %s, semi_minor = %s>' % \ (self.center, self.semi_major, self.semi_minor) class DataType(object): SPICE_CHR = 0 SPICE_DP = 1 SPICE_INT = 2 SPICE_TIME = 3 SPICE_BOOL = 4 CHR = 0 DP = 1 INT = 2 TIME = 3 BOOL = 4 def __init__(self): pass class SpiceEKDataType(c_int): _fields_ = [ ('SPICE_CHR', c_int(0)), ('SPICE_DP', c_int(1)), ('SPICE_INT', c_int(2)), ('SPICE_TIME', c_int(3)), ('SPICE_BOOL', c_int(4)), ] class SpiceEKExprClass(c_int): _fields_ = [ ('SPICE_EK_EXP_COL', c_int(0)), ('SPICE_EK_EXP_FUNC', c_int(1)), ('SPICE_EK_EXP_EXPR', c_int(2)) ] class SpiceEKAttDsc(Structure): _fields_ = [ ('_cclass', c_int), ('_dtype', SpiceEKDataType), ('_strlen', c_int), ('_size', c_int), ('_indexd', c_bool), ('_nullok', c_bool) ] @property def cclass(self): return self._cclass @property def dtype(self): return self._dtype.value @property def strlen(self): return self._strlen @property def size(self): return self._size @property def indexd(self): return self._indexd @property def nullok(self): return self._nullok def __str__(self): return '<SpiceEKAttDsc cclass = %s, dtype = %s, strlen = %s, size = %s, indexd = %s, nullok = %s >' % \ (self.cclass, self.dtype, self.strlen, self.size, self.indexd, self.nullok) class SpiceEKSegSum(Structure): _fields_ = [ ('_tabnam', c_char * 65), ('_nrows', c_int), ('_ncols', c_int), ('_cnames', (c_char * 100) * 33), ('_cdescrs', SpiceEKAttDsc * 100) ] @property def tabnam(self): return toPythonString(self._tabnam) @property def nrows(self): return self._nrows @property def ncols(self): return self._ncols @property def cnames(self): return vectorToList(self._cnames)[0:self.ncols] @property def cdescrs(self):
return self._cdescrs[0:self.ncols] def __str__(self): return '<SpiceEKSegSum tabnam = %s, nrows = %s, ncols = %s, cnames = %s, cdescrs = %s >' % (self.tabnam, self.nrows, self.ncols, self.cnames, self.cdescrs) #SpiceCell implementation below is in part from github.com/DaRasch/spiceminer/ # and modified as needed by this author, maybe we should work together? ### helper classes/functions ### BITSIZE = {'char': sizeof(c_char), 'int': sizeof(c_int), 'double': sizeof(c_double)} def _char_getter(data_p, index, length): return toPythonString((c_char * length).from_address(data_p + index * length * BITSIZE['char'])) def _double_getter(data_p, index, length): return c_double.from_address(data_p + index * BITSIZE['double']).value def _int_getter(data_p, index, length): return c_int.from_address(data_p + index * BITSIZE['int']).value def SPICEDOUBLE_CELL(size): return SpiceCell.double(size) def SPICEINT_CELL(size): return SpiceCell.integer(size) def SPICECHAR_CELL(size, length): return SpiceCell.character(size, length) class SpiceCell(Structure): #Most written by DaRasch DATATYPES_ENUM = {'char': 0, 'double': 1, 'int': 2, 'time': 3, 'bool': 4} DATATYPES_GET = [_char_getter, _double_getter] + [_int_getter] * 3 baseSize = 6 minCharLen = 6 CTRLBLOCK = 6 _fields_ = [ ('dtype', c_int), ('length', c_int), ('size', c_int), ('card', c_int), ('isSet', c_int), ('adjust', c_int), ('init', c_int), ('base', c_void_p), ('data', c_void_p) ] def __init__(self, dtype=None, length=None, size=None, card=None, isSet=None, base=None, data=None): super(SpiceCell, self).__init__() self.dtype = dtype self.length = length self.size = size self.card = card self.isSet = isSet self.adjust = 0 # Always False, because not implemented self.init = 0 # Always False, because this is the constructor self.base = base # void pointer self.data = data def __str__(self): return '<SpiceCell dtype = %s, length = %s, size = %s, card = %s, isSet = %s, adjust = %s, init = %s, base = %s, data = %s>' % (self.dtype, self.length, self.size, self.card, self.isSet, self.adjust, self.init, self.base, self.data) def is_int(self): return self.dtype == 2 def is_double(self): return self.dtype == 1 def is_char(self): return self.dtype == 0 def is_time(self): return self.dtype == 3 def is_bool(self): return self.dtype == 4 def is_set(self): return self.isSet == 1 @classmethod def character(cls, size, length): base = (c_char * ((cls.CTRLBLOCK + size) * length))() data = (c_char * (size * length)).from_buffer( base, cls.CTRLBLOCK * BITSIZE['char'] * length) instance = cls(cls.DATATYPES_ENUM['char'], length, size, 0, 1, cast(base, c_void_p), cast(data, c_void_p)) return instance @classmethod def integer(cls, size): base = (c_int * (cls.CTRLBLOCK + size))() data = (c_int * size).from_buffer( base, cls.CTRLBLOCK * BITSIZE['int']) instance = cls(cls.DATATYPES_ENUM['int'], 0, size, 0, 1, cast(base, c_void_p), cast(data, c_void_p)) return instance @classmethod def double(cls, size): base = (c_double * (cls.CTRLBLOCK + size))() data = (c_double * size).from_buffer( base, cls.CTRLBLOCK * BITSIZE['double']) instance = cls(cls.DATATYPES_ENUM['double'], 0, size, 0, 1, cast(base, c_void_p), cast(data, c_void_p)) return instance def __len__(self): return self.card def
(self): getter = SpiceCell.DATATYPES_GET[self.dtype] length, card, data = self.length, self.card, self.data for i in range(card): yield (getter(data, i, length)) def __contains__(self, key): return key in self.__iter__() def __getitem__(self, key): getter = SpiceCell.DATATYPES_GET[self.dtype] length, card, data = self.length, self.card, self.data if isinstance(key, slice): start, stop, step = key.start or 0, key.stop or -1, key.step or 1 #TODO Typechecking if card == 0: return [] else: return list(getter(data, i, length) for i in range(start % card, stop % card + 1, step)) if key in range(-card, card): return getter(data, key, length) elif not isinstance(key, int): msg = 'SpiceCell indices must be integers, not {}'.format(type(key)) raise TypeError(msg) else: raise IndexError('SpiceCell index out of range') def reset(self): self.card = 0 self.init = 0
__iter__
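A hedged usage sketch (not part of the original module, and assuming the support_types.py definitions above are importable) showing how the cell helpers and the protocol methods fit together:

# A freshly built integer cell has card == 0, so the sequence protocol
# sees an empty cell until CSPICE (or caller code) populates it.
cell = SPICEINT_CELL(10)   # wraps SpiceCell.integer(10)
assert cell.is_int()       # dtype == DATATYPES_ENUM['int'] == 2
assert cell.is_set()       # the constructors pass isSet=1
assert len(cell) == 0      # __len__ returns self.card
assert list(cell) == []    # __iter__ yields the first `card` entries
cell.reset()               # card and the init flag back to 0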
xapi_test.go
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package localstore import ( "fmt" "io/ioutil" "math" "sort" "testing" "time" "github.com/golang/protobuf/proto" "github.com/juju/errors" . "github.com/pingcap/check" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/mysql" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/tidb/util/types" "github.com/pingcap/tipb/go-tipb" ) func TestT(t *testing.T) { CustomVerboseFlag = true TestingT(t) } var _ = Suite(&testXAPISuite{}) type testXAPISuite struct { } var tbInfo = &simpleTableInfo{ tID: 1, cTypes: []byte{mysql.TypeVarchar, mysql.TypeDouble}, cIDs: []int64{3, 4}, indices: []int{0}, // column 3 of varchar type. iIDs: []int64{5}, } func (s *testXAPISuite) TestSelect(c *C) { defer testleak.AfterTest(c)() store := createMemStore(time.Now().Nanosecond()) count := int64(10) err := prepareTableData(store, tbInfo, count, genValues) c.Check(err, IsNil) // Select Table request. txn, err := store.Begin() c.Check(err, IsNil) client := store.GetClient() req, err := prepareSelectRequest(tbInfo, txn.StartTS()) c.Check(err, IsNil) resp := client.Send(req) subResp, err := resp.Next() c.Check(err, IsNil) data, err := ioutil.ReadAll(subResp) c.Check(err, IsNil) selResp := new(tipb.SelectResponse) proto.Unmarshal(data, selResp) c.Check(selResp.Chunks, HasLen, 1) chunk := &selResp.Chunks[0] c.Check(chunk.RowsMeta, HasLen, int(count)) var dataOffset int64 for i, rowMeta := range chunk.RowsMeta { handle := int64(i + 1) expectedDatums := []types.Datum{types.NewDatum(handle)} expectedDatums = append(expectedDatums, genValues(handle, tbInfo)...) var expectedEncoded []byte expectedEncoded, err = codec.EncodeValue(nil, expectedDatums...) c.Assert(err, IsNil) c.Assert(chunk.RowsData[dataOffset:dataOffset+rowMeta.Length], BytesEquals, expectedEncoded) dataOffset += rowMeta.Length } txn.Commit() // Select Index request. txn, err = store.Begin() c.Check(err, IsNil) client = store.GetClient() req, err = prepareIndexRequest(tbInfo, txn.StartTS()) c.Check(err, IsNil) resp = client.Send(req) subResp, err = resp.Next() c.Check(err, IsNil) data, err = ioutil.ReadAll(subResp) c.Check(err, IsNil) idxResp := new(tipb.SelectResponse) proto.Unmarshal(data, idxResp) chunk = &idxResp.Chunks[0] c.Check(chunk.RowsMeta, HasLen, int(count)) handles := make([]int, 0, 10) for _, rowMeta := range chunk.RowsMeta { handles = append(handles, int(rowMeta.Handle)) } sort.Ints(handles) for i, h := range handles { c.Assert(h, Equals, i+1) } txn.Commit() store.Close() } // simpleTableInfo just has the minimum information needed to describe the table. // The first column is the pk handle column. type simpleTableInfo struct { tID int64 // table ID. cTypes []byte // columns not including pk handle column. cIDs []int64 indices []int // indexed column offsets. only single column index for now.
iIDs []int64 } func (s *simpleTableInfo) toPBTableInfo() *tipb.TableInfo { tbInfo := new(tipb.TableInfo) tbInfo.TableId = s.tID pkColumn := new(tipb.ColumnInfo) pkColumn.Tp = int32(mysql.TypeLonglong) // It's ok to just use table ID for pk column ID, as it doesn't have a column kv. pkColumn.ColumnId = tbInfo.TableId pkColumn.PkHandle = true pkColumn.Flag = 0 tbInfo.Columns = append(tbInfo.Columns, pkColumn) for i, colTp := range s.cTypes { coInfo := &tipb.ColumnInfo{ ColumnId: s.cIDs[i], Tp: int32(colTp), PkHandle: false, } tbInfo.Columns = append(tbInfo.Columns, coInfo) } return tbInfo } func (s *simpleTableInfo) toPBIndexInfo(idxOff int) *tipb.IndexInfo { idxInfo := new(tipb.IndexInfo) idxInfo.TableId = s.tID idxInfo.IndexId = s.iIDs[idxOff] colOff := s.indices[idxOff] idxInfo.Columns = []*tipb.ColumnInfo{ { ColumnId: s.cIDs[colOff], Tp: int32(s.cTypes[colOff]), PkHandle: false, }, } return idxInfo } func genValues(handle int64, tbl *simpleTableInfo) []types.Datum { values := make([]types.Datum, 0, len(tbl.cTypes)) for _, tp := range tbl.cTypes { switch tp { case mysql.TypeLong: values = append(values, types.NewDatum(handle)) case mysql.TypeVarchar: values = append(values, types.NewDatum(fmt.Sprintf("varchar:%d", handle))) case mysql.TypeDouble: values = append(values, types.NewDatum(float64(handle)/10)) default: values = append(values, types.Datum{}) } } return values } type genValueFunc func(handle int64, tbl *simpleTableInfo) []types.Datum func prepareTableData(store kv.Storage, tbl *simpleTableInfo, count int64, gen genValueFunc) error { txn, err := store.Begin() if err != nil { return errors.Trace(err) } for i := int64(1); i <= count; i++ { // setRow's error was silently dropped before; propagate it. if err = setRow(txn, i, tbl, gen); err != nil { return errors.Trace(err) } } return txn.Commit() } func
(txn kv.Transaction, handle int64, tbl *simpleTableInfo, gen genValueFunc) error { rowKey := tablecodec.EncodeRowKey(tbl.tID, codec.EncodeInt(nil, handle)) columnValues := gen(handle, tbl) value, err := tablecodec.EncodeRow(columnValues, tbl.cIDs) if err != nil { return errors.Trace(err) } err = txn.Set(rowKey, value) if err != nil { return errors.Trace(err) } for i, idxCol := range tbl.indices { idxVal := columnValues[idxCol] encoded, err := codec.EncodeKey(nil, idxVal, types.NewDatum(handle)) if err != nil { return errors.Trace(err) } idxKey := tablecodec.EncodeIndexSeekKey(tbl.tID, tbl.iIDs[i], encoded) err = txn.Set(idxKey, []byte{0}) if err != nil { return errors.Trace(err) } } return nil } func prepareSelectRequest(simpleInfo *simpleTableInfo, startTs uint64) (*kv.Request, error) { selReq := new(tipb.SelectRequest) selReq.TableInfo = simpleInfo.toPBTableInfo() selReq.StartTs = startTs selReq.Ranges = []*tipb.KeyRange{fullPBTableRange} data, err := proto.Marshal(selReq) if err != nil { return nil, errors.Trace(err) } req := new(kv.Request) req.Tp = kv.ReqTypeSelect req.Concurrency = 1 req.KeyRanges = []kv.KeyRange{fullTableRange(simpleInfo.tID)} req.Data = data return req, nil } func fullTableRange(tid int64) kv.KeyRange { return kv.KeyRange{ StartKey: tablecodec.EncodeRowKey(tid, codec.EncodeInt(nil, math.MinInt64)), EndKey: tablecodec.EncodeRowKey(tid, codec.EncodeInt(nil, math.MaxInt64)), } } var fullPBTableRange = &tipb.KeyRange{ Low: codec.EncodeInt(nil, math.MinInt64), High: codec.EncodeInt(nil, math.MaxInt64), } var fullPBIndexRange = &tipb.KeyRange{ Low: []byte{0}, High: []byte{255}, } func fullIndexRange(tid int64, idxID int64) kv.KeyRange { return kv.KeyRange{ StartKey: tablecodec.EncodeIndexSeekKey(tid, idxID, []byte{0}), EndKey: tablecodec.EncodeIndexSeekKey(tid, idxID, []byte{255}), } } func prepareIndexRequest(simpleInfo *simpleTableInfo, startTs uint64) (*kv.Request, error) { selReq := new(tipb.SelectRequest) selReq.IndexInfo = simpleInfo.toPBIndexInfo(0) selReq.StartTs = startTs selReq.Ranges = []*tipb.KeyRange{fullPBIndexRange} data, err := proto.Marshal(selReq) if err != nil { return nil, errors.Trace(err) } req := new(kv.Request) req.Tp = kv.ReqTypeIndex req.Concurrency = 1 req.KeyRanges = []kv.KeyRange{fullIndexRange(simpleInfo.tID, simpleInfo.iIDs[0])} req.Data = data return req, nil }
setRow
main.go
package gu import "github.com/ContextLogic/cldr" var Locale = &cldr.Locale{ Locale: "gu", Number: cldr.Number{ Symbols: symbols, Formats: formats, Currencies: currencies, }, Calendar: calendar, PluralRule: pluralRule, } func init()
{ cldr.RegisterLocale(Locale) }
spinnerView.js
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); var tslib_1 = require("tslib"); var React = tslib_1.__importStar(require("react")); var react_1 = require("react"); var spinner_1 = tslib_1.__importDefault(require("@atlaskit/spinner")); var escHelper_1 = require("../escHelper"); var styles_1 = require("../styles"); var SpinnerView = /** @class */ (function (_super) { tslib_1.__extends(SpinnerView, _super); function
() { return _super !== null && _super.apply(this, arguments) || this; } SpinnerView.prototype.componentDidMount = function () { this.escHelper = new escHelper_1.EscHelper(this.props.onCancel); }; SpinnerView.prototype.componentWillUnmount = function () { if (this.escHelper) { this.escHelper.teardown(); } }; SpinnerView.prototype.render = function () { return (React.createElement(styles_1.CenterView, null, React.createElement(spinner_1.default, { size: "large", invertColor: true }))); }; return SpinnerView; }(react_1.Component)); exports.SpinnerView = SpinnerView; //# sourceMappingURL=spinnerView.js.map
SpinnerView
box_ops.py
# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ Utilities for bounding box manipulation and GIoU. """ import torch import numpy as np from torchvision.ops.boxes import box_area from typing import Tuple #### Bounding box utilities imported from torchvision and converted to numpy def np_box_area(boxes: np.ndarray) -> np.ndarray: """ Computes the area of a set of bounding boxes, which are specified by their (x1, y1, x2, y2) coordinates. Args: boxes (ndarray[N, 4]): boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with ``0 <= x1 < x2`` and ``0 <= y1 < y2``. Returns: area (ndarray[N]): area for each box """ assert boxes.ndim == 2 and boxes.shape[-1] == 4 return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) # implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py # with slight modifications def _box_inter_union(boxes1: np.ndarray, boxes2: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: area1 = np_box_area(boxes1) area2 = np_box_area(boxes2) lt = np.maximum(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] rb = np.minimum(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] wh = (rb - lt).clip(min=0) # [N,M,2] inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter return inter, union def np_box_iou(boxes1: np.ndarray, boxes2: np.ndarray) -> np.ndarray: """ Return intersection-over-union (Jaccard index) of boxes. Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with ``0 <= x1 < x2`` and ``0 <= y1 < y2``. Args: boxes1 (ndarray[N, 4]) boxes2 (ndarray[M, 4]) Returns: iou (ndarray[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 """ inter, union = _box_inter_union(boxes1, boxes2) iou = inter / union return iou def box_cxcywh_to_xyxy(x): x_c, y_c, w, h = x.unbind(-1) b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] return torch.stack(b, dim=-1) def box_xyxy_to_cxcywh(x): x0, y0, x1, y1 = x.unbind(-1) b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)] return torch.stack(b, dim=-1) # modified from torchvision to also return the union def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] wh = (rb - lt).clamp(min=0) # [N,M,2] inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union def
(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/ The boxes should be in [x0, y0, x1, y1] format Returns a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes give inf / nan results # so do an early check assert (boxes1[:, 2:] >= boxes1[:, :2]).all() assert (boxes2[:, 2:] >= boxes2[:, :2]).all() iou, union = box_iou(boxes1, boxes2) lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) wh = (rb - lt).clamp(min=0) # [N,M,2] area = wh[:, :, 0] * wh[:, :, 1] return iou - (area - union) / area def masks_to_boxes(masks): """Compute the bounding boxes around the provided masks The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns an [N, 4] tensor, with the boxes in xyxy format """ if masks.numel() == 0: return torch.zeros((0, 4), device=masks.device) h, w = masks.shape[-2:] y = torch.arange(0, h, dtype=torch.float) x = torch.arange(0, w, dtype=torch.float) y, x = torch.meshgrid(y, x) x_mask = masks * x.unsqueeze(0) x_max = x_mask.flatten(1).max(-1)[0] x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] y_mask = masks * y.unsqueeze(0) y_max = y_mask.flatten(1).max(-1)[0] y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] return torch.stack([x_min, y_min, x_max, y_max], 1)
generalized_box_iou
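To make the box convention concrete, here is a small worked IoU example against the numpy helpers above (hand-picked values, not from the original repo; assumes np_box_iou is in scope):

import numpy as np

boxes1 = np.array([[0.0, 0.0, 2.0, 2.0]])   # one 2x2 box, area 4
boxes2 = np.array([[1.0, 1.0, 3.0, 3.0],    # overlaps boxes1 in a 1x1 square
                   [2.0, 2.0, 4.0, 4.0]])   # touches boxes1 at a single corner

# Box 0: intersection 1, union 4 + 4 - 1 = 7  -> IoU = 1/7 ~ 0.1429
# Box 1: intersection 0, union 4 + 4 - 0 = 8  -> IoU = 0
print(np_box_iou(boxes1, boxes2))  # [[0.14285714 0.        ]]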
stats.py
import datetime import hashlib import time from collections import namedtuple, OrderedDict from copy import copy from itertools import chain import csv import gevent from .exception import StopUser, CatchResponseError import logging console_logger = logging.getLogger("locust.stats_logger") STATS_NAME_WIDTH = 60 STATS_TYPE_WIDTH = 8 """Default interval for how frequently results are written to console.""" CONSOLE_STATS_INTERVAL_SEC = 2 """Default interval for how frequently results are written to history.""" HISTORY_STATS_INTERVAL_SEC = 5 """Default interval for how frequently CSV files are written if this option is configured.""" CSV_STATS_INTERVAL_SEC = 1 CSV_STATS_FLUSH_INTERVAL_SEC = 10 """ Default window size/resolution - in seconds - when calculating the current response time percentile """ CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW = 10 CachedResponseTimes = namedtuple("CachedResponseTimes", ["response_times", "num_requests"]) PERCENTILES_TO_REPORT = [0.50, 0.66, 0.75, 0.80, 0.90, 0.95, 0.98, 0.99, 0.999, 0.9999, 1.0] class RequestStatsAdditionError(Exception): pass def get_readable_percentiles(percentile_list): """ Converts a list of percentiles from 0-1 fraction to 0%-100% view for use in console & CSV reporting :param percentile_list: The list of percentiles in range 0-1 :return: The list of string representations for each percentile in 0%-100% view """ return [ f"{int(percentile * 100) if (percentile * 100).is_integer() else round(100 * percentile, 6)}%" for percentile in percentile_list ] def calculate_response_time_percentile(response_times, num_requests, percent): """ Get the response time that a certain number of percent of the requests finished within. Arguments: response_times: A StatsEntry.response_times dict num_requests: Number of requests made (could be derived from response_times, but we save some CPU cycles by using the value which we already store) percent: The percentile we want to calculate. Specified in range: 0.0 - 1.0 """ num_of_request = int((num_requests * percent)) processed_count = 0 for response_time in sorted(response_times.keys(), reverse=True): processed_count += response_times[response_time] if num_requests - processed_count <= num_of_request: return response_time # if all response times were None return 0 def calculate_response_time_average(response_times, num_requests): """ Get the average response time over all recorded requests. Arguments: response_times: A StatsEntry.response_times dict num_requests: Number of requests made (unused here since we recount from the dict, but kept for a signature consistent with the other calculate_* helpers) """ num_of_request = int(num_requests) sum_val = 0 processed_count = 0 for response_time in sorted(response_times.keys(), reverse=True): processed_count += response_times[response_time] sum_val += response_time * response_times[response_time] num_of_request = processed_count if num_of_request > 0: return int(sum_val / float(num_of_request)) else:
def calculate_response_time_max(response_times, num_requests): """ Get the maximum response time over all recorded requests. Arguments: response_times: A StatsEntry.response_times dict num_requests: Number of requests made (unused here, kept for a signature consistent with the other calculate_* helpers) """ max_val = None processed_count = 0 for response_time in sorted(response_times.keys(), reverse=True): processed_count += response_times[response_time] if max_val is None or response_time > max_val: max_val = response_time if max_val is None: return None return int(max_val) def calculate_response_time_min(response_times, num_requests): """ Get the minimum response time over all recorded requests. Arguments: response_times: A StatsEntry.response_times dict num_requests: Number of requests made (unused here, kept for a signature consistent with the other calculate_* helpers) """ min_val = None processed_count = 0 for response_time in sorted(response_times.keys(), reverse=True): processed_count += response_times[response_time] if min_val is None: min_val = response_time elif response_time < min_val: min_val = response_time if min_val is None: return None return int(min_val) def diff_response_time_dicts(latest, old): """ Returns the delta between two {response_times:request_count} dicts. Used together with the response_times cache to get the response times for the last X seconds, which in turn is used to calculate the current response time percentiles. """ new = {} for t in latest: diff = latest[t] - old.get(t, 0) if diff: new[t] = diff return new class RequestStats: """ Class that holds the request statistics. """ def __init__(self, use_response_times_cache=True): """ :param use_response_times_cache: The value of use_response_times_cache will be set for each StatsEntry() when they are created. Setting it to False saves some memory and CPU cycles, which we can do on Worker nodes where the response_times_cache is not needed.
""" self.use_response_times_cache = use_response_times_cache self.entries = {} self.errors = {} self.total = StatsEntry(self, "Aggregated", None, use_response_times_cache=self.use_response_times_cache) self.history = [] @property def num_requests(self): return self.total.num_requests @property def num_none_requests(self): return self.total.num_none_requests @property def num_failures(self): return self.total.num_failures @property def last_request_timestamp(self): return self.total.last_request_timestamp @property def start_time(self): return self.total.start_time def log_request(self, method, name, response_time, content_length): self.total.log(response_time, content_length) self.get(name, method).log(response_time, content_length) def log_error(self, method, name, error): self.total.log_error(error) self.get(name, method).log_error(error) # store error in errors dict key = StatsError.create_key(method, name, error) entry = self.errors.get(key) if not entry: entry = StatsError(method, name, error) self.errors[key] = entry entry.occurred() def get(self, name, method): """ Retrieve a StatsEntry instance by name and method """ entry = self.entries.get((name, method)) if not entry: entry = StatsEntry(self, name, method, use_response_times_cache=self.use_response_times_cache) self.entries[(name, method)] = entry return entry def reset_all(self): """ Go through all stats entries and reset them to zero """ self.total.reset() self.errors = {} for r in self.entries.values(): r.reset() self.history = [] def clear_all(self): """ Remove all stats entries and errors """ self.total = StatsEntry(self, "Aggregated", None, use_response_times_cache=self.use_response_times_cache) self.entries = {} self.errors = {} self.history = [] def serialize_stats(self): return [ self.entries[key].get_stripped_report() for key in self.entries.keys() if not (self.entries[key].num_requests == 0 and self.entries[key].num_failures == 0) ] def serialize_errors(self): return dict([(k, e.to_dict()) for k, e in self.errors.items()]) class StatsEntry: """ Represents a single stats entry (name and method) """ name = None """ Name (URL) of this stats entry """ method = None """ Method (GET, POST, PUT, etc.) """ num_requests = None """ The number of requests made """ num_none_requests = None """ The number of requests made with a None response time (typically async requests) """ num_failures = None """ Number of failed request """ total_response_time = None """ Total sum of the response times """ min_response_time = None """ Minimum response time """ max_response_time = None """ Maximum response time """ num_reqs_per_sec = None """ A {second => request_count} dict that holds the number of requests made per second """ num_fail_per_sec = None """ A (second => failure_count) dict that hold the number of failures per second """ response_times = None """ A {response_time => count} dict that holds the response time distribution of all the requests. The keys (the response time in ms) are rounded to store 1, 2, ... 9, 10, 20. .. 90, 100, 200 .. 900, 1000, 2000 ... 9000, in order to save memory. This dict is used to calculate the median and percentile response times. """ use_response_times_cache = False """ If set to True, the copy of the response_time dict will be stored in response_times_cache every second, and kept for 20 seconds (by default, will be CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10). We can use this dict to calculate the *current* median response time, as well as other response time percentiles. 
""" response_times_cache = None """ If use_response_times_cache is set to True, this will be a {timestamp => CachedResponseTimes()} OrderedDict that holds a copy of the response_times dict for each of the last 20 seconds. """ total_content_length = None """ The sum of the content length of all the requests for this entry """ start_time = None """ Time of the first request for this entry """ last_request_timestamp = None """ Time of the last request for this entry """ def __init__(self, stats, name, method, use_response_times_cache=False): self.stats = stats self.name = name self.method = method self.use_response_times_cache = use_response_times_cache self.reset() def reset(self): self.start_time = time.time() self.num_requests = 0 self.num_none_requests = 0 self.num_failures = 0 self.total_response_time = 0 self.response_times = {} self.min_response_time = None self.max_response_time = 0 self.last_request_timestamp = None self.num_reqs_per_sec = {} self.num_fail_per_sec = {} self.total_content_length = 0 if self.use_response_times_cache: self.response_times_cache = OrderedDict() self._cache_response_times(int(time.time())) def log(self, response_time, content_length): # get the time current_time = time.time() t = int(current_time) if self.use_response_times_cache and self.last_request_timestamp and t > int(self.last_request_timestamp): # see if we shall make a copy of the response_times dict and store in the cache self._cache_response_times(t - 1) self.num_requests += 1 self._log_time_of_request(current_time) self._log_response_time(response_time) # increase total content-length self.total_content_length += content_length def _log_time_of_request(self, current_time): t = int(current_time) self.num_reqs_per_sec[t] = self.num_reqs_per_sec.setdefault(t, 0) + 1 self.last_request_timestamp = current_time def _log_response_time(self, response_time): if response_time is None: self.num_none_requests += 1 return self.total_response_time += response_time if self.min_response_time is None: self.min_response_time = response_time self.min_response_time = min(self.min_response_time, response_time) self.max_response_time = max(self.max_response_time, response_time) # to avoid to much data that has to be transferred to the master node when # running in distributed mode, we save the response time rounded in a dict # so that 147 becomes 150, 3432 becomes 3400 and 58760 becomes 59000 if response_time < 100: rounded_response_time = round(response_time) elif response_time < 1000: rounded_response_time = round(response_time, -1) elif response_time < 10000: rounded_response_time = round(response_time, -2) else: rounded_response_time = round(response_time, -3) # increase request count for the rounded key in response time dict self.response_times.setdefault(rounded_response_time, 0) self.response_times[rounded_response_time] += 1 def log_error(self, error): self.num_failures += 1 t = int(time.time()) self.num_fail_per_sec[t] = self.num_fail_per_sec.setdefault(t, 0) + 1 @property def fail_ratio(self): try: return float(self.num_failures) / self.num_requests except ZeroDivisionError: if self.num_failures > 0: return 1.0 else: return 0.0 @property def avg_response_time(self): try: return float(self.total_response_time) / (self.num_requests - self.num_none_requests) except ZeroDivisionError: return 0 @property def median_response_time(self): if not self.response_times: return 0 median = median_from_dict(self.num_requests - self.num_none_requests, self.response_times) or 0 # Since we only use two digits of precision 
when calculating the median response time # while still using the exact values for min and max response times, the following check # makes sure that we don't report a median > max or median < min when a StatsEntry only # has one (or very few) really slow requests if median > self.max_response_time: median = self.max_response_time elif median < self.min_response_time: median = self.min_response_time return median @property def current_rps(self): if self.stats.last_request_timestamp is None: return 0 slice_start_time = max(int(self.stats.last_request_timestamp) - 12, int(self.stats.start_time or 0)) reqs = [ self.num_reqs_per_sec.get(t, 0) for t in range(slice_start_time, int(self.stats.last_request_timestamp) - 2) ] return avg(reqs) @property def current_fail_per_sec(self): if self.stats.last_request_timestamp is None: return 0 slice_start_time = max(int(self.stats.last_request_timestamp) - 12, int(self.stats.start_time or 0)) reqs = [ self.num_fail_per_sec.get(t, 0) for t in range(slice_start_time, int(self.stats.last_request_timestamp) - 2) ] return avg(reqs) @property def total_rps(self): if not self.stats.last_request_timestamp or not self.stats.start_time: return 0.0 try: return self.num_requests / (self.stats.last_request_timestamp - self.stats.start_time) except ZeroDivisionError: return 0.0 @property def total_fail_per_sec(self): if not self.stats.last_request_timestamp or not self.stats.start_time: return 0.0 try: return self.num_failures / (self.stats.last_request_timestamp - self.stats.start_time) except ZeroDivisionError: return 0.0 @property def avg_content_length(self): try: return self.total_content_length / self.num_requests except ZeroDivisionError: return 0 def extend(self, other): """ Extend the data from the current StatsEntry with the stats from another StatsEntry instance.
""" # save the old last_request_timestamp, to see if we should store a new copy # of the response times in the response times cache old_last_request_timestamp = self.last_request_timestamp if self.last_request_timestamp is not None and other.last_request_timestamp is not None: self.last_request_timestamp = max(self.last_request_timestamp, other.last_request_timestamp) elif other.last_request_timestamp is not None: self.last_request_timestamp = other.last_request_timestamp self.start_time = min(self.start_time, other.start_time) self.num_requests = self.num_requests + other.num_requests self.num_none_requests = self.num_none_requests + other.num_none_requests self.num_failures = self.num_failures + other.num_failures self.total_response_time = self.total_response_time + other.total_response_time self.max_response_time = max(self.max_response_time, other.max_response_time) if self.min_response_time is not None and other.min_response_time is not None: self.min_response_time = min(self.min_response_time, other.min_response_time) elif other.min_response_time is not None: # this means self.min_response_time is None, so we can safely replace it self.min_response_time = other.min_response_time self.total_content_length = self.total_content_length + other.total_content_length for key in other.response_times: self.response_times[key] = self.response_times.get(key, 0) + other.response_times[key] for key in other.num_reqs_per_sec: self.num_reqs_per_sec[key] = self.num_reqs_per_sec.get(key, 0) + other.num_reqs_per_sec[key] for key in other.num_fail_per_sec: self.num_fail_per_sec[key] = self.num_fail_per_sec.get(key, 0) + other.num_fail_per_sec[key] if self.use_response_times_cache: # If we've entered a new second, we'll cache the response times. Note that there # might still be reports from other worker nodes - that contains requests for the same # time periods - that hasn't been received/accounted for yet. This will cause the cache to # lag behind a second or two, but since StatsEntry.current_response_time_percentile() # (which is what the response times cache is used for) uses an approximation of the # last 10 seconds anyway, it should be fine to ignore this. last_time = self.last_request_timestamp and int(self.last_request_timestamp) or None if last_time and last_time > (old_last_request_timestamp and int(old_last_request_timestamp) or 0): self._cache_response_times(last_time) def serialize(self): return { "name": self.name, "method": self.method, "last_request_timestamp": self.last_request_timestamp, "start_time": self.start_time, "num_requests": self.num_requests, "num_none_requests": self.num_none_requests, "num_failures": self.num_failures, "total_response_time": self.total_response_time, "max_response_time": self.max_response_time, "min_response_time": self.min_response_time, "total_content_length": self.total_content_length, "response_times": self.response_times, "num_reqs_per_sec": self.num_reqs_per_sec, "num_fail_per_sec": self.num_fail_per_sec, } @classmethod def unserialize(cls, data): obj = cls(None, data["name"], data["method"]) for key in [ "last_request_timestamp", "start_time", "num_requests", "num_none_requests", "num_failures", "total_response_time", "max_response_time", "min_response_time", "total_content_length", "response_times", "num_reqs_per_sec", "num_fail_per_sec", ]: setattr(obj, key, data[key]) return obj def get_stripped_report(self): """ Return the serialized version of this StatsEntry, and then clear the current stats. 
""" report = self.serialize() self.reset() return report def to_string(self, current=True): """ Return the stats as a string suitable for console output. If current is True, it'll show the RPS and failure rate for the last 10 seconds. If it's false, it'll show the total stats for the whole run. """ if current: rps = self.current_rps fail_per_sec = self.current_fail_per_sec else: rps = self.total_rps fail_per_sec = self.total_fail_per_sec return (" %-" + str(STATS_NAME_WIDTH) + "s %7d %12s | %7d %7d %7d %7d | %7.2f %7.2f") % ( (self.method and self.method + " " or "") + self.name, self.num_requests, "%d(%.2f%%)" % (self.num_failures, self.fail_ratio * 100), self.avg_response_time, self.min_response_time or 0, self.max_response_time, self.median_response_time or 0, rps or 0, fail_per_sec or 0, ) def __str__(self): return self.to_string(current=True) def get_response_time_percentile(self, percent): """ Get the response time that a certain number of percent of the requests finished within. Percent specified in range: 0.0 - 1.0 """ return calculate_response_time_percentile(self.response_times, self.num_requests, percent) def get_current_response_time_average(self): """ Calculate the *current* response time for a certain percentile. We use a sliding window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW) when calculating this. """ if not self.use_response_times_cache: raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile") # First, we want to determine which of the cached response_times dicts we should # use to get response_times for approximately 10 seconds ago. t = int(time.time()) # Since we can't be sure that the cache contains an entry for every second. # We'll construct a list of timestamps which we consider acceptable keys to be used # when trying to fetch the cached response_times. We construct this list in such a way # that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8, # and so on acceptable_timestamps = [] for i in range(9): acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-i) acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW+i) cached = None for ts in acceptable_timestamps: if ts in self.response_times_cache: cached = self.response_times_cache[ts] break if cached: # If we fond an acceptable cached response times, we'll calculate a new response # times dict of the last 10 seconds (approximately) by diffing it with the current # total response times. Then we'll use that to calculate a response time percentile # for that timeframe return calculate_response_time_average( diff_response_time_dicts(self.response_times, cached.response_times), self.num_requests - cached.num_requests, ) def get_current_response_time_max(self): """ Calculate the *current* response time for a certain percentile. We use a sliding window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW) when calculating this. """ if not self.use_response_times_cache: raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile") # First, we want to determine which of the cached response_times dicts we should # use to get response_times for approximately 10 seconds ago. t = int(time.time()) # Since we can't be sure that the cache contains an entry for every second. 
# we'll construct a list of timestamps which we consider acceptable keys to be used # when trying to fetch the cached response_times. We construct this list in such a way # that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8, # and so on acceptable_timestamps = [] for i in range(9): acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-i) acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW+i) cached = None for ts in acceptable_timestamps: if ts in self.response_times_cache: cached = self.response_times_cache[ts] break if cached: # If we found an acceptable cached response_times dict, we'll calculate a new response # times dict of the last 10 seconds (approximately) by diffing it with the current # total response times. Then we'll use that to calculate the maximum # for that timeframe return calculate_response_time_max( diff_response_time_dicts(self.response_times, cached.response_times), self.num_requests - cached.num_requests, ) def get_current_response_time_min(self): """ Calculate the *current* minimum response time. We use a sliding window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW) when calculating this. """ if not self.use_response_times_cache: raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ minimum response time") # First, we want to determine which of the cached response_times dicts we should # use to get response_times for approximately 10 seconds ago. t = int(time.time()) # Since we can't be sure that the cache contains an entry for every second, # we'll construct a list of timestamps which we consider acceptable keys to be used # when trying to fetch the cached response_times. We construct this list in such a way # that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8, # and so on acceptable_timestamps = [] for i in range(9): acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW - i) acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + i) cached = None for ts in acceptable_timestamps: if ts in self.response_times_cache: cached = self.response_times_cache[ts] break if cached: # If we found an acceptable cached response_times dict, we'll calculate a new response # times dict of the last 10 seconds (approximately) by diffing it with the current # total response times. Then we'll use that to calculate the minimum # for that timeframe return calculate_response_time_min( diff_response_time_dicts(self.response_times, cached.response_times), self.num_requests - cached.num_requests, ) def get_current_response_time_percentile(self, percent): """ Calculate the *current* response time for a certain percentile. We use a sliding window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW) when calculating this. """ if not self.use_response_times_cache: raise ValueError( "StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile" ) # First, we want to determine which of the cached response_times dicts we should # use to get response_times for approximately 10 seconds ago. t = int(time.time()) # Since we can't be sure that the cache contains an entry for every second,
# we'll construct a list of timestamps which we consider acceptable keys to be used # when trying to fetch the cached response_times. We construct this list in such a way # that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8, # and so on acceptable_timestamps = [] acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW) for i in range(1, 9): acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW - i) acceptable_timestamps.append(t - CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + i) cached = None for ts in acceptable_timestamps: if ts in self.response_times_cache: cached = self.response_times_cache[ts] break if cached: # If we found an acceptable cached response_times dict, we'll calculate a new response # times dict of the last 10 seconds (approximately) by diffing it with the current # total response times. Then we'll use that to calculate a response time percentile # for that timeframe return calculate_response_time_percentile( diff_response_time_dicts(self.response_times, cached.response_times), self.num_requests - cached.num_requests, percent, ) def percentile(self): if not self.num_requests: raise ValueError("Can't calculate percentile on url with no successful requests") tpl = f" %-{str(STATS_TYPE_WIDTH)}s %-{str(STATS_NAME_WIDTH)}s %8d {' '.join(['%6d'] * len(PERCENTILES_TO_REPORT))}" return tpl % ( (self.method, self.name) + tuple([self.get_response_time_percentile(p) for p in PERCENTILES_TO_REPORT]) + (self.num_requests,) ) def _cache_response_times(self, t): self.response_times_cache[t] = CachedResponseTimes( response_times=copy(self.response_times), num_requests=self.num_requests, ) # We'll use a cache size of CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10 since - in the extreme case - # we might still use response times (from the cache) for t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-10 # to calculate the current response time percentile, if we're missing cached values for the subsequent # 20 seconds cache_size = CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10 if len(self.response_times_cache) > cache_size: # only keep the latest 20 response_times dicts for i in range(len(self.response_times_cache) - cache_size): self.response_times_cache.popitem(last=False) class StatsError: def __init__(self, method, name, error, occurrences=0): self.method = method self.name = name self.error = error self.occurrences = occurrences @classmethod def parse_error(cls, error): string_error = repr(error) target = "object at 0x" target_index = string_error.find(target) if target_index < 0: return string_error start = target_index + len(target) - 2 end = string_error.find(">", start) if end < 0: return string_error hex_address = string_error[start:end] return string_error.replace(hex_address, "0x....") @classmethod def create_key(cls, method, name, error): key = "%s.%s.%r" % (method, name, StatsError.parse_error(error)) return hashlib.md5(key.encode("utf-8")).hexdigest() def occurred(self): self.occurrences += 1 def to_name(self): error = self.error if isinstance(error, CatchResponseError): # standalone unwrapped_error = error.args[0] elif isinstance(error, str) and error.startswith("CatchResponseError("): # distributed length = len("CatchResponseError(") unwrapped_error = error[length:-1] else: # standalone, unwrapped exception unwrapped_error = repr(error) return "%s %s: %s" % (self.method, self.name, unwrapped_error) def to_dict(self): return { "method": self.method, "name": self.name, "error": StatsError.parse_error(self.error), "occurrences":
self.occurrences, } @classmethod def from_dict(cls, data): return cls(data["method"], data["name"], data["error"], data["occurrences"]) def avg(values): return sum(values, 0.0) / max(len(values), 1) def median_from_dict(total, count): """ total is the number of requests made count is a dict {response_time: count} """ pos = (total - 1) / 2 for k in sorted(count.keys()): if pos < count[k]: return k pos -= count[k] def setup_distributed_stats_event_listeners(events, stats): def on_report_to_master(client_id, data): data["stats"] = stats.serialize_stats() data["stats_total"] = stats.total.get_stripped_report() data["errors"] = stats.serialize_errors() stats.errors = {} def on_worker_report(client_id, data): for stats_data in data["stats"]: entry = StatsEntry.unserialize(stats_data) request_key = (entry.name, entry.method) if request_key not in stats.entries: stats.entries[request_key] = StatsEntry(stats, entry.name, entry.method, use_response_times_cache=True) stats.entries[request_key].extend(entry) for error_key, error in data["errors"].items(): if error_key not in stats.errors: stats.errors[error_key] = StatsError.from_dict(error) else: stats.errors[error_key].occurrences += error["occurrences"] stats.total.extend(StatsEntry.unserialize(data["stats_total"])) events.report_to_master.add_listener(on_report_to_master) events.worker_report.add_listener(on_worker_report) def print_stats(stats, current=True): console_logger.info( (" %-" + str(STATS_NAME_WIDTH) + "s %7s %12s | %7s %7s %7s %7s | %7s %7s") % ("Name", "# reqs", "# fails", "Avg", "Min", "Max", "Median", "req/s", "failures/s") ) console_logger.info("-" * (80 + STATS_NAME_WIDTH)) for key in sorted(stats.entries.keys()): r = stats.entries[key] console_logger.info(r.to_string(current=current)) console_logger.info("-" * (80 + STATS_NAME_WIDTH)) console_logger.info(stats.total.to_string(current=current)) console_logger.info("") def print_percentile_stats(stats): console_logger.info("Response time percentiles (approximated)") headers = ("Type", "Name") + tuple(get_readable_percentiles(PERCENTILES_TO_REPORT)) + ("# reqs",) console_logger.info( ( f" %-{str(STATS_TYPE_WIDTH)}s %-{str(STATS_NAME_WIDTH)}s %8s " f"{' '.join(['%6s'] * len(PERCENTILES_TO_REPORT))}" ) % headers ) separator = ( f'{"-" * STATS_TYPE_WIDTH}|{"-" * STATS_NAME_WIDTH}|{"-" * 9}|{("-" * 6 + "|") * len(PERCENTILES_TO_REPORT)}' ) console_logger.info(separator) for key in sorted(stats.entries.keys()): r = stats.entries[key] if r.response_times: console_logger.info(r.percentile()) console_logger.info(separator) if stats.total.response_times: console_logger.info(stats.total.percentile()) console_logger.info("") def print_error_report(stats): if not len(stats.errors): return console_logger.info("Error report") console_logger.info(" %-18s %-100s" % ("# occurrences", "Error")) console_logger.info("-" * (80 + STATS_NAME_WIDTH)) for error in stats.errors.values(): console_logger.info(" %-18i %-100s" % (error.occurrences, error.to_name())) console_logger.info("-" * (80 + STATS_NAME_WIDTH)) console_logger.info("") def stats_printer(stats): def stats_printer_func(): while True: print_stats(stats) gevent.sleep(CONSOLE_STATS_INTERVAL_SEC) return stats_printer_func def sort_stats(stats): return [stats[key] for key in sorted(stats.keys())] def stats_history(runner): """Save current stats info to history for charts of the report.""" while True: stats = runner.stats if not stats.total.use_response_times_cache: break r = { "time": datetime.datetime.now().strftime("%H:%M:%S"), "current_rps":
stats.total.current_rps or 0, "current_fail_per_sec": stats.total.current_fail_per_sec or 0, "response_time_percentile_95": stats.total.get_current_response_time_percentile(0.95) or 0, "response_time_percentile_50": stats.total.get_current_response_time_percentile(0.5) or 0, "user_count": runner.user_count or 0, } stats.history.append(r) gevent.sleep(HISTORY_STATS_INTERVAL_SEC) class StatsCSV: """Write statistics to csv_writer stream.""" def __init__(self, environment, percentiles_to_report): super().__init__() self.environment = environment self.percentiles_to_report = percentiles_to_report self.percentiles_na = ["N/A"] * len(self.percentiles_to_report) self.requests_csv_columns = [ "Type", "Name", "Request Count", "Failure Count", "Median Response Time", "Average Response Time", "Min Response Time", "Max Response Time", "Average Content Size", "Requests/s", "Failures/s", ] + get_readable_percentiles(self.percentiles_to_report) self.failures_columns = [ "Method", "Name", "Error", "Occurrences", ] self.exceptions_columns = [ "Count", "Message", "Traceback", "Nodes", ] def _percentile_fields(self, stats_entry): return ( [int(stats_entry.get_response_time_percentile(x) or 0) for x in self.percentiles_to_report] if stats_entry.num_requests else self.percentiles_na ) def requests_csv(self, csv_writer): """Write requests csv with header and data rows.""" csv_writer.writerow(self.requests_csv_columns) self._requests_data_rows(csv_writer) def _requests_data_rows(self, csv_writer): """Write requests csv data rows, excluding header.""" stats = self.environment.stats for stats_entry in chain(sort_stats(stats.entries), [stats.total]): csv_writer.writerow( chain( [ stats_entry.method, stats_entry.name, stats_entry.num_requests, stats_entry.num_failures, stats_entry.median_response_time, stats_entry.avg_response_time, stats_entry.min_response_time or 0, stats_entry.max_response_time, stats_entry.avg_content_length, stats_entry.total_rps, stats_entry.total_fail_per_sec, ], self._percentile_fields(stats_entry), ) ) def failures_csv(self, csv_writer): csv_writer.writerow(self.failures_columns) self._failures_data_rows(csv_writer) def _failures_data_rows(self, csv_writer): for stats_error in sort_stats(self.environment.stats.errors): csv_writer.writerow( [ stats_error.method, stats_error.name, stats_error.error, stats_error.occurrences, ] ) def exceptions_csv(self, csv_writer): csv_writer.writerow(self.exceptions_columns) self._exceptions_data_rows(csv_writer) def _exceptions_data_rows(self, csv_writer): for exc in self.environment.runner.exceptions.values(): csv_writer.writerow( [ exc["count"], exc["msg"], exc["traceback"], ", ".join(exc["nodes"]) ] ) class StatsCSVFileWriter(StatsCSV): """Write statistics to CSV files""" def __init__(self, environment, percentiles_to_report, base_filepath, full_history=False): super().__init__(environment, percentiles_to_report) self.base_filepath = base_filepath self.full_history = full_history self.requests_csv_filehandle = open(self.base_filepath + "_stats.csv", "w") self.requests_csv_writer = csv.writer(self.requests_csv_filehandle) self.stats_history_csv_filehandle = open(self.stats_history_file_name(), "w") self.stats_history_csv_writer = csv.writer(self.stats_history_csv_filehandle) self.failures_csv_filehandle = open(self.base_filepath + "_failures.csv", "w") self.failures_csv_writer = csv.writer(self.failures_csv_filehandle) self.failures_csv_data_start = 0 self.exceptions_csv_filehandle = open(self.base_filepath + "_exceptions.csv", "w")
self.exceptions_csv_writer = csv.writer(self.exceptions_csv_filehandle) self.exceptions_csv_data_start = 0 self.stats_history_csv_columns = [ "Timestamp", "User Count", "Type", "Name", "Requests/s", "Failures/s", *get_readable_percentiles(self.percentiles_to_report), "Total Request Count", "Total Failure Count", "Total Median Response Time", "Total Average Response Time", "Total Min Response Time", "Total Max Response Time", "Total Average Content Size", ] def __call__(self): self.stats_writer() def stats_writer(self): """Writes all the csv files for the locust run.""" # Write header row for all files and save position for non-append files self.requests_csv_writer.writerow(self.requests_csv_columns) requests_csv_data_start = self.requests_csv_filehandle.tell() self.stats_history_csv_writer.writerow(self.stats_history_csv_columns) self.failures_csv_writer.writerow(self.failures_columns) self.failures_csv_data_start = self.failures_csv_filehandle.tell() self.exceptions_csv_writer.writerow(self.exceptions_columns) self.exceptions_csv_data_start = self.exceptions_csv_filehandle.tell() # Continuously write data rows for all files last_flush_time = 0 while True: now = time.time() self.requests_csv_filehandle.seek(requests_csv_data_start) self._requests_data_rows(self.requests_csv_writer) self.requests_csv_filehandle.truncate() self._stats_history_data_rows(self.stats_history_csv_writer, now) self.failures_csv_filehandle.seek(self.failures_csv_data_start) self._failures_data_rows(self.failures_csv_writer) self.failures_csv_filehandle.truncate() self.exceptions_csv_filehandle.seek(self.exceptions_csv_data_start) self._exceptions_data_rows(self.exceptions_csv_writer) self.exceptions_csv_filehandle.truncate() if now - last_flush_time > CSV_STATS_FLUSH_INTERVAL_SEC: self.requests_flush() self.stats_history_flush() self.failures_flush() self.exceptions_flush() last_flush_time = now gevent.sleep(CSV_STATS_INTERVAL_SEC) def _stats_history_data_rows(self, csv_writer, now): """ Write CSV rows with the *current* stats. By default only includes the Aggregated stats entry, but if self.full_history is set to True, a row for each entry will be included. Note that this method differs from the other methods as it appends time-stamped data to the file, whereas the other methods overwrite the data. """ stats = self.environment.stats timestamp = int(now) stats_entries = [] if self.full_history: stats_entries = sort_stats(stats.entries) for stats_entry in chain(stats_entries, [stats.total]): csv_writer.writerow( chain( ( timestamp, self.environment.runner.user_count, stats_entry.method or "", stats_entry.name, f"{stats_entry.current_rps:2f}", f"{stats_entry.current_fail_per_sec:2f}", ), self._percentile_fields(stats_entry), ( stats_entry.num_requests, stats_entry.num_failures, stats_entry.median_response_time, stats_entry.avg_response_time, stats_entry.min_response_time or 0, stats_entry.max_response_time, stats_entry.avg_content_length, ), ) ) def requests_flush(self): self.requests_csv_filehandle.flush() def stats_history_flush(self): self.stats_history_csv_filehandle.flush() def failures_flush(self): self.failures_csv_filehandle.flush() def exceptions_flush(self): self.exceptions_csv_filehandle.flush() def close_files(self): self.requests_csv_filehandle.close() self.stats_history_csv_filehandle.close() self.failures_csv_filehandle.close() self.exceptions_csv_filehandle.close() def stats_history_file_name(self): return self.base_filepath + "_stats_history.csv"
return 0
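A quick worked example of how the rounded response_times dict feeds the calculate_* helpers above (synthetic numbers; assumes the functions defined above are in scope):

# 10 requests whose rounded response times (ms) were recorded as:
response_times = {100: 6, 200: 3, 5000: 1}  # one slow outlier
num_requests = 10

# Percentiles walk the keys from slowest to fastest, subtracting counts until
# no more than the allowed share of requests lies above the returned time.
print(calculate_response_time_percentile(response_times, num_requests, 0.95))  # 5000
print(calculate_response_time_percentile(response_times, num_requests, 0.50))  # 100
print(calculate_response_time_average(response_times, num_requests))           # 620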
06-args.py
from sys import argv
print "This script is called: ", script print "The first variable is: ", first print "The second variable is: ", second
script, first, second = argv
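Invoked from a shell, argv receives the script name followed by the two positional arguments (illustrative transcript; the double spaces come from print inserting a separator between its arguments):

$ python 06-args.py alpha beta
This script is called:  06-args.py
The first variable is:  alpha
The second variable is:  beta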
netcompare.py
#!/usr/bin/env python # Copyright 2015 Criteo. All rights reserved. # # The contents of this file are licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import re
import argparse import yaml from ciscoconfparse import CiscoConfParse def cli_parser(argv=None): parser = argparse.ArgumentParser( description='Generating configuration commands by finding differences' ' between two Cisco IOS style configuration files') parser.add_argument('--origin', metavar='origin', type=str, help='Origin configuration file') parser.add_argument('--target', metavar='target', type=str, help='Target configuration file') parser.add_argument('--vendor', help='Vendor or OS definition', type=str, metavar='vendor') parser.add_argument('--config', metavar='config', type=str, help='config file name', default='etc/netcompare.yml') return parser.parse_args(argv) def clean_line(line, vendor): cleaned_lines = [] if vendor == 'tmsh': # Remove text after a # (because CiscoConfParse crashes if there is a # bracket in a comment) remove_comment = re.search('(?P<before_comment>[^\#]*)\#', line) if remove_comment: line = remove_comment.group('before_comment') # match " begin } end" tmsh_curly_bracket_left = re.search( '^(?P<space>\s*)(?P<begin>.*)' '(?P<bracket>[\}\{])(?' 'P<end>[^\}\{]*)$', line) if tmsh_curly_bracket_left: # replace # " begin } end" # by # " begin } # end cleaned_lines = clean_line(tmsh_curly_bracket_left. group('begin'), vendor) cleaned_lines.append(tmsh_curly_bracket_left.group('bracket')) cleaned_lines.append(tmsh_curly_bracket_left.group('end'). rstrip(' \t\r\n\0')) else: cleaned_lines.append(line.rstrip(' \t\r\n\0')) else: cleaned_lines.append(line.rstrip(' \t\r\n\0')) return cleaned_lines def clean_file(file, vendor, config): with open(file) as file_opened: lines = file_opened.readlines() list_clean = [] try: config[vendor]['dont_compare'] for line in lines: for dont_compare in config[vendor]['dont_compare']: if dont_compare in line: break else: list_clean = (list_clean + clean_line(line, vendor)) return list_clean except KeyError: for line in lines: list_clean = (list_clean + clean_line(line, vendor)) return list_clean def get_one_line(line, vendor, config): if line[0] == 'NO': line_text_no = re.match("^(\s*)" + config[vendor]['no_command'] + " (.*)", line[1]) if line_text_no: cmd = (line_text_no.group(1) + line_text_no.group(2)) else: line_text_without_no = re.match("^(\s*)(.*)", line[1]) cmd = (line_text_without_no.group(1) + config[vendor]['no_command'] + " " + line_text_without_no.group(2)) return cmd else: return line[1] def get_diff_lines(d, vendor, config, depth=0): result = [] for k, v in sorted(d.items(), key=lambda x: x[0]): result.append(get_one_line(k, vendor, config)) result.extend(get_diff_lines(v, vendor, config, depth+1)) return result def netcompare(origin, target, vendor, config): origin_file = CiscoConfParse(origin, comment=config[vendor] ['CiscoConfParse_comment'], syntax=config[vendor] ['CiscoConfParse_syntax'], factory=False) target_file = CiscoConfParse(target, comment=config[vendor] ['CiscoConfParse_comment'], syntax=config[vendor] ['CiscoConfParse_syntax'], factory=False) result = {} for line_origin in origin_file.objs: eq_lines = (target_file.find_objects( '^' + re.escape(line_origin.text) + '$')) for line_target in eq_lines: if line_origin.geneology_text == line_target.geneology_text: break else: # Delete needed pointer = result index = len(line_origin.geneology_text) for cmd in line_origin.geneology_text: index = index - 1 if ('NO', cmd) in pointer: break if ('_CR', cmd) in pointer: pointer = pointer.get(('_CR', cmd)) elif index == 0: pointer[('NO', cmd)] = {} pointer = pointer.get(('NO', cmd)) else: pointer[('_CR', cmd)] = {} pointer = 
pointer.get(('_CR', cmd)) for line_target in target_file.objs: find = 0 eq_lines = (origin_file.find_objects( '^' + re.escape(line_target.text) + '$')) for line_origin in eq_lines: if line_origin.geneology_text == line_target.geneology_text: find = 1 if find == 0: # Create needed pointer = result for cmd in line_target.geneology_text: if not ('_CR', cmd) in pointer: pointer[('_CR', cmd)] = {} pointer = pointer.get(('_CR', cmd)) return result def main(argv=None): args = cli_parser(argv) with open(args.config, 'r') as f: config = yaml.safe_load(f) origin_list = clean_file(args.origin, args.vendor, config) target_list = clean_file(args.target, args.vendor, config) display_commands = netcompare(origin_list, target_list, args.vendor, config) result = get_diff_lines(display_commands, args.vendor, config) for line in result: print(line) if __name__ == '__main__': main()
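The negation logic in get_one_line is easiest to see on a toy config (hypothetical minimal vendor entry; the real definitions live in etc/netcompare.yml):

config = {'ios': {'no_command': 'no'}}  # hypothetical vendor definition

# Lines flagged 'NO' must be removed from the device: prefix them with the
# vendor's negation keyword, or strip an existing one ("no shutdown" -> "shutdown").
print(get_one_line(('NO', ' ip routing'), 'ios', config))      # ' no ip routing'
print(get_one_line(('NO', ' no shutdown'), 'ios', config))     # ' shutdown'
# Lines flagged '_CR' must be created, so they are emitted as-is:
print(get_one_line(('_CR', 'interface Gi0/1'), 'ios', config)) # 'interface Gi0/1'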
clientset_generated.go
/* Copyright 2021 TriggerMesh Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by client-gen. DO NOT EDIT. package fake import ( clientset "github.com/triggermesh/triggermesh/pkg/client/generated/clientset/internalclientset" extensionsv1alpha1 "github.com/triggermesh/triggermesh/pkg/client/generated/clientset/internalclientset/typed/extensions/v1alpha1" fakeextensionsv1alpha1 "github.com/triggermesh/triggermesh/pkg/client/generated/clientset/internalclientset/typed/extensions/v1alpha1/fake" flowv1alpha1 "github.com/triggermesh/triggermesh/pkg/client/generated/clientset/internalclientset/typed/flow/v1alpha1" fakeflowv1alpha1 "github.com/triggermesh/triggermesh/pkg/client/generated/clientset/internalclientset/typed/flow/v1alpha1/fake" routingv1alpha1 "github.com/triggermesh/triggermesh/pkg/client/generated/clientset/internalclientset/typed/routing/v1alpha1" fakeroutingv1alpha1 "github.com/triggermesh/triggermesh/pkg/client/generated/clientset/internalclientset/typed/routing/v1alpha1/fake" sourcesv1alpha1 "github.com/triggermesh/triggermesh/pkg/client/generated/clientset/internalclientset/typed/sources/v1alpha1" fakesourcesv1alpha1 "github.com/triggermesh/triggermesh/pkg/client/generated/clientset/internalclientset/typed/sources/v1alpha1/fake" targetsv1alpha1 "github.com/triggermesh/triggermesh/pkg/client/generated/clientset/internalclientset/typed/targets/v1alpha1" faketargetsv1alpha1 "github.com/triggermesh/triggermesh/pkg/client/generated/clientset/internalclientset/typed/targets/v1alpha1/fake" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" fakediscovery "k8s.io/client-go/discovery/fake" "k8s.io/client-go/testing" ) // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, // without applying any validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { if err := o.Add(obj); err != nil { panic(err) } } cs := &Clientset{tracker: o} cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} cs.AddReactor("*", "*", testing.ObjectReaction(o)) cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { gvr := action.GetResource() ns := action.GetNamespace() watch, err := o.Watch(gvr, ns) if err != nil
{ return false, nil, err } return true, watch, nil }) return cs } // Clientset implements clientset.Interface. Meant to be embedded into a // struct to get a default implementation. This makes faking out just the method // you want to test easier. type Clientset struct { testing.Fake discovery *fakediscovery.FakeDiscovery tracker testing.ObjectTracker } func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.discovery } func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } var ( _ clientset.Interface = &Clientset{} _ testing.FakeClient = &Clientset{} ) // ExtensionsV1alpha1 retrieves the ExtensionsV1alpha1Client func (c *Clientset) ExtensionsV1alpha1() extensionsv1alpha1.ExtensionsV1alpha1Interface { return &fakeextensionsv1alpha1.FakeExtensionsV1alpha1{Fake: &c.Fake} } // FlowV1alpha1 retrieves the FlowV1alpha1Client func (c *Clientset) FlowV1alpha1() flowv1alpha1.FlowV1alpha1Interface { return &fakeflowv1alpha1.FakeFlowV1alpha1{Fake: &c.Fake} } // RoutingV1alpha1 retrieves the RoutingV1alpha1Client func (c *Clientset) RoutingV1alpha1() routingv1alpha1.RoutingV1alpha1Interface { return &fakeroutingv1alpha1.FakeRoutingV1alpha1{Fake: &c.Fake} } // SourcesV1alpha1 retrieves the SourcesV1alpha1Client func (c *Clientset) SourcesV1alpha1() sourcesv1alpha1.SourcesV1alpha1Interface { return &fakesourcesv1alpha1.FakeSourcesV1alpha1{Fake: &c.Fake} } // TargetsV1alpha1 retrieves the TargetsV1alpha1Client func (c *Clientset) TargetsV1alpha1() targetsv1alpha1.TargetsV1alpha1Interface { return &faketargetsv1alpha1.FakeTargetsV1alpha1{Fake: &c.Fake} }
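// Example (illustrative sketch, not generated code): a typical unit-test use
// of this fake clientset. The reactor, error message, and test name below are
// assumptions for demonstration; only NewSimpleClientset, PrependReactor (from
// the embedded testing.Fake), and Tracker come from the code above.
//
//	import (
//	    "errors"
//	    "testing"
//
//	    "k8s.io/apimachinery/pkg/runtime"
//	    k8stesting "k8s.io/client-go/testing"
//	)
//
//	func TestInjectedCreateFailure(t *testing.T) {
//	    cs := NewSimpleClientset() // optionally seed runtime.Objects here
//	    // Prepend a reactor so "create" actions fail before reaching the tracker.
//	    cs.PrependReactor("create", "*", func(a k8stesting.Action) (bool, runtime.Object, error) {
//	        return true, nil, errors.New("injected create failure")
//	    })
//	    if cs.Tracker() == nil {
//	        t.Fatal("tracker should be initialized by NewSimpleClientset")
//	    }
//	}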
query.go
// Copyright (c) 2018 Northwestern Mutual. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package manager import ( "github.com/melonwool/grammes/gremconnect" "github.com/melonwool/grammes/gremerror" "github.com/melonwool/grammes/logging" "github.com/melonwool/grammes/query" ) // queryManager handles the querying actions to the server. type queryManager struct { dialer gremconnect.Dialer logger logging.Logger executeRequest executor } // newQueryManager returns a new query manager that // implements the QueryManager interface. func newQueryManager(dialer gremconnect.Dialer, logger logging.Logger, executor executor) *queryManager
{ return &queryManager{ dialer: dialer, logger: logger, executeRequest: executor, } } func (m *queryManager) setLogger(newLogger logging.Logger) { m.logger = newLogger } // ExecuteQuery takes a query object to form a // request to the gremlin server after turning it // into a string. func (m *queryManager) ExecuteQuery(query query.Query) ([][]byte, error) { return m.ExecuteBoundStringQuery(query.String(), map[string]string{}, map[string]string{}) } // ExecuteStringQuery takes a string query and // uses it to make a request to the gremlin server. func (m *queryManager) ExecuteStringQuery(query string) ([][]byte, error) { return m.ExecuteBoundStringQuery(query, map[string]string{}, map[string]string{}) } // Query Bindings: // https://www.codeigniter.com/userguide3/database/queries.html#query-bindings // ExecuteBoundQuery takes a query object and bindings to allow // for simplified queries to the gremlin server. func (m *queryManager) ExecuteBoundQuery(query query.Query, bindings, rebindings map[string]string) ([][]byte, error) { return m.ExecuteBoundStringQuery(query.String(), bindings, rebindings) } // ExecuteBoundStringQuery uses bindings and rebindings to allow // for simplified queries to the gremlin server. func (m *queryManager) ExecuteBoundStringQuery(query string, bindings, rebindings map[string]string) ([][]byte, error) { if m.dialer.IsDisposed() { return nil, gremerror.ErrDisposedConnection } // log the command that will be executed. m.logger.PrintQuery(query) return m.executeRequest(query, bindings, rebindings) }
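// Example (hedged sketch): how a bound string query flows through this
// manager. In real use the dialer, logger, and executor are wired up by the
// public grammes client; the stand-in executor, the Gremlin string, and the
// binding name "x" are assumptions for illustration only.
//
//	qm := newQueryManager(dialer, logger, func(q string, b, rb map[string]string) ([][]byte, error) {
//	    // A real executor serializes the request and writes it to the websocket.
//	    return nil, nil
//	})
//	res, err := qm.ExecuteBoundStringQuery(
//	    "g.V().has('name', x)",
//	    map[string]string{"x": "grammes"}, // bindings, substituted server-side
//	    map[string]string{},               // rebindings
//	)
//
// Passing values through bindings keeps user data out of the query string
// itself, much like parameterized SQL.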
clientset_generated.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package fake import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" fakediscovery "k8s.io/client-go/discovery/fake" clientset "k8s.io/client-go/kubernetes" admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" fakeadmissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" fakeappsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1/fake" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" fakeappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2/fake" authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" fakeauthenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1/fake" authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" fakeauthenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1/fake" authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" fakeauthorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1/fake" authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" fakeauthorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1/fake" autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" fakeautoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1/fake" autoscalingv2alpha1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1" fakeautoscalingv2alpha1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2alpha1/fake" batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" fakebatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1/fake" batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" fakebatchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1/fake" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" fakecertificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1/fake" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" fakecorev1 "k8s.io/client-go/kubernetes/typed/core/v1/fake" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" fakeextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1/fake" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" fakenetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1/fake" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" fakepolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" fakerbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/fake" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" fakerbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1/fake" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" fakeschedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/fake" 
settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" fakesettingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1/fake" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" fakestoragev1 "k8s.io/client-go/kubernetes/typed/storage/v1/fake" storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" fakestoragev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake" "k8s.io/client-go/testing" ) // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, // without applying any validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { if err := o.Add(obj); err != nil { panic(err) } } fakePtr := testing.Fake{} fakePtr.AddReactor("*", "*", testing.ObjectReaction(o)) fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil)) return &Clientset{fakePtr} } // Clientset implements clientset.Interface. Meant to be embedded into a // struct to get a default implementation. This makes faking out just the method // you want to test easier. type Clientset struct { testing.Fake } func (c *Clientset) Discovery() discovery.DiscoveryInterface { return &fakediscovery.FakeDiscovery{Fake: &c.Fake} } var _ clientset.Interface = &Clientset{} // AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { return &fakeadmissionregistrationv1alpha1.FakeAdmissionregistrationV1alpha1{Fake: &c.Fake} } // Admissionregistration retrieves the AdmissionregistrationV1alpha1Client func (c *Clientset) Admissionregistration() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { return &fakeadmissionregistrationv1alpha1.FakeAdmissionregistrationV1alpha1{Fake: &c.Fake} } // AppsV1beta1 retrieves the AppsV1beta1Client func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface { return &fakeappsv1beta1.FakeAppsV1beta1{Fake: &c.Fake} } // AppsV1beta2 retrieves the AppsV1beta2Client func (c *Clientset) AppsV1beta2() appsv1beta2.AppsV1beta2Interface { return &fakeappsv1beta2.FakeAppsV1beta2{Fake: &c.Fake} } // Apps retrieves the AppsV1beta2Client func (c *Clientset) Apps() appsv1beta2.AppsV1beta2Interface { return &fakeappsv1beta2.FakeAppsV1beta2{Fake: &c.Fake} } // AuthenticationV1 retrieves the AuthenticationV1Client func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { return &fakeauthenticationv1.FakeAuthenticationV1{Fake: &c.Fake} } // Authentication retrieves the AuthenticationV1Client func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface { return &fakeauthenticationv1.FakeAuthenticationV1{Fake: &c.Fake} } // AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { return &fakeauthenticationv1beta1.FakeAuthenticationV1beta1{Fake: &c.Fake} } // AuthorizationV1 retrieves the AuthorizationV1Client func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { return &fakeauthorizationv1.FakeAuthorizationV1{Fake: &c.Fake} } // Authorization retrieves the 
AuthorizationV1Client func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface { return &fakeauthorizationv1.FakeAuthorizationV1{Fake: &c.Fake} } // AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { return &fakeauthorizationv1beta1.FakeAuthorizationV1beta1{Fake: &c.Fake} } // AutoscalingV1 retrieves the AutoscalingV1Client func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { return &fakeautoscalingv1.FakeAutoscalingV1{Fake: &c.Fake} } // Autoscaling retrieves the AutoscalingV1Client func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { return &fakeautoscalingv1.FakeAutoscalingV1{Fake: &c.Fake} } // AutoscalingV2alpha1 retrieves the AutoscalingV2alpha1Client func (c *Clientset) AutoscalingV2alpha1() autoscalingv2alpha1.AutoscalingV2alpha1Interface { return &fakeautoscalingv2alpha1.FakeAutoscalingV2alpha1{Fake: &c.Fake} } // BatchV1 retrieves the BatchV1Client func (c *Clientset) BatchV1() batchv1.BatchV1Interface { return &fakebatchv1.FakeBatchV1{Fake: &c.Fake} } // Batch retrieves the BatchV1Client func (c *Clientset) Batch() batchv1.BatchV1Interface { return &fakebatchv1.FakeBatchV1{Fake: &c.Fake} } // BatchV2alpha1 retrieves the BatchV2alpha1Client func (c *Clientset) BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface { return &fakebatchv2alpha1.FakeBatchV2alpha1{Fake: &c.Fake} } // CertificatesV1beta1 retrieves the CertificatesV1beta1Client func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface { return &fakecertificatesv1beta1.FakeCertificatesV1beta1{Fake: &c.Fake} } // Certificates retrieves the CertificatesV1beta1Client func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface { return &fakecertificatesv1beta1.FakeCertificatesV1beta1{Fake: &c.Fake} } // CoreV1 retrieves the CoreV1Client func (c *Clientset) CoreV1() corev1.CoreV1Interface { return &fakecorev1.FakeCoreV1{Fake: &c.Fake}
} // Core retrieves the CoreV1Client func (c *Clientset) Core() corev1.CoreV1Interface { return &fakecorev1.FakeCoreV1{Fake: &c.Fake} } // ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake} } // Extensions retrieves the ExtensionsV1beta1Client func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { return &fakeextensionsv1beta1.FakeExtensionsV1beta1{Fake: &c.Fake} } // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake} } // Networking retrieves the NetworkingV1Client func (c *Clientset) Networking() networkingv1.NetworkingV1Interface { return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake} } // PolicyV1beta1 retrieves the PolicyV1beta1Client func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { return &fakepolicyv1beta1.FakePolicyV1beta1{Fake: &c.Fake} } // Policy retrieves the PolicyV1beta1Client func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { return &fakepolicyv1beta1.FakePolicyV1beta1{Fake: &c.Fake} } // RbacV1beta1 retrieves the RbacV1beta1Client func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { return &fakerbacv1beta1.FakeRbacV1beta1{Fake: &c.Fake} } // Rbac retrieves the RbacV1beta1Client func (c *Clientset) Rbac() rbacv1beta1.RbacV1beta1Interface { return &fakerbacv1beta1.FakeRbacV1beta1{Fake: &c.Fake} } // RbacV1alpha1 retrieves the RbacV1alpha1Client func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { return &fakerbacv1alpha1.FakeRbacV1alpha1{Fake: &c.Fake} } // SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { return &fakeschedulingv1alpha1.FakeSchedulingV1alpha1{Fake: &c.Fake} } // Scheduling retrieves the SchedulingV1alpha1Client func (c *Clientset) Scheduling() schedulingv1alpha1.SchedulingV1alpha1Interface { return &fakeschedulingv1alpha1.FakeSchedulingV1alpha1{Fake: &c.Fake} } // SettingsV1alpha1 retrieves the SettingsV1alpha1Client func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { return &fakesettingsv1alpha1.FakeSettingsV1alpha1{Fake: &c.Fake} } // Settings retrieves the SettingsV1alpha1Client func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface { return &fakesettingsv1alpha1.FakeSettingsV1alpha1{Fake: &c.Fake} } // StorageV1beta1 retrieves the StorageV1beta1Client func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { return &fakestoragev1beta1.FakeStorageV1beta1{Fake: &c.Fake} } // StorageV1 retrieves the StorageV1Client func (c *Clientset) StorageV1() storagev1.StorageV1Interface { return &fakestoragev1.FakeStorageV1{Fake: &c.Fake} } // Storage retrieves the StorageV1Client func (c *Clientset) Storage() storagev1.StorageV1Interface { return &fakestoragev1.FakeStorageV1{Fake: &c.Fake} }
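// Note (hedged observation): unlike tracker-backed fakes generated by newer
// client-gen (such as the TriggerMesh one above), this clientset registers
// testing.DefaultWatchReactor with a static watch.NewFake(), so watch events
// are not derived from tracker mutations; tests must emit them by hand. A
// minimal sketch, where the watched object is an assumption for illustration:
//
//	fw := watch.NewFake()
//	cs := NewSimpleClientset()
//	cs.PrependWatchReactor("*", testing.DefaultWatchReactor(fw, nil))
//	go fw.Add(someRuntimeObject) // e.g. a *v1.Pod; hypothetical variable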
check_match.rs
use super::_match::{MatchCheckCtxt, Matrix, expand_pattern, is_useful}; use super::_match::Usefulness::*; use super::_match::WitnessPreference::*; use super::{Pattern, PatternContext, PatternError, PatternKind}; use rustc::middle::expr_use_visitor::{ConsumeMode, Delegate, ExprUseVisitor}; use rustc::middle::expr_use_visitor::{LoanCause, MutateMode}; use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization::cmt_; use rustc::middle::region; use rustc::session::Session; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::subst::Substs; use rustc::lint; use rustc_errors::{Applicability, DiagnosticBuilder}; use rustc::util::common::ErrorReported; use rustc::hir::def::*; use rustc::hir::def_id::DefId; use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; use rustc::hir::{self, Pat, PatKind}; use smallvec::smallvec; use std::slice; use syntax::ast; use syntax::ptr::P; use syntax_pos::{Span, DUMMY_SP, MultiSpan}; struct OuterVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx> } impl<'a, 'tcx> Visitor<'tcx> for OuterVisitor<'a, 'tcx> { fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { NestedVisitorMap::OnlyBodies(&self.tcx.hir()) } fn visit_body(&mut self, body: &'tcx hir::Body) { intravisit::walk_body(self, body); let def_id = self.tcx.hir().body_owner_def_id(body.id()); let _ = self.tcx.check_match(def_id); } } pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { tcx.hir().krate().visit_all_item_likes(&mut OuterVisitor { tcx }.as_deep_visitor()); tcx.sess.abort_if_errors(); } pub(crate) fn check_match<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, ) -> Result<(), ErrorReported> { let body_id = if let Some(id) = tcx.hir().as_local_node_id(def_id) { tcx.hir().body_owned_by(id) } else { return Ok(()); }; tcx.sess.track_errors(|| { MatchVisitor { tcx, tables: tcx.body_tables(body_id), region_scope_tree: &tcx.region_scope_tree(def_id), param_env: tcx.param_env(def_id), identity_substs: Substs::identity_for_item(tcx, def_id), }.visit_body(tcx.hir().body(body_id)); }) } fn create_e0004<'a>(sess: &'a Session, sp: Span, error_message: String) -> DiagnosticBuilder<'a> { struct_span_err!(sess, sp, E0004, "{}", &error_message) } struct MatchVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, tables: &'a ty::TypeckTables<'tcx>, param_env: ty::ParamEnv<'tcx>, identity_substs: &'tcx Substs<'tcx>, region_scope_tree: &'a region::ScopeTree, } impl<'a, 'tcx> Visitor<'tcx> for MatchVisitor<'a, 'tcx> { fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { NestedVisitorMap::None } fn visit_expr(&mut self, ex: &'tcx hir::Expr) { intravisit::walk_expr(self, ex); match ex.node { hir::ExprKind::Match(ref scrut, ref arms, source) => { self.check_match(scrut, arms, source); } _ => {} } } fn visit_local(&mut self, loc: &'tcx hir::Local) { intravisit::walk_local(self, loc); self.check_irrefutable(&loc.pat, match loc.source { hir::LocalSource::Normal => "local binding", hir::LocalSource::ForLoopDesugar => "`for` loop binding", }); // Check legality of move bindings and `@` patterns. 
self.check_patterns(false, slice::from_ref(&loc.pat)); } fn visit_body(&mut self, body: &'tcx hir::Body) { intravisit::walk_body(self, body); for arg in &body.arguments { self.check_irrefutable(&arg.pat, "function argument"); self.check_patterns(false, slice::from_ref(&arg.pat)); } } } impl<'a, 'tcx> PatternContext<'a, 'tcx> { fn report_inlining_errors(&self, pat_span: Span) { for error in &self.errors { match *error { PatternError::StaticInPattern(span) => { self.span_e0158(span, "statics cannot be referenced in patterns") } PatternError::AssociatedConstInPattern(span) => { self.span_e0158(span, "associated consts cannot be referenced in patterns") } PatternError::FloatBug => { // FIXME(#31407) this is only necessary because float parsing is buggy ::rustc::mir::interpret::struct_error( self.tcx.at(pat_span), "could not evaluate float literal (see issue #31407)", ).emit(); } PatternError::NonConstPath(span) => { ::rustc::mir::interpret::struct_error( self.tcx.at(span), "runtime values cannot be referenced in patterns", ).emit(); } } } } fn span_e0158(&self, span: Span, text: &str) { span_err!(self.tcx.sess, span, E0158, "{}", text) } } impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { fn check_patterns(&self, has_guard: bool, pats: &[P<Pat>]) { check_legality_of_move_bindings(self, has_guard, pats); for pat in pats { check_legality_of_bindings_in_at_patterns(self, pat); } } fn check_match( &self, scrut: &hir::Expr, arms: &'tcx [hir::Arm], source: hir::MatchSource) { for arm in arms { // First, check legality of move bindings. self.check_patterns(arm.guard.is_some(), &arm.pats); // Second, if there is a guard on each arm, make sure it isn't // assigning or borrowing anything mutably. if let Some(ref guard) = arm.guard { if self.tcx.check_for_mutation_in_guard_via_ast_walk() { check_for_mutation_in_guard(self, &guard); } } // Third, perform some lints. for pat in &arm.pats { check_for_bindings_named_same_as_variants(self, pat); } } let module = self.tcx.hir().get_module_parent(scrut.id); MatchCheckCtxt::create_and_enter(self.tcx, self.param_env, module, |ref mut cx| { let mut have_errors = false; let inlined_arms : Vec<(Vec<_>, _)> = arms.iter().map(|arm| ( arm.pats.iter().map(|pat| { let mut patcx = PatternContext::new(self.tcx, self.param_env.and(self.identity_substs), self.tables); let pattern = expand_pattern(cx, patcx.lower_pattern(&pat)); if !patcx.errors.is_empty() { patcx.report_inlining_errors(pat.span); have_errors = true; } (pattern, &**pat) }).collect(), arm.guard.as_ref().map(|g| match g { hir::Guard::If(ref e) => &**e, }) )).collect(); // Bail out early if inlining failed. if have_errors { return; } // Fourth, check for unreachable arms. check_arms(cx, &inlined_arms, source); // Then, if the match has no arms, check whether the scrutinee // is uninhabited. 
let pat_ty = self.tables.node_id_to_type(scrut.hir_id); let module = self.tcx.hir().get_module_parent(scrut.id); if inlined_arms.is_empty() { let scrutinee_is_uninhabited = if self.tcx.features().exhaustive_patterns { self.tcx.is_ty_uninhabited_from(module, pat_ty) } else { match pat_ty.sty { ty::Never => true, ty::Adt(def, _) => def.variants.is_empty(), _ => false } }; if !scrutinee_is_uninhabited { // We know the type is inhabited, so this must be wrong let mut err = create_e0004(self.tcx.sess, scrut.span, format!("non-exhaustive patterns: type `{}` \ is non-empty", pat_ty)); span_help!(&mut err, scrut.span, "ensure that all possible cases are being handled, \ possibly by adding wildcards or more match arms"); err.emit(); } // If the type *is* uninhabited, it's vacuously exhaustive return; } let matrix: Matrix = inlined_arms .iter() .filter(|&&(_, guard)| guard.is_none()) .flat_map(|arm| &arm.0) .map(|pat| smallvec![pat.0]) .collect(); let scrut_ty = self.tables.node_id_to_type(scrut.hir_id); check_exhaustive(cx, scrut_ty, scrut.span, &matrix); }) } fn check_irrefutable(&self, pat: &'tcx Pat, origin: &str) { let module = self.tcx.hir().get_module_parent(pat.id); MatchCheckCtxt::create_and_enter(self.tcx, self.param_env, module, |ref mut cx| { let mut patcx = PatternContext::new(self.tcx, self.param_env.and(self.identity_substs), self.tables); let pattern = patcx.lower_pattern(pat); let pattern_ty = pattern.ty; let pats: Matrix = vec![smallvec![ expand_pattern(cx, pattern) ]].into_iter().collect(); let wild_pattern = Pattern { ty: pattern_ty, span: DUMMY_SP, kind: box PatternKind::Wild, }; let witness = match is_useful(cx, &pats, &[&wild_pattern], ConstructWitness) { UsefulWithWitness(witness) => witness, NotUseful => return, Useful => bug!() }; let pattern_string = witness[0].single_pattern().to_string(); let mut diag = struct_span_err!( self.tcx.sess, pat.span, E0005, "refutable pattern in {}: `{}` not covered", origin, pattern_string ); let label_msg = match pat.node { PatKind::Path(hir::QPath::Resolved(None, ref path)) if path.segments.len() == 1 && path.segments[0].args.is_none() => { format!("interpreted as {} {} pattern, not new variable", path.def.article(), path.def.kind_name()) } _ => format!("pattern `{}` not covered", pattern_string), }; diag.span_label(pat.span, label_msg); diag.emit(); }); } } fn check_for_bindings_named_same_as_variants(cx: &MatchVisitor, pat: &Pat) { pat.walk(|p| { if let PatKind::Binding(_, _, ident, None) = p.node { if let Some(&bm) = cx.tables.pat_binding_modes().get(p.hir_id) { if bm != ty::BindByValue(hir::MutImmutable) { // Nothing to check. return true; } let pat_ty = cx.tables.pat_ty(p); if let ty::Adt(edef, _) = pat_ty.sty { if edef.is_enum() && edef.variants.iter().any(|variant| { variant.ident == ident && variant.ctor_kind == CtorKind::Const }) { let ty_path = cx.tcx.item_path_str(edef.did); let mut err = struct_span_warn!(cx.tcx.sess, p.span, E0170, "pattern binding `{}` is named the same as one \ of the variants of the type `{}`", ident, ty_path); err.span_suggestion_with_applicability( p.span, "to match on the variant, qualify the path", format!("{}::{}", ty_path, ident), Applicability::MachineApplicable ); err.emit(); } } } else { cx.tcx.sess.delay_span_bug(p.span, "missing binding mode"); } } true }); } /// Checks for common cases of "catchall" patterns that may not be intended as such. 
fn pat_is_catchall(pat: &Pat) -> bool { match pat.node { PatKind::Binding(.., None) => true, PatKind::Binding(.., Some(ref s)) => pat_is_catchall(s), PatKind::Ref(ref s, _) => pat_is_catchall(s), PatKind::Tuple(ref v, _) => v.iter().all(|p| { pat_is_catchall(&p) }), _ => false } } // Check for unreachable patterns fn check_arms<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, arms: &[(Vec<(&'a Pattern<'tcx>, &hir::Pat)>, Option<&hir::Expr>)], source: hir::MatchSource) { let mut seen = Matrix::empty(); let mut catchall = None; let mut printed_if_let_err = false; for (arm_index, &(ref pats, guard)) in arms.iter().enumerate() { for &(pat, hir_pat) in pats { let v = smallvec![pat]; match is_useful(cx, &seen, &v, LeaveOutWitness) { NotUseful => { match source { hir::MatchSource::IfLetDesugar { .. } => { if cx.tcx.features().irrefutable_let_patterns { cx.tcx.lint_node( lint::builtin::IRREFUTABLE_LET_PATTERNS, hir_pat.id, pat.span, "irrefutable if-let pattern"); } else { if printed_if_let_err { // we already printed an irrefutable if-let pattern error. // We don't want two, that's just confusing. } else { // find the first arm pattern so we can use its span let &(ref first_arm_pats, _) = &arms[0]; let first_pat = &first_arm_pats[0]; let span = first_pat.0.span; struct_span_err!(cx.tcx.sess, span, E0162, "irrefutable if-let pattern") .span_label(span, "irrefutable pattern") .emit(); printed_if_let_err = true; } } } hir::MatchSource::WhileLetDesugar => { // check which arm we're on. match arm_index { // The arm with the user-specified pattern. 0 => { cx.tcx.lint_node( lint::builtin::UNREACHABLE_PATTERNS, hir_pat.id, pat.span, "unreachable pattern"); }, // The arm with the wildcard pattern. 1 => { if cx.tcx.features().irrefutable_let_patterns { cx.tcx.lint_node( lint::builtin::IRREFUTABLE_LET_PATTERNS, hir_pat.id, pat.span, "irrefutable while-let pattern"); } else { // find the first arm pattern so we can use its span let &(ref first_arm_pats, _) = &arms[0]; let first_pat = &first_arm_pats[0]; let span = first_pat.0.span; struct_span_err!(cx.tcx.sess, span, E0165, "irrefutable while-let pattern") .span_label(span, "irrefutable pattern") .emit(); } }, _ => bug!(), } }, hir::MatchSource::ForLoopDesugar | hir::MatchSource::Normal => { let mut err = cx.tcx.struct_span_lint_node( lint::builtin::UNREACHABLE_PATTERNS, hir_pat.id, pat.span, "unreachable pattern", ); // if we had a catchall pattern, hint at that if let Some(catchall) = catchall { err.span_label(pat.span, "unreachable pattern"); err.span_label(catchall, "matches any value"); } err.emit(); }, // Unreachable patterns in try expressions occur when one of the arms // are an uninhabited type. Which is OK. 
hir::MatchSource::TryDesugar => {} } } Useful => (), UsefulWithWitness(_) => bug!() } if guard.is_none() { seen.push(v); if catchall.is_none() && pat_is_catchall(hir_pat) { catchall = Some(pat.span); } } } } } fn check_exhaustive<'p, 'a: 'p, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, scrut_ty: Ty<'tcx>, sp: Span, matrix: &Matrix<'p, 'tcx>) { let wild_pattern = Pattern { ty: scrut_ty, span: DUMMY_SP, kind: box PatternKind::Wild, }; match is_useful(cx, matrix, &[&wild_pattern], ConstructWitness) { UsefulWithWitness(pats) => { let witnesses = if pats.is_empty() { vec![&wild_pattern] } else { pats.iter().map(|w| w.single_pattern()).collect() }; const LIMIT: usize = 3; let joined_patterns = match witnesses.len() { 0 => bug!(), 1 => format!("`{}`", witnesses[0]), 2..=LIMIT => { let (tail, head) = witnesses.split_last().unwrap(); let head: Vec<_> = head.iter().map(|w| w.to_string()).collect(); format!("`{}` and `{}`", head.join("`, `"), tail) }, _ => { let (head, tail) = witnesses.split_at(LIMIT); let head: Vec<_> = head.iter().map(|w| w.to_string()).collect(); format!("`{}` and {} more", head.join("`, `"), tail.len()) } }; let label_text = match witnesses.len() { 1 => format!("pattern {} not covered", joined_patterns), _ => format!("patterns {} not covered", joined_patterns) }; create_e0004(cx.tcx.sess, sp, format!("non-exhaustive patterns: {} not covered", joined_patterns)) .span_label(sp, label_text) .emit(); } NotUseful => { // This is good, wildcard pattern isn't reachable }, _ => bug!() } } // Legality of move bindings checking fn check_legality_of_move_bindings(cx: &MatchVisitor, has_guard: bool, pats: &[P<Pat>]) { let mut by_ref_span = None; for pat in pats { pat.each_binding(|_, hir_id, span, _path| { if let Some(&bm) = cx.tables.pat_binding_modes().get(hir_id) { if let ty::BindByReference(..) = bm { by_ref_span = Some(span); } } else { cx.tcx.sess.delay_span_bug(pat.span, "missing binding mode"); } }) } let span_vec = &mut Vec::new(); let check_move = |p: &Pat, sub: Option<&Pat>, span_vec: &mut Vec<Span>| { // check legality of moving out of the enum // x @ Foo(..) is legal, but x @ Foo(y) isn't. if sub.map_or(false, |p| p.contains_bindings()) { struct_span_err!(cx.tcx.sess, p.span, E0007, "cannot bind by-move with sub-bindings") .span_label(p.span, "binds an already bound by-move value by moving it") .emit(); } else if has_guard && !cx.tcx.allow_bind_by_move_patterns_with_guards() { let mut err = struct_span_err!(cx.tcx.sess, p.span, E0008, "cannot bind by-move into a pattern guard"); err.span_label(p.span, "moves value into pattern guard"); if cx.tcx.sess.opts.unstable_features.is_nightly_build() && cx.tcx.use_mir_borrowck() { err.help("add #![feature(bind_by_move_pattern_guards)] to the \ crate attributes to enable"); } err.emit(); } else if let Some(_by_ref_span) = by_ref_span { span_vec.push(p.span); } }; for pat in pats { pat.walk(|p| { if let PatKind::Binding(_, _, _, ref sub) = p.node { if let Some(&bm) = cx.tables.pat_binding_modes().get(p.hir_id) { match bm { ty::BindByValue(..) 
=> { let pat_ty = cx.tables.node_id_to_type(p.hir_id); if pat_ty.moves_by_default(cx.tcx, cx.param_env, pat.span) { check_move(p, sub.as_ref().map(|p| &**p), span_vec); } } _ => {} } } else { cx.tcx.sess.delay_span_bug(pat.span, "missing binding mode"); } } true }); } if !span_vec.is_empty(){ let span = MultiSpan::from_spans(span_vec.clone()); let mut err = struct_span_err!( cx.tcx.sess, span, E0009, "cannot bind by-move and by-ref in the same pattern", ); err.span_label(by_ref_span.unwrap(), "both by-ref and by-move used"); for span in span_vec.iter(){ err.span_label(*span, "by-move pattern here"); } err.emit(); } } /// Ensures that a pattern guard doesn't borrow by mutable reference or /// assign. /// /// FIXME: this should be done by borrowck. fn check_for_mutation_in_guard(cx: &MatchVisitor, guard: &hir::Guard) { let mut checker = MutationChecker { cx, }; match guard { hir::Guard::If(expr) => ExprUseVisitor::new(&mut checker, cx.tcx, cx.param_env, cx.region_scope_tree, cx.tables, None).walk_expr(expr), }; } struct MutationChecker<'a, 'tcx: 'a> { cx: &'a MatchVisitor<'a, 'tcx>, } impl<'a, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'tcx> { fn matched_pat(&mut self, _: &Pat, _: &cmt_, _: euv::MatchMode) {} fn consume(&mut self, _: ast::NodeId, _: Span, _: &cmt_, _: ConsumeMode) {} fn consume_pat(&mut self, _: &Pat, _: &cmt_, _: ConsumeMode) {} fn borrow(&mut self, _: ast::NodeId, span: Span, _: &cmt_, _: ty::Region<'tcx>, kind:ty:: BorrowKind, _: LoanCause) { match kind { ty::MutBorrow => { let mut err = struct_span_err!(self.cx.tcx.sess, span, E0301, "cannot mutably borrow in a pattern guard"); err.span_label(span, "borrowed mutably in pattern guard"); if self.cx.tcx.sess.opts.unstable_features.is_nightly_build() && self.cx.tcx.use_mir_borrowck() { err.help("add #![feature(bind_by_move_pattern_guards)] to the \ crate attributes to enable"); } err.emit(); } ty::ImmBorrow | ty::UniqueImmBorrow => {} } } fn decl_without_init(&mut self, _: ast::NodeId, _: Span)
{} fn mutate(&mut self, _: ast::NodeId, span: Span, _: &cmt_, mode: MutateMode) { match mode { MutateMode::JustWrite | MutateMode::WriteAndRead => { struct_span_err!(self.cx.tcx.sess, span, E0302, "cannot assign in a pattern guard") .span_label(span, "assignment in pattern guard") .emit(); } MutateMode::Init => {} } } } /// Forbids bindings in `@` patterns. This is necessary for memory safety, /// because of the way rvalues are handled in the borrow check. (See issue /// #14587.) fn check_legality_of_bindings_in_at_patterns(cx: &MatchVisitor, pat: &Pat) { AtBindingPatternVisitor { cx: cx, bindings_allowed: true }.visit_pat(pat); } struct AtBindingPatternVisitor<'a, 'b:'a, 'tcx:'b> { cx: &'a MatchVisitor<'b, 'tcx>, bindings_allowed: bool } impl<'a, 'b, 'tcx, 'v> Visitor<'v> for AtBindingPatternVisitor<'a, 'b, 'tcx> { fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> { NestedVisitorMap::None } fn visit_pat(&mut self, pat: &Pat) { match pat.node { PatKind::Binding(.., ref subpat) => { if !self.bindings_allowed { struct_span_err!(self.cx.tcx.sess, pat.span, E0303, "pattern bindings are not allowed after an `@`") .span_label(pat.span, "not allowed after `@`") .emit(); } if subpat.is_some() { let bindings_were_allowed = self.bindings_allowed; self.bindings_allowed = false; intravisit::walk_pat(self, pat); self.bindings_allowed = bindings_were_allowed; } } _ => intravisit::walk_pat(self, pat), } } }
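// Example (illustrative, not part of the compiler source): the kind of user
// code this pass rejects via check_irrefutable, since `let` requires an
// irrefutable pattern. The enum and identifiers are assumptions for
// demonstration:
//
//     enum Door { Open(u32), Closed }
//
//     fn f(d: Door) {
//         let Door::Open(width) = d; // error[E0005]: refutable pattern in local binding
//         let _ = width;
//     }
//
// Matching with a `match` or `if let` instead covers the `Closed` case and
// compiles cleanly.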
db_getter.go
package getter import ( "errors" "fmt" "github.com/goharbor/harbor/src/lib/orm" "github.com/goharbor/harbor/src/pkg/joblog" "github.com/goharbor/harbor/src/jobservice/errs" ) // DBGetter is responsible for retrieving DB log data. type DBGetter struct { } // NewDBGetter is the constructor of DBGetter. func NewDBGetter() *DBGetter { return &DBGetter{} } // Retrieve implements @Interface.Retrieve func (dbg *DBGetter) Retrieve(logID string) ([]byte, error) { if len(logID) == 0 { return nil, errors.New("empty log identifier") } jobLog, err := joblog.Mgr.Get(orm.Context(), logID) if err != nil { // Other errors have been ignored by GetJobLog() return nil, errs.NoObjectFoundError(fmt.Sprintf("log entity: %s", logID)) } return []byte(jobLog.Content), nil }
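// Example (hedged): retrieving a persisted job log by ID. The log ID value is
// a placeholder; the error path mirrors what Retrieve returns above, where a
// missing row surfaces as errs.NoObjectFoundError rather than a raw DB error.
//
//	getter := NewDBGetter()
//	data, err := getter.Retrieve("a1b2c3d4") // hypothetical job log ID
//	if err != nil {
//	    return err
//	}
//	fmt.Printf("fetched %d bytes of log content\n", len(data))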
updateHostsFile.py
#!/usr/bin/env python3 # Script by Ben Limmer # https://github.com/l1m5 # # This Python script will combine all the host files you provide # as sources into one, unique host file to keep your internet browsing happy. import argparse import fnmatch import json import locale import os import platform import re import shutil import socket import subprocess import sys import tempfile import time from glob import glob import lxml # noqa: F401 from bs4 import BeautifulSoup # Detecting Python 3 for version-dependent implementations PY3 = sys.version_info >= (3, 0) if PY3: from urllib.request import urlopen else: raise Exception("We do not support Python 2 anymore.") # Syntactic sugar for "sudo" command in UNIX / Linux if platform.system() == "OpenBSD": SUDO = ["/usr/bin/doas"] else: SUDO = ["/usr/bin/env", "sudo"] # Project Settings BASEDIR_PATH = os.path.dirname(os.path.realpath(__file__)) def get_defaults(): """ Helper method for getting the default settings. Returns ------- default_settings : dict A dictionary of the default settings when updating host information. """ return { "numberofrules": 0, "datapath": path_join_robust(BASEDIR_PATH, "data"), "freshen": False, "replace": False, "backup": False, "skipstatichosts": False, "keepdomaincomments": True, "extensionspath": path_join_robust(BASEDIR_PATH, "extensions"), "extensions": [], "compress": False, "minimise": False, "outputsubfolder": "", "hostfilename": "hosts", "targetip": "0.0.0.0", "sourcedatafilename": "update.json", "sourcesdata": [], "readmefilename": "readme.md", "readmetemplate": path_join_robust(BASEDIR_PATH, "readme_template.md"), "readmedata": {}, "readmedatafilename": path_join_robust(BASEDIR_PATH, "readmeData.json"), "exclusionpattern": r"([a-zA-Z\d-]+\.){0,}", "exclusionregexs": [], "exclusions": [], "commonexclusions": [], "blacklistfile": path_join_robust(BASEDIR_PATH, "blacklist"), "whitelistfile": path_join_robust(BASEDIR_PATH, "whitelist"), } # End Project Settings def main(): parser = argparse.ArgumentParser( description="Creates a unified hosts " "file from hosts stored in the data subfolders." ) parser.add_argument( "--auto", "-a", dest="auto", default=True, action="store_true", help="Run without prompting.", ) parser.add_argument( "--backup", "-b", dest="backup", default=False, action="store_true", help="Backup the hosts files before they are overridden.", ) parser.add_argument( "--extensions", "-e", dest="extensions", default=["fakenews", "gambling", "unconv"], nargs="*", help="Host extensions to include in the final hosts file.", ) parser.add_argument( "--ip", "-i", dest="targetip", default="0.0.0.0", help="Target IP address. 
Default is 0.0.0.0.", ) parser.add_argument( "--keepdomaincomments", "-k", dest="keepdomaincomments", action="store_false", default=True, help="Do not keep domain line comments.", ) parser.add_argument( "--noupdate", "-n", dest="noupdate", default=True, action="store_true", help="Don't update from host data sources.", ) parser.add_argument( "--skipstatichosts", "-s", dest="skipstatichosts", default=False, action="store_true", help="Skip static localhost entries in the final hosts file.", ) parser.add_argument( "--nogendata", "-g", dest="nogendata", default=True, action="store_true", help="Skip generation of readmeData.json", ) parser.add_argument( "--output", "-o", dest="outputsubfolder", default="", help="Output subfolder for generated hosts file.", ) parser.add_argument( "--replace", "-r", dest="replace", default=False, action="store_true", help="Replace your active hosts file with this new hosts file.", ) parser.add_argument( "--flush-dns-cache", "-f", dest="flushdnscache", default=False, action="store_true", help="Attempt to flush the DNS cache after replacing the hosts file.", ) parser.add_argument( "--compress", "-c", dest="compress", default=False, action="store_true", help="Compress the hosts file by ignoring unnecessary lines " "(empty lines and comments) and putting multiple domains on " "each line. Improves performance under Windows.", ) parser.add_argument( "--minimise", "-m", dest="minimise", default=True, action="store_true", help="Minimise the hosts file by ignoring unnecessary lines " "(empty lines and comments).", ) parser.add_argument( "--whitelist", "-w", dest="whitelistfile", default=path_join_robust(BASEDIR_PATH, "whitelist"), help="Whitelist file to use while generating hosts files.", ) parser.add_argument( "--blacklist", "-x", dest="blacklistfile", default=path_join_robust(BASEDIR_PATH, "blacklist"), help="Blacklist file to use while generating hosts files.", ) global settings options = vars(parser.parse_args()) options["outputpath"] = path_join_robust(BASEDIR_PATH, options["outputsubfolder"]) options["freshen"] = not options["noupdate"] settings = get_defaults() settings.update(options) data_path = settings["datapath"] extensions_path = settings["extensionspath"] settings["sources"] = list_dir_no_hidden(data_path) settings["extensionsources"] = list_dir_no_hidden(extensions_path) # All our extensions folders... settings["extensions"] = [ os.path.basename(item) for item in list_dir_no_hidden(extensions_path) ] # ... intersected with the extensions passed in as arguments, then sorted. 
settings["extensions"] = sorted( list(set(options["extensions"]).intersection(settings["extensions"])) ) auto = settings["auto"] exclusion_regexes = settings["exclusionregexs"] source_data_filename = settings["sourcedatafilename"] update_sources = prompt_for_update(freshen=settings["freshen"], update_auto=auto) if update_sources: update_all_sources(source_data_filename, settings["hostfilename"]) gather_exclusions = False if gather_exclusions: common_exclusions = settings["commonexclusions"] exclusion_pattern = settings["exclusionpattern"] exclusion_regexes = display_exclusion_options( common_exclusions=common_exclusions, exclusion_pattern=exclusion_pattern, exclusion_regexes=exclusion_regexes, ) extensions = settings["extensions"] sources_data = update_sources_data( settings["sourcesdata"], datapath=data_path, extensions=extensions, extensionspath=extensions_path, sourcedatafilename=source_data_filename, ) merge_file = create_initial_file() remove_old_hosts_file( path_join_robust(settings["outputpath"], "hosts"), settings["backup"] ) if settings["compress"]: final_file = open(path_join_robust(settings["outputpath"], "hosts_unconv"), "w+b") compressed_file = tempfile.NamedTemporaryFile() remove_dups_and_excl(merge_file, exclusion_regexes, compressed_file) compress_file(compressed_file, settings["targetip"], final_file) elif settings["minimise"]: final_file = open(path_join_robust(settings["outputpath"], "hosts_unconv"), "w+b") minimised_file = tempfile.NamedTemporaryFile() remove_dups_and_excl(merge_file, exclusion_regexes, minimised_file) minimise_file(minimised_file, settings["targetip"], final_file) else: final_file = remove_dups_and_excl(merge_file, exclusion_regexes) number_of_rules = settings["numberofrules"] output_subfolder = settings["outputsubfolder"] skip_static_hosts = settings["skipstatichosts"] write_opening_header( final_file, extensions=extensions, numberofrules=number_of_rules, outputsubfolder=output_subfolder, skipstatichosts=skip_static_hosts, ) final_file.close() if not settings["nogendata"]: update_readme_data( settings["readmedatafilename"], extensions=extensions, numberofrules=number_of_rules, outputsubfolder=output_subfolder, sourcesdata=sources_data, ) print_success( "Success! The hosts file has been saved in folder " + output_subfolder + "\nIt contains " + "{:,}".format(number_of_rules) + " unique entries." ) move_file = False # We only flush the DNS cache if we have # moved a new hosts file into place. if move_file: prompt_for_flush_dns_cache( flush_cache=settings["flushdnscache"], prompt_flush=not auto ) # Prompt the User def prompt_for_update(freshen, update_auto): """ Prompt the user to update all hosts files. If requested, the function will update all data sources after it checks that a hosts file does indeed exist. Parameters ---------- freshen : bool Whether data sources should be updated. This function will return if it is requested that data sources not be updated. update_auto : bool Whether or not to automatically update all data sources. Returns ------- update_sources : bool Whether or not we should update data sources for exclusion files. """ # Create a hosts file if it doesn't exist. hosts_file = path_join_robust(BASEDIR_PATH, "hosts_unconv") if not os.path.isfile(hosts_file): try: open(hosts_file, "w+").close() except (IOError, OSError): # Starting in Python 3.3, IOError is aliased # OSError. However, we have to catch both for # Python 2.x failures. print_failure( "ERROR: No 'hosts' file in the folder. Try creating one manually." 
) if not freshen: return prompt = "Do you want to update all data sources?" if update_auto or query_yes_no(prompt): return True elif not update_auto: print("OK, we'll stick with what we've got locally.") return False def prompt_for_exclusions(skip_prompt): """ Prompt the user to exclude any custom domains from being blocked. Parameters ---------- skip_prompt : bool Whether or not to skip prompting for custom domains to be excluded. If true, the function returns immediately. Returns ------- gather_exclusions : bool Whether or not we should proceed to prompt the user to exclude any custom domains beyond those in the whitelist. """ prompt = ( "Do you want to exclude any domains?\n" "For example, hulu.com video streaming must be able to access " "its tracking and ad servers in order to play video." ) if not skip_prompt: if query_yes_no(prompt): return True else: print("OK, we'll only exclude domains in the whitelist.") return False def prompt_for_flush_dns_cache(flush_cache, prompt_flush): """ Prompt the user to flush the DNS cache. Parameters ---------- flush_cache : bool Whether to flush the DNS cache without prompting. prompt_flush : bool If `flush_cache` is False, whether we should prompt for flushing the cache. Otherwise, the function returns immediately. """ if flush_cache: flush_dns_cache() elif prompt_flush: if query_yes_no("Attempt to flush the DNS cache?"): flush_dns_cache() def prompt_for_move(final_file, **move_params): """ Prompt the user to move the newly created hosts file to its designated location in the OS. Parameters ---------- final_file : file The file object that contains the newly created hosts data. move_params : kwargs Dictionary providing additional parameters for moving the hosts file into place. Currently, those fields are: 1) auto 2) replace 3) skipstatichosts Returns ------- move_file : bool Whether or not the final hosts file was moved. """ skip_static_hosts = move_params["skipstatichosts"] if move_params["replace"] and not skip_static_hosts: move_file = True elif move_params["auto"] or skip_static_hosts: move_file = False else: prompt = "Do you want to replace your existing hosts file with the newly generated file?" move_file = query_yes_no(prompt) if move_file: move_hosts_file_into_place(final_file) return move_file # End Prompt the User def sort_sources(sources): """ Sorts the sources. The idea is that all Steven Black's list, file or entries get on top and the rest sorted alphabetically. Parameters ---------- sources: list The sources to sort. """ result = sorted( sources.copy(), key=lambda x: x.lower().replace("-", "").replace("_", "").replace(" ", ""), ) # Steven Black's repositories/files/lists should be on top! steven_black_positions = [ x for x, y in enumerate(result) if "stevenblack" in y.lower() ] for index in steven_black_positions: result.insert(0, result.pop(index)) return result # Exclusion logic def display_exclusion_options(common_exclusions, exclusion_pattern, exclusion_regexes): """ Display the exclusion options to the user. This function checks whether a user wants to exclude particular domains, and if so, excludes them. Parameters ---------- common_exclusions : list A list of common domains that are excluded from being blocked. One example is Hulu. This setting is set directly in the script and cannot be overwritten by the user. exclusion_pattern : str The exclusion pattern with which to create the domain regex. exclusion_regexes : list The list of regex patterns used to exclude domains. 
Returns ------- aug_exclusion_regexes : list The original list of regex patterns potentially with additional patterns from domains that the user chooses to exclude. """ for exclusion_option in common_exclusions: prompt = "Do you want to exclude the domain " + exclusion_option + " ?" if query_yes_no(prompt): exclusion_regexes = exclude_domain( exclusion_option, exclusion_pattern, exclusion_regexes ) else: continue if query_yes_no("Do you want to exclude any other domains?"): exclusion_regexes = gather_custom_exclusions( exclusion_pattern, exclusion_regexes ) return exclusion_regexes def gather_custom_exclusions(exclusion_pattern, exclusion_regexes): """ Gather custom exclusions from the user. Parameters ---------- exclusion_pattern : str The exclusion pattern with which to create the domain regex. exclusion_regexes : list The list of regex patterns used to exclude domains. Returns ------- aug_exclusion_regexes : list The original list of regex patterns potentially with additional patterns from domains that the user chooses to exclude. """ # We continue running this while-loop until the user # says that they have no more domains to exclude. while True: domain_prompt = "Enter the domain you want to exclude (e.g. facebook.com): " user_domain = input(domain_prompt) if is_valid_domain_format(user_domain): exclusion_regexes = exclude_domain( user_domain, exclusion_pattern, exclusion_regexes ) continue_prompt = "Do you have more domains you want to enter?" if not query_yes_no(continue_prompt): break return exclusion_regexes def exclude_domain(domain, exclusion_pattern, exclusion_regexes): """ Exclude a domain from being blocked. This creates the domain regex by which to exclude this domain and appends it a list of already-existing exclusion regexes. Parameters ---------- domain : str The filename or regex pattern to exclude. exclusion_pattern : str The exclusion pattern with which to create the domain regex. exclusion_regexes : list The list of regex patterns used to exclude domains. Returns ------- aug_exclusion_regexes : list The original list of regex patterns with one additional pattern from the `domain` input. """ exclusion_regex = re.compile(exclusion_pattern + domain) exclusion_regexes.append(exclusion_regex) return exclusion_regexes def matches_exclusions(stripped_rule, exclusion_regexes): """ Check whether a rule matches an exclusion rule we already provided. If this function returns True, that means this rule should be excluded from the final hosts file. Parameters ---------- stripped_rule : str The rule that we are checking. exclusion_regexes : list The list of regex patterns used to exclude domains. Returns ------- matches_exclusion : bool Whether or not the rule string matches a provided exclusion. """ stripped_domain = stripped_rule.split()[1] for exclusionRegex in exclusion_regexes: if exclusionRegex.search(stripped_domain): return True return False # End Exclusion Logic # Update Logic def update_sources_data(sources_data, **sources_params): """ Update the sources data and information for each source. Parameters ---------- sources_data : list The list of sources data that we are to update. sources_params : kwargs Dictionary providing additional parameters for updating the sources data. Currently, those fields are: 1) datapath 2) extensions 3) extensionspath 4) sourcedatafilename Returns ------- update_sources_data : list The original source data list with new source data appended. 
""" source_data_filename = sources_params["sourcedatafilename"] for source in sort_sources( recursive_glob(sources_params["datapath"], source_data_filename) ): update_file = open(source, "r", encoding="UTF-8") update_data = json.load(update_file) sources_data.append(update_data) update_file.close() for source in sources_params["extensions"]: source_dir = path_join_robust(sources_params["extensionspath"], source) for update_file_path in sort_sources( recursive_glob(source_dir, source_data_filename) ): update_file = open(update_file_path, "r") update_data = json.load(update_file) sources_data.append(update_data) update_file.close() return sources_data def jsonarray(json_array_string): """ Transformer, converts a json array string hosts into one host per line, prefixing each line with "127.0.0.1 ". Parameters ---------- json_array_string : str The json array string in the form '["example1.com", "example1.com", ...]' """ temp_list = json.loads(json_array_string) hostlines = "127.0.0.1 " + "\n127.0.0.1 ".join(temp_list) return hostlines def update_all_sources(source_data_filename, host_filename):
# End Update Logic # File Logic def create_initial_file(): """ Initialize the file in which we merge all host files for later pruning. """ merge_file = tempfile.NamedTemporaryFile() # spin the sources for the base file for source in sort_sources( recursive_glob(settings["datapath"], settings["hostfilename"]) ): start = "# Start {}\n\n".format(os.path.basename(os.path.dirname(source))) end = "# End {}\n\n".format(os.path.basename(os.path.dirname(source))) with open(source, "r", encoding="UTF-8") as curFile: write_data(merge_file, start + curFile.read() + end) # spin the sources for extensions to the base file for source in settings["extensions"]: for filename in sort_sources( recursive_glob( path_join_robust(settings["extensionspath"], source), settings["hostfilename"], ) ): with open(filename, "r") as curFile: write_data(merge_file, curFile.read()) maybe_copy_example_file(settings["blacklistfile"]) if os.path.isfile(settings["blacklistfile"]): with open(settings["blacklistfile"], "r") as curFile: write_data(merge_file, curFile.read()) return merge_file def compress_file(input_file, target_ip, output_file): """ Reduce the file dimension removing non-necessary lines (empty lines and comments) and putting multiple domains in each line. Reducing the number of lines of the file, the parsing under Microsoft Windows is much faster. Parameters ---------- input_file : file The file object that contains the hostnames that we are reducing. target_ip : str The target IP address. output_file : file The file object that will contain the reduced hostnames. """ input_file.seek(0) # reset file pointer write_data(output_file, "\n") target_ip_len = len(target_ip) lines = [target_ip] lines_index = 0 for line in input_file.readlines(): line = line.decode("UTF-8") if line.startswith(target_ip): if lines[lines_index].count(" ") < 9: lines[lines_index] += ( " " + line[target_ip_len : line.find("#")].strip() # noqa: E203 ) else: lines[lines_index] += "\n" lines.append(line[: line.find("#")].strip()) lines_index += 1 for line in lines: write_data(output_file, line) input_file.close() def minimise_file(input_file, target_ip, output_file): """ Reduce the file dimension removing non-necessary lines (empty lines and comments). Parameters ---------- input_file : file The file object that contains the hostnames that we are reducing. target_ip : str The target IP address. output_file : file The file object that will contain the reduced hostnames. """ input_file.seek(0) # reset file pointer write_data(output_file, "\n") lines = [] for line in input_file.readlines(): line = line.decode("UTF-8") if line.startswith(target_ip): lines.append(line[: line.find("#")].strip() + "\n") for line in lines: write_data(output_file, line) input_file.close() def remove_dups_and_excl(merge_file, exclusion_regexes, output_file=None): """ Remove duplicates and remove hosts that we are excluding. We check for duplicate hostnames as well as remove any hostnames that have been explicitly excluded by the user. Parameters ---------- merge_file : file The file object that contains the hostnames that we are pruning. exclusion_regexes : list The list of regex patterns used to exclude domains. output_file : file The file object in which the result is written. If None, the file 'settings["outputpath"]' will be created. 
""" number_of_rules = settings["numberofrules"] maybe_copy_example_file(settings["whitelistfile"]) if os.path.isfile(settings["whitelistfile"]): with open(settings["whitelistfile"], "r") as ins: for line in ins: line = line.strip(" \t\n\r") if line and not line.startswith("#"): settings["exclusions"].append(line) if not os.path.exists(settings["outputpath"]): os.makedirs(settings["outputpath"]) if output_file is None: final_file = open(path_join_robust(settings["outputpath"], "hosts_unconv"), "w+b") else: final_file = output_file merge_file.seek(0) # reset file pointer hostnames = {"localhost", "localhost.localdomain", "local", "broadcasthost"} exclusions = settings["exclusions"] for line in merge_file.readlines(): write_line = True # Explicit encoding line = line.decode("UTF-8") # replace tabs with space line = line.replace("\t+", " ") # see gh-271: trim trailing whitespace, periods line = line.rstrip(" .") # Testing the first character doesn't require startswith if line[0] == "#" or re.match(r"^\s*$", line[0]): write_data(final_file, line) continue if "::1" in line: continue stripped_rule = strip_rule(line) # strip comments if not stripped_rule or matches_exclusions(stripped_rule, exclusion_regexes): continue # Normalize rule hostname, normalized_rule = normalize_rule( stripped_rule, target_ip=settings["targetip"], keep_domain_comments=settings["keepdomaincomments"], ) for exclude in exclusions: if re.search(r"[\s\.]" + re.escape(exclude) + r"\s", line): write_line = False break if normalized_rule and (hostname not in hostnames) and write_line: write_data(final_file, normalized_rule) hostnames.add(hostname) number_of_rules += 1 settings["numberofrules"] = number_of_rules merge_file.close() if output_file is None: return final_file def normalize_rule(rule, target_ip, keep_domain_comments): """ Standardize and format the rule string provided. Parameters ---------- rule : str The rule whose spelling and spacing we are standardizing. target_ip : str The target IP address for the rule. keep_domain_comments : bool Whether or not to keep comments regarding these domains in the normalized rule. Returns ------- normalized_rule : tuple A tuple of the hostname and the rule string with spelling and spacing reformatted. """ """ first try: IP followed by domain """ regex = r"^\s*(\d{1,3}\.){3}\d{1,3}\s+([\w\.-]+[a-zA-Z])(.*)" result = re.search(regex, rule) if result: hostname, suffix = result.group(2, 3) # Explicitly lowercase and trim the hostname. hostname = hostname.lower().strip() rule = "%s %s" % (target_ip, hostname) if suffix and keep_domain_comments: if not suffix.strip().startswith("#"): rule += " #%s" % suffix else: rule += " %s" % suffix return hostname, rule + "\n" """ next try: IP address followed by host IP address """ regex = r"^\s*(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\s*(.*)" result = re.search(regex, rule) if result: ip_host, suffix = result.group(2, 3) # Explicitly trim the ip host. ip_host = ip_host.strip() rule = "%s %s" % (target_ip, ip_host) if suffix and keep_domain_comments: if not suffix.strip().startswith("#"): rule += " #%s" % suffix else: rule += " %s" % suffix return ip_host, rule + "\n" """ finally, if we get here, just belch to screen """ print("==>%s<==" % rule) return None, None def strip_rule(line): """ Sanitize a rule string provided before writing it to the output hosts file. Parameters ---------- line : str The rule provided for sanitation. Returns ------- sanitized_line : str The sanitized rule. 
""" split_line = line.split() if len(split_line) < 2: # just return blank return "" else: return " ".join(split_line) def write_opening_header(final_file, **header_params): """ Write the header information into the newly-created hosts file. Parameters ---------- final_file : file The file object that points to the newly-created hosts file. header_params : kwargs Dictionary providing additional parameters for populating the header information. Currently, those fields are: 1) extensions 2) numberofrules 3) outputsubfolder 4) skipstatichosts """ final_file.seek(0) # Reset file pointer. file_contents = final_file.read() # Save content. final_file.seek(0) # Write at the top. if header_params["extensions"]: if len(header_params["extensions"]) > 1: write_data( final_file, "# Title: StevenBlack/hosts with the {0} and {1} extensions\n#\n".format( ", ".join(header_params["extensions"][:-1]), header_params["extensions"][-1], ), ) else: write_data( final_file, "# Title: StevenBlack/hosts with the {0} extension\n#\n".format( ", ".join(header_params["extensions"]) ), ) else: write_data(final_file, "# Title: StevenBlack/hosts\n#\n") write_data( final_file, "# This hosts file is a merged collection " "of hosts from reputable sources,\n", ) write_data(final_file, "# with a dash of crowd sourcing via GitHub\n#\n") write_data( final_file, "# Date: " + time.strftime("%d %B %Y %H:%M:%S (%Z)", time.gmtime()) + "\n", ) if header_params["extensions"]: write_data( final_file, "# Extensions added to this file: " + ", ".join(header_params["extensions"]) + "\n", ) write_data( final_file, ( "# Number of unique domains: {:,}\n#\n".format( header_params["numberofrules"] ) ), ) write_data( final_file, "# Fetch the latest version of this file: " "https://raw.githubusercontent.com/StevenBlack/hosts/master/" + path_join_robust(header_params["outputsubfolder"], "").replace("\\", "/") + "hosts\n", ) write_data( final_file, "# Project home page: https://github.com/StevenBlack/hosts\n" ) write_data( final_file, "# Project releases: https://github.com/StevenBlack/hosts/releases\n#\n", ) write_data( final_file, "# ===============================================================\n", ) write_data(final_file, "\n") if not header_params["skipstatichosts"]: write_data(final_file, "127.0.0.1 localhost\n") write_data(final_file, "127.0.0.1 localhost.localdomain\n") write_data(final_file, "127.0.0.1 local\n") write_data(final_file, "255.255.255.255 broadcasthost\n") write_data(final_file, "::1 localhost\n") write_data(final_file, "::1 ip6-localhost\n") write_data(final_file, "::1 ip6-loopback\n") write_data(final_file, "fe80::1%lo0 localhost\n") write_data(final_file, "ff00::0 ip6-localnet\n") write_data(final_file, "ff00::0 ip6-mcastprefix\n") write_data(final_file, "ff02::1 ip6-allnodes\n") write_data(final_file, "ff02::2 ip6-allrouters\n") write_data(final_file, "ff02::3 ip6-allhosts\n") write_data(final_file, "0.0.0.0 0.0.0.0\n") if platform.system() == "Linux": write_data(final_file, "127.0.1.1 " + socket.gethostname() + "\n") write_data(final_file, "127.0.0.53 " + socket.gethostname() + "\n") write_data(final_file, "\n") preamble = path_join_robust(BASEDIR_PATH, "myhosts") maybe_copy_example_file(preamble) if os.path.isfile(preamble): with open(preamble, "r") as f: write_data(final_file, f.read()) final_file.write(file_contents) def update_readme_data(readme_file, **readme_updates): """ Update the host and website information provided in the README JSON data. 
    Parameters
    ----------
    readme_file : str
        The name of the README file to update.
    readme_updates : kwargs
        Dictionary providing additional JSON fields to update before
        saving the data. Currently, those fields are:

        1) extensions
        2) sourcesdata
        3) numberofrules
        4) outputsubfolder
    """

    extensions_key = "base"
    extensions = readme_updates["extensions"]

    if extensions:
        extensions_key = "-".join(extensions)

    output_folder = readme_updates["outputsubfolder"]
    generation_data = {
        "location": path_join_robust(output_folder, ""),
        "entries": readme_updates["numberofrules"],
        "sourcesdata": readme_updates["sourcesdata"],
    }

    with open(readme_file, "r") as f:
        readme_data = json.load(f)
        readme_data[extensions_key] = generation_data

    for denomination, data in readme_data.copy().items():
        if "location" in data and data["location"] and "\\" in data["location"]:
            # Windows compatibility: #1166
            readme_data[denomination]["location"] = data["location"].replace("\\", "/")

    with open(readme_file, "w") as f:
        json.dump(readme_data, f)


def move_hosts_file_into_place(final_file):
    r"""
    Move the newly-created hosts file into its correct location on the OS.

    For UNIX systems, the hosts file is "/etc/hosts." On Windows, it's
    "C:\Windows\System32\drivers\etc\hosts."

    For this move to work, you must have administrator privileges.
    On UNIX systems, this means having "sudo" access, and on Windows, it
    means being able to run command prompt in administrator mode.

    Parameters
    ----------
    final_file : file object
        The newly-created hosts file to move.
    """

    filename = os.path.abspath(final_file.name)

    if os.name == "posix":
        print(
            "Moving the file requires administrative privileges. You might need to enter your password."
        )
        if subprocess.call(SUDO + ["cp", filename, "/etc/hosts"]):
            print_failure("Moving the file failed.")
    elif os.name == "nt":
        print("Automatically moving the hosts file in place is not yet supported.")
        print(
            "Please move the generated file to %SystemRoot%\\system32\\drivers\\etc\\hosts"
        )


def flush_dns_cache():
    """
    Flush the DNS cache.
    """

    print("Flushing the DNS cache to utilize new hosts file...")
    print(
        "Flushing the DNS cache requires administrative privileges. You might need to enter your password."
    )

    dns_cache_found = False

    if platform.system() == "Darwin":
        if subprocess.call(SUDO + ["killall", "-HUP", "mDNSResponder"]):
            print_failure("Flushing the DNS cache failed.")
    elif os.name == "nt":
        print("Automatically flushing the DNS cache is not yet supported.")
        print(
            "Please copy and paste the command 'ipconfig /flushdns' in "
            "administrator command prompt after running this script."
        )
    else:
        nscd_prefixes = ["/etc", "/etc/rc.d"]
        nscd_msg = "Flushing the DNS cache by restarting nscd {result}"

        for nscd_prefix in nscd_prefixes:
            nscd_cache = nscd_prefix + "/init.d/nscd"

            if os.path.isfile(nscd_cache):
                dns_cache_found = True

                if subprocess.call(SUDO + [nscd_cache, "restart"]):
                    print_failure(nscd_msg.format(result="failed"))
                else:
                    print_success(nscd_msg.format(result="succeeded"))

        centos_file = "/etc/init.d/network"
        centos_msg = "Flushing the DNS cache by restarting network {result}"

        if os.path.isfile(centos_file):
            if subprocess.call(SUDO + [centos_file, "restart"]):
                print_failure(centos_msg.format(result="failed"))
            else:
                print_success(centos_msg.format(result="succeeded"))

        system_prefixes = ["/usr", ""]
        service_types = ["NetworkManager", "wicd", "dnsmasq", "networking"]
        restarted_services = []

        for system_prefix in system_prefixes:
            systemctl = system_prefix + "/bin/systemctl"
            system_dir = system_prefix + "/lib/systemd/system"

            for service_type in service_types:
                service = service_type + ".service"

                if service in restarted_services:
                    continue

                service_file = path_join_robust(system_dir, service)
                service_msg = (
                    "Flushing the DNS cache by restarting " + service + " {result}"
                )

                if os.path.isfile(service_file):
                    if 0 != subprocess.call([systemctl, "status", service], stdout=subprocess.DEVNULL):
                        continue
                    dns_cache_found = True

                    if subprocess.call(SUDO + [systemctl, "restart", service]):
                        print_failure(service_msg.format(result="failed"))
                    else:
                        print_success(service_msg.format(result="succeeded"))

                    restarted_services.append(service)

        dns_clean_file = "/etc/init.d/dns-clean"
        dns_clean_msg = "Flushing the DNS cache via dns-clean executable {result}"

        if os.path.isfile(dns_clean_file):
            dns_cache_found = True

            if subprocess.call(SUDO + [dns_clean_file, "start"]):
                print_failure(dns_clean_msg.format(result="failed"))
            else:
                print_success(dns_clean_msg.format(result="succeeded"))

    if not dns_cache_found:
        print_failure("Unable to determine DNS management tool.")


def remove_old_hosts_file(old_file_path, backup):
    """
    Remove the old hosts file.

    This is a hotfix because merging with an already existing hosts file
    leads to artifacts and duplicates.

    Parameters
    ----------
    old_file_path : str
        The path to the old hosts file to remove.
    backup : boolean
        Whether or not to backup the existing hosts file.
    """

    # Create if already removed, so remove won't raise an error.
    open(old_file_path, "a").close()

    if backup:
        backup_file_path = old_file_path + "-{}".format(
            time.strftime("%Y-%m-%d-%H-%M-%S")
        )

        # Make a backup copy, marking the date in which the list was updated
        shutil.copy(old_file_path, backup_file_path)

    os.remove(old_file_path)

    # Create new empty hosts file
    open(old_file_path, "a").close()


# End File Logic


def domain_to_idna(line):
    """
    Encode the domain that is present in a line to IDNA. This way we
    avoid most encoding issues.

    Parameters
    ----------
    line : str
        The line we have to encode/decode.

    Returns
    -------
    line : str
        The line in a converted format.

    Notes
    -----
    - This function encodes only the domain to `idna` format because in
      most cases, the encoding issue is due to a domain which looks like
      `b'\xc9\xa2oogle.com'.decode('idna')`.
    - About the splitting: We split because we only want to encode the
      domain and not the full line, which may cause some issues. Keep in
      mind that we split, but we still concatenate once we encoded the
      domain.

        - The following splits off the prefix `0.0.0.0` or `127.0.0.1` of
          a line.
        - The following also splits off the trailing comment of a given line.
    """

    if not line.startswith("#"):
        tabs = "\t"
        space = " "

        tabs_position, space_position = (line.find(tabs), line.find(space))

        if tabs_position > -1 and space_position > -1:
            if space_position < tabs_position:
                separator = space
            else:
                separator = tabs
        elif not tabs_position == -1:
            separator = tabs
        elif not space_position == -1:
            separator = space
        else:
            separator = ""

        if separator:
            split_line = line.split(separator)

            try:
                index = 1
                while index < len(split_line):
                    if split_line[index]:
                        break
                    index += 1

                if "#" in split_line[index]:
                    index_comment = split_line[index].find("#")

                    if index_comment > -1:
                        comment = split_line[index][index_comment:]

                        split_line[index] = (
                            split_line[index]
                            .split(comment)[0]
                            .encode("IDNA")
                            .decode("UTF-8")
                            + comment
                        )
                else:
                    split_line[index] = (
                        split_line[index].encode("IDNA").decode("UTF-8")
                    )
            except IndexError:
                pass
            return separator.join(split_line)
        return line.encode("IDNA").decode("UTF-8")
    return line.encode("UTF-8").decode("UTF-8")


# Helper Functions
def maybe_copy_example_file(file_path):
    """
    Given a file path, copy over its ".example" if the path doesn't exist.

    If the path does exist, nothing happens in this function.

    If the path doesn't exist, and the ".example" file doesn't exist, nothing
    happens in this function.

    Parameters
    ----------
    file_path : str
        The full file path to check.
    """

    if not os.path.isfile(file_path):
        example_file_path = file_path + ".example"

        if os.path.isfile(example_file_path):
            shutil.copyfile(example_file_path, file_path)


def get_file_by_url(url, retries=3, delay=10):
    """
    Get the data of a file located at a particular URL.

    Parameters
    ----------
    url : str
        The URL at which to access the data.
    retries : int
        The number of times to retry after a name-resolution failure.
    delay : int
        The number of seconds to wait between retries.

    Returns
    -------
    url_data : str or None
        The data retrieved at that URL from the file. Returns None if the
        attempted retrieval is unsuccessful.

    Note
    ----
    - BeautifulSoup is used in this case to avoid having to search in which
      format we have to encode or decode data before parsing it to UTF-8.
    """

    while retries:
        try:
            with urlopen(url) as f:
                soup = BeautifulSoup(f.read(), "lxml").get_text()
                return "\n".join(list(map(domain_to_idna, soup.split("\n"))))
        except Exception as e:
            if 'failure in name resolution' in str(e):
                print('No internet connection! Retrying in {} seconds'.format(delay))
                time.sleep(delay)
                retries -= 1
                continue
            break

    print("Problem getting file: ", url)


def write_data(f, data):
    """
    Write data to a file object.

    Parameters
    ----------
    f : file
        The file object at which to write the data.
    data : str
        The data to write to the file.
    """

    f.write(bytes(data, "UTF-8"))


def list_dir_no_hidden(path):
    """
    List all files in a directory, except for hidden files.

    Parameters
    ----------
    path : str
        The path of the directory whose files we wish to list.
    """

    return glob(os.path.join(path, "*"))


def query_yes_no(question, default="yes"):
    """
    Ask a yes/no question via input() and get answer from the user.

    Inspired by the following implementation:

    http://code.activestate.com/recipes/577058

    Parameters
    ----------
    question : str
        The question presented to the user.
    default : str, default "yes"
        The presumed answer if the user just hits <Enter>. It must be "yes",
        "no", or None (means an answer is required of the user).

    Returns
    -------
    yes : Whether or not the user replied yes to the question.
""" valid = {"yes": "yes", "y": "yes", "ye": "yes", "no": "no", "n": "no"} prompt = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}.get(default, None) if not prompt: raise ValueError("invalid default answer: '%s'" % default) reply = None while not reply: sys.stdout.write(colorize(question, Colors.PROMPT) + prompt) choice = input().lower() reply = None if default and not choice: reply = default elif choice in valid: reply = valid[choice] else: print_failure("Please respond with 'yes' or 'no' (or 'y' or 'n').\n") return reply == "yes" def is_valid_domain_format(domain): """ Check whether a provided domain is valid. Parameters ---------- domain : str The domain against which to check. Returns ------- valid_domain : bool Whether or not the domain provided is valid. """ if domain == "": print("You didn't enter a domain. Try again.") return False domain_regex = re.compile(r"www\d{0,3}[.]|https?") if domain_regex.match(domain): print( "The domain " + domain + " is not valid. Do not include " "www.domain.com or http(s)://domain.com. Try again." ) return False else: return True def recursive_glob(stem, file_pattern): """ Recursively match files in a directory according to a pattern. Parameters ---------- stem : str The directory in which to recurse file_pattern : str The filename regex pattern to which to match. Returns ------- matches_list : list A list of filenames in the directory that match the file pattern. """ if sys.version_info >= (3, 5): return glob(stem + "/**/" + file_pattern, recursive=True) else: # gh-316: this will avoid invalid unicode comparisons in Python 2.x if stem == str("*"): stem = "." matches = [] for root, dirnames, filenames in os.walk(stem): for filename in fnmatch.filter(filenames, file_pattern): matches.append(path_join_robust(root, filename)) return matches def path_join_robust(path, *paths): """ Wrapper around `os.path.join` with handling for locale issues. Parameters ---------- path : str The first path to join. paths : varargs Subsequent path strings to join. Returns ------- joined_path : str The joined path string of the two path inputs. Raises ------ locale.Error : A locale issue was detected that prevents path joining. """ try: # gh-316: joining unicode and str can be saddening in Python 2.x path = str(path) paths = [str(another_path) for another_path in paths] return os.path.join(path, *paths) except UnicodeDecodeError as e: raise locale.Error( "Unable to construct path. This is likely a LOCALE issue:\n\n" + str(e) ) # Colors class Colors(object): PROMPT = "\033[94m" SUCCESS = "\033[92m" FAIL = "\033[91m" ENDC = "\033[0m" def supports_color(): """ Check whether the running terminal or command prompt supports color. Inspired by StackOverflow link (and Django implementation) here: https://stackoverflow.com/questions/7445658 Returns ------- colors_supported : bool Whether the running terminal or command prompt supports color. """ sys_platform = sys.platform supported = sys_platform != "Pocket PC" and ( sys_platform != "win32" or "ANSICON" in os.environ ) atty_connected = hasattr(sys.stdout, "isatty") and sys.stdout.isatty() return supported and atty_connected def colorize(text, color): """ Wrap a string so that it displays in a particular color. This function adds a prefix and suffix to a text string so that it is displayed as a particular color, either in command prompt or the terminal. If the running terminal or command prompt does not support color, the original text is returned without being wrapped. Parameters ---------- text : str The message to display. 
color : str The color string prefix to put before the text. Returns ------- wrapped_str : str The wrapped string to display in color, if possible. """ if not supports_color(): return text return color + text + Colors.ENDC def print_success(text): """ Print a success message. Parameters ---------- text : str The message to display. """ print(colorize(text, Colors.SUCCESS)) def print_failure(text): """ Print a failure message. Parameters ---------- text : str The message to display. """ print(colorize(text, Colors.FAIL)) # End Helper Functions if __name__ == "__main__": main()
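# A worked example of normalize_rule() defined above (illustrative only; this
# call does not run as part of the script):
#
#     >>> normalize_rule("127.0.0.1 Example.COM", target_ip="0.0.0.0",
#     ...                keep_domain_comments=True)
#     ('example.com', '0.0.0.0 example.com\n')
#
# The hostname is lowercased and trimmed, and the rule is rewritten against
# the configured target IP.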
""" Update all host files, regardless of folder depth. Parameters ---------- source_data_filename : str The name of the filename where information regarding updating sources for a particular URL is stored. This filename is assumed to be the same for all sources. host_filename : str The name of the file in which the updated source information is stored for a particular URL. This filename is assumed to be the same for all sources. """ # The transforms we support transform_methods = {"jsonarray": jsonarray} all_sources = sort_sources(recursive_glob("*", source_data_filename)) for source in all_sources: update_file = open(source, "r", encoding="UTF-8") update_data = json.load(update_file) update_file.close() update_url = update_data["url"] update_transforms = [] if update_data.get("transforms"): update_transforms = update_data["transforms"] print("Updating source " + os.path.dirname(source) + " from " + update_url) try: updated_file = get_file_by_url(update_url) # spin the transforms as required for transform in update_transforms: updated_file = transform_methods[transform](updated_file) # get rid of carriage-return symbols updated_file = updated_file.replace("\r", "") hosts_file = open( path_join_robust(BASEDIR_PATH, os.path.dirname(source), host_filename), "wb", ) write_data(hosts_file, updated_file) hosts_file.close() except Exception as e: print(e) print("Error in updating source: ", update_url)
ingress_test.go
// Licensed to the Apache Software Foundation (ASF) under one or more // contributor license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright ownership. // The ASF licenses this file to You under the Apache License, Version 2.0 // (the "License"); you may not use this file except in compliance with // the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package translation import ( "context" "path" "testing" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" networkingv1beta1 "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" "github.com/apache/apisix-ingress-controller/pkg/kube" configv2beta3 "github.com/apache/apisix-ingress-controller/pkg/kube/apisix/apis/config/v2beta3" fakeapisix "github.com/apache/apisix-ingress-controller/pkg/kube/apisix/client/clientset/versioned/fake" apisixinformers "github.com/apache/apisix-ingress-controller/pkg/kube/apisix/client/informers/externalversions" apisixconst "github.com/apache/apisix-ingress-controller/pkg/kube/apisix/const" "github.com/apache/apisix-ingress-controller/pkg/kube/translation/annotations" v1 "github.com/apache/apisix-ingress-controller/pkg/types/apisix/v1" ) var ( _testSvc = &corev1.Service{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "test-service", Namespace: "default", }, Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ { Name: "port1", Port: 80, TargetPort: intstr.IntOrString{ Type: intstr.Int, IntVal: 9080, }, }, { Name: "port2", Port: 443, TargetPort: intstr.IntOrString{ Type: intstr.Int, IntVal: 9443, }, }, }, }, } _testEp = &corev1.Endpoints{ TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "test-service", Namespace: "default", }, Subsets: []corev1.EndpointSubset{ { Ports: []corev1.EndpointPort{ { Name: "port1", Port: 9080, }, { Name: "port2", Port: 9443, }, }, Addresses: []corev1.EndpointAddress{ {IP: "192.168.1.1"}, {IP: "192.168.1.2"}, }, }, }, } ) func TestTranslateIngressV1NoBackend(t *testing.T) { prefix := networkingv1.PathTypePrefix // no backend. ing := &networkingv1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", }, Spec: networkingv1.IngressSpec{ Rules: []networkingv1.IngressRule{ { Host: "apisix.apache.org", IngressRuleValue: networkingv1.IngressRuleValue{ HTTP: &networkingv1.HTTPIngressRuleValue{ Paths: []networkingv1.HTTPIngressPath{ { Path: "/foo", PathType: &prefix, }, }, }, }, }, }, }, } tr := &translator{} ctx, err := tr.translateIngressV1(ing) assert.Nil(t, err) assert.Len(t, ctx.Routes, 1) assert.Len(t, ctx.Upstreams, 0) assert.Len(t, ctx.PluginConfigs, 0) assert.Equal(t, "", ctx.Routes[0].UpstreamId) assert.Equal(t, "", ctx.Routes[0].PluginConfigId) assert.Equal(t, []string{"/foo", "/foo/*"}, ctx.Routes[0].Uris) } func
(t *testing.T) {
	prefix := networkingv1.PathTypePrefix
	// backend with an undefined service port.
	ing := &networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test",
			Namespace: "default",
		},
		Spec: networkingv1.IngressSpec{
			Rules: []networkingv1.IngressRule{
				{
					Host: "apisix.apache.org",
					IngressRuleValue: networkingv1.IngressRuleValue{
						HTTP: &networkingv1.HTTPIngressRuleValue{
							Paths: []networkingv1.HTTPIngressPath{
								{
									Path:     "/foo",
									PathType: &prefix,
									Backend: networkingv1.IngressBackend{
										Service: &networkingv1.IngressServiceBackend{
											Name: "test-service",
											Port: networkingv1.ServiceBackendPort{
												Name: "undefined-port",
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
	client := fake.NewSimpleClientset()
	informersFactory := informers.NewSharedInformerFactory(client, 0)
	svcInformer := informersFactory.Core().V1().Services().Informer()
	svcLister := informersFactory.Core().V1().Services().Lister()

	tr := &translator{
		TranslatorOptions: &TranslatorOptions{
			ServiceLister: svcLister,
		},
	}
	ctx, err := tr.translateIngressV1(ing)
	assert.NotNil(t, err)
	assert.Nil(t, ctx)
	assert.Equal(t, "service \"test-service\" not found", err.Error())

	processCh := make(chan struct{})
	svcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			processCh <- struct{}{}
		},
	})

	stopCh := make(chan struct{})
	defer close(stopCh)
	go svcInformer.Run(stopCh)
	cache.WaitForCacheSync(stopCh, svcInformer.HasSynced)

	_, err = client.CoreV1().Services("default").Create(context.Background(), _testSvc, metav1.CreateOptions{})
	assert.Nil(t, err)
	_, err = client.CoreV1().Endpoints("default").Create(context.Background(), _testEp, metav1.CreateOptions{})
	assert.Nil(t, err)

	<-processCh
	ctx, err = tr.translateIngressV1(ing)
	assert.Nil(t, ctx)
	assert.Equal(t, &translateError{
		field:  "service",
		reason: "port not found",
	}, err)
}

func TestTranslateIngressV1WithRegex(t *testing.T) {
	prefix := networkingv1.PathTypeImplementationSpecific
	regexPath := "/foo/*/bar"
	ing := &networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test",
			Namespace: "default",
			Annotations: map[string]string{
				"k8s.apisix.apache.org/use-regex": "true",
			},
		},
		Spec: networkingv1.IngressSpec{
			Rules: []networkingv1.IngressRule{
				{
					Host: "apisix.apache.org",
					IngressRuleValue: networkingv1.IngressRuleValue{
						HTTP: &networkingv1.HTTPIngressRuleValue{
							Paths: []networkingv1.HTTPIngressPath{
								{
									Path:     regexPath,
									PathType: &prefix,
									Backend: networkingv1.IngressBackend{
										Service: &networkingv1.IngressServiceBackend{
											Name: "test-service",
											Port: networkingv1.ServiceBackendPort{
												Name: "port1",
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
	client := fake.NewSimpleClientset()
	informersFactory := informers.NewSharedInformerFactory(client, 0)
	svcInformer := informersFactory.Core().V1().Services().Informer()
	svcLister := informersFactory.Core().V1().Services().Lister()
	epLister, epInformer := kube.NewEndpointListerAndInformer(informersFactory, false)
	apisixClient := fakeapisix.NewSimpleClientset()
	apisixInformersFactory := apisixinformers.NewSharedInformerFactory(apisixClient, 0)

	processCh := make(chan struct{})
	svcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			processCh <- struct{}{}
		},
	})
	epInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			processCh <- struct{}{}
		},
	})

	stopCh := make(chan struct{})
	defer close(stopCh)
	go svcInformer.Run(stopCh)
	go epInformer.Run(stopCh)
	cache.WaitForCacheSync(stopCh, svcInformer.HasSynced)

	_, err := client.CoreV1().Services("default").Create(context.Background(), _testSvc,
		metav1.CreateOptions{})
	assert.Nil(t, err)
	_, err = client.CoreV1().Endpoints("default").Create(context.Background(), _testEp, metav1.CreateOptions{})
	assert.Nil(t, err)

	tr := &translator{
		TranslatorOptions: &TranslatorOptions{
			ServiceLister:        svcLister,
			EndpointLister:       epLister,
			ApisixUpstreamLister: apisixInformersFactory.Apisix().V2beta3().ApisixUpstreams().Lister(),
		},
	}

	<-processCh
	<-processCh
	ctx, err := tr.translateIngressV1(ing)
	assert.Nil(t, err)
	assert.Len(t, ctx.Routes, 1)
	assert.Len(t, ctx.Upstreams, 1)
	// the number of PluginConfigs should be zero, because no available annotations matched the rule
	assert.Len(t, ctx.PluginConfigs, 0)

	routeVars, err := tr.translateRouteMatchExprs([]configv2beta3.ApisixRouteHTTPMatchExpr{{
		Subject: configv2beta3.ApisixRouteHTTPMatchExprSubject{
			Scope: apisixconst.ScopePath,
		},
		Op:    apisixconst.OpRegexMatch,
		Value: &regexPath,
	}})
	assert.Nil(t, err)
	var expectedVars v1.Vars = routeVars

	assert.Equal(t, []string{"/*"}, ctx.Routes[0].Uris)
	assert.Equal(t, expectedVars, ctx.Routes[0].Vars)
}

func TestTranslateIngressV1(t *testing.T) {
	prefix := networkingv1.PathTypePrefix
	ing := &networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test",
			Namespace: "default",
			Annotations: map[string]string{
				"k8s.apisix.apache.org/use-regex":                                   "true",
				path.Join(annotations.AnnotationsPrefix, "enable-cors"):             "true",
				path.Join(annotations.AnnotationsPrefix, "allowlist-source-range"):  "127.0.0.1",
			},
		},
		Spec: networkingv1.IngressSpec{
			Rules: []networkingv1.IngressRule{
				{
					Host: "apisix.apache.org",
					IngressRuleValue: networkingv1.IngressRuleValue{
						HTTP: &networkingv1.HTTPIngressRuleValue{
							Paths: []networkingv1.HTTPIngressPath{
								{
									Path:     "/foo",
									PathType: &prefix,
									Backend: networkingv1.IngressBackend{
										Service: &networkingv1.IngressServiceBackend{
											Name: "test-service",
											Port: networkingv1.ServiceBackendPort{
												Name: "port1",
											},
										},
									},
								},
								{
									Path: "/bar",
									Backend: networkingv1.IngressBackend{
										Service: &networkingv1.IngressServiceBackend{
											Name: "test-service",
											Port: networkingv1.ServiceBackendPort{
												Number: 443,
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
	client := fake.NewSimpleClientset()
	informersFactory := informers.NewSharedInformerFactory(client, 0)
	svcInformer := informersFactory.Core().V1().Services().Informer()
	svcLister := informersFactory.Core().V1().Services().Lister()
	epLister, epInformer := kube.NewEndpointListerAndInformer(informersFactory, false)
	apisixClient := fakeapisix.NewSimpleClientset()
	apisixInformersFactory := apisixinformers.NewSharedInformerFactory(apisixClient, 0)

	processCh := make(chan struct{})
	svcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			processCh <- struct{}{}
		},
	})
	epInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			processCh <- struct{}{}
		},
	})

	stopCh := make(chan struct{})
	defer close(stopCh)
	go svcInformer.Run(stopCh)
	go epInformer.Run(stopCh)
	cache.WaitForCacheSync(stopCh, svcInformer.HasSynced)

	_, err := client.CoreV1().Services("default").Create(context.Background(), _testSvc, metav1.CreateOptions{})
	assert.Nil(t, err)
	_, err = client.CoreV1().Endpoints("default").Create(context.Background(), _testEp, metav1.CreateOptions{})
	assert.Nil(t, err)

	tr := &translator{
		TranslatorOptions: &TranslatorOptions{
			ServiceLister:        svcLister,
			EndpointLister:       epLister,
			ApisixUpstreamLister: apisixInformersFactory.Apisix().V2beta3().ApisixUpstreams().Lister(),
		},
	}

	<-processCh
	<-processCh
	ctx, err := tr.translateIngressV1(ing)
	assert.Nil(t, err)
	assert.Len(t,
ctx.Routes, 2) assert.Len(t, ctx.Upstreams, 2) assert.Len(t, ctx.PluginConfigs, 2) assert.Equal(t, []string{"/foo", "/foo/*"}, ctx.Routes[0].Uris) assert.Equal(t, ctx.Upstreams[0].ID, ctx.Routes[0].UpstreamId) assert.Equal(t, ctx.PluginConfigs[0].ID, ctx.Routes[0].PluginConfigId) assert.Equal(t, "apisix.apache.org", ctx.Routes[0].Host) assert.Equal(t, []string{"/bar"}, ctx.Routes[1].Uris) assert.Equal(t, ctx.Upstreams[1].ID, ctx.Routes[1].UpstreamId) assert.Equal(t, ctx.PluginConfigs[1].ID, ctx.Routes[1].PluginConfigId) assert.Equal(t, "apisix.apache.org", ctx.Routes[1].Host) assert.Equal(t, "roundrobin", ctx.Upstreams[0].Type) assert.Equal(t, "http", ctx.Upstreams[0].Scheme) assert.Len(t, ctx.Upstreams[0].Nodes, 2) assert.Equal(t, 9080, ctx.Upstreams[0].Nodes[0].Port) assert.Equal(t, "192.168.1.1", ctx.Upstreams[0].Nodes[0].Host) assert.Equal(t, 9080, ctx.Upstreams[0].Nodes[1].Port) assert.Equal(t, "192.168.1.2", ctx.Upstreams[0].Nodes[1].Host) assert.Equal(t, "roundrobin", ctx.Upstreams[1].Type) assert.Equal(t, "http", ctx.Upstreams[1].Scheme) assert.Len(t, ctx.Upstreams[1].Nodes, 2) assert.Equal(t, 9443, ctx.Upstreams[1].Nodes[0].Port) assert.Equal(t, "192.168.1.1", ctx.Upstreams[1].Nodes[0].Host) assert.Equal(t, 9443, ctx.Upstreams[1].Nodes[1].Port) assert.Equal(t, "192.168.1.2", ctx.Upstreams[1].Nodes[1].Host) assert.Len(t, ctx.PluginConfigs[0].Plugins, 2) assert.Len(t, ctx.PluginConfigs[1].Plugins, 2) } func TestTranslateIngressV1beta1NoBackend(t *testing.T) { prefix := networkingv1beta1.PathTypePrefix // no backend. ing := &networkingv1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", }, Spec: networkingv1beta1.IngressSpec{ Rules: []networkingv1beta1.IngressRule{ { Host: "apisix.apache.org", IngressRuleValue: networkingv1beta1.IngressRuleValue{ HTTP: &networkingv1beta1.HTTPIngressRuleValue{ Paths: []networkingv1beta1.HTTPIngressPath{ { Path: "/foo", PathType: &prefix, }, }, }, }, }, }, }, } tr := &translator{} ctx, err := tr.translateIngressV1beta1(ing) assert.Nil(t, err) assert.Len(t, ctx.Routes, 1) assert.Len(t, ctx.Upstreams, 0) assert.Len(t, ctx.PluginConfigs, 0) assert.Equal(t, "", ctx.Routes[0].UpstreamId) assert.Equal(t, "", ctx.Routes[0].PluginConfigId) assert.Equal(t, []string{"/foo", "/foo/*"}, ctx.Routes[0].Uris) } func TestTranslateIngressV1beta1BackendWithInvalidService(t *testing.T) { prefix := networkingv1beta1.PathTypePrefix // no backend. 
ing := &networkingv1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", }, Spec: networkingv1beta1.IngressSpec{ Rules: []networkingv1beta1.IngressRule{ { Host: "apisix.apache.org", IngressRuleValue: networkingv1beta1.IngressRuleValue{ HTTP: &networkingv1beta1.HTTPIngressRuleValue{ Paths: []networkingv1beta1.HTTPIngressPath{ { Path: "/foo", PathType: &prefix, Backend: networkingv1beta1.IngressBackend{ ServiceName: "test-service", ServicePort: intstr.IntOrString{ Type: intstr.String, StrVal: "undefined-port", }, }, }, }, }, }, }, }, }, } client := fake.NewSimpleClientset() informersFactory := informers.NewSharedInformerFactory(client, 0) svcInformer := informersFactory.Core().V1().Services().Informer() svcLister := informersFactory.Core().V1().Services().Lister() tr := &translator{ TranslatorOptions: &TranslatorOptions{ ServiceLister: svcLister, }, } ctx, err := tr.translateIngressV1beta1(ing) assert.NotNil(t, err) assert.Nil(t, ctx) assert.Equal(t, "service \"test-service\" not found", err.Error()) processCh := make(chan struct{}) svcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { processCh <- struct{}{} }, }) stopCh := make(chan struct{}) defer close(stopCh) go svcInformer.Run(stopCh) cache.WaitForCacheSync(stopCh, svcInformer.HasSynced) _, err = client.CoreV1().Services("default").Create(context.Background(), _testSvc, metav1.CreateOptions{}) assert.Nil(t, err) _, err = client.CoreV1().Endpoints("default").Create(context.Background(), _testEp, metav1.CreateOptions{}) assert.Nil(t, err) <-processCh ctx, err = tr.translateIngressV1beta1(ing) assert.Nil(t, ctx) assert.Equal(t, &translateError{ field: "service", reason: "port not found", }, err) } func TestTranslateIngressV1beta1WithRegex(t *testing.T) { prefix := networkingv1beta1.PathTypeImplementationSpecific // no backend. 
	regexPath := "/foo/*/bar"
	ing := &networkingv1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test",
			Namespace: "default",
			Annotations: map[string]string{
				"k8s.apisix.apache.org/use-regex": "true",
			},
		},
		Spec: networkingv1beta1.IngressSpec{
			Rules: []networkingv1beta1.IngressRule{
				{
					Host: "apisix.apache.org",
					IngressRuleValue: networkingv1beta1.IngressRuleValue{
						HTTP: &networkingv1beta1.HTTPIngressRuleValue{
							Paths: []networkingv1beta1.HTTPIngressPath{
								{
									Path:     regexPath,
									PathType: &prefix,
									Backend: networkingv1beta1.IngressBackend{
										ServiceName: "test-service",
										ServicePort: intstr.IntOrString{
											Type:   intstr.String,
											StrVal: "port1",
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
	client := fake.NewSimpleClientset()
	informersFactory := informers.NewSharedInformerFactory(client, 0)
	svcInformer := informersFactory.Core().V1().Services().Informer()
	svcLister := informersFactory.Core().V1().Services().Lister()
	epLister, epInformer := kube.NewEndpointListerAndInformer(informersFactory, false)
	apisixClient := fakeapisix.NewSimpleClientset()
	apisixInformersFactory := apisixinformers.NewSharedInformerFactory(apisixClient, 0)

	processCh := make(chan struct{})
	svcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			processCh <- struct{}{}
		},
	})
	epInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			processCh <- struct{}{}
		},
	})

	stopCh := make(chan struct{})
	defer close(stopCh)
	go svcInformer.Run(stopCh)
	go epInformer.Run(stopCh)
	cache.WaitForCacheSync(stopCh, svcInformer.HasSynced)

	_, err := client.CoreV1().Services("default").Create(context.Background(), _testSvc, metav1.CreateOptions{})
	assert.Nil(t, err)
	_, err = client.CoreV1().Endpoints("default").Create(context.Background(), _testEp, metav1.CreateOptions{})
	assert.Nil(t, err)

	tr := &translator{
		TranslatorOptions: &TranslatorOptions{
			ServiceLister:        svcLister,
			EndpointLister:       epLister,
			ApisixUpstreamLister: apisixInformersFactory.Apisix().V2beta3().ApisixUpstreams().Lister(),
		},
	}

	<-processCh
	<-processCh
	ctx, err := tr.translateIngressV1beta1(ing)
	assert.Nil(t, err)
	assert.Len(t, ctx.Routes, 1)
	assert.Len(t, ctx.Upstreams, 1)
	// the number of PluginConfigs should be zero, because no available annotations matched the rule
	assert.Len(t, ctx.PluginConfigs, 0)

	routeVars, err := tr.translateRouteMatchExprs([]configv2beta3.ApisixRouteHTTPMatchExpr{{
		Subject: configv2beta3.ApisixRouteHTTPMatchExprSubject{
			Scope: apisixconst.ScopePath,
		},
		Op:    apisixconst.OpRegexMatch,
		Value: &regexPath,
	}})
	assert.Nil(t, err)
	var expectedVars v1.Vars = routeVars

	assert.Equal(t, []string{"/*"}, ctx.Routes[0].Uris)
	assert.Equal(t, expectedVars, ctx.Routes[0].Vars)
}

func TestTranslateIngressV1beta1(t *testing.T) {
	prefix := networkingv1beta1.PathTypePrefix
ing := &networkingv1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", Annotations: map[string]string{ "k8s.apisix.apache.org/use-regex": "true", path.Join(annotations.AnnotationsPrefix, "enable-cors"): "true", path.Join(annotations.AnnotationsPrefix, "allowlist-source-range"): "127.0.0.1", path.Join(annotations.AnnotationsPrefix, "enable-cors222"): "true", }, }, Spec: networkingv1beta1.IngressSpec{ Rules: []networkingv1beta1.IngressRule{ { Host: "apisix.apache.org", IngressRuleValue: networkingv1beta1.IngressRuleValue{ HTTP: &networkingv1beta1.HTTPIngressRuleValue{ Paths: []networkingv1beta1.HTTPIngressPath{ { Path: "/foo", PathType: &prefix, Backend: networkingv1beta1.IngressBackend{ ServiceName: "test-service", ServicePort: intstr.IntOrString{ Type: intstr.String, StrVal: "port1", }, }, }, { Path: "/bar", Backend: networkingv1beta1.IngressBackend{ ServiceName: "test-service", ServicePort: intstr.IntOrString{ Type: intstr.Int, IntVal: 443, }, }, }, }, }, }, }, }, }, } client := fake.NewSimpleClientset() informersFactory := informers.NewSharedInformerFactory(client, 0) svcInformer := informersFactory.Core().V1().Services().Informer() svcLister := informersFactory.Core().V1().Services().Lister() epLister, epInformer := kube.NewEndpointListerAndInformer(informersFactory, false) apisixClient := fakeapisix.NewSimpleClientset() apisixInformersFactory := apisixinformers.NewSharedInformerFactory(apisixClient, 0) processCh := make(chan struct{}) svcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { processCh <- struct{}{} }, }) epInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { processCh <- struct{}{} }, }) stopCh := make(chan struct{}) defer close(stopCh) go svcInformer.Run(stopCh) go epInformer.Run(stopCh) cache.WaitForCacheSync(stopCh, svcInformer.HasSynced) _, err := client.CoreV1().Services("default").Create(context.Background(), _testSvc, metav1.CreateOptions{}) assert.Nil(t, err) _, err = client.CoreV1().Endpoints("default").Create(context.Background(), _testEp, metav1.CreateOptions{}) assert.Nil(t, err) tr := &translator{ TranslatorOptions: &TranslatorOptions{ ServiceLister: svcLister, EndpointLister: epLister, ApisixUpstreamLister: apisixInformersFactory.Apisix().V2beta3().ApisixUpstreams().Lister(), }, } <-processCh <-processCh ctx, err := tr.translateIngressV1beta1(ing) assert.Nil(t, err) assert.Len(t, ctx.Routes, 2) assert.Len(t, ctx.Upstreams, 2) assert.Len(t, ctx.PluginConfigs, 2) assert.Equal(t, []string{"/foo", "/foo/*"}, ctx.Routes[0].Uris) assert.Equal(t, ctx.Upstreams[0].ID, ctx.Routes[0].UpstreamId) assert.Equal(t, "apisix.apache.org", ctx.Routes[0].Host) assert.Equal(t, []string{"/bar"}, ctx.Routes[1].Uris) assert.Equal(t, ctx.Upstreams[1].ID, ctx.Routes[1].UpstreamId) assert.Equal(t, "apisix.apache.org", ctx.Routes[1].Host) assert.Equal(t, "roundrobin", ctx.Upstreams[0].Type) assert.Equal(t, "http", ctx.Upstreams[0].Scheme) assert.Len(t, ctx.Upstreams[0].Nodes, 2) assert.Equal(t, 9080, ctx.Upstreams[0].Nodes[0].Port) assert.Equal(t, "192.168.1.1", ctx.Upstreams[0].Nodes[0].Host) assert.Equal(t, 9080, ctx.Upstreams[0].Nodes[1].Port) assert.Equal(t, "192.168.1.2", ctx.Upstreams[0].Nodes[1].Host) assert.Equal(t, "roundrobin", ctx.Upstreams[1].Type) assert.Equal(t, "http", ctx.Upstreams[1].Scheme) assert.Len(t, ctx.Upstreams[1].Nodes, 2) assert.Equal(t, 9443, ctx.Upstreams[1].Nodes[0].Port) assert.Equal(t, "192.168.1.1", ctx.Upstreams[1].Nodes[0].Host) assert.Equal(t, 
9443, ctx.Upstreams[1].Nodes[1].Port) assert.Equal(t, "192.168.1.2", ctx.Upstreams[1].Nodes[1].Host) assert.Len(t, ctx.PluginConfigs[0].Plugins, 2) assert.Len(t, ctx.PluginConfigs[1].Plugins, 2) } func TestTranslateIngressExtensionsV1beta1(t *testing.T) { prefix := extensionsv1beta1.PathTypePrefix // no backend. ing := &extensionsv1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", Annotations: map[string]string{ "k8s.apisix.apache.org/use-regex": "true", path.Join(annotations.AnnotationsPrefix, "enable-cors"): "true", path.Join(annotations.AnnotationsPrefix, "allowlist-source-range"): "127.0.0.1", path.Join(annotations.AnnotationsPrefix, "enable-cors222"): "true", }, }, Spec: extensionsv1beta1.IngressSpec{ Rules: []extensionsv1beta1.IngressRule{ { Host: "apisix.apache.org", IngressRuleValue: extensionsv1beta1.IngressRuleValue{ HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ Paths: []extensionsv1beta1.HTTPIngressPath{ { Path: "/foo", PathType: &prefix, Backend: extensionsv1beta1.IngressBackend{ ServiceName: "test-service", ServicePort: intstr.IntOrString{ Type: intstr.String, StrVal: "port1", }, }, }, { Path: "/bar", Backend: extensionsv1beta1.IngressBackend{ ServiceName: "test-service", ServicePort: intstr.IntOrString{ Type: intstr.Int, IntVal: 443, }, }, }, }, }, }, }, }, }, } client := fake.NewSimpleClientset() informersFactory := informers.NewSharedInformerFactory(client, 0) svcInformer := informersFactory.Core().V1().Services().Informer() svcLister := informersFactory.Core().V1().Services().Lister() epLister, epInformer := kube.NewEndpointListerAndInformer(informersFactory, false) apisixClient := fakeapisix.NewSimpleClientset() apisixInformersFactory := apisixinformers.NewSharedInformerFactory(apisixClient, 0) processCh := make(chan struct{}) svcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { processCh <- struct{}{} }, }) epInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { processCh <- struct{}{} }, }) stopCh := make(chan struct{}) defer close(stopCh) go svcInformer.Run(stopCh) go epInformer.Run(stopCh) cache.WaitForCacheSync(stopCh, svcInformer.HasSynced) _, err := client.CoreV1().Services("default").Create(context.Background(), _testSvc, metav1.CreateOptions{}) assert.Nil(t, err) _, err = client.CoreV1().Endpoints("default").Create(context.Background(), _testEp, metav1.CreateOptions{}) assert.Nil(t, err) tr := &translator{ TranslatorOptions: &TranslatorOptions{ ServiceLister: svcLister, EndpointLister: epLister, ApisixUpstreamLister: apisixInformersFactory.Apisix().V2beta3().ApisixUpstreams().Lister(), }, } <-processCh <-processCh ctx, err := tr.translateIngressExtensionsV1beta1(ing) assert.Nil(t, err) assert.Len(t, ctx.Routes, 2) assert.Len(t, ctx.Upstreams, 2) assert.Len(t, ctx.PluginConfigs, 2) assert.Equal(t, []string{"/foo", "/foo/*"}, ctx.Routes[0].Uris) assert.Equal(t, ctx.Upstreams[0].ID, ctx.Routes[0].UpstreamId) assert.Equal(t, "apisix.apache.org", ctx.Routes[0].Host) assert.Equal(t, []string{"/bar"}, ctx.Routes[1].Uris) assert.Equal(t, ctx.Upstreams[1].ID, ctx.Routes[1].UpstreamId) assert.Equal(t, "apisix.apache.org", ctx.Routes[1].Host) assert.Equal(t, "roundrobin", ctx.Upstreams[0].Type) assert.Equal(t, "http", ctx.Upstreams[0].Scheme) assert.Len(t, ctx.Upstreams[0].Nodes, 2) assert.Equal(t, 9080, ctx.Upstreams[0].Nodes[0].Port) assert.Equal(t, "192.168.1.1", ctx.Upstreams[0].Nodes[0].Host) assert.Equal(t, 9080, ctx.Upstreams[0].Nodes[1].Port) 
assert.Equal(t, "192.168.1.2", ctx.Upstreams[0].Nodes[1].Host) assert.Equal(t, "roundrobin", ctx.Upstreams[1].Type) assert.Equal(t, "http", ctx.Upstreams[1].Scheme) assert.Len(t, ctx.Upstreams[1].Nodes, 2) assert.Equal(t, 9443, ctx.Upstreams[1].Nodes[0].Port) assert.Equal(t, "192.168.1.1", ctx.Upstreams[1].Nodes[0].Host) assert.Equal(t, 9443, ctx.Upstreams[1].Nodes[1].Port) assert.Equal(t, "192.168.1.2", ctx.Upstreams[1].Nodes[1].Host) assert.Len(t, ctx.PluginConfigs[0].Plugins, 2) assert.Len(t, ctx.PluginConfigs[1].Plugins, 2) } func TestTranslateIngressExtensionsV1beta1BackendWithInvalidService(t *testing.T) { prefix := extensionsv1beta1.PathTypePrefix // no backend. ing := &extensionsv1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", }, Spec: extensionsv1beta1.IngressSpec{ Rules: []extensionsv1beta1.IngressRule{ { Host: "apisix.apache.org", IngressRuleValue: extensionsv1beta1.IngressRuleValue{ HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ Paths: []extensionsv1beta1.HTTPIngressPath{ { Path: "/foo", PathType: &prefix, Backend: extensionsv1beta1.IngressBackend{ ServiceName: "test-service", ServicePort: intstr.IntOrString{ Type: intstr.String, StrVal: "undefined-port", }, }, }, }, }, }, }, }, }, } client := fake.NewSimpleClientset() informersFactory := informers.NewSharedInformerFactory(client, 0) svcInformer := informersFactory.Core().V1().Services().Informer() svcLister := informersFactory.Core().V1().Services().Lister() tr := &translator{ TranslatorOptions: &TranslatorOptions{ ServiceLister: svcLister, }, } ctx, err := tr.translateIngressExtensionsV1beta1(ing) assert.Nil(t, ctx) assert.NotNil(t, err) assert.Equal(t, "service \"test-service\" not found", err.Error()) processCh := make(chan struct{}) svcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { processCh <- struct{}{} }, }) stopCh := make(chan struct{}) defer close(stopCh) go svcInformer.Run(stopCh) cache.WaitForCacheSync(stopCh, svcInformer.HasSynced) _, err = client.CoreV1().Services("default").Create(context.Background(), _testSvc, metav1.CreateOptions{}) assert.Nil(t, err) _, err = client.CoreV1().Endpoints("default").Create(context.Background(), _testEp, metav1.CreateOptions{}) assert.Nil(t, err) <-processCh ctx, err = tr.translateIngressExtensionsV1beta1(ing) assert.Nil(t, ctx) assert.Equal(t, &translateError{ field: "service", reason: "port not found", }, err) } func TestTranslateIngressExtensionsV1beta1WithRegex(t *testing.T) { prefix := extensionsv1beta1.PathTypeImplementationSpecific regexPath := "/foo/*/bar" ing := &extensionsv1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", Annotations: map[string]string{ "k8s.apisix.apache.org/use-regex": "true", }, }, Spec: extensionsv1beta1.IngressSpec{ Rules: []extensionsv1beta1.IngressRule{ { Host: "apisix.apache.org", IngressRuleValue: extensionsv1beta1.IngressRuleValue{ HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ Paths: []extensionsv1beta1.HTTPIngressPath{ { Path: regexPath, PathType: &prefix, Backend: extensionsv1beta1.IngressBackend{ ServiceName: "test-service", ServicePort: intstr.IntOrString{ Type: intstr.String, StrVal: "port1", }, }, }, }, }, }, }, }, }, } client := fake.NewSimpleClientset() informersFactory := informers.NewSharedInformerFactory(client, 0) svcInformer := informersFactory.Core().V1().Services().Informer() svcLister := informersFactory.Core().V1().Services().Lister() epLister, epInformer := kube.NewEndpointListerAndInformer(informersFactory, false) 
	apisixClient := fakeapisix.NewSimpleClientset()
	apisixInformersFactory := apisixinformers.NewSharedInformerFactory(apisixClient, 0)

	processCh := make(chan struct{})
	svcInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			processCh <- struct{}{}
		},
	})
	epInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			processCh <- struct{}{}
		},
	})

	stopCh := make(chan struct{})
	defer close(stopCh)
	go svcInformer.Run(stopCh)
	go epInformer.Run(stopCh)
	cache.WaitForCacheSync(stopCh, svcInformer.HasSynced)

	_, err := client.CoreV1().Services("default").Create(context.Background(), _testSvc, metav1.CreateOptions{})
	assert.Nil(t, err)
	_, err = client.CoreV1().Endpoints("default").Create(context.Background(), _testEp, metav1.CreateOptions{})
	assert.Nil(t, err)

	tr := &translator{
		TranslatorOptions: &TranslatorOptions{
			ServiceLister:        svcLister,
			EndpointLister:       epLister,
			ApisixUpstreamLister: apisixInformersFactory.Apisix().V2beta3().ApisixUpstreams().Lister(),
		},
	}

	<-processCh
	<-processCh
	ctx, err := tr.translateIngressExtensionsV1beta1(ing)
	assert.Nil(t, err)
	assert.Len(t, ctx.Routes, 1)
	assert.Len(t, ctx.Upstreams, 1)
	// the number of PluginConfigs should be zero, because no available annotations matched the rule
	assert.Len(t, ctx.PluginConfigs, 0)

	routeVars, err := tr.translateRouteMatchExprs([]configv2beta3.ApisixRouteHTTPMatchExpr{{
		Subject: configv2beta3.ApisixRouteHTTPMatchExprSubject{
			Scope: apisixconst.ScopePath,
		},
		Op:    apisixconst.OpRegexMatch,
		Value: &regexPath,
	}})
	assert.Nil(t, err)
	var expectedVars v1.Vars = routeVars

	assert.Equal(t, []string{"/*"}, ctx.Routes[0].Uris)
	assert.Equal(t, expectedVars, ctx.Routes[0].Vars)
}
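// NOTE: the tests above repeat one synchronization idiom: a fake clientset,
// informers whose Add handlers signal a channel, and a drain of that channel
// after the fixtures are created, so the listers are warm before translation
// runs. A minimal sketch of that idiom as a helper (hypothetical, not part of
// this file; shown only to document the pattern):
func waitForAdd(informer cache.SharedIndexInformer, stopCh <-chan struct{}) <-chan struct{} {
	processCh := make(chan struct{})
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			processCh <- struct{}{}
		},
	})
	go informer.Run(stopCh)
	cache.WaitForCacheSync(stopCh, informer.HasSynced)
	return processCh
}

// Usage would be: ch := waitForAdd(svcInformer, stopCh); create the Service
// fixture; then receive from ch to block until the lister has observed it.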
TestTranslateIngressV1BackendWithInvalidService
memap.py
#!/usr/bin/env python

"""Memory Map File Analyser for ARM mbed"""

import sys
import os
import re
import csv
import json
import argparse
from prettytable import PrettyTable

from tools.utils import argparse_filestring_type, \
    argparse_lowercase_hyphen_type, argparse_uppercase_type

DEBUG = False

RE_ARMCC = re.compile(
    r'^\s+0x(\w{8})\s+0x(\w{8})\s+(\w+)\s+(\w+)\s+(\d+)\s+[*]?.+\s+(.+)$')
RE_IAR = re.compile(
    r'^\s+(.+)\s+(zero|const|ro code|inited|uninit)\s'
    r'+0x(\w{8})\s+0x(\w+)\s+(.+)\s.+$')


class MemapParser(object):
    """An object that represents parsed results, parses the memory map files,
    and writes out different file types of memory results
    """

    print_sections = ('.text', '.data', '.bss')

    misc_flash_sections = ('.interrupts', '.flash_config')

    other_sections = ('.interrupts_ram', '.init', '.ARM.extab',
                      '.ARM.exidx', '.ARM.attributes', '.eh_frame',
                      '.init_array', '.fini_array', '.jcr', '.stab',
                      '.stabstr', '.ARM.exidx', '.ARM')

    # sections to print info (generic for all toolchains)
    sections = ('.text', '.data', '.bss', '.heap', '.stack')

    def __init__(self):
        """ General initialization
        """

        # list of all modules and their sections
        self.modules = dict()

        # sections must be defined in this order to filter out
        # the irrelevant ones
        self.all_sections = self.sections + self.other_sections + \
            self.misc_flash_sections + ('unknown', 'OUTPUT')

        # list of all object files and mapping to module names
        self.object_to_module = dict()

        # Memory usage summary structure
        self.mem_summary = dict()

    def module_add(self, module_name, size, section):
        """ Adds a module / section to the list

        Positional arguments:
        module_name - name of the module to add
        size - the size of the module being added
        section - the section the module contributes to
        """

        if module_name in self.modules:
            self.modules[module_name][section] += size
        else:
            temp_dic = dict()
            for section_idx in self.all_sections:
                temp_dic[section_idx] = 0
            temp_dic[section] = size
            self.modules[module_name] = temp_dic

    def check_new_section_gcc(self, line):
        """ Check whether a new section in a map file has been detected (only
        applies to gcc)

        Positional arguments:
        line - the line to check for a new section
        """

        for i in self.all_sections:
            if line.startswith(i):
                # return the name of the section (assuming it's a known one)
                return i

        if line.startswith('.'):
            return 'unknown'     # all others are classified as unknown
        else:
            return False         # everything else means no change in section

    @staticmethod
    def path_object_to_module_name(txt):
        """ Parse a path to an object file to extract its module and object
        data

        Positional arguments:
        txt - the path to parse the object and module name from
        """

        txt = txt.replace('\\', '/')
        rex_mbed_os_name = r'^.+mbed-os\/(.+)\/(.+\.o)$'
        test_rex_mbed_os_name = re.match(rex_mbed_os_name, txt)

        if test_rex_mbed_os_name:
            object_name = test_rex_mbed_os_name.group(2)
            data = test_rex_mbed_os_name.group(1).split('/')
            ndata = len(data)

            if ndata == 1:
                module_name = data[0]
            else:
                module_name = data[0] + '/' + data[1]

            return [module_name, object_name]
        else:
            return ['Misc', ""]

    def parse_section_gcc(self, line):
        """ Parse data from a section of a gcc map file

        examples:
                        0x00004308       0x7c ./.build/K64F/GCC_ARM/mbed-os/hal/targets/hal/TARGET_Freescale/TARGET_KPSDK_MCUS/spi_api.o
         .text          0x00000608      0x198 ./.build/K64F/GCC_ARM/mbed-os/core/mbed-rtos/rtx/TARGET_CORTEX_M/TARGET_RTOS_M4_M7/TOOLCHAIN_GCC/HAL_CM4.o

        Positional arguments:
        line - the line to parse a section from
        """
        rex_address_len_name = re.compile(
            r'^\s+.*0x(\w{8,16})\s+0x(\w+)\s(.+)$')

        test_address_len_name = re.match(rex_address_len_name, line)

        if test_address_len_name:
            if int(test_address_len_name.group(2), 16) == 0:  # size == 0
                return ["", 0]  # no valid entry
            else:
                m_name, _ = self.path_object_to_module_name(
                    test_address_len_name.group(3))
                m_size = int(test_address_len_name.group(2), 16)
                return [m_name, m_size]

        else:
            # special corner case for *fill* sections
            # example:
            # *fill*         0x0000abe4        0x4
            rex_address_len = r'^\s+\*fill\*\s+0x(\w{8,16})\s+0x(\w+).*$'
            test_address_len = re.match(rex_address_len, line)

            if test_address_len:
                if int(test_address_len.group(2), 16) == 0:  # size == 0
                    return ["", 0]  # no valid entry
                else:
                    m_name = 'Fill'
                    m_size = int(test_address_len.group(2), 16)
                    return [m_name, m_size]
            else:
                return ["", 0]  # no valid entry

    def parse_map_file_gcc(self, file_desc):
        """ Main logic to decode gcc map files

        Positional arguments:
        file_desc - a stream object to parse as a gcc map file
        """

        current_section = 'unknown'

        with file_desc as infile:
            # Search area to parse
            for line in infile:
                if line.startswith('Linker script and memory map'):
                    current_section = "unknown"
                    break

            # Start decoding the map file
            for line in infile:
                change_section = self.check_new_section_gcc(line)

                if change_section == "OUTPUT":  # finish parsing file: exit
                    break
                elif change_section is not False:
                    current_section = change_section

                [module_name, module_size] = self.parse_section_gcc(line)

                if module_size == 0 or module_name == "":
                    pass
                else:
                    self.module_add(module_name, module_size,
                                    current_section)

                if DEBUG:
                    print "Line: %s" % line,
                    print "Module: %s\tSection: %s\tSize: %s" % \
                        (module_name, current_section, module_size)
                    raw_input("----------")

    def parse_section_armcc(self, line):
        """ Parse data from an armcc map file

        Examples of armcc map file:
            Base_Addr    Size         Type   Attr      Idx    E Section Name        Object
            0x00000000   0x00000400   Data   RO        11222    RESET               startup_MK64F12.o
            0x00000410   0x00000008   Code   RO        49364  * !!!main             c_w.l(__main.o)

        Positional arguments:
        line - the line to parse the section data from
        """

        test_rex_armcc = re.match(RE_ARMCC, line)

        if test_rex_armcc:
            size = int(test_rex_armcc.group(2), 16)

            if test_rex_armcc.group(4) == 'RO':
                section = '.text'
            else:
                if test_rex_armcc.group(3) == 'Data':
                    section = '.data'
                elif test_rex_armcc.group(3) == 'Zero':
                    section = '.bss'
                else:
                    print "BUG armcc map parser"
                    raw_input()

            # lookup object in dictionary and return module name
            object_name = test_rex_armcc.group(6)
            if object_name in self.object_to_module:
                module_name = self.object_to_module[object_name]
            else:
                module_name = 'Misc'

            return [module_name, size, section]

        else:
            return ["", 0, ""]  # no valid entry

    def parse_section_iar(self, line):
        """ Parse data from an IAR map file

        Examples of IAR map file:
         Section             Kind        Address     Size  Object
         .intvec             ro code  0x00000000    0x198  startup_MK64F12.o [15]
         .rodata             const    0x00000198      0x0  zero_init3.o [133]
         .iar.init_table     const    0x00008384     0x2c  - Linker created -
         Initializer bytes   const    0x00000198     0xb2  <for P3 s0>
         .data               inited   0x20000000     0xd4  driverAtmelRFInterface.o [70]
         .bss                zero     0x20000598    0x318  RTX_Conf_CM.o [4]
         .iar.dynexit        uninit   0x20001448    0x204  <Block tail>
           HEAP              uninit   0x20001650  0x10000  <Block tail>

        Positional arguments:
        line - the line to parse section data from
        """

        test_rex_iar = re.match(RE_IAR, line)

        if test_rex_iar:
            size = int(test_rex_iar.group(4), 16)

            if test_rex_iar.group(2) == 'const' or \
               test_rex_iar.group(2) == 'ro code':
                section = '.text'
            elif test_rex_iar.group(2) == 'zero' or \
                 test_rex_iar.group(2) == 'uninit':
                if test_rex_iar.group(1)[0:4] == 'HEAP':
                    section = '.heap'
                elif test_rex_iar.group(1)[0:6] == 'CSTACK':
                    section = '.stack'
                else:
                    section = '.bss'  # default section
            elif test_rex_iar.group(2) == 'inited':
                section = '.data'
            else:
                print "BUG IAR map parser"
                raw_input()

            # lookup object in dictionary and return module name
            object_name = test_rex_iar.group(5)
            if object_name in self.object_to_module:
                module_name = self.object_to_module[object_name]
            else:
                module_name = 'Misc'

            return [module_name, size, section]

        else:
            return ["", 0, ""]  # no valid entry

    def parse_map_file_armcc(self, file_desc):
        """ Main logic to decode armc5 map files

        Positional arguments:
        file_desc - a file like object to parse as an armc5 map file
        """

        with file_desc as infile:
            # Search area to parse
            for line in infile:
                if line.startswith('    Base Addr    Size'):
                    break

            # Start decoding the map file
            for line in infile:
                [name, size, section] = self.parse_section_armcc(line)

                if size == 0 or name == "" or section == "":
                    pass
                else:
                    self.module_add(name, size, section)

    def parse_map_file_iar(self, file_desc):
        """ Main logic to decode IAR map files

        Positional arguments:
        file_desc - a file like object to parse as an IAR map file
        """

        with file_desc as infile:
            # Search area to parse
            for line in infile:
                if line.startswith('  Section  '):
                    break

            # Start decoding the map file
            for line in infile:
                [name, size, section] = self.parse_section_iar(line)

                if size == 0 or name == "" or section == "":
                    pass
                else:
                    self.module_add(name, size, section)

    def search_objects(self, path, toolchain):
        """ Check whether the specified map file matches the toolchain.
        Searches for object files and creates the mapping: object --> module

        Positional arguments:
        path - the path to the memory map file
        toolchain - the toolchain used to build the project
        """

        path = path.replace('\\', '/')

        # check location of map file
        rex = r'^(.+\/)' + re.escape(toolchain) + r'\/(.+\.map)$'
        test_rex = re.match(rex, path)

        if test_rex:
            search_path = test_rex.group(1) + toolchain + '/mbed-os/'
        else:
            # It looks like this is not an mbed project;
            # the object-to-module mapping cannot be generated
            print "Warning: specified toolchain doesn't match with"\
                  " path to the memory map file."
            return

        for root, _, obj_files in os.walk(search_path):
            for obj_file in obj_files:
                if obj_file.endswith(".o"):
                    module_name, object_name = self.path_object_to_module_name(
                        os.path.join(root, obj_file))

                    if object_name in self.object_to_module:
                        if DEBUG:
                            print "WARNING: multiple usages of object file: %s"\
                                % object_name
                            print "    Current: %s" % \
                                self.object_to_module[object_name]
                            print "    New:     %s" % module_name
                            print " "
                    else:
                        self.object_to_module.update({object_name: module_name})

    export_formats = ["json", "csv-ci", "table"]

    def generate_output(self, export_format, file_output=None):
        """ Generates summary of memory map data

        Positional arguments:
        export_format - the format to dump

        Keyword arguments:
        file_output - the name of the file to write out to (stdout if None)
        """

        try:
            if file_output:
                file_desc = open(file_output, 'wb')
            else:
                file_desc = sys.stdout
        except IOError as error:
            print "I/O error({0}): {1}".format(error.errno, error.strerror)
            return False

        subtotal = dict()
        for k in self.sections:
            subtotal[k] = 0

        # Calculate the section subtotals up front so that every export
        # format (json, csv-ci and table) sees the same totals.
        for i in sorted(self.modules):
            for k in self.sections:
                subtotal[k] += self.modules[i][k]

        # Calculate misc flash sections
        misc_flash_mem = 0
        for i in self.modules:
            for k in self.misc_flash_sections:
                if self.modules[i][k]:
                    misc_flash_mem += self.modules[i][k]

        json_obj = []
        for i in sorted(self.modules):
            json_obj.append({
                "module": i,
                "size": {
                    k: self.modules[i][k] for k in self.print_sections
                }
            })

        summary = {
            'summary': {
                'static_ram': (subtotal['.data'] + subtotal['.bss']),
                'heap': (subtotal['.heap']),
                'stack': (subtotal['.stack']),
                'total_ram': (subtotal['.data'] + subtotal['.bss'] +
                              subtotal['.heap'] + subtotal['.stack']),
                'total_flash': (subtotal['.text'] + subtotal['.data'] +
                                misc_flash_mem),
            }
        }

        self.mem_summary = json_obj + [summary]

        to_call = {'json': self.generate_json,
                   'csv-ci': self.generate_csv,
                   'table': self.generate_table}[export_format]
        to_call(subtotal, misc_flash_mem, file_desc)

        if file_desc is not sys.stdout:
            file_desc.close()

    def generate_json(self, _, dummy, file_desc):
        """Generate a json file from a memory map

        Positional arguments:
        file_desc - the file to write out the final report to
        """
        file_desc.write(json.dumps(self.mem_summary, indent=4))
        file_desc.write('\n')

    def generate_csv(self, subtotal, misc_flash_mem, file_desc):
        """Generate a CSV file from a memory map

        Positional arguments:
        subtotal - total sizes for each section
        misc_flash_mem - size of misc flash sections
        file_desc - the file to write out the final report to
        """
        csv_writer = csv.writer(file_desc, delimiter=',',
                                quoting=csv.QUOTE_NONE)

        csv_module_section = []
        csv_sizes = []
        for i in sorted(self.modules):
            for k in self.print_sections:
                csv_module_section += [i + k]
                csv_sizes += [self.modules[i][k]]

        csv_module_section += ['static_ram']
        csv_sizes += [subtotal['.data'] + subtotal['.bss']]

        csv_module_section += ['heap']
        if subtotal['.heap'] == 0:
            csv_sizes += ['unknown']
        else:
            csv_sizes += [subtotal['.heap']]

        csv_module_section += ['stack']
        if subtotal['.stack'] == 0:
            csv_sizes += ['unknown']
        else:
            csv_sizes += [subtotal['.stack']]

        csv_module_section += ['total_ram']
        csv_sizes += [subtotal['.data'] + subtotal['.bss'] +
                      subtotal['.heap'] + subtotal['.stack']]

        csv_module_section += ['total_flash']
        csv_sizes += [subtotal['.text'] + subtotal['.data'] + misc_flash_mem]

        csv_writer.writerow(csv_module_section)
        csv_writer.writerow(csv_sizes)

    def generate_table(self, subtotal, misc_flash_mem, file_desc):
        """Generate a table from a memory map

        Positional arguments:
        subtotal - total sizes for each section
        misc_flash_mem - size of misc flash sections
        file_desc - the file to write out the final report to
        """
        # Create table
        columns = ['Module']
        columns.extend(self.print_sections)

        table = PrettyTable(columns)
        table.align["Module"] = "l"
        for col in self.print_sections:
            table.align[col] = 'r'

        for i in sorted(self.modules):
            row = [i]

            for k in self.print_sections:
                row.append(self.modules[i][k])

            table.add_row(row)

        subtotal_row = ['Subtotals']
        for k in self.print_sections:
            subtotal_row.append(subtotal[k])

        table.add_row(subtotal_row)

        file_desc.write(table.get_string())
        file_desc.write('\n')

        if subtotal['.heap'] == 0:
            file_desc.write("Allocated Heap: unknown\n")
        else:
            file_desc.write("Allocated Heap: %s bytes\n" %
                            str(subtotal['.heap']))

        if subtotal['.stack'] == 0:
            file_desc.write("Allocated Stack: unknown\n")
        else:
            file_desc.write("Allocated Stack: %s bytes\n" %
                            str(subtotal['.stack']))

        file_desc.write("Total Static RAM memory (data + bss): %s bytes\n" %
                        (str(subtotal['.data'] + subtotal['.bss'])))
        file_desc.write(
            "Total RAM memory (data + bss + heap + stack): %s bytes\n"
            % (str(subtotal['.data'] + subtotal['.bss'] + subtotal['.heap'] +
                   subtotal['.stack'])))
        file_desc.write("Total Flash memory (text + data + misc): %s bytes\n" %
                        (str(subtotal['.text'] + subtotal['.data'] +
                             misc_flash_mem)))

    toolchains = ["ARM", "ARM_STD", "ARM_MICRO", "GCC_ARM", "IAR"]

    def parse(self, mapfile, toolchain):
        """ Parse and decode map file depending on the toolchain

        Positional arguments:
        mapfile - the file name of the memory map file
        toolchain - the toolchain used to create the file
        """

        result = True
        try:
            with open(mapfile, 'r') as file_input:
                if toolchain == "ARM" or toolchain == "ARM_STD" or\
                   toolchain == "ARM_MICRO":
                    self.search_objects(os.path.abspath(mapfile), "ARM")
                    self.parse_map_file_armcc(file_input)
                elif toolchain == "GCC_ARM":
                    self.parse_map_file_gcc(file_input)
                elif toolchain == "IAR":
                    self.search_objects(os.path.abspath(mapfile), toolchain)
                    self.parse_map_file_iar(file_input)
                else:
                    result = False
        except IOError as error:
            print "I/O error({0}): {1}".format(error.errno, error.strerror)
            result = False
        return result

    def
main(): """Entry Point""" version = '0.3.11' # Parser handling parser = argparse.ArgumentParser( description="Memory Map File Analyser for ARM mbed\nversion %s" % version) parser.add_argument( 'file', type=argparse_filestring_type, help='memory map file') parser.add_argument( '-t', '--toolchain', dest='toolchain', help='select a toolchain used to build the memory map file (%s)' % ", ".join(MemapParser.toolchains), required=True, type=argparse_uppercase_type(MemapParser.toolchains, "toolchain")) parser.add_argument( '-o', '--output', help='output file name', required=False) parser.add_argument( '-e', '--export', dest='export', required=False, default='table', type=argparse_lowercase_hyphen_type(MemapParser.export_formats, 'export format'), help="export format (examples: %s: default)" % ", ".join(MemapParser.export_formats)) parser.add_argument('-v', '--version', action='version', version=version) # Parse/run command if len(sys.argv) <= 1: parser.print_help() sys.exit(1) args = parser.parse_args() # Create memap object memap = MemapParser() # Parse and decode a map file if args.file and args.toolchain: if memap.parse(args.file, args.toolchain) is False: sys.exit(0) # Write output in file if args.output != None: memap.generate_output(args.export, args.output) else: # Write output in screen memap.generate_output(args.export) sys.exit(0) if __name__ == "__main__": main()
"""Generate a json file from a memory map Positional arguments: subtotal - total sizes for each module misc_flash_mem - size of misc flash sections file_desc - the file to write out the final report to """ file_desc.write(json.dumps(self.mem_summary, indent=4)) file_desc.write('\n')
message0.go
package multisig

import (
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/go-state-types/abi"
	builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
	init0 "github.com/filecoin-project/specs-actors/actors/builtin/init"
	multisig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig"

	"github.com/filecoin-project/venus/pkg/types/internal"
	"github.com/filecoin-project/venus/pkg/types/specactors"
	init_ "github.com/filecoin-project/venus/pkg/types/specactors/builtin/init"
)

type message0 struct{ from address.Address }

func (m message0) Create(
	signers []address.Address, threshold uint64,
	unlockStart, unlockDuration abi.ChainEpoch,
	initialAmount abi.TokenAmount,
) (*internal.Message, error) {

	lenAddrs := uint64(len(signers))

	if lenAddrs < threshold {
		return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig")
	}

	if threshold == 0 {
		threshold = lenAddrs
	}

	if m.from == address.Undef {
		return nil, xerrors.Errorf("must provide source address")
	}

	if unlockStart != 0 {
		return nil, xerrors.Errorf("actors v0 does not support a non-zero vesting start time")
	}

	// Set up constructor parameters for multisig
	msigParams := &multisig0.ConstructorParams{
		Signers:               signers,
		NumApprovalsThreshold: threshold,
		UnlockDuration:        unlockDuration,
	}

	enc, actErr := specactors.SerializeParams(msigParams)
	if actErr != nil {
		return nil, actErr
	}

	// new actors are created by invoking 'exec' on the init actor with the constructor params
	execParams := &init0.ExecParams{
		CodeCID:           builtin0.MultisigActorCodeID,
		ConstructorParams: enc,
	}

	enc, actErr = specactors.SerializeParams(execParams)
	if actErr != nil {
		return nil, actErr
	}

	return &internal.Message{
		To:     init_.Address,
		From:   m.from,
		Method: builtin0.MethodsInit.Exec,
		Params: enc,
		Value:  initialAmount,
	}, nil
}

func (m message0) Propose(msig, to address.Address, amt abi.TokenAmount,
	method abi.MethodNum, params []byte) (*internal.Message, error) {

	if msig == address.Undef {
		return nil, xerrors.Errorf("must provide a multisig address for proposal")
	}

	if to == address.Undef {
		return nil, xerrors.Errorf("must provide a target address for proposal")
	}

	if amt.Sign() == -1
	if m.from == address.Undef {
		return nil, xerrors.Errorf("must provide source address")
	}

	enc, actErr := specactors.SerializeParams(&multisig0.ProposeParams{
		To:     to,
		Value:  amt,
		Method: method,
		Params: params,
	})
	if actErr != nil {
		return nil, xerrors.Errorf("failed to serialize parameters: %w", actErr)
	}

	return &internal.Message{
		To:     msig,
		From:   m.from,
		Value:  abi.NewTokenAmount(0),
		Method: builtin0.MethodsMultisig.Propose,
		Params: enc,
	}, nil
}

func (m message0) Approve(msig address.Address, txID uint64, hashData *ProposalHashData) (*internal.Message, error) {
	enc, err := txnParams(txID, hashData)
	if err != nil {
		return nil, err
	}

	return &internal.Message{
		To:     msig,
		From:   m.from,
		Value:  internal.NewInt(0),
		Method: builtin0.MethodsMultisig.Approve,
		Params: enc,
	}, nil
}

func (m message0) Cancel(msig address.Address, txID uint64, hashData *ProposalHashData) (*internal.Message, error) {
	enc, err := txnParams(txID, hashData)
	if err != nil {
		return nil, err
	}

	return &internal.Message{
		To:     msig,
		From:   m.from,
		Value:  internal.NewInt(0),
		Method: builtin0.MethodsMultisig.Cancel,
		Params: enc,
	}, nil
}
{
		return nil, xerrors.Errorf("must provide a non-negative amount for proposed send")
	}
console-and-errors-pane.tsx
import React from 'react'
import { useReadOnlyConsoleLogs } from '../../core/shared/runtime-report-logs'
import { setFocus } from '../common/actions'
import { openCodeEditorFile } from '../editor/actions/action-creators'
import { getAllCodeEditorErrors } from '../editor/store/editor-state'
import { useEditorState } from '../editor/store/store-hook'
import { CodeEditorTabPane } from './code-problems'

export const ConsoleAndErrorsPane = React.memo(() => {
  const dispatch = useEditorState((store) => store.dispatch, 'ConsoleAndErrorsPane dispatch')
  const canvasConsoleLogs = useReadOnlyConsoleLogs()
  const errorMessages = useEditorState((store) => {
    return getAllCodeEditorErrors(store.editor, 'warning', false)
  const onOpenFile = React.useCallback(
    (path: string) => {
      dispatch([openCodeEditorFile(path, true), setFocus('codeEditor')])
    },
    [dispatch],
  )

  return (
    <CodeEditorTabPane
      canvasConsoleLogs={canvasConsoleLogs}
      errorMessages={errorMessages}
      onOpenFile={onOpenFile}
    />
  )
})
  }, 'ConsoleAndErrorsPane errorMessages')
172.factorial-trailing-zeroes.py
#
# @lc app=leetcode id=172 lang=python3
#
# [172] Factorial Trailing Zeroes
#

# @lc code=start
class Solution:
if __name__ == '__main__':
    a = Solution()
    b = a.trailingZeroes(200)
    print(b)
# @lc code=end
    def trailingZeroes(self, n):
        # Trailing zeros come from factor pairs of 2 and 5; factors of 5 are
        # the scarcer of the two, so count multiples of 5, 25, 125, ...
        if n < 5:
            return 0
        ans = 0
        base = 5
        while n >= base:
            ans += n // base
            base *= 5
        return ans
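The loop above is Legendre's formula restricted to the prime 5: floor(n/5) counts one factor of 5 per multiple of 5, floor(n/25) adds the second factor carried by multiples of 25, and so on. A worked check (mine, not from the source) against the n = 200 call in the harness above:

# 200//5 = 40, 200//25 = 8, 200//125 = 1, 200//625 = 0, so 40 + 8 + 1 = 49
assert Solution().trailingZeroes(200) == 49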
bitcoin_ky.ts
<?xml version="1.0" ?><!DOCTYPE TS><TS language="ky" version="2.1"> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About Rikeza</source> <translation type="unfinished"/> </message> <message> <location line="+39"/> <source>&lt;b&gt;Rikeza&lt;/b&gt; version</source> <translation type="unfinished"/> </message> <message> <location line="+41"/> <source>Copyright © 2009-2014 The Bitcoin developers Copyright © 2012-2014 The NovaCoin developers Copyright © 2014 The Rikeza developers</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <source>Double-click to edit address or label</source> <translation type="unfinished"/> </message> <message> <location line="+27"/> <source>Create a new address</source> <translation>Жаң даректи жасоо</translation> </message> <message> <location line="+14"/> <source>Copy the currently selected address to the system clipboard</source> <translation type="unfinished"/> </message> <message> <location line="-11"/> <source>&amp;New Address</source> <translation type="unfinished"/> </message> <message> <location line="-46"/> <source>These are your Rikeza addresses for receiving payments. 
You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation type="unfinished"/> </message> <message> <location line="+60"/> <source>&amp;Copy Address</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Show &amp;QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Sign a message to prove you own a Rikeza address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Delete the currently selected address from the list</source> <translation type="unfinished"/> </message> <message> <location line="-14"/> <source>Verify a message to ensure it was signed with a specified Rikeza address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>&amp;Delete</source> <translation>Ө&amp;чүрүү</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+65"/> <source>Copy &amp;Label</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>&amp;Edit</source> <translation type="unfinished"/> </message> <message> <location line="+250"/> <source>Export Address Book Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+144"/> <source>Label</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Address</source> <translation>Дарек</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(аты жок)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation type="unfinished"/> </message> <message> <location line="+33"/> <source>Serves to disable the trivial sendmoney when OS account compromised. 
Provides no real security.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>For staking only</source> <translation type="unfinished"/> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+35"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;10 or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Encrypt wallet</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation type="unfinished"/> </message> <message> <location line="+46"/> <source>Confirm wallet encryption</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR COINS&lt;/b&gt;!</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation type="unfinished"/> </message> <message> <location line="+103"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation type="unfinished"/> </message> <message> <location line="-133"/> <location line="+60"/> <source>Wallet encrypted</source> <translation type="unfinished"/> </message> <message> <location line="-58"/> <source>Rikeza will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+44"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation type="unfinished"/> </message> <message> <location line="-56"/> <source>Wallet encryption failed due to an internal error. 
Your wallet was not encrypted.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <location line="+50"/> <source>The supplied passphrases do not match.</source> <translation type="unfinished"/> </message> <message> <location line="-38"/> <source>Wallet unlock failed</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <location line="+12"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation type="unfinished"/> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation type="unfinished"/> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+280"/> <source>Sign &amp;message...</source> <translation type="unfinished"/> </message> <message> <location line="+242"/> <source>Synchronizing with network...</source> <translation type="unfinished"/> </message> <message> <location line="-308"/> <source>&amp;Overview</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show general overview of wallet</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>&amp;Transactions</source> <translation>&amp;Транзакциялар</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>&amp;Address Book</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Edit the list of stored addresses and labels</source> <translation type="unfinished"/> </message> <message> <location line="-13"/> <source>&amp;Receive coins</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show the list of addresses for receiving payments</source> <translation type="unfinished"/> </message> <message> <location line="-7"/> <source>&amp;Send coins</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>E&amp;xit</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Quit application</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Show information about Rikeza</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>&amp;Encrypt Wallet...</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Backup Wallet...</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+250"/> <source>~%n block(s) remaining</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source> <translation 
type="unfinished"/> </message> <message> <location line="-247"/> <source>&amp;Export...</source> <translation type="unfinished"/> </message> <message> <location line="-62"/> <source>Send coins to a Rikeza address</source> <translation type="unfinished"/> </message> <message> <location line="+45"/> <source>Modify configuration options for Rikeza</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Export the data in the current tab to a file</source> <translation type="unfinished"/> </message> <message> <location line="-14"/> <source>Encrypt or decrypt wallet</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup wallet to another location</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>&amp;Debug window</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>&amp;Verify message...</source> <translation>Билдирүүнү &amp;текшерүү...</translation> </message> <message> <location line="-200"/> <source>Rikeza</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Wallet</source> <translation>Капчык</translation> </message> <message> <location line="+178"/> <source>&amp;About Rikeza</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Unlock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>&amp;Lock Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Lock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <source>&amp;File</source> <translation>&amp;Файл</translation> </message> <message> <location line="+8"/> <source>&amp;Settings</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>&amp;Help</source> <translation>&amp;Жардам</translation> </message> <message> <location line="+9"/> <source>Tabs toolbar</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Actions toolbar</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+9"/> <source>[testnet]</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <location line="+60"/> <source>Rikeza client</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+70"/> <source>%n active connection(s) to Rikeza network</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+40"/> <source>Downloaded %1 blocks of transaction history.</source> <translation type="unfinished"/> </message> <message> <location line="+413"/> <source>Staking.&lt;br&gt;Your weight is %1&lt;br&gt;Network weight is %2&lt;br&gt;Expected time to earn reward is %3</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Not staking because wallet is locked</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> 
<source>Not staking because wallet is offline</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because wallet is syncing</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because you don&apos;t have mature coins</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-403"/> <source>%n second(s) ago</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="-284"/> <source>&amp;Unlock Wallet...</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+288"/> <source>%n minute(s) ago</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n hour(s) ago</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s) ago</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Up to date</source> <translation>Жаңыланган</translation> </message> <message> <location line="+7"/> <source>Catching up...</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Last received block was generated %1.</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm transaction fee</source> <translation type="unfinished"/> </message> <message> <location line="+27"/> <source>Sent transaction</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Incoming transaction</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation type="unfinished"/> </message> <message> <location line="+100"/> <location line="+15"/> <source>URI handling</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <location line="+15"/> <source>URI can not be parsed! 
This can be caused by an invalid Rikeza address or malformed URI parameters.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Backup Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+76"/> <source>%n second(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n minute(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n hour(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+18"/> <source>Not staking</source> <translation type="unfinished"/> </message> <message> <location filename="../bitcoin.cpp" line="+109"/> <source>A fatal error occurred. 
Rikeza can no longer continue safely and will quit.</source> <translation type="unfinished"/> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+90"/> <source>Network Alert</source> <translation type="unfinished"/> </message> </context> <context> <name>CoinControlDialog</name> <message> <location filename="../forms/coincontroldialog.ui" line="+14"/> <source>Coin Control</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Quantity:</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Bytes:</source> <translation type="unfinished"/> </message> <message> <location line="+48"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Priority:</source> <translation type="unfinished"/> </message> <message> <location line="+48"/> <source>Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <location filename="../coincontroldialog.cpp" line="+551"/> <source>no</source> <translation type="unfinished"/> </message> <message> <location filename="../forms/coincontroldialog.ui" line="+51"/> <source>After Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Change:</source> <translation type="unfinished"/> </message> <message> <location line="+69"/> <source>(un)select all</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Tree mode</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>List mode</source> <translation type="unfinished"/> </message> <message> <location line="+45"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Label</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Address</source> <translation>Дарек</translation> </message> <message> <location line="+5"/> <source>Date</source> <translation>Дата</translation> </message> <message> <location line="+5"/> <source>Confirmations</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Confirmed</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Priority</source> <translation type="unfinished"/> </message> <message> <location filename="../coincontroldialog.cpp" line="-515"/> <source>Copy address</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <location line="+26"/> <source>Copy amount</source> <translation type="unfinished"/> </message> <message> <location line="-25"/> <source>Copy transaction ID</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>Copy quantity</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Copy fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation type="unfinished"/> </message> <message> <location 
line="+1"/> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy change</source> <translation type="unfinished"/> </message> <message> <location line="+317"/> <source>highest</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>high</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>medium-high</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>medium</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>low-medium</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>low</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>lowest</source> <translation type="unfinished"/> </message> <message> <location line="+155"/> <source>DUST</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>yes</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>This label turns red, if the transaction size is bigger than 10000 bytes. This means a fee of at least %1 per kb is required. Can vary +/- 1 Byte per input.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Transactions with higher priority get more likely into a block. This label turns red, if the priority is smaller than &quot;medium&quot;. This means a fee of at least %1 per kb is required.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if any recipient receives an amount smaller than %1. This means a fee of at least %2 is required. Amounts below 0.546 times the minimum relay fee are shown as DUST.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if the change is smaller than %1. This means a fee of at least %2 is required.</source> <translation type="unfinished"/> </message> <message> <location line="+37"/> <location line="+66"/> <source>(no label)</source> <translation>(аты жок)</translation> </message> <message> <location line="-9"/> <source>change from %1 (%2)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>(change)</source> <translation type="unfinished"/> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>&amp;Дарек</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. 
This can only be modified for sending addresses.</source> <translation type="unfinished"/> </message> <message> <location filename="../editaddressdialog.cpp" line="+20"/> <source>New receiving address</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>New sending address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation type="unfinished"/> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid Rikeza address.</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation type="unfinished"/> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+420"/> <location line="+12"/> <source>Rikeza-Qt</source> <translation type="unfinished"/> </message> <message> <location line="-12"/> <source>version</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Usage:</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>UI options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation type="unfinished"/> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. Fee 0.01 recommended.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Reserved amount does not participate in staking and is therefore spendable at any time.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Reserve</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Automatically start Rikeza after logging in to the system.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Start Rikeza on system login</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Detach block and address databases at shutdown. 
This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Detach databases at shutdown</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>&amp;Network</source> <translation>&amp;Тармак</translation> </message> <message> <location line="+6"/> <source>Automatically open the Rikeza client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Connect to the Rikeza network through a SOCKS proxy (e.g. when connecting through Tor).</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy:</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Proxy &amp;IP:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation>&amp;Порт:</translation> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>SOCKS &amp;Version:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation type="unfinished"/> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation>&amp;Терезе</translation> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>The user interface language can be set here. 
This setting will take effect after restarting Rikeza.</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Whether to show Rikeza addresses in the transaction list or not.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Display addresses in transaction list</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Whether to show coin control features or not.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Display coin &amp;control features (experts only!)</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation>&amp;Жарайт</translation> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation>&amp;Жокко чыгаруу</translation> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation type="unfinished"/> </message> <message> <location filename="../optionsdialog.cpp" line="+55"/> <source>default</source> <translation>жарыяланбаган</translation> </message> <message> <location line="+149"/> <location line="+9"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting Rikeza.</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The supplied proxy address is invalid.</source> <translation type="unfinished"/> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation type="unfinished"/> </message> <message> <location line="+33"/> <location line="+231"/> <source>The displayed information may be out of date. 
Your wallet automatically synchronizes with the Rikeza network after a connection is established, but this process has not completed yet.</source> <translation type="unfinished"/> </message> <message> <location line="-160"/> <source>Stake:</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>Unconfirmed:</source> <translation type="unfinished"/> </message> <message> <location line="-107"/> <source>Wallet</source> <translation>Капчык</translation> </message> <message> <location line="+49"/> <source>Spendable:</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Your current spendable balance</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>Immature:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation type="unfinished"/> </message> <message> <location line="+20"/> <source>Total:</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Your current total balance</source> <translation type="unfinished"/> </message> <message> <location line="+46"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation type="unfinished"/> </message> <message> <location line="-108"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location line="-29"/> <source>Total of coins that was staked, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location filename="../overviewpage.cpp" line="+113"/> <location line="+1"/> <source>out of sync</source> <translation>синхрондоштурулган эмес</translation> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation type="unfinished"/> </message> <message> <location line="+56"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="-44"/> <source>Label:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Message:</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation type="unfinished"/> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> <translation type="unfinished"/> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location 
line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <location line="+23"/> <location filename="../rpcconsole.cpp" line="+348"/> <source>N/A</source> <translation type="unfinished"/> </message> <message> <location line="-217"/> <source>Client version</source> <translation type="unfinished"/> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation type="unfinished"/> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation type="unfinished"/> </message> <message> <location line="+49"/> <source>Startup time</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>Network</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>On testnet</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Block chain</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Estimated total blocks</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Last block time</source> <translation type="unfinished"/> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation>&amp;Ачуу</translation> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Show the Rikeza-Qt help message to get a list with possible Rikeza command-line options.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation>&amp;Консоль</translation> </message> <message> <location line="-260"/> <source>Build date</source> <translation type="unfinished"/> </message> <message> <location line="-104"/> <source>Rikeza - Debug window</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Rikeza Core</source> <translation type="unfinished"/> </message> <message> <location line="+279"/> <source>Debug log file</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Open the Rikeza debug log file from the current data directory. 
This can take a few seconds for large log files.</source> <translation type="unfinished"/> </message> <message> <location line="+102"/> <source>Clear console</source> <translation>Консолду тазалоо</translation> </message> <message> <location filename="../rpcconsole.cpp" line="-33"/> <source>Welcome to the Rikeza RPC console.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+182"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation type="unfinished"/> </message> <message> <location line="+76"/> <source>Coin Control Features</source> <translation type="unfinished"/> </message> <message> <location line="+20"/> <source>Inputs...</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>automatically selected</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Insufficient funds!</source> <translation type="unfinished"/> </message> <message> <location line="+77"/> <source>Quantity:</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <location line="+35"/> <source>0</source> <translation type="unfinished"/> </message> <message> <location line="-19"/> <source>Bytes:</source> <translation type="unfinished"/> </message> <message> <location line="+51"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <location line="+86"/> <location line="+86"/> <location line="+32"/> <source>0.00 BC</source> <translation type="unfinished"/> </message> <message> <location line="-191"/> <source>Priority:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>medium</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>no</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>After Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Change</source> <translation type="unfinished"/> </message> <message> <location line="+50"/> <source>custom change address</source> <translation type="unfinished"/> </message> <message> <location line="+106"/> <source>Send to multiple recipients at once</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation type="unfinished"/> </message> <message> <location line="+20"/> <source>Remove all transaction fields</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation>&amp;Бардыгын тазалоо</translation> </message> <message> <location line="+28"/> <source>Balance:</source> <translation 
type="unfinished"/> </message> <message> <location line="+16"/> <source>123.456 BC</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Confirm the send action</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation>&amp;Жөнөтүү</translation> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-173"/> <source>Enter a Rikeza address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Copy quantity</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy change</source> <translation type="unfinished"/> </message> <message> <location line="+86"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source> and </source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The recipient address is not valid, please recheck.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>The amount exceeds your balance.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="+251"/> <source>WARNING: Invalid Rikeza address</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>(no label)</source> <translation>(аты жок)</translation> </message> <message> <location line="+4"/> <source>WARNING: unknown change address</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation type="unfinished"/> </message> <message>
<location line="+13"/> <source>Pay &amp;To:</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <location filename="../sendcoinsentry.cpp" line="+25"/> <source>Enter a label for this address to add it to your address book</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>&amp;Label:</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>The address to send the payment to (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Choose address from address book</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation>Даректи алмашуу буферинен коюу</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation type="unfinished"/> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a Rikeza address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+124"/> <source>&amp;Sign Message</source> <translation type="unfinished"/> </message> <message> <location line="-118"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. 
B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <location line="+203"/> <source>Choose an address from the address book</source> <translation type="unfinished"/> </message> <message> <location line="-193"/> <location line="+203"/> <source>Alt+A</source> <translation type="unfinished"/> </message> <message> <location line="-193"/> <source>Paste address from clipboard</source> <translation>Даректи алмашуу буферинен коюу</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>Copy the current signature to the system clipboard</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this Rikeza address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all sign message fields</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation>&amp;Бардыгын тазалоо</translation> </message> <message> <location line="-87"/> <location line="+70"/> <source>&amp;Verify Message</source> <translation type="unfinished"/> </message> <message> <location line="-64"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified Rikeza address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all verify message fields</source> <translation type="unfinished"/> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a Rikeza address (e.g. 
B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Enter Rikeza signature</source> <translation type="unfinished"/> </message> <message> <location line="+82"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation type="unfinished"/> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation type="unfinished"/> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation type="unfinished"/> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+19"/> <source>Open until %1</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-2"/> <source>Open for %n block(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+8"/> <source>conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1/offline</source> <translation>%1/тармакта эмес</translation> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Status</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation>Дата</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Generated</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <location line="+17"/> <source>From</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> 
<location line="+22"/> <location line="+58"/> <source>To</source> <translation type="unfinished"/> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>label</source> <translation type="unfinished"/> </message> <message> <location line="+37"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation type="unfinished"/> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation type="unfinished"/> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Net amount</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Message</source> <translation>Билдирүү</translation> </message> <message> <location line="+2"/> <source>Comment</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Generated coins must mature 20 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. 
This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Debug information</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Transaction</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Inputs</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>true</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>false</source> <translation type="unfinished"/> </message> <message> <location line="-211"/> <source>, has not been successfully broadcast yet</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>unknown</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+226"/> <source>Date</source> <translation>Дата</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Address</source> <translation>Дарек</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+60"/> <source>Open until %1</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>Confirmed (%1 confirmations)</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-15"/> <source>Open for %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Offline</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Unconfirmed</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Confirming (%1 of %2 recommended confirmations)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Immature (%1 confirmations, will be available after %2)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation type="unfinished"/> </message> <message> <location line="+42"/> <source>Received with</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Received from</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Sent to</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Payment to yourself</source> 
<translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Mined</source> <translation type="unfinished"/> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation type="unfinished"/> </message> <message> <location line="+190"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+55"/> <location line="+16"/> <source>All</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <source>Today</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This week</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This month</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Last month</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This year</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Range...</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Received with</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Sent to</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>To yourself</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Mined</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Other</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Min amount</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <source>Copy address</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Edit label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation type="unfinished"/> </message> <message> <location line="+144"/> <source>Export Transaction Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Confirmed</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Date</source> 
<translation>Дата</translation> </message> <message> <location line="+1"/> <source>Type</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Address</source> <translation>Дарек</translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>ID</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> <message> <location line="+100"/> <source>Range:</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>to</source> <translation type="unfinished"/> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+206"/> <source>Sending...</source> <translation type="unfinished"/> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+33"/> <source>Rikeza version</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Usage:</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Send command to -server or rikezad</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>List commands</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Get help for a command</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Options:</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Specify configuration file (default: rikeza.conf)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Specify pid file (default: rikezad.pid)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Specify wallet file (within data directory)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Set database cache size in megabytes (default: 25)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set database disk log size in megabytes (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Listen for connections on &lt;port&gt; (default: 27473 or testnet: 37473)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Specify your own public address</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Bind to given address. 
Use [host]:port notation for IPv6</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Stake your coins to support network and gain reward (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation type="unfinished"/> </message> <message> <location line="-44"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation type="unfinished"/> </message> <message> <location line="+51"/> <source>Detach block and address databases. Increases shutdown time (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+109"/> <source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source> <translation type="unfinished"/> </message> <message> <location line="-87"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 27474 or testnet: 37474)</source> <translation type="unfinished"/> </message> <message> <location line="-11"/> <source>Accept command line and JSON-RPC commands</source> <translation type="unfinished"/> </message> <message> <location line="+101"/> <source>Error: Transaction creation failed </source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>Error: Wallet locked, unable to create transaction </source> <translation type="unfinished"/> </message> <message> <location line="-8"/> <source>Importing blockchain data file.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Importing bootstrap blockchain data file.</source> <translation type="unfinished"/> </message> <message> <location line="-88"/> <source>Run in the background as a daemon and accept commands</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use the test network</source> <translation type="unfinished"/> </message> <message> <location line="-24"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation type="unfinished"/> </message> <message> <location line="-38"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation type="unfinished"/> </message> <message> <location line="+117"/> <source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source> <translation type="unfinished"/> </message> <message> <location line="-20"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Warning: -paytxfee is set very high! 
This is the transaction fee you will pay if you send a transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+61"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong Rikeza will not work properly.</source> <translation type="unfinished"/> </message> <message> <location line="-31"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation type="unfinished"/> </message> <message> <location line="-18"/> <source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation type="unfinished"/> </message> <message> <location line="-30"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Block creation options:</source> <translation type="unfinished"/> </message> <message> <location line="-62"/> <source>Connect only to the specified node(s)</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation type="unfinished"/> </message> <message> <location line="+94"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation type="unfinished"/> </message> <message> <location line="-90"/> <source>Find peers using DNS lookup (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Sync checkpoints policy (default: strict)</source> <translation type="unfinished"/> </message> <message> <location line="+83"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Invalid amount for -reservebalance=&lt;amount&gt;</source> <translation type="unfinished"/> </message> <message> <location line="-82"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation type="unfinished"/> </message> <message> <location line="-16"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Output extra debugging information. 
Implies all other -debug* options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Output extra network debugging information</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Prepend debug output with timestamp</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source> <translation type="unfinished"/> </message> <message> <location line="-74"/> <source>Select the version of socks proxy to use (4-5, default: 5)</source> <translation type="unfinished"/> </message> <message> <location line="+41"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Send trace/debug info to debugger</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="-29"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation type="unfinished"/> </message> <message> <location line="-42"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation type="unfinished"/> </message> <message> <location line="+109"/> <source>Unable to sign checkpoint, wrong checkpointkey? </source> <translation type="unfinished"/> </message> <message> <location line="-80"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation type="unfinished"/> </message> <message> <location line="-25"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation type="unfinished"/> </message> <message> <location line="+42"/> <source>Username for JSON-RPC connections</source> <translation type="unfinished"/> </message> <message> <location line="+47"/> <source>Verifying database integrity...</source> <translation type="unfinished"/> </message> <message> <location line="+57"/> <source>WARNING: syncronized checkpoint violation detected, but skipped!</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Warning: Disk space is low!</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation type="unfinished"/> </message> <message> <location line="-48"/> <source>wallet.dat corrupt, salvage failed</source> <translation type="unfinished"/> </message> <message> <location line="-54"/> <source>Password for JSON-RPC connections</source> <translation type="unfinished"/> </message> <message> <location line="-84"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=rikezarpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. 
It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;Rikeza Alert&quot; [email protected] </source> <translation type="unfinished"/> </message> <message> <location line="+51"/> <source>Find peers using internet relay chat (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Require a confirmations for change (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Upgrade wallet to latest format</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Rescan the block chain for missing wallet transactions</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>How many blocks to check at startup (default: 2500, 0 = all)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>How thorough the block verification is (0-6, default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Imports blocks from external blk000?.dat file</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Server certificate file (default: server.cert)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source> <translation type="unfinished"/> </message> <message> <location line="+53"/> <source>Error: Wallet unlocked for staking only, unable to create transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>WARNING: Invalid checkpoint 
found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source> <translation type="unfinished"/> </message> <message> <location line="-158"/> <source>This help message</source> <translation type="unfinished"/> </message> <message> <location line="+95"/> <source>Wallet %s resides outside data directory %s.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot obtain a lock on data directory %s. Rikeza is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-98"/> <source>Rikeza</source> <translation type="unfinished"/> </message> <message> <location line="+140"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation type="unfinished"/> </message> <message> <location line="-130"/> <source>Connect through socks proxy</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation type="unfinished"/> </message> <message> <location line="+122"/> <source>Loading addresses...</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <source>Error loading blkindex.dat</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Error loading wallet.dat: Wallet requires newer version of Rikeza</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Wallet needed to be rewritten: restart Rikeza to complete</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Error loading wallet.dat</source> <translation type="unfinished"/> </message> <message> <location line="-16"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Unknown -socks proxy version requested: %i</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="-24"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+44"/> <source>Error: could not start node</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Sending...</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Invalid amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Insufficient funds</source> <translation type="unfinished"/> </message> <message> <location line="-34"/> <source>Loading block index...</source> <translation type="unfinished"/> </message> <message> <location line="-103"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation type="unfinished"/> </message> <message> <location line="+122"/> <source>Unable to bind to %s on 
this computer. Rikeza is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-97"/> <source>Fee per KB to add to transactions you send</source> <translation type="unfinished"/> </message> <message> <location line="+55"/> <source>Invalid amount for -mininput=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Loading wallet...</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Cannot downgrade wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot initialize keypool</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot write default address</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Rescanning...</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Done loading</source> <translation type="unfinished"/> </message> <message> <location line="-167"/> <source>To use the %s option</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>Error</source> <translation>Ката</translation> </message> <message> <location line="+6"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation type="unfinished"/> </message> </context> </TS>
home.go
// Copyright The Helm Authors, SUSE LLC.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package hypperpath calculates filesystem paths to Hypper's configuration, cache and data.
package hypperpath

// This helper builds paths to Hypper's configuration, cache and data paths.
const lp = lazypath("hypper")

// ConfigPath returns the path where Hypper stores configuration.
func ConfigPath(elem ...string) string { return lp.configPath(elem...) }

// CachePath returns the path where Hypper stores cached objects.
func CachePath(elem ...string) string { return lp.cachePath(elem...) }

// DataPath returns the path where Hypper stores data.
func DataPath(elem ...string) string { return lp.dataPath(elem...) }

// CacheIndexFile returns the path to an index for the given named repository.
func CacheIndexFile(name string) string {
	if name != "" {
		name += "-"
	}
	return name + "index.yaml"
}

// CacheChartsFile returns the path to a text file listing all the charts
// within the given named repository.
func CacheChartsFile(name string) string {
	if name != "" {
		name += "-"
	}
	return name + "charts.txt"
}
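A brief usage sketch follows. It is not part of the package: the import path, the main wrapper, and the repository name "stable" are assumptions for illustration; the composition with CachePath mirrors how Helm-derived code typically joins these helpers.

// Illustrative only: shows how the bare file names returned by
// CacheIndexFile/CacheChartsFile are anchored under the cache directory.
package main

import (
	"fmt"

	"github.com/rancher-sandbox/hypper/internal/hypperpath" // assumed import path
)

func main() {
	// CacheIndexFile("stable") yields "stable-index.yaml"; CachePath
	// prefixes it with Hypper's cache directory and the "repository" subdir.
	fmt.Println(hypperpath.CachePath("repository", hypperpath.CacheIndexFile("stable")))
	fmt.Println(hypperpath.CachePath("repository", hypperpath.CacheChartsFile("stable")))
}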
utils.py
""" mpld3 Utilities =============== Utility routines for the mpld3 package """ import os import re import shutil import warnings from functools import wraps from . import urls # Make sure that DeprecationWarning gets printed warnings.simplefilter("always", DeprecationWarning) def html_id_ok(objid, html5=False):
def get_id(obj, suffix="", prefix="el", warn_on_invalid=True): """Get a unique id for the object""" if not suffix: suffix = "" if not prefix: prefix = "" objid = prefix + str(os.getpid()) + str(id(obj)) + suffix if warn_on_invalid and not html_id_ok(objid): warnings.warn('"{0}" is not a valid html ID. This may cause problems') return objid def deprecated(func, old_name, new_name): """Decorator to mark functions as deprecated.""" @wraps(func) def new_func(*args, **kwargs): warnings.warn(("{0} is deprecated and will be removed. " "Use {1} instead".format(old_name, new_name)), category=DeprecationWarning) return func(*args, **kwargs) new_func.__doc__ = ("*%s is deprecated: use %s instead*\n\n " % (old_name, new_name)) + new_func.__doc__ return new_func def write_ipynb_local_js(location=None, d3_src=None, mpld3_src=None): """ Write the mpld3 and d3 javascript libraries to the given file location. This utility is used by the IPython notebook tools to enable easy use of mpld3 with no web connection. Parameters ---------- location : string (optioal) the directory in which the d3 and mpld3 javascript libraries will be written. If not specified, the IPython nbextensions directory will be used. If IPython doesn't support nbextensions (< 2.0), the current working directory will be used. d3_src : string (optional) the source location of the d3 library. If not specified, the standard path in mpld3.urls.D3_LOCAL will be used. mpld3_src : string (optional) the source location of the mpld3 library. If not specified, the standard path in mpld3.urls.MPLD3_LOCAL will be used. Returns ------- d3_url, mpld3_url : string The URLs to be used for loading these js files. """ if location is None: try: from IPython.html import install_nbextension except ImportError: location = os.getcwd() nbextension = False else: nbextension = True else: nbextension = False if d3_src is None: d3_src = urls.D3_LOCAL if mpld3_src is None: mpld3_src = urls.MPLD3_LOCAL d3js = os.path.basename(d3_src) mpld3js = os.path.basename(mpld3_src) if not os.path.exists(d3_src): raise ValueError("d3 src not found at '{0}'".format(d3_src)) if not os.path.exists(mpld3_src): raise ValueError("mpld3 src not found at '{0}'".format(mpld3_src)) if nbextension: # IPython 2.0+. # This will not work if a url prefix is added prefix = '/nbextensions/' try: install_nbextension([d3_src, mpld3_src]) except IOError: # files may be read only. We'll try deleting them and re-installing from IPython.utils.path import get_ipython_dir nbext = os.path.join(get_ipython_dir(), "nbextensions") for src in [d3_src, mpld3_src]: dest = os.path.join(nbext, os.path.basename(src)) if os.path.exists(dest): os.remove(dest) install_nbextension([d3_src, mpld3_src]) else: # IPython < 2.0 or explicit path. # This won't work if users have changed the kernel directory. prefix = '/files/' d3_dest = os.path.join(location, d3js) mpld3_dest = os.path.join(location, mpld3js) for src, dest in [(d3_src, d3_dest), (mpld3_src, mpld3_dest)]: try: shutil.copyfile(src, dest) except IOError: # file may be read only. We'll try deleting it first if os.path.exists(dest): os.remove(dest) shutil.copyfile(src, dest) return prefix + d3js, prefix + mpld3js
"""Check whether objid is valid as an HTML id attribute. If html5 == True, then use the more liberal html5 rules. """ if html5: return not re.search('\s', objid) else: return bool(re.match("^[a-zA-Z][a-zA-Z0-9\-\.\:\_]*$", objid))
into_sink.rs
use futures_core::ready;
use futures_core::task::{Context, Poll};
use futures_io::AsyncWrite;
use futures_sink::Sink;
use std::io;
use std::pin::Pin;
use pin_project::pin_project;

#[derive(Debug)]
struct Block<Item> {
    offset: usize,
    bytes: Item,
}

/// Sink for the [`into_sink`](super::AsyncWriteExt::into_sink) method.
#[pin_project]
#[must_use = "sinks do nothing unless polled"]
#[derive(Debug)]
#[cfg_attr(docsrs, doc(cfg(feature = "sink")))]
pub struct IntoSink<W, Item> {
    #[pin]
    writer: W,
    /// An outstanding block for us to push into the underlying writer, along with an offset of how
    /// far into this block we have written already.
    buffer: Option<Block<Item>>,
}

impl<W: AsyncWrite, Item: AsRef<[u8]>> IntoSink<W, Item> {
    pub(super) fn new(writer: W) -> Self {
        Self { writer, buffer: None }
    }

    /// If we have an outstanding block in `buffer` attempt to push it into the writer, does _not_
    /// flush the writer after it succeeds in pushing the block into it.
    fn poll_flush_buffer(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), io::Error>> {
        let mut this = self.project();
        if let Some(buffer) = this.buffer {
            loop {
                let bytes = buffer.bytes.as_ref();
                let written = ready!(this.writer.as_mut().poll_write(cx, &bytes[buffer.offset..]))?;
                buffer.offset += written;
                if buffer.offset == bytes.len() {
                    break;
                }
            }
        }
        *this.buffer = None;
        Poll::Ready(Ok(()))
    }
}

impl<W: AsyncWrite, Item: AsRef<[u8]>> Sink<Item> for IntoSink<W, Item> {
    type Error = io::Error;

    fn poll_ready(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), Self::Error>> {
        ready!(self.poll_flush_buffer(cx))?;
        Poll::Ready(Ok(()))
    }

    #[allow(clippy::debug_assert_with_mut_call)]
    fn start_send(
        self: Pin<&mut Self>,
        item: Item,
    ) -> Result<(), Self::Error> {
        debug_assert!(self.buffer.is_none());
        *self.project().buffer = Some(Block { offset: 0, bytes: item });
        Ok(())
    }

    fn poll_flush(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), Self::Error>> {
        ready!(self.as_mut().poll_flush_buffer(cx))?;
        ready!(self.project().writer.poll_flush(cx))?;
        Poll::Ready(Ok(()))
    }

    fn poll_close(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), Self::Error>> {
        ready!(self.as_mut().poll_flush_buffer(cx))?;
        ready!(self.project().writer.poll_close(cx))?;
        Poll::Ready(Ok(()))
    }
}
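A minimal usage sketch, not part of this file: it assumes the published futures facade crate with its "sink" feature enabled. Vec<u8> implements AsyncWrite, so it serves as an in-memory writer to demonstrate the Sink that into_sink returns.

// Illustrative usage of `into_sink` (assumes the `futures` crate, "sink" feature).
use futures::executor::block_on;
use futures::io::AsyncWriteExt; // provides `into_sink`
use futures::sink::SinkExt; // provides `send`/`close`

fn main() -> std::io::Result<()> {
    block_on(async {
        let writer: Vec<u8> = Vec::new();
        let mut sink = writer.into_sink();
        // Each `send` buffers the item via `start_send`, then drives
        // `poll_flush_buffer`/`poll_flush` until every byte reaches the writer.
        sink.send(&b"hello "[..]).await?;
        sink.send(&b"world"[..]).await?;
        sink.close().await?;
        Ok(())
    })
}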
heap.rs
use std::cell::RefCell; use std::convert::TryInto; use std::marker::PhantomData; use std::sync::Arc; use crate::object::*; use crate::pointer::*; use crate::space::*; use crate::types::*; struct HeapInner { // TODO: Add more generations. space: Space, scopes: Vec<Vec<HeapHandle<()>>>, globals: Vec<Option<HeapHandle<()>>>, weaks: Vec<HeapHandle<()>>, } impl HeapInner { fn new(space: Space) -> HeapInner { HeapInner { space, globals: vec![], scopes: vec![], weaks: vec![], } } fn trace(&mut self, visitor: &mut ObjectVisitor) { visitor.trace_maybe_handles(&mut self.globals); for scope in self.scopes.iter_mut() { // FIXME: Scope should be an object, not a vec here. visitor.trace_handles(scope); } while let Some(object_ptr) = visitor.queue.pop_front() { let object = TraceableObject::load(object_ptr); let traceable = object.as_traceable(); traceable.trace(visitor); } } fn update_weak(&mut self) -> Vec<Box<dyn Traceable>> { let mut doomed = vec![]; let mut survivors = vec![]; for handle in self.weaks.iter() { let maybe_object_ptr: Option<ObjectPtr> = handle.ptr().try_into().ok(); if let Some(object_ptr) = maybe_object_ptr { let old_header = object_ptr.header(); if let Some(new_header_ptr) = old_header.new_header_ptr { survivors.push(HeapHandle::new(new_header_ptr.to_object_ptr().into())); } else { let object = TraceableObject::load(object_ptr); doomed.push(object.into_box()); } } } std::mem::swap(&mut self.weaks, &mut survivors); doomed } } impl std::fmt::Debug for HeapInner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("HeapInner").finish() } } #[derive(Debug)] pub struct Heap { max_size_in_bytes: usize, inner: Arc<RefCell<HeapInner>>, } impl Heap { pub fn new(size_in_bytes: usize) -> Result<Heap, GCError> { let half_size = size_in_bytes / 2; Ok(Heap { max_size_in_bytes: size_in_bytes, inner: Arc::new(RefCell::new(HeapInner::new(Space::new(half_size)?))), }) } pub fn used_bytes(&self) -> usize { self.inner.borrow().space.used_bytes() } pub fn free_bytes(&self) -> usize { self.inner.borrow().space.free_bytes() } pub fn collect(&self) -> Result<(), GCError> { let doomed = { let mut visitor = ObjectVisitor::new(Space::new(self.inner.borrow().space.size_in_bytes)?); let mut inner = self.inner.borrow_mut(); inner.trace(&mut visitor); let doomed = inner.update_weak(); std::mem::swap(&mut inner.space, &mut visitor.new_space); doomed }; std::mem::drop(doomed); Ok(()) } fn emplace<T: HostObject>(&self, object: Box<T>) -> Result<ObjectPtr, GCError> { let object_size = std::mem::size_of::<TraceableObject>(); let header = { let maybe_header = ObjectHeader::new(&mut self.inner.borrow_mut().space, object_size, T::TYPE_ID); // Collect here. Release inner mut-borrow and call collect, try again. match maybe_header { Err(_) => { self.collect()?; ObjectHeader::new(&mut self.inner.borrow_mut().space, object_size, T::TYPE_ID)? 
} Ok(header) => header, } }; let object_ptr = header.as_ptr().to_object_ptr(); TraceableObject::from_box(object).store(object_ptr); self.inner .borrow_mut() .weaks .push(HeapHandle::new(object_ptr.into())); Ok(object_ptr) } } #[derive(Debug)] struct Root { inner: Arc<RefCell<HeapInner>>, index: usize, } #[derive(Debug)] pub struct GlobalHandle<T> { root: Root, _phantom: PhantomData<T>, } impl<T> GlobalHandle<T> { fn ptr(&self) -> TaggedPtr { let inner = self.root.inner.borrow(); let cell = inner.globals[self.root.index].as_ref().unwrap(); cell.ptr() } pub fn erase_type(self) -> GlobalHandle<()> { GlobalHandle { root: self.root, _phantom: PhantomData::<()>::default(), } } } impl<T> From<GlobalHandle<T>> for HeapHandle<T> { fn from(handle: GlobalHandle<T>) -> Self { HeapHandle::<T>::new(handle.ptr()) } } impl Drop for Root { fn drop(&mut self) { self.inner.borrow_mut().globals[self.index] = None; } } pub struct HandleScope<'heap> { heap: &'heap Heap, index: usize, } impl<'heap> HandleScope<'heap> { pub fn new(heap: &Heap) -> HandleScope { let mut inner = heap.inner.borrow_mut(); let index = inner.scopes.len(); inner.scopes.push(vec![]); HandleScope { heap, index } } pub fn create_child_scope(&self) -> HandleScope<'heap> { HandleScope::new(self.heap) } pub fn create_num(&self, value: f64) -> LocalHandle<f64> { LocalHandle::<f64>::new(self, value.into()) } pub fn create_bool(&self, value: bool) -> LocalHandle<bool> { LocalHandle::<bool>::new(self, value.into()) } pub fn create_null(&self) -> LocalHandle<()> { LocalHandle::<()>::new(self, TaggedPtr::NULL) } pub fn create<T: HostObject + Default>(&self) -> Result<LocalHandle<T>, GCError> { let object_ptr = self.heap.emplace(Box::new(T::default()))?; Ok(LocalHandle::<T>::new(self, object_ptr.into())) } pub fn take<T: HostObject>(&self, object: T) -> Result<LocalHandle<T>, GCError> { let object_ptr = self.heap.emplace(Box::new(object))?; Ok(LocalHandle::<T>::new(self, object_ptr.into())) } // Should this be create_str? // Could also do generically for ToOwned? // fn from_unowned<T, S>(...) 
where T: ToOwned<S>, S : HostObject {...} pub fn str(&self, object: &str) -> Result<LocalHandle<String>, GCError> { self.take(object.to_string()) } fn add(&self, ptr: TaggedPtr) -> usize { let mut inner = self.heap.inner.borrow_mut(); let cells = &mut inner.scopes[self.index]; let index = cells.len(); cells.push(HeapHandle::new(ptr)); index } pub fn from_global<T>(&self, handle: &GlobalHandle<T>) -> LocalHandle<T> { LocalHandle::<T>::new(self, handle.ptr()) } pub fn from_heap<T>(&self, handle: &HeapHandle<T>) -> LocalHandle<T> { LocalHandle::<T>::new(self, handle.ptr()) } pub fn from_local<T>(&self, handle: &LocalHandle<'_, T>) -> LocalHandle<T> { LocalHandle::<T>::new(self, handle.ptr()) } pub fn from_maybe_heap<T>( &self, maybe_handle: &Option<HeapHandle<T>>, ) -> Option<LocalHandle<T>> { maybe_handle .clone() .map(|handle| LocalHandle::<T>::new(self, handle.ptr())) } pub fn as_ref<T: HostObject>(&self, handle: &GlobalHandle<T>) -> &T { let local = self.from_global(handle); local.as_ref() } pub fn as_mut<T: HostObject>(&self, handle: &GlobalHandle<T>) -> &mut T { let local = self.from_global(handle); local.as_mut() } fn get_ptr(&self, index: usize) -> TaggedPtr { let inner = self.heap.inner.borrow(); inner.scopes[self.index][index].ptr() } } impl<'heap> Drop for HandleScope<'heap> { fn drop(&mut self) { let mut inner = self.heap.inner.borrow_mut(); inner.scopes.pop(); } } #[derive(Copy)] pub struct LocalHandle<'a, T> { scope: &'a HandleScope<'a>, index: usize, phantom: PhantomData<T>, } // Derive Clone requires T to be Cloneable, which isn't required for Handles. impl<'a, T> Clone for LocalHandle<'a, T> { fn clone(&self) -> Self { LocalHandle { scope: self.scope, index: self.index, phantom: PhantomData::<T>::default(), } } fn clone_from(&mut self, source: &Self) { self.scope = source.scope; self.index = source.index; } } impl<'a, T> LocalHandle<'a, T> { fn new(scope: &'a HandleScope, ptr: TaggedPtr) -> Self { Self { scope: scope, index: scope.add(ptr), phantom: PhantomData::<T>::default(), } } #[cfg(test)] pub(crate) fn ptr_for_test(&self) -> TaggedPtr { self.ptr() } fn ptr(&self) -> TaggedPtr { self.scope.get_ptr(self.index) } fn get_object_ptr(&self) -> Option<ObjectPtr> { self.ptr().try_into().ok() } pub fn erase_type(&self) -> LocalHandle<'a, ()> { LocalHandle { scope: self.scope, index: self.index, phantom: PhantomData::<()>::default(), } } } impl<'a> LocalHandle<'a, ()> { pub fn is_null(&self) -> bool { self.ptr().is_null() } pub fn is_bool(&self) -> bool { self.ptr().is_bool() } pub fn is_num(&self) -> bool { self.ptr().is_num() } pub fn try_as_ref<S: HostObject>(&self) -> Option<&'a S> { if let Some(object_ptr) = self.get_object_ptr() { if object_ptr.is_type(S::TYPE_ID) { if let Some(ptr) = TraceableObject::try_downcast::<S>(object_ptr) { return Some(unsafe { &*ptr }); } } } None } pub fn try_as_mut<S: HostObject>(&self) -> Option<&'a mut S> { if let Some(object_ptr) = self.get_object_ptr() { if object_ptr.is_type(S::TYPE_ID) { if let Some(ptr) = TraceableObject::try_downcast::<S>(object_ptr) { let mut_ptr = ptr as *mut S; return Some(unsafe { &mut *mut_ptr }); } } } None } pub fn is_of_type<S: HostObject>(&self) -> bool { let maybe_ref: Option<&S> = self.try_as_ref(); maybe_ref.is_some() } } pub trait DowncastTo<T> { fn try_downcast(self) -> Option<T>; } impl<'a, T: HostObject> DowncastTo<LocalHandle<'a, T>> for LocalHandle<'a, ()> { fn try_downcast(self) -> Option<LocalHandle<'a, T>> { if let Some(object_ptr) = self.get_object_ptr() { if object_ptr.is_type(T::TYPE_ID) { let ptr 
= TraceableObject::try_downcast::<T>(object_ptr); if ptr.is_some() { return Some(LocalHandle { scope: self.scope, index: self.index, phantom: PhantomData::<T>::default(), }); } } } None } } impl<'a> DowncastTo<LocalHandle<'a, f64>> for LocalHandle<'a, ()> { fn try_downcast(self) -> Option<LocalHandle<'a, f64>> { self.try_into() .ok() .map(|value| self.scope.create_num(value)) } } impl<'a> DowncastTo<LocalHandle<'a, bool>> for LocalHandle<'a, ()> { fn try_downcast(self) -> Option<LocalHandle<'a, bool>> { self.try_into() .ok() .map(|value| self.scope.create_bool(value)) } } impl<'a, T: HostObject> LocalHandle<'a, T> { pub fn borrow(&self) -> &'a T { let object_ptr = self.get_object_ptr().unwrap(); let ptr = TraceableObject::downcast::<T>(object_ptr); unsafe { &*ptr } } pub fn borrow_mut(&self) -> &'a mut T { let object_ptr = self.get_object_ptr().unwrap(); let ptr = TraceableObject::downcast_mut::<T>(object_ptr); unsafe { &mut *ptr } } // Old names:
pub fn as_ref(&self) -> &'a T { self.borrow() }
pub fn as_mut(&self) -> &'a mut T { self.borrow_mut() } } impl<'a> TryInto<f64> for LocalHandle<'a, ()> { type Error = GCError; fn try_into(self) -> Result<f64, GCError> { self.ptr().try_into() } } impl<'a> Into<f64> for LocalHandle<'a, f64> { fn into(self) -> f64 { self.ptr().try_into().unwrap() } } impl<'a> TryInto<bool> for LocalHandle<'a, ()> { type Error = GCError; fn try_into(self) -> Result<bool, GCError> { self.ptr().try_into() } } impl<'a> Into<bool> for LocalHandle<'a, bool> { fn into(self) -> bool { self.ptr().try_into().unwrap() } } impl<'a, T> From<LocalHandle<'a, T>> for HeapHandle<T> { fn from(handle: LocalHandle<'a, T>) -> Self { HeapHandle::<T>::new(handle.ptr()) } } impl<'a, T> From<LocalHandle<'a, T>> for GlobalHandle<T> { fn from(handle: LocalHandle<'a, T>) -> Self { let ptr = handle.ptr(); let index = { // TODO: Scan for available cells. let mut inner = handle.scope.heap.inner.borrow_mut(); let index = inner.globals.len(); inner.globals.push(Some(HeapHandle::<()>::new(ptr))); index }; GlobalHandle { root: Root { inner: Arc::clone(&handle.scope.heap.inner), index, }, _phantom: PhantomData::<T>::default(), } } } #[cfg(test)] mod tests { use super::*; use std::cell::Cell; use std::convert::TryInto; use std::hash::{Hash, Hasher}; use std::rc::Rc; #[derive(Default)] struct DropObject { counter: Rc<Cell<u32>>, } impl HostObject for DropObject { const TYPE_ID: ObjectType = ObjectType::Host; } impl Traceable for DropObject { fn trace(&mut self, _visitor: &mut ObjectVisitor) {} } impl Drop for DropObject { fn drop(&mut self) { let counter = self.counter.get(); self.counter.set(counter + 1); } } impl Hash for DropObject { fn hash<H: Hasher>(&self, state: &mut H) { (self as *const DropObject as usize).hash(state); } } #[test] pub fn smoke_test() { let heap = Heap::new(1000).unwrap(); assert_eq!(heap.used_bytes(), 0); let two: GlobalHandle<DropObject> = { let scope = HandleScope::new(&heap); let one = scope.create::<DropObject>().unwrap(); let two = scope.create::<DropObject>().unwrap(); std::mem::drop(one); two.into() }; let used_before_collection = heap.used_bytes(); heap.collect().unwrap(); let used_after_collection = heap.used_bytes(); assert!(0 < used_after_collection); assert!(used_before_collection > used_after_collection); std::mem::drop(two); heap.collect().unwrap(); assert_eq!(0, heap.used_bytes()); } #[test] fn finalizer_test() { let heap = Heap::new(1000).unwrap(); let counter = Rc::new(Cell::new(0)); let scope = HandleScope::new(&heap); let handle = scope.create::<DropObject>().unwrap(); handle.as_mut().counter = Rc::clone(&counter); std::mem::drop(handle); assert_eq!(0u32, counter.get()); std::mem::drop(scope); assert_eq!(0u32, counter.get()); heap.collect().ok(); assert_eq!(1u32, counter.get()); } #[test] fn tracing_test() { let heap = Heap::new(1000).unwrap(); let scope = HandleScope::new(&heap); let handle = scope.create::<List<DropObject>>().unwrap(); let list = handle.as_mut(); list.push(scope.create::<DropObject>().unwrap().into()); list.push(scope.create::<DropObject>().unwrap().into()); list.push(scope.create::<DropObject>().unwrap().into()); std::mem::drop(list); let used = heap.used_bytes(); heap.collect().ok(); assert_eq!(used, heap.used_bytes()); std::mem::drop(handle); heap.collect().ok(); assert_eq!(used, heap.used_bytes()); std::mem::drop(scope); heap.collect().ok(); assert_eq!(0, heap.used_bytes()); } #[test] fn tagged_num_test() { let heap = Heap::new(1000).unwrap(); let scope = HandleScope::new(&heap); let a = scope.create_num(1.0); let b = 
scope.create_num(2.0); assert_eq!(0, heap.used_bytes()); let a_value: f64 = a.ptr().try_into().unwrap(); assert_eq!(1.0, a_value); let b_value: f64 = b.ptr().try_into().unwrap(); assert_eq!(2.0, b_value); } #[test] fn add_f64_test() { let heap = Heap::new(1000).unwrap(); let scope = HandleScope::new(&heap); let one = scope.create_num(1.0); let two = scope.create_num(2.0); let one_value: f64 = one.try_into().unwrap(); assert_eq!(1.0, one_value); let two_value: f64 = two.try_into().unwrap(); assert_eq!(2.0, two_value); let three_value = one_value + two_value; let three = scope.create_num(three_value); let three_global = GlobalHandle::from(three); std::mem::drop(scope); let scope = HandleScope::new(&heap); let three = scope.from_global(&three_global); let three_value: f64 = three.try_into().unwrap(); assert_eq!(3.0, three_value); } #[test] fn list_push_test() { let heap = Heap::new(1000).unwrap(); let scope = HandleScope::new(&heap); let list = scope.create::<List<f64>>().unwrap(); let one = scope.create_num(1.0); let list_value = list.as_mut(); list_value.push(one.into()); std::mem::drop(list_value); heap.collect().ok(); let list_value = list.as_ref(); assert_eq!(list_value.len(), 1); } #[test] fn string_test() { let heap = Heap::new(1000).unwrap(); let scope = HandleScope::new(&heap); let string_handle = scope.create::<String>().unwrap(); heap.collect().ok(); let string_value = string_handle.as_ref(); assert_eq!(string_value, ""); } #[test] fn take_string_test() { let heap = Heap::new(1000).unwrap(); let scope = HandleScope::new(&heap); let string_handle = scope.take("Foo".to_string()).unwrap(); heap.collect().ok(); let string_value = string_handle.as_ref(); assert_eq!(string_value, "Foo"); } #[test] fn list_push_string_twice_test() { let heap = Heap::new(1000).unwrap(); let scope = HandleScope::new(&heap); let list = scope.create::<List<String>>().unwrap(); let string = scope.str("Foo").unwrap(); let list_value = list.as_mut(); list_value.push(string.clone().into()); list_value.push(string.clone().into()); std::mem::drop(list_value); heap.collect().ok(); let list_value = list.as_mut(); assert_eq!(list_value.len(), 2); assert_eq!(list_value[0].as_ref(), "Foo"); assert_eq!(list_value[1].as_ref(), "Foo"); string.as_mut().push_str("Bar"); assert_eq!(list_value[0].as_ref(), "FooBar"); } #[test] fn map_insert_test() { let heap = Heap::new(1000).unwrap(); let scope = HandleScope::new(&heap); let map = scope.create::<Map<String, String>>().unwrap(); let foo = scope.str("Foo").unwrap(); let bar = scope.str("Bar").unwrap(); let map_value = map.as_mut(); map_value.insert(foo.clone().into(), bar.clone().into()); std::mem::drop(map_value); std::mem::drop(foo); std::mem::drop(bar); // Check if lookup works before collect. { let map_value = map.as_mut(); let foo = scope.str("Foo").unwrap(); let bar = scope.from_heap(map_value.get(&foo.into()).unwrap()); assert_eq!(bar.as_ref(), "Bar"); } heap.collect().ok(); let map_value = map.as_ref(); let foo = scope.str("Foo").unwrap(); let bar = map_value.get(&foo.into()).unwrap(); assert_eq!(bar.as_ref(), "Bar"); } #[test] fn typed_handle_test() { let heap = Heap::new(1000).unwrap(); let scope = HandleScope::new(&heap); // Bools let boolean: LocalHandle<bool> = scope.create_bool(true); let out: bool = boolean.into(); assert_eq!(out, true); // bool.as_ref() can't work. // bool.as_mut() similarly so. // Nums let num: LocalHandle<f64> = scope.create_num(1.0); let out: f64 = num.try_into().unwrap(); assert_eq!(out, 1.0); // num.as_ref() should be possible. 
// num.as_mut() might be possible? // Null let null: LocalHandle<()> = scope.create_null(); assert_eq!(null.is_null(), true); // HostObjects (e.g. String) let string: LocalHandle<String> = scope.str("Foo").unwrap(); assert_eq!(string.as_ref(), "Foo"); // Untyped handles let untyped = num.erase_type(); let out: f64 = untyped.try_into().unwrap(); assert_eq!(out, 1.0); // create a String // try to store it in a wrongly typed handle // see it panic. // Things to test: // Combinations of types (null, f64 (valid and NaN), HostObject, bool) // - Getting refs to all types // - Getting (and changing?) a mut-ref to num, bool, null types? // - value cast to the wrong type // - handle cast to the wrong type // - Using try_downcast and getting back None with the wrong type. } #[test] fn downcast_to_typed_handle_test() { let heap = Heap::new(1000).unwrap(); let scope = HandleScope::new(&heap); // Bools let untyped: LocalHandle<()> = scope.create_bool(true).erase_type(); let maybe_string: Option<LocalHandle<String>> = untyped.try_downcast(); let maybe_bool: Option<LocalHandle<bool>> = untyped.try_downcast(); let maybe_f64: Option<LocalHandle<f64>> = untyped.try_downcast(); assert!(maybe_string.is_none()); assert!(maybe_bool.is_some()); assert!(maybe_f64.is_none()); // Nums let untyped: LocalHandle<()> = scope.create_num(1.0).erase_type(); let maybe_string: Option<LocalHandle<String>> = untyped.try_downcast(); let maybe_bool: Option<LocalHandle<bool>> = untyped.try_downcast(); let maybe_f64: Option<LocalHandle<f64>> = untyped.try_downcast(); assert!(maybe_string.is_none()); assert!(maybe_bool.is_none()); assert!(maybe_f64.is_some()); // Null let untyped: LocalHandle<()> = scope.create_null(); let maybe_string: Option<LocalHandle<String>> = untyped.try_downcast(); let maybe_bool: Option<LocalHandle<bool>> = untyped.try_downcast(); let maybe_f64: Option<LocalHandle<f64>> = untyped.try_downcast(); assert!(maybe_string.is_none()); assert!(maybe_bool.is_none()); assert!(maybe_f64.is_none()); // HostObjects (e.g. String) let untyped: LocalHandle<()> = scope.str("Foo").unwrap().erase_type(); let maybe_string: Option<LocalHandle<String>> = untyped.try_downcast(); let maybe_bool: Option<LocalHandle<bool>> = untyped.try_downcast(); let maybe_f64: Option<LocalHandle<f64>> = untyped.try_downcast(); assert!(maybe_string.is_some()); assert!(maybe_bool.is_none()); assert!(maybe_f64.is_none()); } #[test] fn is_of_type_test() { let heap = Heap::new(1000).unwrap(); let scope = HandleScope::new(&heap); let untyped: LocalHandle<()> = scope.str("foo").unwrap().erase_type(); assert_eq!(untyped.is_of_type::<String>(), true); assert_eq!(untyped.is_of_type::<DropObject>(), false); assert_eq!(untyped.is_bool(), false); // The HeapHandle version of is_of_type used to crash.
let heap: HeapHandle<()> = untyped.into(); assert_eq!(heap.is_of_type::<String>(), true); assert_eq!(heap.is_of_type::<DropObject>(), false); assert_eq!(heap.is_bool(), false); } #[test] fn nested_scope_test() { let heap = Heap::new(1000).unwrap(); let before_size = heap.used_bytes(); let outer = HandleScope::new(&heap); { let inner = outer.create_child_scope(); inner.str("foo").unwrap(); let inner_size = heap.used_bytes(); assert!(before_size < inner_size); heap.collect().unwrap(); assert_eq!(heap.used_bytes(), inner_size); } assert!(before_size < heap.used_bytes()); heap.collect().unwrap(); assert_eq!(before_size, heap.used_bytes()); { let inner = outer.create_child_scope(); let inner_string = inner.str("foo").unwrap(); outer.from_local(&inner_string); } // With the inner local moved to the outer scope, it's not collected. assert!(before_size < heap.used_bytes()); heap.collect().unwrap(); assert!(before_size < heap.used_bytes()); } #[test] fn test_collect_on_allocate() { // Make a heap let heap = Heap::new(1000).unwrap(); let empty_size = heap.used_bytes(); let one_object_size; // Fill heap { let scope = HandleScope::new(&heap); scope.str("foo").unwrap(); one_object_size = heap.used_bytes() - empty_size; // Loop until next allocate would fill heap while heap.free_bytes() > one_object_size { scope.str("foo").unwrap(); } let full_size = heap.used_bytes(); // Verify that collection works with a full heap, but does not // actually release anything still held by the HandleScope. heap.collect().unwrap(); assert_eq!(heap.used_bytes(), full_size); // Releasing the handle scope does not itself trigger a collect. } assert!(heap.used_bytes() > one_object_size); // Attempt to allocate again, expect it to succeed and usage to go down. let scope = HandleScope::new(&heap); scope.str("foo").unwrap(); assert_eq!(heap.used_bytes(), one_object_size); } }
{ self.borrow() }
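Read back together, the prefix of this row ends mid-declaration and the short middle above completes it; spliced into place (shown here only for readability), the pair of renamed accessors reads:
// Old names:
pub fn as_ref(&self) -> &'a T { self.borrow() }
pub fn as_mut(&self) -> &'a mut T { self.borrow_mut() }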
tailer_test.go
package tailer import ( "fmt" "github.com/seznam/slo-exporter/pkg/event" "github.com/sirupsen/logrus" "io/ioutil" "os" "reflect" "regexp" "strings" "testing" "time" "github.com/stretchr/testify/assert" ) var ( lineParseRegexp = `^(?P<ip>[A-Fa-f0-9.:]{4,50}) \S+ \S+ \[(?P<time>.*?)\] "(?P<request>.*?)" (?P<statusCode>\d+) \d+ "(?P<referer>.*?)" uag="(?P<userAgent>[^"]+)" "[^"]+" ua="[^"]+" rt="(?P<requestDuration>\d+(\.\d+)??)"(?: frpc-status="(?P<frpcStatus>\d*|-)")?(?: slo-domain="(?P<sloDomain>[^"]*)")?(?: slo-app="(?P<sloApp>[^"]*)")?(?: slo-class="(?P<sloClass>[^"]*)")?(?: slo-endpoint="(?P<sloEndpoint>[^"]*)")?(?: slo-result="(?P<sloResult>[^"]*)")?` emptyGroupRegexp = `^-$` requestLineFormat = `{ip} - - [{time}] "{request}" {statusCode} 79 "-" uag="-" "-" ua="10.66.112.78:80" rt="{requestDuration}" frpc-status="{frpcStatus}" slo-domain="{sloDomain}" slo-app="{sloApp}" slo-class="{sloClass}" slo-endpoint="{sloEndpoint}" slo-result="{sloResult}"` // provided to getRequestLine, this returns a considered-valid line requestLineFormatMapValid = map[string]string{"time": "12/Nov/2019:10:20:07 +0100", "ip": "34.65.133.58", "request": "GET /robots.txt HTTP/1.1", "statusCode": "200", "requestDuration": "0.123", // in s, as logged by nginx "sloClass": "-", "sloDomain": "-", "sloApp": "-", "sloResult": "-", "sloEndpoint": "-", "frpcStatus": "-", } ) // return request line formatted using the provided formatMap func getRequestLine(formatMap map[string]string) (requestLine string) { requestLine = requestLineFormat for k, v := range formatMap { requestLine = strings.Replace(requestLine, fmt.Sprintf("{%s}", k), v, -1) } return requestLine } type parseLineTest struct { // lineContentMapping is to be used to generate the request log line via getRequestLine func // (see it for what the defaults are, so that you don't have to fill them for every test case) lineContentMapping map[string]string isLineValid bool } func Test_ParseLineAndBuildEvent(t *testing.T) { testTable := []parseLineTest{ // ipv4 {map[string]string{"time": "12/Nov/2019:10:20:07 +0100", "ip": "34.65.133.58", "request": "GET /robots.txt HTTP/1.1", "statusCode": "200", "requestDuration": "0.123", // in s, as logged by nginx "sloClass": "-", "sloDomain": "-", "sloApp": "-", "sloResult": "-", "sloEndpoint": "-", "frpcStatus": "-", "userAgent": "-", "referer": "-", }, true}, // ipv6 {map[string]string{"time": "12/Nov/2019:10:20:07 +0100", "ip": "2001:718:801:230::1", "request": "GET /robots.txt HTTP/1.1", "statusCode": "200", "requestDuration": "0.123", // in s, as logged by nginx "sloClass": "-", "sloDomain": "-", "sloApp": "-", "sloResult": "-", "sloEndpoint": "-", "frpcStatus": "-", "userAgent": "-", "referer": "-", }, true}, // invalid time {map[string]string{"time": "32/Nov/2019:25:20:07 +0100", "ip": "2001:718:801:230::1", "request": "GET /robots.txt HTTP/1.1", "statusCode": "200x", "requestDuration": "0.123", // in s, as logged by nginx "sloClass": "-", "sloDomain": "-", "sloApp": "-", "sloResult": "-", "sloEndpoint": "-", "frpcStatus": "-", "userAgent": "-", "referer": "-", }, false}, // invalid request {map[string]string{"time": "12/Nov/2019:10:20:07 +0100", "ip": "2001:718:801:230::1", "request": "invalid-request[eof]", "statusCode": "200x", "requestDuration": "0.123", // in s, as logged by nginx "sloClass": "-", "sloDomain": "-", "sloApp": "-", "sloResult": "-", "sloEndpoint": "-", "frpcStatus": "-", "userAgent": "-", "referer": "-", }, false}, // request without protocol {map[string]string{"time": "12/Nov/2019:10:20:07 +0100",
"ip": "2001:718:801:230::1", "request": "GET /robots.txt", "statusCode": "301", "requestDuration": "0.123", // in s, as logged by nginx "sloClass": "-", "sloDomain": "-", "sloApp": "-", "sloResult": "-", "sloEndpoint": "-", "frpcStatus": "-", "userAgent": "-", "referer": "-", }, true}, // http2.0 proto request {map[string]string{"time": "12/Nov/2019:10:20:07 +0100", "ip": "2001:718:801:230::1", "request": "GET /robots.txt HTTP/2.0", "statusCode": "200", "requestDuration": "0.123", // in s, as logged by nginx "sloClass": "-", "sloDomain": "-", "sloApp": "-", "sloResult": "-", "sloEndpoint": "-", "frpcStatus": "-", "userAgent": "-", "referer": "-", }, true}, // zero status code {map[string]string{"time": "12/Nov/2019:10:20:07 +0100", "ip": "2001:718:801:230::1", "request": "GET /robots.txt HTTP/1.1", "statusCode": "0", "requestDuration": "0.123", // in s, as logged by nginx "sloClass": "-", "sloDomain": "-", "sloApp": "-", "sloResult": "-", "sloEndpoint": "-", "frpcStatus": "-", "userAgent": "-", "referer": "-", }, true}, // invalid status code {map[string]string{"time": "12/Nov/2019:10:20:07 +0100", "ip": "2001:718:801:230::1", "request": "GET /robots.txt HTTP/1.1", "statusCode": "xxx", "requestDuration": "0.123", // in s, as logged by nginx "sloClass": "-", "sloDomain": "-", "sloApp": "-", "sloResult": "-", "sloEndpoint": "-", "frpcStatus": "-", "userAgent": "-", "referer": "-", }, false}, // classified event {map[string]string{"time": "12/Nov/2019:10:20:07 +0100", "ip": "2001:718:801:230::1", "request": "GET /robots.txt HTTP/1.1", "statusCode": "200", "requestDuration": "0.123", // in s, as logged by nginx "sloClass": "critical", "sloDomain": "userportal", "sloApp": "frontend-api", "sloResult": "success", "sloEndpoint": "AdInventoryManagerInterestsQuery", "frpcStatus": "-", "userAgent": "-", "referer": "-", }, true}, } lineParseRegexpCompiled := regexp.MustCompile(lineParseRegexp) emptyGroupRegexpCompiled := regexp.MustCompile(emptyGroupRegexp) for _, test := range testTable { requestLine := getRequestLine(test.lineContentMapping) data, err := parseLine(lineParseRegexpCompiled, emptyGroupRegexpCompiled, requestLine) if err != nil { if test.isLineValid { t.Fatalf("unable to parse request line '%s': %v", requestLine, err) } else { // the tested line is marked as not valid, Err is expected continue } } parsedEvent := &event.Raw{Metadata: data} var expectedEvent *event.Raw if test.isLineValid { // line is considered valid, build the expectedEvent struct in order to compare it to the parsed one // first, drop all data which matches emptyGroupRegexpCompiled, as they should not be included in the data provided to buildEvent for k, v := range test.lineContentMapping { if emptyGroupRegexpCompiled.MatchString(v) { delete(test.lineContentMapping, k) } } expectedEvent = &event.Raw{Metadata: test.lineContentMapping} if !reflect.DeepEqual(expectedEvent, parsedEvent) { t.Errorf("Unexpected result of parse line: %s\nGot: %+v\nExpected: %+v", requestLine, parsedEvent, expectedEvent) } } } } func Test_ParseLine(t *testing.T)
type offsetPersistenceTest struct { // each value is the number of events that should be written to the log file at the given phase of the test pre int // *before* the tailing starts during int // while the tailer is running post int // after the tailer temporarily stops reopen int // after the tailer starts again } // drains the in channel and, once it is closed, sends the total count to the out channel func countEvents(in chan *event.Raw, out chan int) { count := 0 for range in { count++ } out <- count } func offsetPersistenceTestRun(t offsetPersistenceTest) error { // temp file for logs f, err := ioutil.TempFile("", "") if err != nil { return fmt.Errorf("Error while creating temp file: %w", err) } fname := f.Name() positionsFname := f.Name() + ".pos" defer os.Remove(positionsFname) defer os.Remove(fname) defer f.Close() eventCount := make(chan int) persistPositionInterval, _ := time.ParseDuration("10s") for i := 0; i < t.pre; i++ { f.WriteString(getRequestLine(requestLineFormatMapValid) + "\n") } config := tailerConfig{ TailedFile: fname, Follow: true, Reopen: true, PositionFile: positionsFname, PositionPersistenceInterval: persistPositionInterval, LoglineParseRegexp: lineParseRegexp, EmptyGroupRE: emptyGroupRegexp, } tailer, err := New(config, logrus.New()) if err != nil { return err } tailer.Run() go countEvents(tailer.OutputChannel(), eventCount) for i := 0; i < t.during; i++ { f.WriteString(getRequestLine(requestLineFormatMapValid) + "\n") } time.Sleep(time.Second) tailer.Stop() eventsCount := <-eventCount if eventsCount != t.pre+t.during { return fmt.Errorf("Number of processed events during first open of a log file does not match: got '%d', expected '%d'", eventsCount, t.pre+t.during) } for i := 0; i < t.post; i++ { f.WriteString(getRequestLine(requestLineFormatMapValid) + "\n") } tailer, err = New(config, logrus.New()) if err != nil { return err } tailer.Run() go countEvents(tailer.OutputChannel(), eventCount) for i := 0; i < t.reopen; i++ { f.WriteString(getRequestLine(requestLineFormatMapValid) + "\n") } time.Sleep(time.Second) tailer.Stop() eventsCount = <-eventCount if eventsCount != t.post+t.reopen { return fmt.Errorf("Number of processed events after reopening a log file does not match: got '%d', expected '%d'", eventsCount, t.post+t.reopen) } return nil } var testOffsetPersistenceTable = []offsetPersistenceTest{ // events in log file present before the first open {10, 10, 0, 0}, // just new events are read on file reopen {10, 10, 10, 10}, } func TestOffsetPersistence(t *testing.T) { for _, testData := range testOffsetPersistenceTable { err := offsetPersistenceTestRun(testData) if err != nil { t.Error(err) } } } func TestGetDefaultPositionsFilePath(t *testing.T) { testData := map[string]string{ "/tmp/access_log": "/tmp/access_log.pos", "./access_log.pos": "./access_log.pos.pos", } for logFile, posFile := range testData { config := tailerConfig{TailedFile: logFile} assert.Equal(t, posFile, config.getDefaultPositionsFilePath()) } }
{ testTable := []parseLineTest{ { lineContentMapping: map[string]string{"time": "12/Nov/2019:10:20:07 +0100", "ip": "34.65.133.58", "request": "GET /robots.txt HTTP/1.1", "statusCode": "200", "requestDuration": "0.123", // in s, as logged by nginx "sloClass": "-", "sloDomain": "-", "sloApp": "-", "sloResult": "-", "sloEndpoint": "-", "frpcStatus": "-", }, isLineValid: true, }, } lineParseRegexpCompiled := regexp.MustCompile(lineParseRegexp) emptyGroupRegexpCompiled := regexp.MustCompile(emptyGroupRegexp) for _, test := range testTable { requestLine := getRequestLine(test.lineContentMapping) data, err := parseLine(lineParseRegexpCompiled, emptyGroupRegexpCompiled, requestLine) if err != nil { t.Fatalf("unable to parse request line '%s': %v", requestLine, err) } for k, v := range test.lineContentMapping { if !emptyGroupRegexpCompiled.MatchString(v) { continue } // test that groups whose value matches emptyGroupRegexp were dropped from the result if _, ok := data[k]; ok { t.Errorf("Content named group '%s':'%s' should not have been included in the resulting stringmap (as value matches emptyGroupRegexp: '%s'): %+v", k, v, emptyGroupRegexp, data) } } } }
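parseLine itself is not shown in this row; judging from how the tests call it, it extracts the regexp's named groups into a map and drops values matching emptyGroupRegexp. A minimal self-contained sketch of that technique (extractGroups and the tiny patterns below are hypothetical illustrations, not the tailer's actual code):

package main

import (
	"fmt"
	"regexp"
)

// extractGroups collects named capture groups into a map, omitting any
// value that matches the "empty" pattern (e.g. "-" placeholders).
func extractGroups(line, empty *regexp.Regexp, s string) (map[string]string, error) {
	match := line.FindStringSubmatch(s)
	if match == nil {
		return nil, fmt.Errorf("line %q does not match", s)
	}
	out := map[string]string{}
	for i, name := range line.SubexpNames() {
		if i == 0 || name == "" {
			continue // skip the whole-match entry and unnamed groups
		}
		if empty.MatchString(match[i]) {
			continue // placeholder values are dropped entirely
		}
		out[name] = match[i]
	}
	return out, nil
}

func main() {
	line := regexp.MustCompile(`^(?P<ip>\S+) (?P<statusCode>\S+)$`)
	empty := regexp.MustCompile(`^-$`)
	data, err := extractGroups(line, empty, "127.0.0.1 200")
	fmt.Println(data, err) // map[ip:127.0.0.1 statusCode:200] <nil>
}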
models.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Filename: models.py # Project: tests # Author: Brian Cherinka # Created: Friday, 15th February 2019 2:44:13 pm # License: BSD 3-clause "New" or "Revised" License # Copyright (c) 2019 Brian Cherinka # Last Modified: Sunday, 3rd March 2019 4:47:18 pm # Modified By: Brian Cherinka from __future__ import print_function, division, absolute_import from sqlalchemy import Column, String, BigInteger, Integer, Float from .database import Base, engine, Session import factory import factory.fuzzy from pytest_factoryboy import register class ModelA(Base): __tablename__ = 'modela' pk = Column(BigInteger, primary_key=True) name = Column(String, nullable=False) x = Column(Integer, nullable=False) y = Column(Integer, nullable=False) def __repr__(self): return f'<ModelA(pk={self.pk},name={self.name},x={self.x},y={self.y})>' class ModelB(Base): __tablename__ = 'modelb' pk = Column(BigInteger, primary_key=True) z = Column(Float, nullable=False) def __repr__(self): return f'<ModelB(pk={self.pk},z={self.z})>' @register class ModelAFactory(factory.alchemy.SQLAlchemyModelFactory): class Meta: model = ModelA sqlalchemy_session = Session # the SQLAlchemy session object pk = factory.Sequence(lambda n: n) x = factory.Faker('pyint', min_value=0, max_value=20) y = factory.Faker('pyint', min_value=0, max_value=20)
class ModelBFactory(factory.alchemy.SQLAlchemyModelFactory): class Meta: model = ModelB sqlalchemy_session = Session # the SQLAlchemy session object pk = factory.Sequence(lambda n: n) z = factory.Faker('pyint', min_value=0, max_value=20) Base.metadata.create_all(engine)
name = factory.fuzzy.FuzzyText(prefix='model', length=3) @register
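Spliced back into place (shown for readability only), the middle finishes ModelAFactory's fields and opens the registration of the next factory from the suffix:

name = factory.fuzzy.FuzzyText(prefix='model', length=3)

@register
class ModelBFactory(factory.alchemy.SQLAlchemyModelFactory):
    ...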
nested_interface.go
// Code generated by go-swagger; DO NOT EDIT. // Copyright 2018 The go-netbox Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package models // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( strfmt "github.com/go-openapi/strfmt" "github.com/go-openapi/errors" "github.com/go-openapi/swag" "github.com/go-openapi/validate" ) // NestedInterface Lag // swagger:model NestedInterface type NestedInterface struct { // ID // Read Only: true ID int64 `json:"id,omitempty"`
// Name // Required: true // Max Length: 64 Name *string `json:"name"` // Url // Read Only: true URL strfmt.URI `json:"url,omitempty"` } // Validate validates this nested interface func (m *NestedInterface) Validate(formats strfmt.Registry) error { var res []error if err := m.validateName(formats); err != nil { // prop res = append(res, err) } if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil } func (m *NestedInterface) validateName(formats strfmt.Registry) error { if err := validate.Required("name", "body", m.Name); err != nil { return err } if err := validate.MaxLength("name", "body", string(*m.Name), 64); err != nil { return err } return nil } // MarshalBinary interface implementation func (m *NestedInterface) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } return swag.WriteJSON(m) } // UnmarshalBinary interface implementation func (m *NestedInterface) UnmarshalBinary(b []byte) error { var res NestedInterface if err := swag.ReadJSON(b, &res); err != nil { return err } *m = res return nil }
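A minimal usage sketch for the generated model above: Validate enforces that Name is set and at most 64 characters long. The import path and the interface name "eth0" are assumptions for illustration, not taken from this file:

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"

	// assumed import path for the generated go-netbox models package
	"github.com/digitalocean/go-netbox/netbox/models"
)

func main() {
	name := "eth0" // Name is the only required field; max length 64
	iface := &models.NestedInterface{Name: &name}
	if err := iface.Validate(strfmt.Default); err != nil {
		fmt.Println("validation failed:", err)
		return
	}
	fmt.Println("valid interface:", *iface.Name)
}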
get_token.go
/* Copyright © 2021 Clastix Labs Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmd import ( "bytes" "fmt" "github.com/dgrijalva/jwt-go" "github.com/spf13/cobra" "github.com/spf13/viper" "go.uber.org/zap" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer/json" clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" "github.com/clastix/kubectl-login/internal/actions" ) var tokenCmd = &cobra.Command{ Use: "get-token", Short: "Return a credential execution required by kubectl with the updated ID token", RunE: func(cmd *cobra.Command, args []string) (err error) { var idToken string if idToken = viper.GetString(TokenID); len(idToken) == 0 { return fmt.Errorf("the ID Token is not yet configured, please issue the login process first") } logger.Info("Decoding the ID token as JWT") claims := &jwt.MapClaims{} parser := jwt.Parser{SkipClaimsValidation: true} if _, _, err = parser.ParseUnverified(idToken, claims); err != nil { return fmt.Errorf("token ID is a non JWT encoded string (%w)", err) } if err = claims.Valid(); err != nil { logger.Info("proceeding to token refresh") logger.Debug("JWT claim is not valid due to error", zap.Error(err)) var refreshToken string idToken, refreshToken, err = actions.NewRefreshToken(logger, true, viper.GetString(TokenEndpoint), viper.GetString(OIDCClientID), viper.GetString(TokenRefresh)).Handle() if err != nil { return fmt.Errorf("cannot refresh token due to an error (%w)", err) } viper.Set(TokenID, idToken) viper.Set(TokenRefresh, refreshToken) defer func() { if err = viper.WriteConfig(); err != nil { logger.Error("Cannot write configuration file", zap.Error(err)) } }() } ec := &clientauthenticationv1beta1.ExecCredential{ TypeMeta: metav1.TypeMeta{ Kind: "ExecCredential", APIVersion: "client.authentication.k8s.io/v1beta1", }, Status: &clientauthenticationv1beta1.ExecCredentialStatus{ Token: viper.GetString(TokenID), }, } scheme := runtime.NewScheme() encoder := json.NewSerializerWithOptions(json.SimpleMetaFactory{}, scheme, scheme, json.SerializerOptions{}) a := bytes.NewBuffer([]byte{}) if err = encoder.Encode(ec, a); err != nil { return fmt.Errorf("cannot encode kubeconfig to JSON (%w)", err) } fmt.Println(a.String()) return nil }, } func i
) { rootCmd.AddCommand(tokenCmd) }
nit(
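Here the split falls mid-identifier: prefix ("func i"), middle ("nit("), and suffix (") { ... }") join into the package's cobra wiring:

func init() {
	rootCmd.AddCommand(tokenCmd)
}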
head_tracker_test.go
package services_test import ( "context" "errors" "math/big" "sync" "sync/atomic" "testing" "time" "github.com/smartcontractkit/chainlink/core/store/dialects" "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/internal/mocks" "github.com/smartcontractkit/chainlink/core/services" strpkg "github.com/smartcontractkit/chainlink/core/store" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/ethereum/go-ethereum" gethCommon "github.com/ethereum/go-ethereum/common" gethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/onsi/gomega" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) func firstHead(t *testing.T, store *strpkg.Store) models.Head { h := models.Head{} if err := store.DB.Order("number asc").First(&h).Error; err != nil { t.Fatal(err) } return h } func TestHeadTracker_New(t *testing.T) { t.Parallel() store, cleanup := cltest.NewStore(t) defer cleanup() logger := store.Config.CreateProductionLogger() sub := new(mocks.Subscription) ethClient := new(mocks.Client) store.EthClient = ethClient ethClient.On("ChainID", mock.Anything).Return(store.Config.ChainID(), nil) ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil) sub.On("Err").Return(nil) assert.Nil(t, store.IdempotentInsertHead(context.TODO(), *cltest.Head(1))) last := cltest.Head(16) assert.Nil(t, store.IdempotentInsertHead(context.TODO(), *last)) assert.Nil(t, store.IdempotentInsertHead(context.TODO(), *cltest.Head(10))) ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{}) assert.Nil(t, ht.Start()) assert.Equal(t, last.Number, ht.HighestSeenHead().Number) } func TestHeadTracker_Save_InsertsAndTrimsTable(t *testing.T) { t.Parallel() store, cleanup := cltest.NewStore(t) store.Config.Set("ETH_HEAD_TRACKER_HISTORY_DEPTH", 100) defer cleanup() logger := store.Config.CreateProductionLogger() ethClient := new(mocks.Client) store.EthClient = ethClient ethClient.On("ChainID", mock.Anything).Return(store.Config.ChainID(), nil) for idx := 0; idx < 200; idx++ { assert.Nil(t, store.IdempotentInsertHead(context.TODO(), *cltest.Head(idx))) } ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{}) h := cltest.Head(200) require.NoError(t, ht.Save(context.TODO(), *h)) assert.Equal(t, big.NewInt(200), ht.HighestSeenHead().ToInt()) firstHead := firstHead(t, store) assert.Equal(t, big.NewInt(101), firstHead.ToInt()) lastHead, err := store.LastHead(context.TODO()) require.NoError(t, err) assert.Equal(t, int64(200), lastHead.Number) } func
(t *testing.T) { t.Parallel() start := cltest.Head(5) tests := []struct { name string initial *models.Head toSave *models.Head want *big.Int }{ {"greater", start, cltest.Head(6), big.NewInt(6)}, {"less than", start, cltest.Head(1), big.NewInt(5)}, {"zero", start, cltest.Head(0), big.NewInt(5)}, {"nil", start, nil, big.NewInt(5)}, {"nil no initial", nil, nil, nil}, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() logger := store.Config.CreateProductionLogger() ethClient := new(mocks.Client) sub := new(mocks.Subscription) store.EthClient = ethClient ethClient.On("ChainID", mock.Anything).Return(store.Config.ChainID(), nil) sub.On("Err").Return(nil) sub.On("Unsubscribe").Return(nil) chStarted := make(chan struct{}) ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). Run(func(mock.Arguments) { close(chStarted) }). Return(sub, nil) fnCall := ethClient.On("HeaderByNumber", mock.Anything, mock.Anything) fnCall.RunFn = func(args mock.Arguments) { num := args.Get(1).(*big.Int) fnCall.ReturnArguments = mock.Arguments{cltest.Head(num.Int64()), nil} } if test.initial != nil { assert.Nil(t, store.IdempotentInsertHead(context.TODO(), *test.initial)) } ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{}) ht.Start() defer ht.Stop() if test.toSave != nil { err := ht.Save(context.TODO(), *test.toSave) assert.NoError(t, err) } assert.Equal(t, test.want, ht.HighestSeenHead().ToInt()) }) } } func TestHeadTracker_Start_NewHeads(t *testing.T) { t.Parallel() store, cleanup := cltest.NewStore(t) defer cleanup() logger := store.Config.CreateProductionLogger() ethClient := new(mocks.Client) store.EthClient = ethClient ethClient.On("ChainID", mock.Anything).Return(store.Config.ChainID(), nil) sub := new(mocks.Subscription) sub.On("Err").Return(nil) sub.On("Unsubscribe").Return(nil) chStarted := make(chan struct{}) ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). Run(func(mock.Arguments) { close(chStarted) }). Return(sub, nil) ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{}) assert.NoError(t, ht.Start()) <-chStarted ht.Stop() <-ht.ExportedDone() ethClient.AssertExpectations(t) } func TestHeadTracker_CallsHeadTrackableCallbacks(t *testing.T) { t.Parallel() g := gomega.NewGomegaWithT(t) store, cleanup := cltest.NewStore(t) defer cleanup() logger := store.Config.CreateProductionLogger() sub := new(mocks.Subscription) ethClient := new(mocks.Client) store.EthClient = ethClient chchHeaders := make(chan chan<- *models.Head, 1) ethClient.On("ChainID", mock.Anything).Return(store.Config.ChainID(), nil) ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). Run(func(args mock.Arguments) { chchHeaders <- args.Get(1).(chan<- *models.Head) }). 
Return(sub, nil) ethClient.On("HeaderByNumber", mock.Anything, mock.Anything).Return(cltest.Head(1), nil) sub.On("Unsubscribe").Return() sub.On("Err").Return(nil) checker := &cltest.MockHeadTrackable{} ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{checker}, cltest.NeverSleeper{}) assert.Nil(t, ht.Start()) g.Eventually(func() int32 { return checker.ConnectedCount() }).Should(gomega.Equal(int32(1))) assert.Equal(t, int32(0), checker.DisconnectedCount()) assert.Equal(t, int32(0), checker.OnNewLongestChainCount()) headers := <-chchHeaders headers <- &models.Head{Number: 1} g.Eventually(func() int32 { return checker.OnNewLongestChainCount() }).Should(gomega.Equal(int32(1))) assert.Equal(t, int32(1), checker.ConnectedCount()) assert.Equal(t, int32(0), checker.DisconnectedCount()) require.NoError(t, ht.Stop()) assert.Equal(t, int32(1), checker.DisconnectedCount()) assert.Equal(t, int32(1), checker.ConnectedCount()) assert.Equal(t, int32(1), checker.OnNewLongestChainCount()) } func TestHeadTracker_ReconnectOnError(t *testing.T) { t.Parallel() g := gomega.NewGomegaWithT(t) store, cleanup := cltest.NewStore(t) defer cleanup() logger := store.Config.CreateProductionLogger() ethClient := new(mocks.Client) sub := new(mocks.Subscription) ethClient.On("ChainID", mock.Anything).Maybe().Return(store.Config.ChainID(), nil) ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil) ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(nil, errors.New("cannot reconnect")) ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil) chErr := make(chan error) sub.On("Unsubscribe").Return() sub.On("Err").Return((<-chan error)(chErr)) store.EthClient = ethClient checker := &cltest.MockHeadTrackable{} ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{checker}, cltest.NeverSleeper{}) // connect assert.Nil(t, ht.Start()) g.Eventually(func() int32 { return checker.ConnectedCount() }).Should(gomega.Equal(int32(1))) assert.Equal(t, int32(0), checker.DisconnectedCount()) assert.Equal(t, int32(0), checker.OnNewLongestChainCount()) // trigger reconnect loop chErr <- errors.New("Test error to force reconnect") g.Eventually(func() int32 { return checker.ConnectedCount() }).Should(gomega.Equal(int32(2))) g.Consistently(func() int32 { return checker.ConnectedCount() }).Should(gomega.Equal(int32(2))) assert.Equal(t, int32(1), checker.DisconnectedCount()) assert.Equal(t, int32(0), checker.OnNewLongestChainCount()) // stop assert.NoError(t, ht.Stop()) } func TestHeadTracker_ResubscribeOnSubscriptionError(t *testing.T) { t.Parallel() g := gomega.NewGomegaWithT(t) store, cleanup := cltest.NewStore(t) defer cleanup() logger := store.Config.CreateProductionLogger() ethClient := new(mocks.Client) sub := new(mocks.Subscription) store.EthClient = ethClient chchHeaders := make(chan chan<- *models.Head, 1) ethClient.On("ChainID", mock.Anything).Maybe().Return(store.Config.ChainID(), nil) ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). Run(func(args mock.Arguments) { chchHeaders <- args.Get(1).(chan<- *models.Head) }). 
Return(sub, nil) sub.On("Unsubscribe").Return() sub.On("Err").Return(nil) checker := &cltest.MockHeadTrackable{} ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{checker}, cltest.NeverSleeper{}) // connect assert.Nil(t, ht.Start()) g.Eventually(func() int32 { return checker.ConnectedCount() }).Should(gomega.Equal(int32(1))) assert.Equal(t, int32(0), checker.DisconnectedCount()) assert.Equal(t, int32(0), checker.OnNewLongestChainCount()) headers := <-chchHeaders // trigger reconnect loop close(headers) g.Eventually(func() int32 { return checker.ConnectedCount() }).Should(gomega.Equal(int32(2))) g.Consistently(func() int32 { return checker.ConnectedCount() }).Should(gomega.Equal(int32(2))) assert.Equal(t, int32(1), checker.DisconnectedCount()) assert.Equal(t, int32(0), checker.OnNewLongestChainCount()) // stop assert.NoError(t, ht.Stop()) } func TestHeadTracker_StartConnectsFromLastSavedHeader(t *testing.T) { t.Parallel() g := gomega.NewGomegaWithT(t) // Need separate db because ht.Stop() will cancel the ctx, causing a db connection // close and go-txdb rollback. config, _, cleanupDB := cltest.BootstrapThrowawayORM(t, "last_saved_header", true) defer cleanupDB() config.Config.Dialect = dialects.Postgres store, cleanup := cltest.NewStoreWithConfig(t, config) defer cleanup() logger := store.Config.CreateProductionLogger() sub := new(mocks.Subscription) ethClient := new(mocks.Client) store.EthClient = ethClient chchHeaders := make(chan chan<- *models.Head, 1) ethClient.On("ChainID", mock.Anything).Return(store.Config.ChainID(), nil) ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). Run(func(args mock.Arguments) { chchHeaders <- args.Get(1).(chan<- *models.Head) }). Return(sub, nil) latestHeadByNumber := make(map[int64]*models.Head) fnCall := ethClient.On("HeaderByNumber", mock.Anything, mock.Anything) fnCall.RunFn = func(args mock.Arguments) { num := args.Get(1).(*big.Int) head, exists := latestHeadByNumber[num.Int64()] if !exists { head = cltest.Head(num.Int64()) latestHeadByNumber[num.Int64()] = head } fnCall.ReturnArguments = mock.Arguments{head, nil} } sub.On("Unsubscribe").Return() sub.On("Err").Return(nil) lastSavedBN := big.NewInt(1) currentBN := big.NewInt(2) var connectedValue atomic.Value checker := &cltest.MockHeadTrackable{ConnectedCallback: func(bn *models.Head) { connectedValue.Store(bn.ToInt()) }} ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{checker}, cltest.NeverSleeper{}) require.NoError(t, ht.Save(context.TODO(), models.NewHead(lastSavedBN, cltest.NewHash(), cltest.NewHash(), 0))) assert.Nil(t, ht.Start()) headers := <-chchHeaders headers <- &models.Head{Number: currentBN.Int64()} g.Eventually(func() int32 { return checker.ConnectedCount() }).Should(gomega.Equal(int32(1))) connectedBN := connectedValue.Load().(*big.Int) assert.Equal(t, lastSavedBN, connectedBN) g.Eventually(func() int32 { return checker.OnNewLongestChainCount() }).Should(gomega.Equal(int32(1))) assert.NoError(t, ht.Stop()) h, err := store.LastHead(context.TODO()) require.NoError(t, err) require.NotNil(t, h) assert.Equal(t, h.Number, currentBN.Int64()) } func TestHeadTracker_SwitchesToLongestChain(t *testing.T) { t.Parallel() // Need separate db because ht.Stop() will cancel the ctx, causing a db connection // close and go-txdb rollback. 
config, _, cleanupDB := cltest.BootstrapThrowawayORM(t, "switches_longest_chain", true) t.Cleanup(cleanupDB) config.Config.Dialect = dialects.Postgres config.Set("ETH_FINALITY_DEPTH", "50") store, cleanup := cltest.NewStoreWithConfig(t, config) t.Cleanup(cleanup) // Need to set the buffer to something large since we inject a lot of heads at once and otherwise they will be dropped store.Config.Set("ETH_HEAD_TRACKER_MAX_BUFFER_SIZE", 42) store.Config.Set("ETH_HEAD_TRACKER_SAMPLING_INTERVAL", "100ms") sub := new(mocks.Subscription) ethClient := new(mocks.Client) store.EthClient = ethClient logger := store.Config.CreateProductionLogger() checker := new(mocks.HeadTrackable) ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{checker}, cltest.NeverSleeper{}) chchHeaders := make(chan chan<- *models.Head, 1) ethClient.On("ChainID", mock.Anything).Return(store.Config.ChainID(), nil) ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything). Run(func(args mock.Arguments) { chchHeaders <- args.Get(1).(chan<- *models.Head) }). Return(sub, nil) sub.On("Unsubscribe").Return() sub.On("Err").Return(nil) checker.On("Connect", mock.MatchedBy(func(h *models.Head) bool { return h == nil })).Return(nil).Once() checker.On("Disconnect").Return(nil).Once() assert.Nil(t, ht.Start()) lastHead := make(chan struct{}) blockHeaders := []*models.Head{} // First block comes in blockHeaders = append(blockHeaders, &models.Head{Number: 1, Hash: cltest.NewHash(), ParentHash: cltest.NewHash(), Timestamp: time.Unix(1, 0)}) // Blocks 2 and 3 are out of order head2 := &models.Head{Number: 2, Hash: cltest.NewHash(), ParentHash: blockHeaders[0].Hash, Timestamp: time.Unix(2, 0)} head3 := &models.Head{Number: 3, Hash: cltest.NewHash(), ParentHash: head2.Hash, Timestamp: time.Unix(3, 0)} blockHeaders = append(blockHeaders, head3) blockHeaders = append(blockHeaders, head2) // Block 4 comes in blockHeaders = append(blockHeaders, &models.Head{Number: 4, Hash: cltest.NewHash(), ParentHash: blockHeaders[1].Hash, Timestamp: time.Unix(4, 0)}) // Another block at level 4 comes in, that will be uncled blockHeaders = append(blockHeaders, &models.Head{Number: 4, Hash: cltest.NewHash(), ParentHash: blockHeaders[1].Hash, Timestamp: time.Unix(5, 0)}) // Reorg happened forking from block 2 blockHeaders = append(blockHeaders, &models.Head{Number: 2, Hash: cltest.NewHash(), ParentHash: blockHeaders[0].Hash, Timestamp: time.Unix(6, 0)}) blockHeaders = append(blockHeaders, &models.Head{Number: 3, Hash: cltest.NewHash(), ParentHash: blockHeaders[5].Hash, Timestamp: time.Unix(7, 0)}) blockHeaders = append(blockHeaders, &models.Head{Number: 4, Hash: cltest.NewHash(), ParentHash: blockHeaders[6].Hash, Timestamp: time.Unix(8, 0)}) // Now the new chain is longer blockHeaders = append(blockHeaders, &models.Head{Number: 5, Hash: cltest.NewHash(), ParentHash: blockHeaders[7].Hash, Timestamp: time.Unix(9, 0)}) checker.On("OnNewLongestChain", mock.Anything, mock.MatchedBy(func(h models.Head) bool { return h.Number == 1 && h.Hash == blockHeaders[0].Hash })).Return().Once() checker.On("OnNewLongestChain", mock.Anything, mock.MatchedBy(func(h models.Head) bool { return h.Number == 3 && h.Hash == blockHeaders[1].Hash })).Return().Once() checker.On("OnNewLongestChain", mock.Anything, mock.MatchedBy(func(h models.Head) bool { if h.Number == 4 && h.Hash == blockHeaders[3].Hash { // Check that the block came with its parents require.NotNil(t, h.Parent) require.Equal(t, h.Parent.Hash, blockHeaders[1].Hash) require.NotNil(t, h.Parent.Parent.Hash) 
require.Equal(t, h.Parent.Parent.Hash, blockHeaders[2].Hash) require.NotNil(t, h.Parent.Parent.Parent) require.NotNil(t, h.Parent.Parent.Parent.Hash, blockHeaders[0].Hash) return true } return false })).Return().Once() checker.On("OnNewLongestChain", mock.Anything, mock.MatchedBy(func(h models.Head) bool { if h.Number == 5 && h.Hash == blockHeaders[8].Hash { // This is the new longest chain, check that it came with its parents require.NotNil(t, h.Parent) require.Equal(t, h.Parent.Hash, blockHeaders[7].Hash) require.NotNil(t, h.Parent.Parent.Hash) require.Equal(t, h.Parent.Parent.Hash, blockHeaders[6].Hash) require.NotNil(t, h.Parent.Parent.Parent) require.NotNil(t, h.Parent.Parent.Parent.Hash, blockHeaders[5].Hash) require.NotNil(t, h.Parent.Parent.Parent.Parent) require.NotNil(t, h.Parent.Parent.Parent.Parent.Hash, blockHeaders[0].Hash) return true } return false })).Return().Once().Run(func(_ mock.Arguments) { close(lastHead) }) headers := <-chchHeaders // This grotesque construction is the only way to do dynamic return values using // the mock package. We need dynamic returns because we're simulating reorgs. latestHeadByNumber := make(map[int64]*models.Head) latestHeadByNumberMu := new(sync.Mutex) fnCall := ethClient.On("HeaderByNumber", mock.Anything, mock.Anything) fnCall.RunFn = func(args mock.Arguments) { latestHeadByNumberMu.Lock() defer latestHeadByNumberMu.Unlock() num := args.Get(1).(*big.Int) head, exists := latestHeadByNumber[num.Int64()] if !exists { head = cltest.Head(num.Int64()) latestHeadByNumber[num.Int64()] = head } fnCall.ReturnArguments = mock.Arguments{head, nil} } for _, h := range blockHeaders { // waiting longer than the head sampling frequency time.Sleep(220 * time.Millisecond) latestHeadByNumberMu.Lock() latestHeadByNumber[h.Number] = h latestHeadByNumberMu.Unlock() headers <- h } gomega.NewGomegaWithT(t).Eventually(lastHead).Should(gomega.BeClosed()) require.NoError(t, ht.Stop()) assert.Equal(t, int64(5), ht.HighestSeenHead().Number) for _, h := range blockHeaders { c, err := store.Chain(context.TODO(), h.Hash, 1) require.NoError(t, err) require.NotNil(t, c) assert.Equal(t, c.ParentHash, h.ParentHash) assert.Equal(t, c.Timestamp.Unix(), h.Timestamp.UTC().Unix()) assert.Equal(t, c.Number, h.Number) } checker.AssertExpectations(t) } func TestHeadTracker_Backfill(t *testing.T) { t.Parallel() // Heads are arranged as follows: // headN indicates an unpersisted ethereum header // hN indicates a persisted head record // // (1)->(H0) // // (14Orphaned)-+ // +->(13)->(12)->(11)->(H10)->(9)->(H8) // (15)->(14)---------+ now := uint64(time.Now().UTC().Unix()) gethHead0 := &gethTypes.Header{ Number: big.NewInt(0), ParentHash: gethCommon.BigToHash(big.NewInt(0)), Time: now, } head0 := models.NewHead(gethHead0.Number, cltest.NewHash(), gethHead0.ParentHash, gethHead0.Time) h1 := *cltest.Head(1) h1.ParentHash = head0.Hash gethHead8 := &gethTypes.Header{ Number: big.NewInt(8), ParentHash: cltest.NewHash(), Time: now, } head8 := models.NewHead(gethHead8.Number, cltest.NewHash(), gethHead8.ParentHash, gethHead8.Time) h9 := *cltest.Head(9) h9.ParentHash = head8.Hash gethHead10 := &gethTypes.Header{ Number: big.NewInt(10), ParentHash: h9.Hash, Time: now, } head10 := models.NewHead(gethHead10.Number, cltest.NewHash(), gethHead10.ParentHash, gethHead10.Time) h11 := *cltest.Head(11) h11.ParentHash = head10.Hash h12 := *cltest.Head(12) h12.ParentHash = h11.Hash h13 := *cltest.Head(13) h13.ParentHash = h12.Hash h14Orphaned := *cltest.Head(14) h14Orphaned.ParentHash = h13.Hash h14 := 
*cltest.Head(14) h14.ParentHash = h13.Hash h15 := *cltest.Head(15) h15.ParentHash = h14.Hash heads := []models.Head{ h9, h11, h12, h13, h14Orphaned, h14, h15, } ctx := context.Background() t.Run("does nothing if all the heads are in database", func(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() for _, h := range heads { require.NoError(t, store.IdempotentInsertHead(context.TODO(), h)) } logger := store.Config.CreateProductionLogger() ethClient := new(mocks.Client) store.EthClient = ethClient ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{}, cltest.NeverSleeper{}) err := ht.Backfill(ctx, h12, 2) require.NoError(t, err) ethClient.AssertExpectations(t) }) t.Run("fetches a missing head", func(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() for _, h := range heads { require.NoError(t, store.IdempotentInsertHead(context.TODO(), h)) } logger := store.Config.CreateProductionLogger() ethClient := new(mocks.Client) store.EthClient = ethClient ethClient.On("HeaderByNumber", mock.Anything, big.NewInt(10)). Return(&head10, nil) ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{}, cltest.NeverSleeper{}) var depth uint = 3 err := ht.Backfill(ctx, h12, depth) require.NoError(t, err) h, err := store.Chain(ctx, h12.Hash, depth) require.NoError(t, err) assert.Equal(t, int64(12), h.Number) require.NotNil(t, h.Parent) assert.Equal(t, int64(11), h.Parent.Number) require.NotNil(t, h.Parent) assert.Equal(t, int64(10), h.Parent.Parent.Number) require.Nil(t, h.Parent.Parent.Parent) writtenHead, err := store.HeadByHash(context.TODO(), head10.Hash) require.NoError(t, err) assert.Equal(t, int64(10), writtenHead.Number) ethClient.AssertExpectations(t) }) t.Run("fetches only heads that are missing", func(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() for _, h := range heads { require.NoError(t, store.IdempotentInsertHead(context.TODO(), h)) } logger := store.Config.CreateProductionLogger() ethClient := new(mocks.Client) store.EthClient = ethClient ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{}, cltest.NeverSleeper{}) ethClient.On("HeaderByNumber", mock.Anything, big.NewInt(10)). Return(&head10, nil) ethClient.On("HeaderByNumber", mock.Anything, big.NewInt(8)). 
Return(&head8, nil) // Needs to be 8 because there are 8 heads in chain (15,14,13,12,11,10,9,8) var depth uint = 8 err := ht.Backfill(ctx, h15, depth) require.NoError(t, err) h, err := store.Chain(ctx, h15.Hash, depth) require.NoError(t, err) require.Equal(t, uint32(8), h.ChainLength()) earliestInChain := h.EarliestInChain() assert.Equal(t, head8.Number, earliestInChain.Number) assert.Equal(t, head8.Hash, earliestInChain.Hash) ethClient.AssertExpectations(t) }) t.Run("does not backfill if chain length is already greater than or equal to depth", func(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() for _, h := range heads { require.NoError(t, store.IdempotentInsertHead(context.TODO(), h)) } logger := store.Config.CreateProductionLogger() ethClient := new(mocks.Client) store.EthClient = ethClient ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{}, cltest.NeverSleeper{}) err := ht.Backfill(ctx, h15, 3) require.NoError(t, err) err = ht.Backfill(ctx, h15, 5) require.NoError(t, err) ethClient.AssertExpectations(t) }) t.Run("only backfills to height 0 if chain length would otherwise cause it to try and fetch a negative head", func(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() logger := store.Config.CreateProductionLogger() ethClient := new(mocks.Client) store.EthClient = ethClient ethClient.On("HeaderByNumber", mock.Anything, big.NewInt(0)). Return(&head0, nil) ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{}, cltest.NeverSleeper{}) require.NoError(t, store.IdempotentInsertHead(context.TODO(), h1)) err := ht.Backfill(ctx, h1, 400) require.NoError(t, err) h, err := store.Chain(ctx, h1.Hash, 400) require.NoError(t, err) require.Equal(t, uint32(2), h.ChainLength()) require.Equal(t, int64(0), h.EarliestInChain().Number) ethClient.AssertExpectations(t) }) t.Run("abandons backfill and returns error if the eth node returns not found", func(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() for _, h := range heads { require.NoError(t, store.IdempotentInsertHead(context.TODO(), h)) } logger := store.Config.CreateProductionLogger() ethClient := new(mocks.Client) store.EthClient = ethClient ethClient.On("HeaderByNumber", mock.Anything, big.NewInt(10)). Return(&head10, nil). Once() ethClient.On("HeaderByNumber", mock.Anything, big.NewInt(8)). Return(nil, ethereum.NotFound). Once() ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{}, cltest.NeverSleeper{}) err := ht.Backfill(ctx, h12, 400) require.Error(t, err) require.EqualError(t, err, "fetchAndSaveHead failed: not found") h, err := store.Chain(ctx, h12.Hash, 400) require.NoError(t, err) // Should contain 12, 11, 10, 9 assert.Equal(t, 4, int(h.ChainLength())) assert.Equal(t, int64(9), h.EarliestInChain().Number) ethClient.AssertExpectations(t) }) t.Run("abandons backfill and returns error if the context time budget is exceeded", func(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() for _, h := range heads { require.NoError(t, store.IdempotentInsertHead(context.TODO(), h)) } logger := store.Config.CreateProductionLogger() ethClient := new(mocks.Client) store.EthClient = ethClient ethClient.On("HeaderByNumber", mock.Anything, big.NewInt(10)). Return(&head10, nil) ethClient.On("HeaderByNumber", mock.Anything, big.NewInt(8)). 
Return(nil, context.DeadlineExceeded) ht := services.NewHeadTracker(logger, store, []strpkg.HeadTrackable{}, cltest.NeverSleeper{}) err := ht.Backfill(ctx, h12, 400) require.Error(t, err) require.EqualError(t, err, "fetchAndSaveHead failed: context deadline exceeded") h, err := store.Chain(ctx, h12.Hash, 400) require.NoError(t, err) // Should contain 12, 11, 10, 9 assert.Equal(t, 4, int(h.ChainLength())) assert.Equal(t, int64(9), h.EarliestInChain().Number) ethClient.AssertExpectations(t) }) }
TestHeadTracker_Get
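The middle completes the identifier opened by the trailing "func" of the prefix; the resulting declaration, whose table-driven body ("greater", "less than", "zero", "nil", ...) appears in the suffix above, is:

func TestHeadTracker_Get(t *testing.T) { /* body shown in the suffix above */ }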
escaped.rs
use crate::NewState; use bracket_lib::prelude::*; use legion::World; pub fn game_over_left(ctx: &mut BTerm, ecs: &World) -> NewState { let mut batch = DrawBatch::new(); // Clear the screen for i in 0..5 { batch.target(i); batch.cls(); } batch.target(crate::LAYER_MAP); let backdrop = XpFile::from_resource("resources/takeoff.xp").unwrap(); let sprite = MultiTileSprite::from_xp(&backdrop); sprite.add_to_batch(&mut batch, Point::new(0, 0)); batch.target(crate::LAYER_TEXT); batch.print_color( Point::new(70, 5), "SecBot has Escaped!", ColorPair::new(GOLD, BLACK), ); batch.print_color( Point::new(50, 25), "Press ENTER or ESCAPE to try again.", ColorPair::new(CYAN, BLACK), );
let stats = crate::stats::get_stats(); let bw = ColorPair::new(WHITE, BLACK); let status = crate::render::gui::PlayerStatus::query(ecs, 0); batch.print_color( Point::new(50, 7), format!("SecBot survived for {} turns.", stats.turns_elapsed), bw, ); batch.print_color( Point::new(50, 8), format!( "A total of {} things were killed/destroyed.", stats.total_dead ), bw, ); batch.print_color( Point::new(50, 9), format!("{} props were smashed.", stats.total_props_smashed), bw, ); batch.print_color( Point::new(50, 10), format!("{} monsters died.", stats.total_dead), bw, ); batch.print_color( Point::new(50, 11), format!("Caused ${} in property damage.", status.property_damage), bw, ); let (color, phrase) = if status.human_resources < 10 { (RED, "About to kill you") } else if status.human_resources < 20 { (ORANGE, "Cranky") } else if status.human_resources < 30 { (ORANGE, "Quite Concerned") } else if status.human_resources < 40 { (YELLOW, "Nervous") } else if status.human_resources < 60 { (GRAY, "Normal") } else if status.human_resources < 70 { (GREEN, "Content") } else if status.human_resources < 90 { (GREEN, "You're doing great!") } else { (GREEN, "Ecstatic") }; batch.print_color( Point::new(50, 12), format!("Human Resources were: {}", phrase), ColorPair::new(color, BLACK), ); batch.print_color( Point::new(50, 13), format!( "There were {} colonists in the game:", status.colony.total_colonists ), bw, ); batch.print_color( Point::new(50, 14), format!(" ... of whom {} were rescued,", status.colony.rescued), bw, ); batch.print_color( Point::new(50, 15), format!( " ... {} died during the rescue attempt,", status.colony.died_in_rescue ), bw, ); batch.print_color( Point::new(50, 16), format!( " ... {} were already dead when you got there,", status.colony.located_dead ), bw, ); batch.submit(1_000_000).expect("Unable to submit batch"); if let Some(key) = ctx.key { if key == VirtualKeyCode::Return || key == VirtualKeyCode::Escape { return NewState::Restart; } } NewState::NoChange }
state_test.go
package state import ( "context" "testing" "github.com/filecoin-project/go-address" "github.com/filecoin-project/specs-actors/actors/abi" "github.com/filecoin-project/specs-actors/actors/builtin" "github.com/ipfs/go-cid" bstore "github.com/ipfs/go-ipfs-blockstore" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/sbwtw/go-filecoin/internal/pkg/cborutil" "github.com/sbwtw/go-filecoin/internal/pkg/constants" e "github.com/sbwtw/go-filecoin/internal/pkg/enccid" "github.com/sbwtw/go-filecoin/internal/pkg/repo" tf "github.com/sbwtw/go-filecoin/internal/pkg/testhelpers/testflags" "github.com/sbwtw/go-filecoin/internal/pkg/vm/actor" vmaddr "github.com/sbwtw/go-filecoin/internal/pkg/vm/address" ) func TestStatePutGet(t *testing.T) { tf.UnitTest(t) ctx := context.Background() bs := bstore.NewBlockstore(repo.NewInMemoryRepo().Datastore()) cst := cborutil.NewIpldStore(bs) tree := NewState(cst) act1 := actor.NewActor(builtin.AccountActorCodeID, abi.NewTokenAmount(0), cid.Undef) act1.IncrementSeqNum() act2 := actor.NewActor(builtin.AccountActorCodeID, abi.NewTokenAmount(0), cid.Undef) act2.IncrementSeqNum() act2.IncrementSeqNum() addrGetter := vmaddr.NewForTestGetter() addr1 := addrGetter() addr2 := addrGetter() assert.NoError(t, tree.SetActor(ctx, addr1, act1)) assert.NoError(t, tree.SetActor(ctx, addr2, act2)) act1out, found, err := tree.GetActor(ctx, addr1) assert.NoError(t, err) assert.True(t, found) assert.Equal(t, act1, act1out) act2out, found, err := tree.GetActor(ctx, addr2) assert.NoError(t, err) assert.True(t, found) assert.Equal(t, act2, act2out) // now test it persists across recreation of tree tcid, err := tree.Commit(ctx) assert.NoError(t, err) tree2, err := LoadState(ctx, cst, tcid) assert.NoError(t, err) act1out2, found, err := tree2.GetActor(ctx, addr1) assert.NoError(t, err) assert.True(t, found) assert.Equal(t, act1, act1out2) act2out2, found, err := tree2.GetActor(ctx, addr2) assert.NoError(t, err) assert.True(t, found) assert.Equal(t, act2, act2out2) } func
(t *testing.T) { tf.UnitTest(t) ctx := context.Background() bs := bstore.NewBlockstore(repo.NewInMemoryRepo().Datastore()) cst := cborutil.NewIpldStore(bs) tree := NewState(cst) a, found, err := tree.GetActor(ctx, vmaddr.NewForTestGetter()()) assert.Nil(t, a) assert.False(t, found) assert.NoError(t, err) c, err := constants.DefaultCidBuilder.Sum([]byte("cats")) assert.NoError(t, err) tr2, err := LoadState(ctx, cst, c) assert.Error(t, err) assert.Nil(t, tr2) } func TestGetAllActors(t *testing.T) { tf.UnitTest(t) ctx := context.Background() bs := bstore.NewBlockstore(repo.NewInMemoryRepo().Datastore()) cst := cborutil.NewIpldStore(bs) tree := NewState(cst) addr := vmaddr.NewForTestGetter()() actor := actor.Actor{Code: e.NewCid(builtin.AccountActorCodeID), CallSeqNum: 1234, Balance: abi.NewTokenAmount(123)} err := tree.SetActor(ctx, addr, &actor) assert.NoError(t, err) _, err = tree.Commit(ctx) require.NoError(t, err) results := tree.GetAllActors(ctx) for result := range results { assert.Equal(t, addr, result.Key) assert.Equal(t, actor.Code, result.Actor.Code) assert.Equal(t, actor.CallSeqNum, result.Actor.CallSeqNum) assert.Equal(t, actor.Balance, result.Actor.Balance) } } func TestStateTreeConsistency(t *testing.T) { tf.UnitTest(t) ctx := context.Background() bs := bstore.NewBlockstore(repo.NewInMemoryRepo().Datastore()) cst := cborutil.NewIpldStore(bs) tree := NewState(cst) var addrs []address.Address for i := 100; i < 150; i++ { a, err := address.NewIDAddress(uint64(i)) if err != nil { t.Fatal(err) } addrs = append(addrs, a) } randomCid, err := cid.Decode("bafy2bzacecu7n7wbtogznrtuuvf73dsz7wasgyneqasksdblxupnyovmtwxxu") if err != nil { t.Fatal(err) } for i, a := range addrs { if err := tree.SetActor(ctx, a, &actor.Actor{ Code: e.NewCid(randomCid), Head: e.NewCid(randomCid), Balance: abi.NewTokenAmount(int64(10000 + i)), CallSeqNum: uint64(1000 - i), }); err != nil { t.Fatal(err) } } root, err := tree.Commit(ctx) if err != nil { t.Fatal(err) } if root.String() != "bafy2bzaceadyjnrv3sbjvowfl3jr4pdn5p2bf3exjjie2f3shg4oy5sub7h34" { t.Fatalf("State Tree Mismatch. Expected: bafy2bzaceadyjnrv3sbjvowfl3jr4pdn5p2bf3exjjie2f3shg4oy5sub7h34 Actual: %s", root.String()) } }
TestStateErrors
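// Hedged usage sketch (not part of the source above): the put/commit/reload
// round trip the state tests exercise, condensed into one helper. The name
// exampleStateRoundTrip is hypothetical; the calls (NewState, SetActor,
// Commit, LoadState) are taken directly from the tests themselves.
func exampleStateRoundTrip(ctx context.Context) error {
	// in-memory store, as in the tests
	cst := cborutil.NewIpldStore(bstore.NewBlockstore(repo.NewInMemoryRepo().Datastore()))
	tree := NewState(cst)
	addr := vmaddr.NewForTestGetter()()
	act := actor.NewActor(builtin.AccountActorCodeID, abi.NewTokenAmount(0), cid.Undef)
	if err := tree.SetActor(ctx, addr, act); err != nil {
		return err
	}
	root, err := tree.Commit(ctx) // persists the tree and yields its root CID
	if err != nil {
		return err
	}
	_, err = LoadState(ctx, cst, root) // the same tree can be reloaded from that root
	return err
}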
responses.go
package core_types import ( "encoding/json" "time" abci "github.com/romakingwolf/patriot/core/abci/types" "github.com/romakingwolf/patriot/core/crypto" cmn "github.com/romakingwolf/patriot/core/libs/common" "github.com/romakingwolf/patriot/core/p2p" "github.com/romakingwolf/patriot/core/state" "github.com/romakingwolf/patriot/core/types" ) // List of blocks type ResultBlockchainInfo struct { LastHeight int64 `json:"last_height"` BlockMetas []*types.BlockMeta `json:"block_metas"` } // Genesis file type ResultGenesis struct { Genesis *types.GenesisDoc `json:"genesis"` } // Single block (with meta) type ResultBlock struct { BlockMeta *types.BlockMeta `json:"block_meta"` Block *types.Block `json:"block"` } // Commit and Header type ResultCommit struct { types.SignedHeader `json:"signed_header"` CanonicalCommit bool `json:"canonical"` } // ABCI results from a block type ResultBlockResults struct { Height int64 `json:"height"` Results *state.ABCIResponses `json:"results"` } // NewResultCommit is a helper to initialize the ResultCommit with // the embedded struct func NewResultCommit(header *types.Header, commit *types.Commit, canonical bool) *ResultCommit { return &ResultCommit{ SignedHeader: types.SignedHeader{ Header: header, Commit: commit, }, CanonicalCommit: canonical, } } // Info about the node's syncing state type SyncInfo struct { LatestBlockHash cmn.HexBytes `json:"latest_block_hash"` LatestAppHash cmn.HexBytes `json:"latest_app_hash"` LatestBlockHeight int64 `json:"latest_block_height"` LatestBlockTime time.Time `json:"latest_block_time"` CatchingUp bool `json:"catching_up"` } // Info about the node's validator type ValidatorInfo struct { Address cmn.HexBytes `json:"address"` PubKey crypto.PubKey `json:"pub_key"` VotingPower int64 `json:"voting_power"` } // Node Status type ResultStatus struct { NodeInfo p2p.DefaultNodeInfo `json:"node_info"` SyncInfo SyncInfo `json:"sync_info"` ValidatorInfo ValidatorInfo `json:"validator_info"` } // Is TxIndexing enabled func (s *ResultStatus) TxIndexEnabled() bool { if s == nil
return s.NodeInfo.Other.TxIndex == "on" } // Info about peer connections type ResultNetInfo struct { Listening bool `json:"listening"` Listeners []string `json:"listeners"` NPeers int `json:"n_peers"` Peers []Peer `json:"peers"` } // Log from dialing seeds type ResultDialSeeds struct { Log string `json:"log"` } // Log from dialing peers type ResultDialPeers struct { Log string `json:"log"` } // A peer type Peer struct { NodeInfo p2p.DefaultNodeInfo `json:"node_info"` IsOutbound bool `json:"is_outbound"` ConnectionStatus p2p.ConnectionStatus `json:"connection_status"` RemoteIP string `json:"remote_ip"` } // Validators for a height type ResultValidators struct { BlockHeight int64 `json:"block_height"` Validators []*types.Validator `json:"validators"` } // ConsensusParams for given height type ResultConsensusParams struct { BlockHeight int64 `json:"block_height"` ConsensusParams types.ConsensusParams `json:"consensus_params"` } // Info about the consensus state. // UNSTABLE type ResultDumpConsensusState struct { RoundState json.RawMessage `json:"round_state"` Peers []PeerStateInfo `json:"peers"` } // UNSTABLE type PeerStateInfo struct { NodeAddress string `json:"node_address"` PeerState json.RawMessage `json:"peer_state"` } // UNSTABLE type ResultConsensusState struct { RoundState json.RawMessage `json:"round_state"` } // CheckTx result type ResultBroadcastTx struct { Code uint32 `json:"code"` Data cmn.HexBytes `json:"data"` Log string `json:"log"` Hash cmn.HexBytes `json:"hash"` } // CheckTx and DeliverTx results type ResultBroadcastTxCommit struct { CheckTx abci.ResponseCheckTx `json:"check_tx"` DeliverTx abci.ResponseDeliverTx `json:"deliver_tx"` Hash cmn.HexBytes `json:"hash"` Height int64 `json:"height"` } // Result of querying for a tx type ResultTx struct { Hash cmn.HexBytes `json:"hash"` Height int64 `json:"height"` Index uint32 `json:"index"` TxResult abci.ResponseDeliverTx `json:"tx_result"` Tx types.Tx `json:"tx"` Proof types.TxProof `json:"proof,omitempty"` } // Result of searching for txs type ResultTxSearch struct { Txs []*ResultTx `json:"txs"` TotalCount int `json:"total_count"` } // List of mempool txs type ResultUnconfirmedTxs struct { Count int `json:"n_txs"` Total int `json:"total"` TotalBytes int64 `json:"total_bytes"` Txs []types.Tx `json:"txs"` } // Info abci msg type ResultABCIInfo struct { Response abci.ResponseInfo `json:"response"` } // Query abci msg type ResultABCIQuery struct { Response abci.ResponseQuery `json:"response"` } // Result of broadcasting evidence type ResultBroadcastEvidence struct { Hash []byte `json:"hash"` } // empty results type ( ResultUnsafeFlushMempool struct{} ResultUnsafeProfile struct{} ResultSubscribe struct{} ResultUnsubscribe struct{} ResultHealth struct{} ) // Event data from a subscription type ResultEvent struct { Query string `json:"query"` Data types.TMEventData `json:"data"` Events map[string][]string `json:"events"` }
{ return false }
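// Hedged usage sketch (not part of responses.go above) of NewResultCommit and
// the nil-safe TxIndexEnabled accessor; hdr and cmt are hypothetical values
// obtained elsewhere.
//
//	res := NewResultCommit(hdr, cmt, true) // embeds Header/Commit in a SignedHeader
//	_ = res.CanonicalCommit                // true
//
//	var status *ResultStatus    // a nil receiver is safe:
//	_ = status.TxIndexEnabled() // returns false rather than panicking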
angular-rock.js
/* * angular-rock * https://github.com/romeOz/angular-rock * * Version: 0.10.0 * License: MIT */ (function () { 'use strict'; angular.module('rock', [ 'pascalprecht.translate', 'rock.helpers', 'rock.services', 'rock.directives', 'rock.filters', 'rock.notification', 'rock.forms' ] ) .config(configRock) .run(runRock); configRock.$inject = ['$httpProvider', '$translateProvider', '$provide']; /** * Configure rock. * @param $httpProvider * @param $translateProvider * @param $provide */ function configRock($httpProvider, $translateProvider, $provide) { // configure http $httpProvider.defaults.headers.common['Content-Type'] = 'application/json'; $httpProvider.interceptors.push(httpProvider); // configure i18n var lang = i18nProvider($translateProvider); $provide.value('rock', {lang: lang}); } function i18nProvider($translateProvider) { var nav = window.navigator, lang = ( angular.element(document.querySelector('html')).attr('lang') || nav.language || nav.browserLanguage || nav.systemLanguage || nav.userLanguage || 'en' ).split('-')[0]; $translateProvider.translations('en', { "lang": { "notPage": "page not found", "notContent": "content is empty", "notFound": "not found", "search": "search", "username": "username", "email": "e-mail", "password": "password", "confirmPassword": "confirm password", "token": "token", "captcha": "captcha", "invalidEmail": "Email is invalid.", "invalidTokenActivated": "Wrong token or user is already activated.", "invalidPasswordOrEmail": "Password or email is invalid.", "invalidPasswordOrUsername": "Password or login is invalid.", "existsUsername": "User with this name already exists.", "existsUsernameOrEmail": "User with this name\/e-mail already exists.", "notExistsUser": "User with this email does not exist or is blocked.", "notActivatedUser": "Account is not activated", "failLogin": "Fail authorization.", "failRecovery": "Fail recovery password.", "failSignup": "Fail registration.", "failSendEmail": "Email not sent.", "failActivated": "Fail activated.", "failLogout": "Fail logout.", "successLogin": "You successfully login.", "successLogout": "You successfully logout.", "successSignup": "Thanks for signing up!<br\/>On e-mail <b>{{email}}<\/b>, sent an email with an activation code.", "successRecovery": "Your new password has been sent to your e-mail <b>{{email}}<\/b>.", "successActivate": "Your account is activated.", "signup": "sign up", "login": "login", "signin": "sign in", "activation": "activation", "close": "close", "activate": "activate", "registration": "registration", "authorization": "login", "resetPassword": "reset password", "recovery": "recovery", "loginLogout": "You're sign in. Authorization is required to <a href=\"{{link}}\" rel=\"nofollow\">logout<\/a> of your profile", "signupLogout": "You're sign in. The registration must be <a href=\"{{link}}\" rel=\"nofollow\">logout<\/a> of your profile", "recoveryLogout": "You're sign in. To recover the password required to <a href=\"{{link}}\" rel=\"nofollow\">logout<\/a> of your profile", "notJs": "Your browser does not support JavaScript. 
Try to fix this in the browser settings.", "logout": "logout", "characters": "characters", "failHTTPRequest": "HTTP-request error.", "failServer": "Server error.", "failAccess": "Denied access.", "error": "error", "value": "value", "success": "success", "forgotPassword": "forgot password" }, "validate": { "required": "{{name}} must not be empty", "notRequired": "{{name}} must be empty", "min": "{{name}} must be greater than {{minValue}}", "minInclusive": "{{name}} must be greater than or equals {{minValue}}", "notMin": "{{name}} must not be greater than {{minValue}}", "notMinInclusive": "{{name}} must not be greater than or equals {{minValue}}", "max": "{{name}} must be lower than {{maxValue}}", "maxInclusive": "{{name}} must be lower than or equals {{maxValue}}", "notMax": "{{name}} must not be lower than {{maxValue}}", "notMaxInclusive": "{{name}} must not be lower than or equals {{maxValue}}", "email": "{{name}} must be valid", "notEmail": "{{name}} must not be valid", "regex": "{{name}} contains invalid characters", "notRegex": "{{name}} does not contain invalid characters", "captcha": "captcha must be valid", "notCaptcha": "captcha must not be valid", "confirm": "values must be equals", "notConfirm": "values must not be equals", "call": "{{name}} must be valid", "unique": "{{value}} has already been taken", "notUnique": "{{value}} not already been taken.", "csrf": "CSRF-token must be valid", "notCsrf": "CSRF-token must not be valid", "date": "{{name}} must be date", "dateFormat": "{{name}} must be a valid date. Sample format: {{format}}", "notDate": "{{name}} must not be date", "notDateFormat": "{{name}} must not be a valid date in the format {{format}}" } }); $translateProvider.translations('ru', { "lang": { "notPage": "страница не найдена", "notContent": "материал отсутсвует", "notFound": "ничего не найдено", "resetPassword": "сбросить пароль", "recovery": "восстановление пароля", "signup": "зарегистрироваться", "signin": "войти", "activation": "активация", "password": "пароль", "confirmPassword": "подтверждение пароля", "token": "токен", "captcha": "код подтверждения", "login": "логин", "successLogged": "вы успешно авторизированы", "close": "закрыть", "notJs": "Ваш браузер не поддерживает JavaScript. 
Попробуйте исправить это в настройках браузера.", "email": "e-mail", "username": "логин\/псевдоним", "existsUsername": "Пользователь с таким именем уже существует.", "existsUsernameOrEmail": "Пользователь с таким именем\/e-mail уже существует.", "notExistsUser": "Пользователя с таким email не существует или блокирован.", "invalidEmail": "Указан неверный email.", "invalidPasswordOrEmail": "Указан неверный пароль или email.", "invalidPasswordOrUsername": "Указан неверный пароль или логин.", "notActivatedUser": "Учётная запись не активирована.", "invalidTokenActivated": "Неверный токен или пользователь уже активирован.", "successLogin": "Вы успешно авторизировались.", "successLogout": "Вы успешно разлогинились.", "successSignup": "Спасибо за регистрацию!<br\/>На указанный Вами адрес электронной почты <b>{{email}}<\/b>, отправлено письмо с подтверждением.", "successRecovery": "Новый пароль, был отправлен на Ваш адрес электронной почты <b>{{email}}<\/b>.", "successActivate": "Ваша учётная запись активирована.", "failLogin": "Ошибка при авторизации.", "failRecovery": "Ошибка при восстановлении пароля.", "failSignup": "Ошибка при регистрации.", "failActivated": "Ошибка при активации.", "failLogout": "Ошибка при разлогировании.", "failSendEmail": "email не отправлен.", "loginLogout": "Вы авторизированы. Для повторной авторизации требуется <a href=\"{{link}}\" rel=\"nofollow\">выйти<\/a> из своего профиля.", "signupLogout": "Вы авторизированы. Для регистрации требуется <a href=\"{{link}}\" rel=\"nofollow\">выйти<\/a> из своего профиля.", "recoveryLogout": "Вы авторизированы. Для для восстановлении пароля требуется <a href=\"{{link}}\" rel=\"nofollow\">выйти<\/a> из своего профиля.", "logout": "выход", "characters": "символов", "failHTTPRequest": "Ошибка HTTP-запроса.", "failServer": "Ошибка сервера.", "failAccess": "Отказано в доступе.", "error": "ошибка", "value": "значение", "forgotPassword": "забыли пароль", "success": "успех" }, "validate": { "required": "{{name}} не должно быть пустым", "notRequired": "{{name}} должно быть пустым", "min": "{{name}} должно быть больше {{minValue}}", "minInclusive": "{{name}} должно быть больше или равно {{minValue}}", "notMin": "{{name}} не должно быть больше {{minValue}}", "notMinInclusive": "{{name}} не должно быть больше или равно {{minValue}}", "max": "{{name}} должно быть меньше {{maxValue}}", "maxInclusive": "{{name}} должно быть меньше или равно {{maxValue}}", "notMax": "{{name}} не должно быть меньше {{maxValue}}", "notMaxInclusive": "{{name}} не дожно быть меньше или равно {{maxValue}}", "email": "{{name}} должен быть верным", "notEmail": "{{name}} не должен быть верным", "regex": "{{name}} содержит неверные символы", "notRegex": "{{name}} не содержит верные символы", "captcha": "каптча должна быть верной", "notCaptcha": "каптча не должна быть верной", "confirm": "значения должны совпадать", "notConfirm": "значения не должны совпадать", "call": "{{name}} должно быть верным", "unique": "{{value}} уже существует", "notUnique": "{{value}} должно существовать", "csrf": "CSRF-токен должен быть верным", "notCsrf": "CSRF-токен не должен быть верным", "date": "{{name}} должно быть датой", "dateFormat": "{{name}} должно соответствовать формату: {{format}}", "notDate": "{{name}} не должно быть датой", "notDateFormat": "{{name}} не должно соответствовать формату: {{format}}" } }); $translateProvider.preferredLanguage(lang); return lang; } httpProvider.$inject = ['$q', '$injector']; function httpProvider($q, $injector) { return { response: function (response) { 
/** @type {httpUtils} httpUtils */ var httpUtils = $injector.get('httpUtils'); if (!response.config.cache) { httpUtils.csrf(response.data, response.headers); } return response; }, responseError: function (response) { // do something on error /** @type {httpUtils} httpUtils */ var httpUtils = $injector.get('httpUtils'); if (response.config && !response.config.cache) { httpUtils.csrf(response.data, response.headers); } httpUtils.error(response.data, response.status, response.statusText); return $q.reject(response); } }; } runRock.$inject = ['$rootScope', '$http', 'csrfUtils', 'userUtils', 'alias', 'rock', 'htmlUtils']; /** * * @param $rootScope * @param $http * @param {csrfUtils} csrfUtils * @param {userUtils} userUtils * @param {alias} alias * @param rock * @param {htmlUtils} htmlUtils */ function runRock($rootScope, $http, csrfUtils, userUtils, alias, rock, htmlUtils) { runCSRF(csrfUtils); $rootScope.rock = {}; /** @type {string} */ $rootScope.rock.lang = rock.lang; /** @type {csrfUtils} */ $rootScope.rock.csrf = csrfUtils; /** @type {userUtils} */ $rootScope.rock.user = userUtils; /** @type {htmlUtils} */ $rootScope.rock.html = htmlUtils; /** @type {alias} */ $rootScope.rock.alias = alias; $rootScope.$watch(function (scope) { return scope.rock.csrf.getToken(); }, function (value) { if (!value) { return; } $http.defaults.headers.post['X-CSRF-Token'] = value; }); } function runCSRF(csrfUtils) { var csrfParam = angular.element(document.querySelector('meta[name=csrf-param]')).attr('content'), csrfToken = angular.element(document.querySelector('meta[name=csrf-token]')).attr('content'); if (csrfParam && csrfToken) { csrfUtils.addToken(csrfToken); csrfUtils.addParam(csrfParam); } } angular .module('rock.helpers', []) .factory('stringHelper', stringHelper) .factory('collectionHelper', collectionHelper) .factory('alias', alias); /** * @ngdoc service * @name stringHelper */ function stringHelper() { var StringHelper = {}; /** * Upper first char. * @ngdoc method * @name stringHelper#upperFirst * @param {string} value * @returns {string} */ StringHelper.upperFirst = function (value) { return value.charAt(0).toUpperCase() + value.slice(1); }; /** * Find the position of the first occurrence of a substring in a string. * @ngdoc method * @name stringHelper#strpos * @param haystack * @param needle * @param offset * @returns {*|Number} * @link http://kevin.vanzonneveld.net */ StringHelper.strpos = function (haystack, needle, offset) { if (offset === undefined) { offset = 0; } var i = haystack.indexOf(needle, offset); // returns -1 return i >= 0 ? i : false; }; /** * Reverse string * @ngdoc method * @name stringHelper#reverse * @param string * @returns {string} */ StringHelper.reverse = function (string) { return string.split("").reverse().join(""); }; /** * Binary safe string comparison. * * ```js * strncmp('aaa', 'aab', 2); // 0 * strncmp('aaa', 'aab', 3 ); // -1 * ``` * @ngdoc method * @name stringHelper#strncmp * @param {string} str1 * @param {string} str2 * @param {number} lgth * @return {number} */ StringHelper.strncmp = function (str1, str2, lgth) { var s1 = (str1 + '') .substr(0, lgth), s2 = (str2 + '') .substr(0, lgth); return ((s1 == s2) ? 0 : ((s1 > s2) ? 1 : -1)); }; /** * Find the position of the first occurrence of a substring in a string. 
* @ngdoc method * @name stringHelper#strpos * @param {string} haystack * @param {string} needle * @param {number} offset * @return {number|boolean} */ StringHelper.strpos = function (haystack, needle, offset) { var i = haystack.indexOf(needle, offset); // returns -1 return i >= 0 ? i : false; }; /** * Strip whitespace (or other characters) from the beginning of a string. * @ngdoc method * @name stringHelper#ltrim * @param {string} str * @param {string=} charlist * @return {string} */ StringHelper.ltrim = function (str, charlist) { charlist = !charlist ? ' \\s\u00A0' : (charlist + '') .replace(/([\[\]\(\)\.\?\/\*\{\}\+\$\^\:])/g, '$1'); var re = new RegExp('^[' + charlist + ']+', 'g'); return (str + '') .replace(re, ''); }; /** * Strip whitespace (or other characters) from the end of a string. * @ngdoc method * @name stringHelper#rtrim * @param {string} str * @param {string=} charlist * @return {string} */ StringHelper.rtrim = function (str, charlist) { charlist = !charlist ? ' \\s\u00A0' : (charlist + '') .replace(/([\[\]\(\)\.\?\/\*\{\}\+\$\^\:])/g, '\\$1'); var re = new RegExp('[' + charlist + ']+$', 'g'); return (str + '') .replace(re, ''); }; return StringHelper; } /** * @ngdoc service * @name collectionHelper * @return {*} */ function collectionHelper() { var CollectionHelper = {}; /** * Calculate CSRF-data. * @ngdoc method * @name collectionHelper#flatten * @param {Array} value * @param {Function} callback * @return {Array|Object} */ CollectionHelper.flatten = function (value, callback) { var isArray = angular.isArray(value), result = isArray ? [] : {}; var recurs = function (value, isArray) { angular.forEach(value, function (value, key) { if (angular.isObject(value)) { recurs(value, isArray); return; } if (angular.isFunction(callback)) { value = callback(value); } if (isArray) { result.push(value); } else { result[key] = value; } }); }; recurs(value, isArray); return result; }; return CollectionHelper; } alias.$inject = ['stringHelper', 'notification']; /** * @ngdoc service * @name alias * @returns {*} */ function alias(stringHelper, notification) { var _alias = {}, aliases = {}; /** * @ngdoc method * @name alias#set * @param {string} alias * @param {string} path */ _alias.set = function (alias, path) { if (stringHelper.strncmp(alias, '@', 1)) { alias = '@' + alias; } var delimiter = '/', pos = stringHelper.strpos(alias, delimiter), root = pos === false ? alias : alias.substr(0, pos); if (path !== null) { path = stringHelper.strncmp(path, '@', 1) ? stringHelper.rtrim(path, '\\/') : _alias.get(path); if (aliases[root] === undefined) { if (pos === false) { aliases[root] = path; } else { aliases[root] = {}; aliases[root][alias] = path; } } else if (angular.isString(aliases[root])) { if (pos === false) { aliases[root] = path; } else { aliases[root] = {}; aliases[root][alias] = path; aliases[root][root] = aliases[root]; } } else { aliases[root][alias] = path; //krsort(aliases[root]); } } else if (aliases[root] !== undefined) { if (angular.isArray(aliases[root])) { aliases[root][alias] = undefined; } else if (pos === false) { aliases[root] = undefined; } } }; /** * @ngdoc method * @name alias#get * @param {string} alias * @return {*} */ _alias.get = function (alias) { if (stringHelper.strncmp(alias, '@', 1)) { // not an alias return alias; } var delimiter = '/', pos = stringHelper.strpos(alias, delimiter), root = pos === false ? alias : alias.substr(0, pos); if (aliases[root] !== undefined) { if (angular.isString(aliases[root])) { return pos === false ? 
aliases[root] : aliases[root] + alias.substr(pos); } else { var result = _.find(aliases[root], function (path, name) { if (stringHelper.strpos(alias + delimiter, name + delimiter) === 0) { return path + alias.substr(name.length); } }); } } if (result === undefined) { notification.debug('Invalid path alias: ' + alias); } return result; }; /** * @ngdoc method * @name alias#remove * @param {string} alias */ _alias.remove = function (alias) { aliases[alias] = undefined; }; return _alias; } angular .module('rock.services', []) .factory('userUtils', userUtils) .provider('formUtils', formUtils) .provider('httpUtils', httpUtils) .factory('csrfUtils', csrfUtils) .factory('modalUtils', modalUtils) .provider('htmlUtils', htmlUtils); userUtils.$inject = ['$rootScope', '$http', 'csrfUtils', 'httpUtils', 'notification']; /** * @ngdoc service * @name userUtils */ function userUtils($rootScope, $http, csrfUtils, httpUtils, notification) { var userUtils = {}; $rootScope._user = undefined; /** * Set list data fo user. * @param {Object} data */ userUtils.set = function (data) { $rootScope._user = httpUtils.removeExtend(data); }; /** * Adds data by key. * @param {string} key * @param {*} value */ userUtils.add = function (key, value) { if (!$rootScope._user) { $rootScope._user = {}; } $rootScope._user[key] = httpUtils.removeExtend(value); }; /** * Returns data by key. * @param {string} key * @return {*} */ userUtils.get = function (key) { if (!$rootScope._user) { return null; } return $rootScope._user[key] !== undefined ? $rootScope._user[key] : null; }; /** * Returns list data. * @return {undefined|*} */ userUtils.getAll = function () { return $rootScope._user; }; /** * Is logged. * @return {boolean|undefined} */ userUtils.isLogged = function () { if ($rootScope._user === undefined) { return undefined; } return !!$rootScope._user; }; /** * Logout user. * @param {string} url */ userUtils.logout = function (url) { $http.get(URI(url).setSearch(csrfUtils.get())) .success(function () { $rootScope._user = null; notification.success('lang.successLogout'); $rootScope.$broadcast('onLogout'); }); }; return userUtils; } /** * @ngdoc provider * @name formUtilsProvider * @returns {*} */ function formUtils() { var defaultMsg = 'Success.'; /** * @ngdoc method * @name formUtilsProvider#defaultMsg * @description * @param {string} msg */ this.defaultMsg = function (msg) { defaultMsg = msg; }; this.$get = ['$http', function ($http) { var formUtils = {}; /** * Reload captcha. * @ngdoc method * @name formUtils#reloadCaptcha * @param {string} url * @return {Object} */ formUtils.reloadCaptcha = function (url) { return $http.get(url); }; return formUtils; }]; } /** * @ngdoc provider * @name httpUtilsProvider * @returns {*} */ function httpUtils() { var extendAttribute = '_extend', defaultMsg = 'lang.failHTTPRequest'; /** * @ngdoc method * @name httpUtilsProvider#extendAttribute * @description * @param {string} attribute */ this.extendAttribute = function (attribute) { extendAttribute = attribute; }; /** * @ngdoc method * @name httpUtilsProvider#defaultMsg * @description * @param {string} msg */ this.defaultMsg = function (msg) { defaultMsg = msg; }; this.$get = ['collectionHelper', 'stringHelper', 'csrfUtils', 'notification', function (collectionHelper, stringHelper, csrfUtils, notification) { var httpUtils = {}; /** * Calculate CSRF-data. 
* @ngdoc method * @name httpUtils#csrf * @param {Object} data * @param {Function=} headers */ httpUtils.csrf = function (data, headers) { if (angular.isObject(data)) { if (data[extendAttribute] && data[extendAttribute].csrf) { csrfUtils.addToken(data[extendAttribute].csrf.token); csrfUtils.addParam(data[extendAttribute].csrf.param); return; } } if (angular.isFunction(headers)) { csrfUtils.addToken(headers('x-csrf-token')); } }; /** * Prepare messages. * @ngdoc method * @name httpUtils#prepareMessages * @param {Array|object} messages * @param {boolean=true} uniq * @param {string=} defaultMessage * @return {Array} */ httpUtils.normalizeAlerts = function (messages, uniq, defaultMessage) { if (!messages) { messages = [defaultMessage || defaultMsg]; } if (uniq === undefined) { uniq = true; } messages = flatten(httpUtils.removeExtend(messages)); if (uniq === true && angular.isArray(messages)) { messages = _.uniq(messages); } return messages; }; /** * Returns extend attribute. * @ngdoc method * @name httpUtils#getExtend * @param {Object} data * @param {string=} attribute * @return {*} */ httpUtils.getExtend = function (data, attribute) { if (!angular.isObject(data) || !data[extendAttribute]) { return null; } if (attribute) { return data[extendAttribute][attribute] || null; } return data[extendAttribute]; }; /** * Removes extend attribute. * @ngdoc method * @name httpUtils#removeExtend * @param {Object} data * @return {*} */ httpUtils.removeExtend = function (data) { delete(data[extendAttribute]); return data; }; /** * @ngdoc method * @name httpUtils#error * @param {*} data * @param {number} status * @param {string=} statusText */ httpUtils.error = function (data, status, statusText) { if (data && data.error && data.error.message) { notification.debug(data.error.message); } switch (status) { case 400: case 422: break; case 403: notification.error('lang.failAccess', {}, prepareMessage(statusText)); break; case 404: notification.error('lang.notPage', {}, prepareMessage(statusText)); break; case 500: notification.error('lang.failServer', {}, prepareMessage(statusText)); break; } }; /** * * @param {Array} value * @return {Array} */ function flatten(value) { return collectionHelper.flatten(value, function (value) { return prepareMessage(value); }); } /** * * @param {string} message * @return {string} */ function prepareMessage(message) { message = stringHelper.upperFirst(message); if (message.slice(-1) !== '.') { return message + '.'; } return message; } return httpUtils; }]; } /** * @ngdoc service * @name csrfUtils */ function csrfUtils() { var csrfUtils = {}, csrf = {token: undefined, param: undefined}; /** * Adds CSRF-token. * @param {string} token */ csrfUtils.addToken = function (token) { if (angular.isString(token)) { csrf.token = token; } }; /** * Adds CSRF-param. * @param {string} param */ csrfUtils.addParam = function (param) { if (param) { csrf.param = param; } }; /** * Returns `<param>:<token>`. * @return {Object|null} */ csrfUtils.get = function () { if (csrf.token && csrf.param) { var result = {}; result[csrf.param] = csrf.token; return result; } return null; }; /** * Return CSRF-token. * @return {string} */ csrfUtils.getToken = function () { return csrf.token; }; /** * Return CSRF-param * @return {string} */ csrfUtils.getParam = function () { return csrf.param; }; /** * Exists CSRF-token. 
* @return {boolean} */ csrfUtils.has = function () { return csrf && csrf.token; }; return csrfUtils; } /** * @ngdoc service * @name modalUtils */ modalUtils.$inject = ['$modal']; function modalUtils($modal) { var modalUtils = {}; modalUtils.show = function ($scope, url, ctrl) { $modal.open({ templateUrl: url, controller: ctrl }); }; return modalUtils; } /** * @ngdoc service * @name htmlUtils */ function htmlUtils() { var tpl = '<iframe width="{{width}}" height="{{height}}" frameborder="0" allowfullscreen="allowfullscreen" src="{{src}}"></iframe>', width = 480, height = 360; /** * * @type {{width: Function(width:number), height: Function(height:number)}} */ this.video = { width: function (_width) { width = _width; }, height: function (_height) { height = _height; } }; this.$get = ['$modal', '$interpolate', function ($modal, $interpolate) { var htmlUtils = {}; /** * * @param {string} src * @param {number} width * @param {number} height * @param {string} title * @param {Event} $event */ htmlUtils.playVideo = function (src, width, height, title, $event) { if (!src) { return; } $event.preventDefault(); angular.element($event.target).replaceWith(interpolate(tpl, src, width, height)) }; htmlUtils.playVideoModal = function (src, width, height, title, $event) { if (!src) { return; } $event.preventDefault(); Controller.$inject = ['$scope', '$modalInstance']; function Controller($scope, $modalInstance) { $scope.cancel = function () { $modalInstance.dismiss('cancel'); }; } if (title) { title = '<div class="modal-header">' + '<button data-ng-click="cancel()" class="close" type="button">×</button>' + '<h4 class="modal-title"><span class="glyphicon glyphicon-star"></span> ' + title + '</h4>' + '</div>'; } $modal.open({ template: title + '<div class="modal-body">' + interpolate(tpl, src, width, height) + '</div>', controller: Controller }); }; function interpolate(tpl, src, width, height) { return $interpolate(tpl)({ width: width, height: height, src: src }); } return htmlUtils; }]; } angular .module('rock.filters', []) .filter('unsafe', unsafe) .filter('byKeys', byKeys); /** * @ngdoc filter * @name unsafe */ unsafe.$inject = ['$sce']; function unsafe($sce) { return function (value) { if (typeof value === 'undefined' || value === null) { return ''; } return $sce.trustAsHtml(value); }; } /** * @ngdoc filter * @name byKeys */ function byKeys() { return function (inputs, attrubutes) { if (inputs && angular.isObject(inputs)) { inputs = _.filter(inputs, function (value, attribute) { return _.contains(attrubutes, attribute); }); if (_.isEmpty(inputs)) { return null; } return inputs; } }; } angular .module('rock.directives', []) .directive('bindCompiledHtml', bindCompiledHtml) .directive('rockMetaCsrf', rockMetaCsrf) .directive('rockModifyLink', rockModifyLink); rockMetaCsrf.$inject = ['csrfUtils']; /** * @ngdoc directive * @name metaCsrf * @restrict A */ function rockMetaCsrf(csrfUtils) { return { restrict: 'A', link: function ($scope, $element) { $scope.$root.$watch(function () { return csrfUtils.getToken(); }, function (value) { if (!value) { return; } $element.attr('content', value); }); } }; } /** * @ngdoc directive * @name bindCompiledHtml * @restrict A */ bindCompiledHtml.$inject = ['$compile']; function bindCompiledHtml($compile) { return { restrict: 'A', scope: { rawHtml: '=bindCompiledHtml' }, link: function ($scope, $element) { $scope.$watch('rawHtml', function (value) { if (!value) return; // we want to use the scope OUTSIDE of this directive // (which itself is an isolate scope). 
var newElem = $compile(value)($scope.$parent); $element.contents().remove(); $element.append(newElem); }); } }; } /** * @ngdoc directive * @name rockModifyLink * @restrict A */ function rockModifyLink() { return { restrict: 'A', scope: { options: '=rockModifyLink' }, link: function ($scope, $elem, $attr) { if (!$scope.options || !angular.isObject($scope.options)) { return; } var attribute = tagName($elem[0]) === 'form' ? 'action' : 'href', options = $scope.options; if (options.attr) { attribute = options.attr; } if (!$attr[attribute]) { return; } var url = URI($attr[attribute]); if (options.self) { if (options.scheme === 'abs') { url.scheme(URI().scheme()); url.host(URI().hostname()); url.port(URI().port()); url.username(URI().username()); url.password(URI().password()); } url.pathname(URI().pathname()); } if (options.modify) { angular.forEach(options.modify, function(value, key){ if (isNumeric(key)) { if (value) { if (value === '!#') { url.hash(''); } else if (value === '!') { url.search(''); } else if (value[0] === '!') { url.removeSearch(value.substr(1,value.length)); } } } else if (key === '#') { url.hash(value); } else { url.addSearch(key, value); } }); } $elem.attr(attribute, url); if (!options.csrf) { return; } $scope.$root.$watch(function (scope) { return scope.rock.csrf.getToken(); }, function (value) { if (!value) { return; } $elem.attr(attribute, url.setSearch($scope.$root.rock.csrf.get())); }); } }; function tagName(elem){ return angular.lowercase(elem.tagName || elem.nodeName); } function isNumeric(n) { return !isNaN(parseFloat(n)) && isFinite(n); } } angular .module('rock.notification', [ 'rock.notification.controllers', 'rock.notification.services' ] ); angular .module('rock.notification.controllers', ['ui.bootstrap']) .controller('NotificationController', NotificationController); NotificationController.$inject = ['$scope', 'notification']; function NotificationController($scope, notification) { $scope.notifications = notification.getAll(); $scope.merge = function (messages) { notification.merge(messages); }; $scope.closeable = true; $scope.closeAlert = function (index) { notification.remove(index); }; } angular .module('rock.notification.services', []) .provider('notification', notification); /** * @ngdoc provider * @name notificationProvider * @returns {*} */ function notification() { var messages = [], debug = true; /** * @ngdoc method * @name notificationProvider#debugEnabled * @description * @param {boolean} debugEnabled enable or disable debug level messages */ this.debugEnabled = function (debugEnabled) { debug = debugEnabled; }; this.$get = ['$translate', function ($translate) { return { /** * @ngdoc method * @name notification#log * * @description * Write a log message * @param {string} msg * @param {Object} placeholders * @param {string} _default */ log: function (msg, placeholders, _default) { translate('log', msg, placeholders, _default); }, /** * @ngdoc method * @name notification#info * * @description * Write an information message * @param {string} msg * @param {Object=} placeholders * @param {string=} _default */ info: function (msg, placeholders, _default) { translate('info', msg, placeholders, _default); }, /** * @ngdoc method * @name notification#success * * @description * Write an information message * @param {string} msg * @param {Object=} placeholders * @param {string=} _default */ success: function (msg, placeholders, _default) { translate('success', msg, placeholders, _default); }, /** * @ngdoc method * @name notification#warn * * @description * Write 
a warning message * @param {string} msg * @param {Object=} placeholders * @param {string=} _default */ warn: function (msg, placeholders, _default) { translate('warn', msg, placeholders, _default); }, /** * @ngdoc method * @name notification#error * * @description * Write an error message * @param {string} msg * @param {Object=} placeholders * @param {string=} _default */ error: function (msg, placeholders, _default) { translate('error', msg, placeholders, _default); }, /** * @ngdoc method * @name notification#debug * * @description * Write a debug message * @param {string} msg */ debug: function (msg) { if (angular.isString(msg)) { msg = new Error(msg); } console.debug(msg); }, /** * @ngdoc method * @name notification#merge * * @description adds list messages * @param {Object[]|string[]} data */ merge: function (data) { if (!data) { return; } if (angular.isString(data[0])) { data = data.map(function (value) { return {msg: value}; }); } angular.extend(messages, data); }, /** * @ngdoc method * @name notification#getAll * * @description returns list messages * @return {Object[]} */ getAll: function () { return messages; }, /** * @ngdoc method * @name notification#exists * * @description exists messages * @return {boolean} */ exists: function () { return !!messages; }, /** * @ngdoc method * @name notification#remove * * @description remove message * @param {number} index */ remove: function (index) { if (!!messages) { messages.splice(index, 1); } }, /** * @ngdoc method * @name notification#removeAll * * @description remove all messages */ removeAll: function () { messages = []; } }; function translate(type, msg, placeholders, _default) { var push = function (msg) { switch (type) { case 'warn': type = 'warning'; break; case 'error': type = 'danger'; break; case 'success': type = 'success'; break; default: type = 'info'; } messages.push({msg: msg, type: type}); }; $translate(msg, placeholders).then(push)['catch'](function (msg) { push(_default || msg); }); } }]; } angular .module('rock.forms', [ 'rock.forms.controllers', 'rock.forms.directives' ] ); angular .module( 'rock.forms.directives', [ 'ui.bootstrap.progressbar', 'template/progressbar/progress.html', 'template/progressbar/progressbar.html' ] ) .directive('rockFormFocus', rockFormFocus) .directive('rockPasswordStrong', rockPasswordStrong) .directive('rockMatch', rockMatch) .directive('rockResetField', rockResetField) .directive('rockResetFieldIcon', rockResetFieldIcon); function rockMatch() { return { require: 'ngModel', restrict: 'A', scope: { match: '=rockMatch' }, link: function ($scope, $element, attrs, ctrl) { $scope.$watch(function () { var modelValue = ctrl.$modelValue || ctrl.$$invalidModelValue; return (ctrl.$pristine && angular.isUndefined(modelValue)) || $scope.match === modelValue; }, function (currentValue) { ctrl.$setValidity('match', currentValue); }); } }; } rockFormFocus.$inject = ['$timeout']; function rockFormFocus($timeout) { var FOCUS_CLASS = "ng-focused"; return { restrict: 'A', require: 'ngModel', link: function (scope, element, attrs, ctrl) { ctrl.$focused = false; element.bind('focus', function () { element.addClass(FOCUS_CLASS); $timeout(function () { ctrl.$focused = false; }, 0); }).bind('blur', function () { element.removeClass(FOCUS_CLASS); $timeout(function () { ctrl.$focused = true; }, 0); }); } }; } rockPasswordStrong.$inject = ['$animate', 'stringHelper', '$templateCache']; function rockPasswordStrong($animate, StringHelper, $templateCache) { if (!$templateCache.get('form/strong-password')) { 
$templateCache.put('form/strong-password', '<progressbar value="value" type="{{class}}">{{value}}%</progressbar>'); } return { templateUrl: 'form/strong-password', restrict: 'A', scope: { pwd: '=r
rdStrong' }, link: function (scope, element) { $animate.enabled(element, false); var mesureStrength = function (p) { var matches = { pos: {}, neg: {} }, counts = { pos: {}, neg: { seqLetter: 0, seqNumber: 0, seqSymbol: 0 } }, tmp, strength = 0, letters = 'abcdefghijklmnopqrstuvwxyz', numbers = '01234567890', symbols = '\\!@#$%&/()=?¿', back, forth, i; if (p) { // Benefits matches.pos.lower = p.match(/[a-z]/g); matches.pos.upper = p.match(/[A-Z]/g); matches.pos.numbers = p.match(/\d/g); matches.pos.symbols = p.match(/[$-/:-?{-~!^_`\[\]]/g); matches.pos.middleNumber = p.slice(1, -1).match(/\d/g); matches.pos.middleSymbol = p.slice(1, -1).match(/[$-/:-?{-~!^_`\[\]]/g); counts.pos.lower = matches.pos.lower ? matches.pos.lower.length : 0; counts.pos.upper = matches.pos.upper ? matches.pos.upper.length : 0; counts.pos.numbers = matches.pos.numbers ? matches.pos.numbers.length : 0; counts.pos.symbols = matches.pos.symbols ? matches.pos.symbols.length : 0; tmp = _.reduce(counts.pos, function (memo, val) { // if has count will add 1 return memo + Math.min(1, val); }, 0); counts.pos.numChars = p.length; tmp += (counts.pos.numChars >= 8) ? 1 : 0; counts.pos.requirements = (tmp >= 3) ? tmp : 0; counts.pos.middleNumber = matches.pos.middleNumber ? matches.pos.middleNumber.length : 0; counts.pos.middleSymbol = matches.pos.middleSymbol ? matches.pos.middleSymbol.length : 0; // Deductions matches.neg.consecLower = p.match(/(?=([a-z]{2}))/g); matches.neg.consecUpper = p.match(/(?=([A-Z]{2}))/g); matches.neg.consecNumbers = p.match(/(?=(\d{2}))/g); matches.neg.onlyNumbers = p.match(/^[0-9]*$/g); matches.neg.onlyLetters = p.match(/^([a-z]|[A-Z])*$/g); counts.neg.consecLower = matches.neg.consecLower ? matches.neg.consecLower.length : 0; counts.neg.consecUpper = matches.neg.consecUpper ? matches.neg.consecUpper.length : 0; counts.neg.consecNumbers = matches.neg.consecNumbers ? matches.neg.consecNumbers.length : 0; // sequential letters (back and forth) for (i = 0; i < letters.length - 2; i++) { var p2 = p.toLowerCase(); forth = letters.substring(i, parseInt(i + 3)); back = StringHelper.reverse(forth); if (p2.indexOf(forth) !== -1 || p2.indexOf(back) !== -1) { counts.neg.seqLetter++; } } // sequential numbers (back and forth) for (i = 0; i < numbers.length - 2; i++) { forth = numbers.substring(i, parseInt(i + 3)); back = StringHelper.reverse(forth); if (p.indexOf(forth) !== -1 || p.toLowerCase().indexOf(back) !== -1) { counts.neg.seqNumber++; } } // sequential symbols (back and forth) for (i = 0; i < symbols.length - 2; i++) { forth = symbols.substring(i, parseInt(i + 3)); back = StringHelper.reverse(forth); if (p.indexOf(forth) !== -1 || p.toLowerCase().indexOf(back) !== -1) { counts.neg.seqSymbol++; } } // repeated chars counts.neg.repeated = _.chain(p.toLowerCase().split('')). 
countBy(function (val) { return val; }) .reject(function (val) { return val === 1; }) .reduce(function (memo, val) { return memo + val; }, 0) .value(); // Calculations strength += counts.pos.numChars * 4; if (counts.pos.upper) { strength += (counts.pos.numChars - counts.pos.upper) * 2; } if (counts.pos.lower) { strength += (counts.pos.numChars - counts.pos.lower) * 2; } if (counts.pos.upper || counts.pos.lower) { strength += counts.pos.numbers * 4; } strength += counts.pos.symbols * 6; strength += (counts.pos.middleSymbol + counts.pos.middleNumber) * 2; strength += counts.pos.requirements * 2; strength -= counts.neg.consecLower * 2; strength -= counts.neg.consecUpper * 2; strength -= counts.neg.consecNumbers * 2; strength -= counts.neg.seqNumber * 3; strength -= counts.neg.seqLetter * 3; strength -= counts.neg.seqSymbol * 3; if (matches.neg.onlyNumbers) { strength -= counts.pos.numChars; } if (matches.neg.onlyLetters) { strength -= counts.pos.numChars; } if (counts.neg.repeated) { strength -= (counts.neg.repeated / counts.pos.numChars) * 10; } } return Math.max(0, Math.min(100, Math.round(strength))); }, getClass = function (s) { switch (Math.round(s / 33)) { case 0: case 1: return 'danger'; case 2: return 'warning'; case 3: return 'success'; } return ''; }; scope.$watch('pwd', function () { scope.value = mesureStrength(scope.pwd); scope.class = getClass(scope.value); }); } }; } function rockResetField() { return { restrict: 'A', require: 'ngModel', link: function ($scope, element, attrs, ctrl) { $scope.$watch('isSend()', function (value) { if (value === true) { ctrl.$setViewValue(undefined); ctrl.$setPristine(true); ctrl.$render(); } }); } }; } rockResetFieldIcon.$inject = ['$compile', '$templateCache', 'notification']; function rockResetFieldIcon($compile, $templateCache, notification) { return { require: 'ngModel', link: function ($scope, $element, $attr, $ngModel) { var template; if (!(template = $templateCache.get('form/reset-field-icon'))) { template = '<i ng-show="enabled" ng-mousedown="resetField()" class="glyphicon glyphicon-remove-circle reset-icon"></i>'; $templateCache.put('form/reset-field-icon', template); } // limit to input element of specific types var inputTypes = /text|search|tel|url|email|password/i; if ($element[0].nodeName !== "INPUT") { notification.debug(new Error("'resetField' is limited to input elements")); return; } if (!inputTypes.test($attr.type)) { notification.debug(new Error("Invalid input type for resetField: " + $attr.type)); return; } $scope = $scope.$new(); // compiled reset icon template template = $compile(template)($scope); $element.after(template); $scope.resetField = function () { $ngModel.$setViewValue(undefined); $ngModel.$setPristine(true); $ngModel.$render(); }; $element.bind('input', function () { $scope.enabled = !$ngModel.$isEmpty($element.val()); }) .bind('focus', function () { $scope.enabled = !$ngModel.$isEmpty($element.val()); }) .bind('blur', function () { $scope.enabled = false; }); } }; } angular .module('rock.forms.controllers', ['pascalprecht.translate']) .controller('RockFormController', RockFormController) .filter('normalizeAlerts', normalizeAlerts); /** * @ngdoc filter * @name normalizeAlerts */ normalizeAlerts.$inject = ['httpUtils']; function normalizeAlerts(httpUtils) { return function (inputs, unique) { if (inputs) { if (unique === undefined) { unique = true; } return httpUtils.normalizeAlerts(inputs, unique); } }; } RockFormController.$inject = ['$scope', '$http', '$translate', 'csrfUtils', 'formUtils', 'userUtils', 
'notification']; /** * * @param $scope * @param $http * @param {$translate} $translate * @param {formUtils} formUtils * @param {userUtils} userUtils * @param {notification} notification * @param {csrfUtils} csrfUtils * @constructor * @ngInject * @export */ function RockFormController($scope, $http, $translate, csrfUtils, formUtils, userUtils, notification) { $scope.response = {}; $scope.sending = false; $scope.class = 'alert-danger'; $scope.formName = null; $scope.validateOnChanged = false; /** * Is send http-request. * @return {boolean} */ $scope.isSend = function () { return $scope.sending; }; /** * Adds alert message by attribute. * @param {string} attributeName * @param {string} msg */ $scope.addAlert = function (attributeName, msg) { if (!$scope.response.messages) { $scope.response.messages = {}; } $scope.response.messages[attributeName] = msg; }; /** * Returns alert by attribute. * @return {string|undefined} */ $scope.getAlert = function (attributeName) { if (!$scope.isAlerts()) { return undefined; } return $scope.response.messages[attributeName]; }; /** * Returns list alerts. * @return {Object} */ $scope.getAlerts = function () { return $scope.response.messages; }; /** * Is alerts. * @return {boolean} */ $scope.isAlerts = function () { return !!$scope.response.messages; }; /** * Exists alert by attribute. * @return {boolean} */ $scope.existsAlert = function (attributeName) { if (!$scope.isAlerts()) { return false; } return !!$scope.response.messages[attributeName]; }; /** * Reset `$scope.response`. */ $scope.clear = function () { $scope.response = {}; }; /** * Pristine value. * @param {string} attributeName * @returns {boolean} */ $scope.pristine = function (attributeName) { var formName = $scope.formName; if (!$scope[formName] || !$scope[formName][attributeName]) { return false; } return $scope[formName][attributeName].$pristine; }; /** * Invalid value. * @param {string} attributeName * @returns {boolean} */ $scope.invalid = function (attributeName) { var formName = $scope.formName; if (!$scope[formName] || !$scope[formName][attributeName]) { return true; } return $scope[formName][attributeName].$invalid; }; /** * Bind error. * @param {string} attributeName * @return {string|undefined} */ $scope.bindError = function (attributeName) { return $scope.getAlert(attributeName); }; /** * Show error. * @param {string} attributeName * @param {string} ruleName * @returns {boolean} */ $scope.showError = function (attributeName, ruleName) { var formName = $scope.formName; if (!$scope[formName] || !$scope[formName][attributeName]) { return false; } if (!!$scope.validateOnChanged) { return ($scope[formName][attributeName].$dirty || $scope[formName].$submitted) && ($scope[formName][attributeName].$focused || $scope[formName].$submitted) && $scope[formName][attributeName].$error[ruleName]; } return ($scope[formName][attributeName].$dirty || $scope[formName].$submitted) && $scope[formName][attributeName].$error[ruleName]; }; /** * Hide error. * @param {string} attributeName * @returns {boolean} */ $scope.hideError = function (attributeName) { var formName = $scope.formName; if (!$scope[formName] || !$scope[formName][attributeName]) { return false; } return $scope[formName][attributeName].$valid; }; /** * Highlighting input. 
* @param {string} attributeName * @return {string} */ $scope.showHighlightError = function (attributeName) { var formName = $scope.formName; if (!$scope[formName] || !$scope[formName][attributeName]) { return ''; } if (!!$scope.validateOnChanged) { return $scope[formName][attributeName].$invalid && ($scope[formName][attributeName].$focused || $scope[formName].$submitted) && (!$scope[formName][attributeName].$pristine || $scope[formName].$submitted) ? 'has-error' : ''; } return $scope[formName][attributeName].$invalid && (!$scope[formName][attributeName].$pristine || $scope[formName].$submitted) ? 'has-error' : ''; }; /** * Returns `src` of captcha. * @return {string} */ $scope.getCaptcha = function () { return $scope.response.captcha; }; /** * Reload captcha. * @param {string} url * @param {Event} $event */ $scope.reloadCaptcha = function (url, $event) { if (!url) { return; } $event.preventDefault(); formUtils.reloadCaptcha(url).success(function (data) { if (data) { // changed src $event.target.src = data; return; } notification.debug('Request data "captcha" is empty.'); }); }; /** * Submit form * @param {string} url * @param {Event} $event */ $scope.submit = function (url, $event) { var formName, data = {}; if (!$scope.formName) { notification.debug('Name of form is empty'); $event.preventDefault(); return; } formName = $scope.formName; $scope[formName].$setSubmitted(); if ($scope[formName].$invalid) { $event.preventDefault(); return; } if (!url) { return; } $event.preventDefault(); $scope[formName].$submitted = false; if (!$scope[formName].values) { notification.debug('Values of form is empty'); return; } $scope.clear(); $scope.sending = true; data[formName] = $scope[formName].values; // add CSRF-token data[formName][csrfUtils.getParam()] = csrfUtils.getToken(); $http.post(url, data).success(httpSuccess).error(httpFail); }; /** * @param {string} url * @param {Event} $e */ $scope.logout = function (url, $e) { if (!url) { return; } $e.preventDefault(); userUtils.logout(url); }; function httpSuccess(data) { $scope.sending = false; //$scope.$root.$broadcast('onHttpFormSuccess'); if (!data) { return; } $scope.class = 'alert-success'; $translate('lang.success') .then(function (msg) { if (!$scope.response.messages) { $scope.response.messages = []; } $scope.response.messages.push(msg); }); } function httpFail(data, status) { $scope.sending = false; $scope.class = 'alert-danger'; //$scope.$root.$broadcast('onHttpFormFail'); if (status === 422) { $scope.response.messages = data; } } }})();
ockPasswo
lib.rs
#![deny(warnings)] // Only include any of this if stap (SystemTap) is enabled for this build
pub mod probe; #[cfg(enabled)] pub mod provider; #[cfg(enabled)] pub mod tracer; #[cfg(enabled)] pub use probe::*; #[cfg(enabled)] pub use provider::*; #[cfg(enabled)] pub use tracer::*;
#[cfg(enabled)]
jwts_test.go
package jwts import ( "fmt" "math/rand" "testing" ) const ( lazyFox = "i can be such a lazy summer fox sometimes" lazyFox64 = "ImkgY2FuIGJlIHN1Y2ggYSBsYXp5IHN1bW1lciBmb3ggc29tZXRpbWVzIg" lazyFoxJSON = `"i can be such a lazy summer fox sometimes"` increment = "INCR" testJSONIncrement = "test_json_increment" testPerson = "test_person" tmk3 = "tmk3" ) var ( testLocalSessions = "local_sessions_test" testLocalSessionsBadAudChunk = "local_sessions_test_invalid_chunk" headerTest64 = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9" jwtxParamsTest = CreateTokenParams{ Aud: []string{testLocalSessions}, Iss: tmk3, Sub: testPerson, Lifetime: 3600, } tokenSecretTest, errTokenSecret = generateRandomByteArray(128, nil) tokenTest, errTokenTest = CreateToken(&jwtxParamsTest, tokenSecretTest, nil) lateDelay = int64(60) latePayloadTest = CreateTokenParams{ Aud: []string{testLocalSessions}, Delay: &lateDelay, Iss: tmk3, Sub: testPerson, Lifetime: 3600, } lateTokenSecret, errLateTokenSecret = generateRandomByteArray(128, nil) lateTokenTest, errLateTokenTest = CreateToken(&latePayloadTest, lateTokenSecret, nil) expiredTokenTest = CreateTokenParams{ Aud: []string{testLocalSessions}, Iss: tmk3, Sub: testPerson, Lifetime: 0, } expiredTokenSecret, errExpiredTokenPayloadSecret = generateRandomByteArray(128, nil) expiredToken, errExpiredTokenPayload = CreateToken(&expiredTokenTest, expiredTokenSecret, nil) ) var ( testClaims = CreateTokenParams{ Aud: []string{"hello", "world"}, Iss: "tmk3.com", Sub: "test_jwt", Lifetime: 1000000, } ) func generateRandomByteArray(n int, err error) (*[]byte, error) { if err != nil { return nil, err } token := make([]byte, n) length, errRandom := rand.Read(token) if errRandom != nil || length != n { return nil, errRandom } return &token, nil } func TestEncodeJSONToBase64(t *testing.T) { encoded, errEncode := encodeJSONToBase64(lazyFox) if errEncode != nil { t.Fail() t.Logf(errEncode.Error()) } if *encoded != lazyFox64 { t.Fail() t.Logf( fmt.Sprint( "expected: ", lazyFox64, ", instead found: ", *encoded, ), ) } } func
(t *testing.T) { encoded, errEncode := encodeJSONToBase64(nil) if errEncode == nil { t.Fail() t.Logf( fmt.Sprint( "encodeJSONToBase64 error should not be nil", ), ) } if encoded != nil { t.Fail() t.Logf( fmt.Sprint( "expected: ", nil, ", instead found: ", *encoded, ), ) } } func TestDecodeFromBase64(t *testing.T) { encoded, errEncode := encodeJSONToBase64(lazyFox) if errEncode != nil { t.Fail() t.Logf(errEncode.Error()) } decoded, errDecode := decodeFromBase64(encoded, nil) if errDecode != nil { t.Fail() t.Logf(errDecode.Error()) } if *decoded != lazyFoxJSON { t.Fail() t.Logf( fmt.Sprint( "expected: ", lazyFoxJSON, ", instead found: ", *decoded, ), ) } } func TestDecodeFromBase64FromNil(t *testing.T) { decoded, errDecode := decodeFromBase64(nil, nil) if errDecode == nil { t.Fail() t.Logf("decodeFromBase64 error should not be nil") } if decoded != nil { t.Fail() t.Logf(fmt.Sprint("expected: ", nil, ", instead found: ", *decoded)) } } func TestCreateSignature(t *testing.T) { payload := "Hello World, this is not a valid JWT!" secret, errSecret := generateRandomByteArray(256, nil) if errSecret != nil { t.Fail() t.Logf(errSecret.Error()) } signature, errSignature := createSignature( DefaultHeaderBase64, &payload, secret, errSecret, ) if errSignature != nil { t.Fail() t.Logf(errSignature.Error()) } if signature == nil { t.Fail() t.Logf("signature is nil") } } func TestCreateClaims(t *testing.T) { claims, errClaims := createClaims(&testClaims, nil) if claims == nil { t.Fail() t.Logf("claims should not be nil") } if errClaims != nil { t.Fail() t.Logf(errClaims.Error()) } } func TestRetrieveTokenChunks(t *testing.T) { tokenSecret, errTokenSecret := generateRandomByteArray(128, nil) token, errTokenPayload := CreateToken(&testClaims, tokenSecret, errTokenSecret) if token == nil { t.Fail() t.Logf("token should not be nil") } if errTokenPayload != nil { t.Fail() t.Logf(errTokenPayload.Error()) } tokenChunks, errTokenChunks := parseTokenChunks(token, nil) if tokenChunks == nil { t.Fail() t.Logf("token chunks should not be nil") } if errTokenChunks != nil { t.Fail() t.Logf(errTokenChunks.Error()) } } func TestUnmarshalHeader(t *testing.T) { decoded, errDecode := decodeFromBase64(&headerTest64, nil) if errDecode != nil { t.Fail() t.Logf(errDecode.Error()) } headerTest, errHeaderTest := unmarshalHeader(decoded, nil) if headerTest == nil { t.Fail() t.Logf("headerTest should not be nil") } if errHeaderTest != nil { t.Fail() t.Logf(errHeaderTest.Error()) } } func TestUnmarshalClaims(t *testing.T) { encoded, errEncode := encodeJSONToBase64(testClaims) if errEncode != nil { t.Fail() t.Logf(errEncode.Error()) } decoded, errDecode := decodeFromBase64(encoded, nil) if errDecode != nil { t.Fail() t.Logf(errDecode.Error()) } testClaims, errClaimsTest := unmarshalClaims(decoded, nil) if testClaims == nil { t.Fail() t.Logf("testClaims should not be nil") } if errClaimsTest != nil { t.Fail() t.Logf(errClaimsTest.Error()) } } func TestCreateToken(t *testing.T) { secret, errTokenSecret := generateRandomByteArray(128, nil) token, errTokenPayload := CreateToken(&testClaims, secret, errTokenSecret) if token == nil { t.Fail() t.Logf("token should not be nil") } if errTokenPayload != nil { t.Fail() t.Logf(errTokenPayload.Error()) } } func TestValidateSignature(t *testing.T) { tokenSecret, errTokenSecret := generateRandomByteArray(128, nil) token, errTokenPayload := CreateToken(&testClaims, tokenSecret, errTokenSecret) if token == nil { t.Fail() t.Logf("token should not be nil") } if errTokenPayload != nil { t.Fail() t.Logf(errTokenPayload.Error()) }
chunks, errChunks := parseTokenChunks(token, errTokenPayload) signatureIsValid, errSignatureIsValid := validateSignature( chunks, tokenSecret, errChunks, ) if !signatureIsValid { t.Fail() t.Logf("token is not valid") } if errSignatureIsValid != nil { t.Fail() t.Logf(errSignatureIsValid.Error()) } } func TestParseTokenDetails(t *testing.T) { tokenSecret, errTokenSecret := generateRandomByteArray(128, nil) token, errTokenPayload := CreateToken(&testClaims, tokenSecret, errTokenSecret) if token == nil { t.Fail() t.Logf("token should not be nil") } if errTokenPayload != nil { t.Fail() t.Logf(errTokenPayload.Error()) } chunks, errChunks := parseTokenChunks(token, errTokenPayload) tokenDetails, errTokenDetails := parseTokenDetails( chunks, errChunks, ) if tokenDetails == nil { t.Fail() t.Logf("token should not be nil") } if errTokenDetails != nil { t.Fail() t.Logf(errTokenDetails.Error()) } if tokenDetails.Claims.Iss != testClaims.Iss { t.Fail() t.Logf( fmt.Sprint( "expected: ", testClaims.Iss, ", but found: ", tokenDetails.Claims.Iss, ), ) } if tokenDetails.Claims.Sub != testClaims.Sub { t.Fail() t.Logf( fmt.Sprint( "expected: ", testClaims.Sub, ", but found: ", tokenDetails.Claims.Sub, ), ) } } func TestVerifyToken(t *testing.T) { tokenIsValidWindow, errTokenPayload := VerifyToken(tokenTest, &testLocalSessions, nil) if !tokenIsValidWindow { t.Fail() t.Logf("token window is not valid") } if errTokenPayload != nil { t.Fail() t.Logf(errTokenPayload.Error()) } } func TestVerifyInvalidTokenWindowAndAud(t *testing.T) { tokenIsValidWindow, errTokenPayload := VerifyToken(lateTokenTest, &testLocalSessions, nil) if tokenIsValidWindow { t.Fail() t.Logf("token window should not be valid") } if errTokenPayload != nil { t.Fail() t.Logf(errTokenPayload.Error()) } } func TestVerifyExpiredTokenWindowAndAud(t *testing.T) { tokenIsValidWindow, errTokenPayload := VerifyToken(expiredToken, &testLocalSessions, nil) if tokenIsValidWindow { t.Fail() t.Logf("token window should be expired") } if errTokenPayload != nil { t.Fail() t.Logf(errTokenPayload.Error()) } } func TestVerifyInvalidTokenWindowAndInvalidAud(t *testing.T) { tokenIsValidWindow, errTokenPayload := VerifyToken(tokenTest, &testLocalSessionsBadAudChunk, nil) if tokenIsValidWindow { t.Fail() t.Logf("token aud chunk is not valid but still passed") } if errTokenPayload != nil { t.Fail() t.Logf(errTokenPayload.Error()) } } func TestValidateToken(t *testing.T) { tokenIsValid, errTokenValid := ValidateToken(tokenTest, tokenSecretTest, nil) if !tokenIsValid { t.Fail() t.Logf("token should be valid") } if errTokenValid != nil { t.Fail() t.Logf(errTokenValid.Error()) } }
TestEncodeToBase64WithNil
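// Editor's sketch (assumption-labeled): the suite above reads like an
// HMAC-signed JWT implementation. A minimal, self-contained illustration of
// the signing step that createSignature presumably performs for HS256 —
// the names below (jwtsketch, signHS256) are illustrative, not the package API.
package jwtsketch

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
)

// signHS256 signs "header.payload" with HMAC-SHA256 and returns the
// base64url-encoded signature chunk of a standard three-chunk token.
func signHS256(headerB64, payloadB64 string, secret []byte) string {
	mac := hmac.New(sha256.New, secret)
	mac.Write([]byte(headerB64 + "." + payloadB64))
	return base64.RawURLEncoding.EncodeToString(mac.Sum(nil))
}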
main.rs
/* fn largest<T: PartialOrd + Copy>(list: &[T]) -> T { let mut largest = list[0]; for &item in list { if item > largest { largest = item; } } largest } */ fn
<T: PartialOrd>(list: &[T]) -> &T { let mut largest = &list[0]; for item in list { if item > largest { largest = item; } } largest } fn main() { let number_list = vec![34, 50, 25, 100, 65]; let result = largest(&number_list); println!("The largest number is {}", result); let char_list = vec!['y', 'm', 'a', 'q']; let result = largest(&char_list); println!("The largest char is {}", result); }
largest
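// Editor's note: a short usage sketch. Because `largest` only requires
// `PartialOrd` (no `Copy`, unlike the commented-out version above), it also
// works over non-copyable element types. `Score` is illustrative, not part
// of the original program.
#[derive(PartialEq, PartialOrd)]
struct Score(f64);

fn demo_scores() {
    let scores = vec![Score(1.5), Score(9.0), Score(4.2)];
    let best = largest(&scores);
    println!("The best score is {}", best.0);
}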
test-comp3.component.ts
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~ Copyright 2020 Adobe Systems Incorporated ~ ~ Licensed under the Apache License, Version 2.0 (the "License"); ~ you may not use this file except in compliance with the License. ~ You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software
~ See the License for the specific language governing permissions and ~ limitations under the License. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/ import { Component, Input } from '@angular/core'; @Component({ selector: 'test-comp3', host: { '[attr.data-title]': 'title' }, template: `<div>{{ title }}</div>` }) export class Component3 { @Input() title:any; constructor() {} }
~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
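// Editor's sketch: a hypothetical host component showing how Component3's
// `title` input feeds both the template interpolation and the
// `[attr.data-title]` host binding. `HostDemoComponent` is illustrative and
// not part of the original test fixtures.
import { Component } from '@angular/core';

@Component({
  selector: 'host-demo',
  // renders <test-comp3 data-title="hello"><div>hello</div></test-comp3>
  template: `<test-comp3 [title]="'hello'"></test-comp3>`
})
export class HostDemoComponent {}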
onlyCountriesEurope.js
var input = document.querySelector("#phone"); window.intlTelInput(input, { onlyCountries: ["al", "ad", "at", "by", "be", "ba", "bg", "hr", "cz", "dk", "ee", "fo", "fi", "fr", "de", "gi", "gr", "va", "hu", "is", "ie", "it", "lv", "li", "lt", "lu", "mk", "mt", "md", "mc", "me", "nl", "no", "pl", "pt", "ro",
"ru", "sm", "rs", "sk", "si", "es", "se", "ch", "ua", "gb"], utilsScript: "../../build/js/utils.js?1551697588835" // just for formatting/placeholders etc });
lr.py
import torch import torch.nn as nn import torch.nn.functional as F from tqdm import tqdm, tqdm_notebook from ml_metrics import auc from sklearn.datasets import make_classification class LogisticRegression(nn.Module):
epochs = 5 batch_size = 128 X, y = make_classification(1000000) t_X, t_y = map(torch.FloatTensor, (X, y)) net = LogisticRegression(20, 2) loss_func = torch.nn.modules.loss.CrossEntropyLoss() optimizer = torch.optim.Adam(net.parameters()) bar_epochs = tqdm_notebook(range(epochs)) for e in bar_epochs: bar_epochs.set_description(f"Epoch {e}:") t = tqdm_notebook(range(0, t_X.size(0), batch_size)) for b in t: # for each training step # train your data... b_X = t_X[b:b + batch_size] b_y = t_y[b:b + batch_size] output = net(b_X) # model output loss = loss_func( output, b_y.long().view(-1)) # cross entropy loss and y is not one-hotted optimizer.zero_grad() # clear gradients for this training step loss.backward() # backpropagation, compute gradients optimizer.step() if b % 10000 == 0: t.set_description( f"Epoch {e}: " f"Loss: {loss.data.numpy():.5f} | " f"Auc: {auc(b_y.numpy(), output.data.numpy()[:, 1]):.5}") _net = net.eval() auc(y, _net(t_X).data.numpy()[:, -1])
def __init__(self, in_dim, n_class): super().__init__() self.fc1 = nn.Linear(in_dim, in_dim // 2) self.fc2 = nn.Linear(in_dim // 2, n_class) def forward(self, x): x = F.relu(self.fc1(x)) x = self.fc2(x) # return raw logits: nn.CrossEntropyLoss applies log_softmax internally, # so returning F.softmax(x, 1) here would effectively softmax twice return x
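# Editor's sketch, separate from the script above: why forward() returns raw
# logits. nn.CrossEntropyLoss is log_softmax + NLL by definition, so it must
# be fed unnormalized scores; all tensors below are synthetic.
import torch
import torch.nn.functional as F

x = torch.randn(4, 2)           # fake logits: 4 samples, 2 classes
y = torch.tensor([0, 1, 1, 0])  # fake integer labels (not one-hot)
ce = F.cross_entropy(x, y)
nll = F.nll_loss(F.log_softmax(x, dim=1), y)
assert torch.allclose(ce, nll)  # identical by construction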
servers.go
package sql // Copyright (c) Microsoft and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // limitations under the License. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/validation" "github.com/Azure/go-autorest/tracing" "net/http" ) // ServersClient is the client for the Azure SQL Database management API, which provides a RESTful set of web services // that interact with Azure SQL Database services to manage your databases. The API enables you to create, retrieve, // update, and delete databases. type ServersClient struct { BaseClient } // NewServersClient creates an instance of the ServersClient client. func NewServersClient(subscriptionID string) ServersClient
// NewServersClientWithBaseURI creates an instance of the ServersClient client using a custom endpoint. Use this when // interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack). func NewServersClientWithBaseURI(baseURI string, subscriptionID string) ServersClient { return ServersClient{NewWithBaseURI(baseURI, subscriptionID)} } // CheckNameAvailability determines whether a resource can be created with the specified name. // Parameters: // parameters - the parameters to request for name availability. func (client ServersClient) CheckNameAvailability(ctx context.Context, parameters CheckNameAvailabilityRequest) (result CheckNameAvailabilityResponse, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ServersClient.CheckNameAvailability") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters.Name", Name: validation.Null, Rule: true, Chain: nil}, {Target: "parameters.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { return result, validation.NewError("sql.ServersClient", "CheckNameAvailability", err.Error()) } req, err := client.CheckNameAvailabilityPreparer(ctx, parameters) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "CheckNameAvailability", nil, "Failure preparing request") return } resp, err := client.CheckNameAvailabilitySender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "sql.ServersClient", "CheckNameAvailability", resp, "Failure sending request") return } result, err = client.CheckNameAvailabilityResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "CheckNameAvailability", resp, "Failure responding to request") } return } // CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. func (client ServersClient) CheckNameAvailabilityPreparer(ctx context.Context, parameters CheckNameAvailabilityRequest) (*http.Request, error) { pathParameters := map[string]interface{}{ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2014-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Sql/checkNameAvailability", pathParameters), autorest.WithJSON(parameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the // http.Response Body if it receives an error. func (client ServersClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always // closes the http.Response Body. 
func (client ServersClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResponse, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // CreateOrUpdate creates or updates a new server. // Parameters: // resourceGroupName - the name of the resource group that contains the resource. You can obtain this value // from the Azure Resource Manager API or the portal. // serverName - the name of the server. // parameters - the required parameters for creating or updating a server. func (client ServersClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, serverName string, parameters Server) (result Server, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ServersClient.CreateOrUpdate") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, serverName, parameters) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "CreateOrUpdate", nil, "Failure preparing request") return } resp, err := client.CreateOrUpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "sql.ServersClient", "CreateOrUpdate", resp, "Failure sending request") return } result, err = client.CreateOrUpdateResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "CreateOrUpdate", resp, "Failure responding to request") } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. func (client ServersClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, serverName string, parameters Server) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "serverName": autorest.Encode("path", serverName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2014-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } parameters.Kind = nil preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}", pathParameters), autorest.WithJSON(parameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client ServersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. func (client ServersClient) CreateOrUpdateResponder(resp *http.Response) (result Server, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Delete deletes a SQL server. 
// Parameters: // resourceGroupName - the name of the resource group that contains the resource. You can obtain this value // from the Azure Resource Manager API or the portal. // serverName - the name of the server. func (client ServersClient) Delete(ctx context.Context, resourceGroupName string, serverName string) (result autorest.Response, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ServersClient.Delete") defer func() { sc := -1 if result.Response != nil { sc = result.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.DeletePreparer(ctx, resourceGroupName, serverName) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "Delete", nil, "Failure preparing request") return } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp err = autorest.NewErrorWithError(err, "sql.ServersClient", "Delete", resp, "Failure sending request") return } result, err = client.DeleteResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. func (client ServersClient) DeletePreparer(ctx context.Context, resourceGroupName string, serverName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "serverName": autorest.Encode("path", serverName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2014-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ServersClient) DeleteSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // DeleteResponder handles the response to the Delete request. The method always // closes the http.Response Body. func (client ServersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return } // Get gets a server. // Parameters: // resourceGroupName - the name of the resource group that contains the resource. You can obtain this value // from the Azure Resource Manager API or the portal. // serverName - the name of the server. 
func (client ServersClient) Get(ctx context.Context, resourceGroupName string, serverName string) (result Server, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ServersClient.Get") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.GetPreparer(ctx, resourceGroupName, serverName) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "Get", nil, "Failure preparing request") return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "sql.ServersClient", "Get", resp, "Failure sending request") return } result, err = client.GetResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "Get", resp, "Failure responding to request") } return } // GetPreparer prepares the Get request. func (client ServersClient) GetPreparer(ctx context.Context, resourceGroupName string, serverName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "serverName": autorest.Encode("path", serverName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2014-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ServersClient) GetSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always // closes the http.Response Body. func (client ServersClient) GetResponder(resp *http.Response) (result Server, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // List returns a list of servers. func (client ServersClient) List(ctx context.Context) (result ServerListResult, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ServersClient.List") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.ListPreparer(ctx) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "List", nil, "Failure preparing request") return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "sql.ServersClient", "List", resp, "Failure sending request") return } result, err = client.ListResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "List", resp, "Failure responding to request") } return } // ListPreparer prepares the List request. 
func (client ServersClient) ListPreparer(ctx context.Context) (*http.Request, error) { pathParameters := map[string]interface{}{ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2014-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Sql/servers", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ServersClient) ListSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always // closes the http.Response Body. func (client ServersClient) ListResponder(resp *http.Response) (result ServerListResult, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // ListByResourceGroup returns a list of servers in a resource group. // Parameters: // resourceGroupName - the name of the resource group that contains the resource. You can obtain this value // from the Azure Resource Manager API or the portal. func (client ServersClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ServerListResult, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ServersClient.ListByResourceGroup") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "ListByResourceGroup", nil, "Failure preparing request") return } resp, err := client.ListByResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "sql.ServersClient", "ListByResourceGroup", resp, "Failure sending request") return } result, err = client.ListByResourceGroupResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "ListByResourceGroup", resp, "Failure responding to request") } return } // ListByResourceGroupPreparer prepares the ListByResourceGroup request. func (client ServersClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2014-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. 
func (client ServersClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always // closes the http.Response Body. func (client ServersClient) ListByResourceGroupResponder(resp *http.Response) (result ServerListResult, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Update updates an existing server. // Parameters: // resourceGroupName - the name of the resource group that contains the resource. You can obtain this value // from the Azure Resource Manager API or the portal. // serverName - the name of the server. // parameters - the required parameters for updating a server. func (client ServersClient) Update(ctx context.Context, resourceGroupName string, serverName string, parameters ServerUpdate) (result Server, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ServersClient.Update") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.UpdatePreparer(ctx, resourceGroupName, serverName, parameters) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "Update", nil, "Failure preparing request") return } resp, err := client.UpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "sql.ServersClient", "Update", resp, "Failure sending request") return } result, err = client.UpdateResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "sql.ServersClient", "Update", resp, "Failure responding to request") } return } // UpdatePreparer prepares the Update request. func (client ServersClient) UpdatePreparer(ctx context.Context, resourceGroupName string, serverName string, parameters ServerUpdate) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "serverName": autorest.Encode("path", serverName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2014-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}", pathParameters), autorest.WithJSON(parameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client ServersClient) UpdateSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // UpdateResponder handles the response to the Update request. The method always // closes the http.Response Body. 
func (client ServersClient) UpdateResponder(resp *http.Response) (result Server, err error) { err = autorest.Respond( resp, azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return }
{ return NewServersClientWithBaseURI(DefaultBaseURI, subscriptionID) }
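// Editor's sketch (not generated code): minimal wiring to list servers with
// this client. It assumes the go-autorest auth helper package
// (github.com/Azure/go-autorest/autorest/azure/auth) is imported as `auth`;
// NewAuthorizerFromEnvironment reads AZURE_* credentials from the environment.
func listServersExample(ctx context.Context, subscriptionID string) error {
	client := NewServersClient(subscriptionID)
	authorizer, err := auth.NewAuthorizerFromEnvironment()
	if err != nil {
		return err
	}
	client.Authorizer = authorizer
	result, err := client.List(ctx)
	if err != nil {
		return err
	}
	for _, server := range *result.Value {
		_ = server // inspect *server.Name, server.Location, etc.
	}
	return nil
}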
utils_nvidia.py
import argparse import os import shutil import time import math import logging import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim import torch.utils.data import torch.utils.data.distributed import torchvision.transforms as transforms import torchvision.datasets as datasets import torchvision.models as models import numpy as np try: from apex.parallel import DistributedDataParallel as DDP from apex.fp16_utils import * from apex import amp, optimizers from apex.multi_tensor_apply import multi_tensor_applier except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.") try: from nvidia.dali.plugin.pytorch import DALIClassificationIterator from nvidia.dali.pipeline import Pipeline import nvidia.dali.ops as ops import nvidia.dali.types as types except ImportError: raise ImportError("Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.") from nasws.cnn.utils import AverageMeter from utils import accuracy # item() is a recent addition, so this helps with backward compatibility. def to_python_float(t): if hasattr(t, 'item'): return t.item() else: return t[0] class HybridTrainPipe(Pipeline): def __init__(self, batch_size, num_threads, device_id, data_dir, crop, shard_id, num_shards, dali_cpu=False, args=None, file_list=None ): super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id) self.input = ops.FileReader(file_root=data_dir, shard_id=args.apex_local_rank, num_shards=args.world_size, random_shuffle=True, pad_last_batch=True, file_list=file_list) # let the user decide which pipeline works best for the RN version he runs dali_device = 'cpu' if dali_cpu else 'gpu' decoder_device = 'cpu' if dali_cpu else 'mixed' # This padding sets the size of the internal nvJPEG buffers to be able to handle all images from full-sized ImageNet # without additional reallocations device_memory_padding = 211025920 if decoder_device == 'mixed' else 0 host_memory_padding = 140544512 if decoder_device == 'mixed' else 0 self.decode = ops.ImageDecoderRandomCrop(device=decoder_device, output_type=types.RGB, device_memory_padding=device_memory_padding, host_memory_padding=host_memory_padding, random_aspect_ratio=[0.8, 1.25], random_area=[0.1, 1.0], num_attempts=100) self.res = ops.Resize(device=dali_device, resize_x=crop, resize_y=crop, interp_type=types.INTERP_TRIANGULAR) self.cmnp = ops.CropMirrorNormalize(device="gpu", output_dtype=types.FLOAT, output_layout=types.NCHW, crop=(crop, crop), image_type=types.RGB, mean=[0.485 * 255,0.456 * 255,0.406 * 255], std=[0.229 * 255,0.224 * 255,0.225 * 255]) self.coin = ops.CoinFlip(probability=0.5) logging.info('DALI "{0}" variant'.format(dali_device)) def define_graph(self): rng = self.coin() self.jpegs, self.labels = self.input(name="Reader") images = self.decode(self.jpegs) images = self.res(images) output = self.cmnp(images.gpu(), mirror=rng) return [output, self.labels] class HybridValPipe(Pipeline): def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size, shard_id, num_shards, args=None): super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id) self.input = ops.FileReader(file_root=data_dir, shard_id=args.apex_local_rank, num_shards=args.world_size, random_shuffle=False, pad_last_batch=True) self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB) self.res = ops.Resize(device="gpu",
resize_shorter=size, interp_type=types.INTERP_TRIANGULAR) self.cmnp = ops.CropMirrorNormalize(device="gpu", output_dtype=types.FLOAT, output_layout=types.NCHW, crop=(crop, crop), image_type=types.RGB, mean=[0.485 * 255,0.456 * 255,0.406 * 255], std=[0.229 * 255,0.224 * 255,0.225 * 255]) def define_graph(self): self.jpegs, self.labels = self.input(name="Reader") images = self.decode(self.jpegs) images = self.res(images) output = self.cmnp(images) return [output, self.labels] def fast_collate(batch, memory_format): imgs = [img[0] for img in batch] targets = torch.tensor([target[1] for target in batch], dtype=torch.int64) w = imgs[0].size()[1] h = imgs[0].size()[2] # print(imgs[0].size()) tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8).contiguous(memory_format=memory_format) for i, img in enumerate(imgs): nump_array = np.asarray(img, dtype=np.uint8) if(nump_array.ndim < 3): nump_array = np.expand_dims(nump_array, axis=-1) # nump_array = np.rollaxis(nump_array, 2) # print(nump_array.shape) tensor[i] += torch.from_numpy(nump_array) return tensor, targets class data_prefetcher(): def __init__(self, loader): self.loader = iter(loader) self.stream = torch.cuda.Stream() self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1) self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1) # With Amp, it isn't necessary to manually convert data to half. # if args.fp16: # self.mean = self.mean.half() # self.std = self.std.half() self.preload() def preload(self): try: self.next_input, self.next_target = next(self.loader) except StopIteration: self.next_input = None self.next_target = None return # if record_stream() doesn't work, another option is to make sure device inputs are created # on the main stream. # self.next_input_gpu = torch.empty_like(self.next_input, device='cuda') # self.next_target_gpu = torch.empty_like(self.next_target, device='cuda') # Need to make sure the memory allocated for next_* is not still in use by the main stream # at the time we start copying to next_*: # self.stream.wait_stream(torch.cuda.current_stream()) with torch.cuda.stream(self.stream): self.next_input = self.next_input.cuda(non_blocking=True) self.next_target = self.next_target.cuda(non_blocking=True) # more code for the alternative if record_stream() doesn't work: # copy_ will record the use of the pinned source tensor in this side stream. # self.next_input_gpu.copy_(self.next_input, non_blocking=True) # self.next_target_gpu.copy_(self.next_target, non_blocking=True) # self.next_input = self.next_input_gpu # self.next_target = self.next_target_gpu # With Amp, it isn't necessary to manually convert data to half. 
# if args.fp16: # self.next_input = self.next_input.half() # else: self.next_input = self.next_input.float() self.next_input = self.next_input.sub_(self.mean).div_(self.std) def next(self): torch.cuda.current_stream().wait_stream(self.stream) input = self.next_input target = self.next_target if input is not None: input.record_stream(torch.cuda.current_stream()) if target is not None: target.record_stream(torch.cuda.current_stream()) self.preload() return input, target def reduce_tensor(tensor, world_size): rt = tensor.clone() dist.all_reduce(rt, op=dist.reduce_op.SUM) rt /= world_size return rt def adjust_learning_rate(optimizer, epoch, step, len_epoch, args): """LR schedule that should yield 76% converged accuracy with batch size 256""" factor = epoch // 30 if epoch >= 80: factor = factor + 1 lr = args.learning_rate*(0.1**factor) """Warmup""" if epoch < 5: lr = lr*float(1 + step + epoch*len_epoch)/(5.*len_epoch) # if(args.apex_local_rank == 0): # print("epoch = {}, step = {}, lr = {}".format(epoch, step, lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr # def adjust_learning_rate(optimizer, epoch, args): # # Smaller slope for the last 5 epochs because lr * 1/250 is relatively large # if args.epochs - epoch > 5: # lr = args.learning_rate * (args.epochs - 5 - epoch) / (args.epochs - 5) # else: # lr = args.learning_rate * (args.epochs - epoch) / ((args.epochs - 5) * 5) # for param_group in optimizer.param_groups: # param_group['lr'] = lr # return lr def train(train_loader, model, criterion, optimizer, epoch, args): batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() # switch to train mode model.train() end = time.time() prefetcher = data_prefetcher(train_loader) input, target = prefetcher.next() i = 0 while input is not None: i += 1 if args.apex_profiling >= 0 and i == args.apex_profiling: print("Profiling begun at iteration {}".format(i)) torch.cuda.cudart().cudaProfilerStart() if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("Body of iteration {}".format(i)) adjust_learning_rate(optimizer, epoch, i, len(train_loader), args) # compute output if args.apex_profiling >= 0:
logits, logits_aux = model(input) if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop() loss = criterion(logits, target) if args.auxiliary: loss_aux = criterion(logits_aux, target) loss += args.auxiliary_weight * loss_aux # compute gradient and do SGD step optimizer.zero_grad() if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("backward") with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop() # for param in model.parameters(): # print(param.data.double().sum().item(), param.grad.data.double().sum().item()) if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("optimizer.step()") optimizer.step() if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop() if i % args.report_freq == 0: # Every report_freq iterations, check the loss, accuracy, and speed. # For best performance, it doesn't make sense to print these metrics every # iteration, since they incur an allreduce and some host<->device syncs. # Measure accuracy prec1, prec5 = accuracy(logits.data, target, topk=(1, 5)) # Average loss and accuracy across processes for logging if args.distributed: reduced_loss = reduce_tensor(loss.data, args.world_size) prec1 = reduce_tensor(prec1, args.world_size) prec5 = reduce_tensor(prec5, args.world_size) else: reduced_loss = loss.data # to_python_float incurs a host<->device sync losses.update(to_python_float(reduced_loss), input.size(0)) top1.update(to_python_float(prec1), input.size(0)) top5.update(to_python_float(prec5), input.size(0)) torch.cuda.synchronize() batch_time.update((time.time() - end)/args.report_freq) end = time.time() if args.apex_local_rank == 0: logging.info('Epoch: [{0}][{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Speed {3:.3f} ({4:.3f})\t' 'Loss {loss.val:.10f} ({loss.avg:.4f})\t' 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( epoch, i, len(train_loader), args.world_size*args.batch_size/batch_time.val, args.world_size*args.batch_size/batch_time.avg, batch_time=batch_time, loss=losses, top1=top1, top5=top5)) if args.apex_profiling >= 0: torch.cuda.nvtx.range_push("prefetcher.next()") input, target = prefetcher.next() if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop() # Pop range "Body of iteration {}".format(i) if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop() if args.apex_profiling >= 0 and i == args.apex_profiling + 10: print("Profiling ended at iteration {}".format(i)) torch.cuda.cudart().cudaProfilerStop() quit() return top1.avg, losses.avg def validate(val_loader, model, criterion, args): batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() # switch to evaluate mode model.eval() end = time.time() prefetcher = data_prefetcher(val_loader) input, target = prefetcher.next() i = 0 while input is not None: i += 1 # compute output with torch.no_grad(): output, _ = model(input) loss = criterion(output, target) # measure accuracy and record loss prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) if args.distributed: reduced_loss = reduce_tensor(loss.data, args.world_size) prec1 = reduce_tensor(prec1, args.world_size) prec5 = reduce_tensor(prec5, args.world_size) else: reduced_loss = loss.data losses.update(to_python_float(reduced_loss), input.size(0)) top1.update(to_python_float(prec1), input.size(0)) top5.update(to_python_float(prec5), input.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() # TODO: Change timings to mirror train().
if args.apex_local_rank == 0 and i % args.report_freq == 0: logging.info('Test: [{0}/{1}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Speed {2:.3f} ({3:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( i, len(val_loader), args.world_size * args.batch_size / batch_time.val, args.world_size * args.batch_size / batch_time.avg, batch_time=batch_time, loss=losses, top1=top1, top5=top5)) input, target = prefetcher.next() logging.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}' .format(top1=top1, top5=top5)) return top1.avg, top5.avg, losses.avg def dali_apex_train(train_loader, model, criterion, optimizer, epoch, args): batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() # switch to train mode model.train() end = time.time() for i, data in enumerate(train_loader): input = data[0]["data"] target = data[0]["label"].squeeze().cuda().long() train_loader_len = int(math.ceil(train_loader._size / args.batch_size)) if args.dali_profiling >= 0 and i == args.dali_profiling: print("Profiling begun at iteration {}".format(i)) torch.cuda.cudart().cudaProfilerStart() if args.dali_profiling >= 0: torch.cuda.nvtx.range_push("Body of iteration {}".format(i)) # adjust_learning_rate(optimizer, epoch, i, train_loader_len, args) if args.debug: if i > 10: logging.info('Break in debug mode after 10 batches...') break # compute output if args.dali_profiling >= 0: torch.cuda.nvtx.range_push("forward") logits, logits_aux = model(input) if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop() loss = criterion(logits, target) if args.auxiliary: loss_aux = criterion(logits_aux, target) loss += args.auxiliary_weight * loss_aux # compute gradient and do SGD step optimizer.zero_grad() if args.dali_profiling >= 0: torch.cuda.nvtx.range_push("backward") if args.apex_opt_level is not None: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop() if args.dali_profiling >= 0: torch.cuda.nvtx.range_push("optimizer.step()") optimizer.step() if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop() if i % args.report_freq == 0: # Every report_freq iterations, check the loss, accuracy, and speed. # For best performance, it doesn't make sense to print these metrics every # iteration, since they incur an allreduce and some host<->device syncs.
# Measure accuracy prec1, prec5 = accuracy(logits.data, target, topk=(1, 5)) # Average loss and accuracy across processes for logging if args.distributed: reduced_loss = reduce_tensor(loss.data, args.world_size) prec1 = reduce_tensor(prec1, args.world_size) prec5 = reduce_tensor(prec5, args.world_size) else: reduced_loss = loss.data # to_python_float incurs a host<->device sync losses.update(to_python_float(reduced_loss), input.size(0)) top1.update(to_python_float(prec1), input.size(0)) top5.update(to_python_float(prec5), input.size(0)) torch.cuda.synchronize() batch_time.update((time.time() - end)/args.report_freq) end = time.time() if args.apex_local_rank == 0: logging.info('Epoch: [{0}][{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Speed {3:.3f} ({4:.3f})\t' 'Loss {loss.val:.10f} ({loss.avg:.4f})\t' 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( epoch, i, train_loader_len, args.world_size*args.batch_size/batch_time.val, args.world_size*args.batch_size/batch_time.avg, batch_time=batch_time, loss=losses, top1=top1, top5=top5)) # Pop range "Body of iteration {}".format(i) if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop() if args.dali_profiling >= 0 and i == args.dali_profiling + 2: print("Profiling ended at iteration {}".format(i)) torch.cuda.cudart().cudaProfilerStop() quit() return top1.avg, losses.avg def dali_validate(val_loader, model, criterion, args): batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() # switch to evaluate mode model.eval() end = time.time() for i, data in enumerate(val_loader): input = data[0]["data"] target = data[0]["label"].squeeze().cuda().long() val_loader_len = int(val_loader._size / args.batch_size) if args.debug: if i > 10: break # compute output with torch.no_grad(): output, _ = model(input) loss = criterion(output, target) # measure accuracy and record loss prec1, prec5 = accuracy(output.data, target, topk=(1, 5)) if args.distributed: reduced_loss = reduce_tensor(loss.data, args.world_size) prec1 = reduce_tensor(prec1, args.world_size) prec5 = reduce_tensor(prec5, args.world_size) else: reduced_loss = loss.data losses.update(to_python_float(reduced_loss), input.size(0)) top1.update(to_python_float(prec1), input.size(0)) top5.update(to_python_float(prec5), input.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() # TODO: Change timings to mirror train(). if args.apex_local_rank == 0 and i % args.report_freq == 0: logging.info('Test: [{0}/{1}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Speed {2:.3f} ({3:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t' 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format( i, val_loader_len, args.world_size * args.batch_size / batch_time.val, args.world_size * args.batch_size / batch_time.avg, batch_time=batch_time, loss=losses, top1=top1, top5=top5)) logging.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}' .format(top1=top1, top5=top5)) return top1.avg, top5.avg, losses.avg
torch.cuda.nvtx.range_push("forward")
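# Editor's sketch: wiring HybridTrainPipe into a training loader. The field
# names on `args` (data, workers, batch_size, apex_local_rank, world_size)
# and the legacy DALI iterator signature are assumptions inferred from the
# usage above; `os` comes from this module's imports.
def build_dali_train_loader(args, crop=224):
    pipe = HybridTrainPipe(batch_size=args.batch_size,
                           num_threads=args.workers,
                           device_id=args.apex_local_rank,
                           data_dir=os.path.join(args.data, 'train'),
                           crop=crop,
                           shard_id=args.apex_local_rank,
                           num_shards=args.world_size,
                           args=args)
    pipe.build()
    # The FileReader was registered under name="Reader"; its epoch size,
    # divided across shards, tells the iterator when an epoch ends.
    return DALIClassificationIterator(
        pipe, size=int(pipe.epoch_size("Reader") / args.world_size))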
read_test.go
// Copyright (c) 2021 VMware, Inc. or its affiliates. All Rights Reserved. // Copyright (c) 2012-2021, Sean Treadway, SoundCloud Ltd. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package amqp091
import ( "strings" "testing" ) func TestGoFuzzCrashers(t *testing.T) { if testing.Short() { t.Skip("excessive allocation") } testData := []string{ "\b000000", "\x02\x16\x10�[��\t\xbdui�" + "\x10\x01\x00\xff\xbf\xef\xbfサn\x99\x00\x10r", "\x0300\x00\x00\x00\x040000", } for idx, testStr := range testData { r := Reader{strings.NewReader(testStr)} frame, err := r.ReadFrame() if err != nil && frame != nil { t.Errorf("%d. frame is not nil: %#v err = %v", idx, frame, err) } } }
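//go:build gofuzz

package amqp091

import "bytes"

// Editor's sketch: crashers like the string table above are typically
// minimized by go-fuzz, and a harness of roughly this shape would regenerate
// them; this is an assumption about the original harness, not code from the
// repository. The build tag keeps it out of normal `go test` runs.
func Fuzz(data []byte) int {
	r := Reader{bytes.NewReader(data)}
	if _, err := r.ReadFrame(); err != nil {
		return 0 // rejected input: deprioritize
	}
	return 1 // parsed frame: prioritize for further mutation
}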
sti_test.go
package strategy import ( "fmt" "reflect" "strings" "testing" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apiserver/pkg/admission" kapi "k8s.io/kubernetes/pkg/api" kapihelper "k8s.io/kubernetes/pkg/api/helper" "k8s.io/kubernetes/pkg/api/v1" buildapi "github.com/openshift/origin/pkg/build/apis/build" _ "github.com/openshift/origin/pkg/build/apis/build/install" "github.com/openshift/origin/pkg/build/util" buildutil "github.com/openshift/origin/pkg/build/util" ) type FakeAdmissionControl struct { admit bool } func (a *FakeAdmissionControl) Admit(attr admission.Attributes) (err error) { if a.admit { return nil } return fmt.Errorf("pod not allowed") } func (a *FakeAdmissionControl) Handles(operation admission.Operation) bool { return true } func
(t *testing.T) { testSTICreateBuildPod(t, false) } func TestSTICreateBuildPodRootAllowed(t *testing.T) { testSTICreateBuildPod(t, true) } var nodeSelector = map[string]string{"node": "mynode"} func testSTICreateBuildPod(t *testing.T, rootAllowed bool) { strategy := &SourceBuildStrategy{ Image: "sti-test-image", Codec: kapi.Codecs.LegacyCodec(buildapi.LegacySchemeGroupVersion), AdmissionControl: &FakeAdmissionControl{admit: rootAllowed}, } build := mockSTIBuild() actual, err := strategy.CreateBuildPod(build) if err != nil { t.Errorf("Unexpected error: %v", err) } if expected, actual := buildapi.GetBuildPodName(build), actual.ObjectMeta.Name; expected != actual { t.Errorf("Expected %s, but got %s!", expected, actual) } if !reflect.DeepEqual(map[string]string{buildapi.BuildLabel: buildapi.LabelValue(build.Name)}, actual.Labels) { t.Errorf("Pod Labels do not match Build Labels!") } if !reflect.DeepEqual(nodeSelector, actual.Spec.NodeSelector) { t.Errorf("Pod NodeSelector does not match Build NodeSelector. Expected: %v, got: %v", nodeSelector, actual.Spec.NodeSelector) } container := actual.Spec.Containers[0] if container.Name != "sti-build" { t.Errorf("Expected sti-build, but got %s!", container.Name) } if container.Image != strategy.Image { t.Errorf("Expected %s image, got %s!", strategy.Image, container.Image) } if container.ImagePullPolicy != v1.PullIfNotPresent { t.Errorf("Expected %v, got %v", v1.PullIfNotPresent, container.ImagePullPolicy) } if actual.Spec.RestartPolicy != v1.RestartPolicyNever { t.Errorf("Expected never, got %#v", actual.Spec.RestartPolicy) } // strategy ENV variables are whitelisted (filtered) into the container environment, and not all // the values are allowed, so don't expect to see the filtered values in the result. expectedKeys := map[string]string{"BUILD": "", "SOURCE_REPOSITORY": "", "SOURCE_URI": "", "SOURCE_CONTEXT_DIR": "", "SOURCE_REF": "", "ORIGIN_VERSION": "", "BUILD_LOGLEVEL": "", "PUSH_DOCKERCFG_PATH": "", "PULL_DOCKERCFG_PATH": ""} if !rootAllowed { expectedKeys["ALLOWED_UIDS"] = "" expectedKeys["DROP_CAPS"] = "" } gotKeys := map[string]string{} for _, k := range container.Env { gotKeys[k.Name] = "" } if !reflect.DeepEqual(expectedKeys, gotKeys) { t.Errorf("Expected environment keys:\n%v\ngot keys\n%v", expectedKeys, gotKeys) } // the pod has 5 volumes but the git source secret is not mounted into the main container.
if len(container.VolumeMounts) != 4 { t.Fatalf("Expected 4 volumes in container, got %d", len(container.VolumeMounts)) } for i, expected := range []string{buildutil.BuildWorkDirMount, dockerSocketPath, DockerPushSecretMountPath, DockerPullSecretMountPath} { if container.VolumeMounts[i].MountPath != expected { t.Fatalf("Expected %s in VolumeMount[%d], got %s", expected, i, container.VolumeMounts[i].MountPath) } } if len(actual.Spec.Volumes) != 5 { t.Fatalf("Expected 5 volumes in Build pod, got %d", len(actual.Spec.Volumes)) } if *actual.Spec.ActiveDeadlineSeconds != 60 { t.Errorf("Expected ActiveDeadlineSeconds 60, got %d", *actual.Spec.ActiveDeadlineSeconds) } if !kapihelper.Semantic.DeepEqual(container.Resources, util.CopyApiResourcesToV1Resources(&build.Spec.Resources)) { t.Fatalf("Expected actual=expected, %v != %v", container.Resources, build.Spec.Resources) } found := false foundIllegal := false foundAllowedUIDs := false foundDropCaps := false for _, v := range container.Env { if v.Name == "BUILD_LOGLEVEL" && v.Value == "bar" { found = true } if v.Name == "ILLEGAL" { foundIllegal = true } if v.Name == buildapi.AllowedUIDs && v.Value == "1-" { foundAllowedUIDs = true } if v.Name == buildapi.DropCapabilities && v.Value == "KILL,MKNOD,SETGID,SETUID" { foundDropCaps = true } } if !found { t.Fatalf("Expected variable BUILD_LOGLEVEL be defined for the container") } if foundIllegal { t.Fatalf("Found illegal environment variable 'ILLEGAL' defined on container") } if foundAllowedUIDs && rootAllowed { t.Fatalf("Did not expect %s when root is allowed", buildapi.AllowedUIDs) } if !foundAllowedUIDs && !rootAllowed { t.Fatalf("Expected %s when root is not allowed", buildapi.AllowedUIDs) } if foundDropCaps && rootAllowed { t.Fatalf("Did not expect %s when root is allowed", buildapi.DropCapabilities) } if !foundDropCaps && !rootAllowed { t.Fatalf("Expected %s when root is not allowed", buildapi.DropCapabilities) } buildJSON, _ := runtime.Encode(kapi.Codecs.LegacyCodec(buildapi.LegacySchemeGroupVersion), build) errorCases := map[int][]string{ 0: {"BUILD", string(buildJSON)}, } for index, exp := range errorCases { if e := container.Env[index]; e.Name != exp[0] || e.Value != exp[1] { t.Errorf("Expected %s:%s, got %s:%s!\n", exp[0], exp[1], e.Name, e.Value) } } checkAliasing(t, actual) } func TestS2IBuildLongName(t *testing.T) { strategy := &SourceBuildStrategy{ Image: "sti-test-image", Codec: kapi.Codecs.LegacyCodec(buildapi.LegacySchemeGroupVersion), AdmissionControl: &FakeAdmissionControl{admit: true}, } build := mockSTIBuild() build.Name = strings.Repeat("a", validation.DNS1123LabelMaxLength*2) pod, err := strategy.CreateBuildPod(build) if err != nil { t.Fatalf("unexpected: %v", err) } if pod.Labels[buildapi.BuildLabel] != build.Name[:validation.DNS1123LabelMaxLength] { t.Errorf("Unexpected build label value: %s", pod.Labels[buildapi.BuildLabel]) } } func mockSTIBuild() *buildapi.Build { timeout := int64(60) return &buildapi.Build{ ObjectMeta: metav1.ObjectMeta{ Name: "stiBuild", Labels: map[string]string{ "name": "stiBuild", }, }, Spec: buildapi.BuildSpec{ CommonSpec: buildapi.CommonSpec{ Revision: &buildapi.SourceRevision{ Git: &buildapi.GitSourceRevision{}, }, Source: buildapi.BuildSource{ Git: &buildapi.GitBuildSource{ URI: "http://my.build.com/the/stibuild/Dockerfile", Ref: "master", }, ContextDir: "foo", SourceSecret: &kapi.LocalObjectReference{Name: "fooSecret"}, }, Strategy: buildapi.BuildStrategy{ SourceStrategy: &buildapi.SourceBuildStrategy{ From: kapi.ObjectReference{ Kind: "DockerImage", 
Name: "repository/sti-builder", }, PullSecret: &kapi.LocalObjectReference{Name: "bar"}, Scripts: "http://my.build.com/the/sti/scripts", Env: []kapi.EnvVar{ {Name: "BUILD_LOGLEVEL", Value: "bar"}, {Name: "ILLEGAL", Value: "foo"}, }, }, }, Output: buildapi.BuildOutput{ To: &kapi.ObjectReference{ Kind: "DockerImage", Name: "docker-registry/repository/stiBuild", }, PushSecret: &kapi.LocalObjectReference{Name: "foo"}, }, Resources: kapi.ResourceRequirements{ Limits: kapi.ResourceList{ kapi.ResourceName(kapi.ResourceCPU): resource.MustParse("10"), kapi.ResourceName(kapi.ResourceMemory): resource.MustParse("10G"), }, }, CompletionDeadlineSeconds: &timeout, NodeSelector: nodeSelector, }, }, Status: buildapi.BuildStatus{ Phase: buildapi.BuildPhaseNew, }, } }
TestSTICreateBuildPodRootNotAllowed
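// Editor's sketch: the two entry points above could equally be written as one
// table-driven test with subtests; the upstream suite keeps them separate so
// each case stays individually addressable via `go test -run`.
func TestSTICreateBuildPodTable(t *testing.T) {
	for name, rootAllowed := range map[string]bool{
		"rootNotAllowed": false,
		"rootAllowed":    true,
	} {
		t.Run(name, func(t *testing.T) { testSTICreateBuildPod(t, rootAllowed) })
	}
}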
_error.tsx
/* library package */ import { NextPage } from 'next' import Error from 'next/error' interface Props {
const Page: NextPage<Props> = ({ statusCode }) => { return <Error statusCode={statusCode} /> } Page.getInitialProps = async ({ res, err }) => { const statusCode = res ? res.statusCode : err ? err.statusCode : 404 return { statusCode } } export default Page
statusCode?: any }
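// Editor's sketch of the resolution order implemented by getInitialProps
// above: server response status wins, then the error's status, then 404 for a
// client-side miss. This standalone variant is slightly more defensive (it
// falls back to 404 when err.statusCode is undefined); types are illustrative.
const resolveStatusCode = (
  res?: { statusCode: number },
  err?: { statusCode?: number }
): number => (res ? res.statusCode : err && err.statusCode ? err.statusCode : 404)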
__init__.py
# -*- coding: utf-8; py-indent-offset: 2 -*- """ This package is used to model elemental ions in crystal structures. It handles both identification and building of ions, but relies on the solvent module to flag candidate sites for screening. Notes ----- .. [1] Echols, N. et al. Automated identification of elemental ions in macromolecular crystal structures. Acta Crystallogr. D. Biol. Crystallogr. 70, 1104–14 (2014). """ from __future__ import division import iotbx.cif from libtbx import group_args, Auto, slots_getstate_setstate from libtbx.utils import Sorry from math import exp import time import os import sys DEFAULT_IONS = ["MG", "CA", "ZN", "CL"] HALIDES = ["F", "CL", "BR", "IOD"] TRANSITION_METALS = ["MN", "FE", "CO", "CU", "NI", "ZN", "PT"] SUPPORTED = TRANSITION_METALS + HALIDES + ["NA", "MG", "K", "CA", "CD", "HG"] def _cif_param_as_list (param) : if
def _cif_param_as_int (param) : if (param == ".") : return None return int(param) def _cif_param_as_float (param) : if (param == ".") : return None return float(param) class metal_parameters (group_args) : def __str__ (self) : return "%s%+d" % (self.element.upper(), self.charge) def charge_as_int (self): """ Gets the charge of a parameter as an integer. Returns ------- int """ return self.charge def scattering_type (self): """ Makes a string showing the element and its associated charge. Note that this format is slightly different from the __str__ method, which puts the +/- between the element symbol and the charge number. Returns ------- str Examples -------- >>> from mmtbx.ions import metal_parameters >>> print metal_parameters(element="FE", charge=3) FE+3 >>> print metal_parameters(element="FE", charge=3).scattering_type() FE3+ >>> print metal_parameters(element="CL", charge=-1).scattering_type() CL1- """ charge_symbol = "" if (self.charge > 0) : charge_symbol = "+" elif (self.charge < 0) : charge_symbol = "-" s = "%2s%1d%s" % (self.element.strip(), abs(self.charge), charge_symbol) return s class parameter_server (slots_getstate_setstate) : """ Class for retrieving information from ion_parameters.cif Attributes ---------- params : iotbx.cif.model.block """ __slots__ = ["params", "_metal_params", "_charge_params", "_resname_elem", "_default_charges"] def __init__ (self) : params_path = os.path.join(os.path.split(__file__)[0], "ion_parameters.cif") assert os.path.isfile(params_path) cif_model = iotbx.cif.reader(file_path=params_path).model() self.params = cif_model["ions"] self._metal_params = {} self._charge_params = {} self._resname_elem = {} self._default_charges = {} def is_supported_element (self, symbol): """ Checks if symbol is a supported element by this parameter server. Parameters ---------- symbol : str Returns ------- bool """ return symbol in self.params['_lib_valence.atom_symbol'] def is_supported_donor (self, symbol) : """ Checks if an element is a supported donor atom. Parameters ---------- symbol : str Returns ------- bool """ return symbol in self.params['_lib_valence.donor_symbol'] def get_valence_params (self, atom1, atom2): """ Gets the valence parameters (r_0 and b) used for calculating valences from bond distances. 
Parameters ---------- atom1 : mmtbx.ions.metal_parameters atom2 : mmtbx.ions.metal_parameters Returns ------- float or None r_0 in the equation exp((r_0 - r) / b) float or None b in the equation exp((r_0 - r) / b) Examples -------- >>> from mmtbx.ions import server, metal_parameters >>> zn_params = metal_parameters(element="ZN", charge=2) >>> n_params = metal_parameters(element="N", charge=-3) >>> print server.get_valence_params(zn_params, n_params) (1.77, 0.37) """ for i_elem, symbol in enumerate(self.params['_lib_valence.atom_symbol']) : if (symbol == atom1.element) : i_charge = int(self.params['_lib_valence.atom_charge'][i_elem]) i_other = self.params['_lib_valence.donor_symbol'][i_elem] i_other_charge = int(self.params['_lib_valence.donor_charge'][i_elem]) if ((i_charge == atom1.charge_as_int()) and (i_other == atom2.element) and (i_other_charge == atom2.charge_as_int())) : valence = float(self.params['_lib_valence.value'][i_elem]) return valence, 0.37 return None, None def _get_default_charge(self, element): if element in self._default_charges: return self._default_charges[element] p = self.params for i_elem, elem in enumerate(p["_lib_charge.element"]): if elem == element: charge = int(p["_lib_charge.charge"][i_elem]) self._default_charges[element] = charge return charge return 0 def _get_charge_params(self, resname, element=None): resname = resname.strip().upper() if element is not None: element = element.strip().upper() p = self.params if element is None: # Determine the element from the residue name (I.E. "HOH" => "O") if resname in self._resname_elem: element = self._resname_elem[resname] else: resn_elements = [(resn, p["_lib_charge.element"][i_resn]) for i_resn, resn in enumerate(p["_lib_charge.resname"]) if resn == resname] if len(resn_elements) > 1: raise Sorry("Ambiguous element for residue: " + resname) elif len(resn_elements) < 1: raise Sorry("Unknown element for residue: " + resname) element = resn_elements[0][1] self._resname_elem[resname] = element if (resname, element) in self._charge_params: return self._charge_params[(resname, element)] for i_resn, resn in enumerate(p["_lib_charge.resname"]): if resn == resname and element == p["_lib_charge.element"][i_resn]: elem_charge = \ p["_lib_charge.element"][i_resn], int(p["_lib_charge.charge"][i_resn]) break else: elem_charge = element, self._get_default_charge(element) self._charge_params[(resname, element)] = elem_charge return elem_charge def get_element(self, atom): """ Gets the element associated with an atom. Parameters ---------- atom : iotbx.pdb.hierarchy.atom or str Returns ------- str """ if isinstance(atom, str): resname = atom.strip().upper() if resname in self.params["_lib_charge.element"]: return resname else: if hasattr(atom, "element") and isinstance(atom.element, str): return atom.element.strip().upper() resname = atom.fetch_labels().resname.strip().upper() return self._get_charge_params(resname=resname)[0] def get_charge(self, atom): """ Gets the charge associated with an atom or element.
Parameters ---------- atom : iotbx.pdb.hierarchy.atom or str Returns ------- int Examples -------- >>> from iotbx.pdb.hierarchy import atom >>> from mmtbx.ions import server >>> atom_dummy = atom() >>> atom_dummy.element = "N" >>> atom_dummy.charge = "-3" >>> print server.get_charge(atom_dummy) -3 >>> print server.get_charge("N") -3 """ if isinstance(atom, str): atom = atom.strip().upper() try: charge = self._get_charge_params(resname=atom)[1] except Sorry: charge = self._get_charge_params(resname="", element=atom)[1] else: charge = atom.charge if not isinstance(charge, int): charge = atom.charge_as_int() if charge != 0: return charge resname = atom.fetch_labels().resname.strip().upper() element = atom.element.strip().upper() charge = self._get_charge_params(resname=resname, element=element)[1] return charge def get_charges(self, atom): """ Retrieves all charges that are expected to be associated with an atom or element within ion_parameters.cif. This list is manually updated based on the ligand IDs listed by the PDB. Parameters ---------- atom : iotbx.pdb.hierarchy.atom or str Returns ------- list of int Examples -------- >>> from mmtbx.ions import server >>> print server.get_charges("CU") [1, 2, 3] >>> print server.get_charges("ZN") [2] """ element = self.get_element(atom) p = self.params charges = set() for i_elem, elem in enumerate(p["_lib_charge.element"]): if elem == element: charges.add(int(p["_lib_charge.charge"][i_elem])) return sorted(charges) def get_metal_parameters (self, element): """ Gets all metal parameters associated with an element. Parameters ---------- element : str Returns ------- mmtbx.ions.metal_parameters or None """ p = self.params for i_elem, symbol in enumerate(p['_lib_elems.element']) : if (symbol == element.upper()) : if (symbol in self._metal_params) : return self._metal_params[symbol] assert (p['_lib_ligands.element'][i_elem] == symbol) params = metal_parameters( element=symbol, charge=_cif_param_as_int(p['_lib_elems.charge'][i_elem]), vec_sum_cutoff=_cif_param_as_float( p["_lib_elems.vec_sum_cutoff"][i_elem]), coord_num_lower=_cif_param_as_int( p["_lib_elems.coord_num_lower"][i_elem]), coord_num_upper=_cif_param_as_int( p["_lib_elems.coord_num_upper"][i_elem]), min_coordinating_non_waters=_cif_param_as_int( p["_lib_elems.min_coordinating_non_waters"][i_elem]), cvbs_lower=_cif_param_as_float(p['_lib_elems.cvbs_lower'][i_elem]), cvbs_upper=_cif_param_as_float(p['_lib_elems.cvbs_upper'][i_elem]), cvbs_expected=_cif_param_as_float( p['_lib_elems.cvbs_expected'][i_elem]), allowed_coordinating_atoms=_cif_param_as_list( p['_lib_ligands.allowed_coordinating_atoms'][i_elem]), allowed_coordinating_residues=_cif_param_as_list( p['_lib_ligands.allowed_coordinating_residues'][i_elem]), allowed_geometries=_cif_param_as_list( p['_lib_ligands.allowed_geometries'][i_elem]), allowed_backbone_atoms=_cif_param_as_list( p['_lib_ligands.allowed_backbone_atoms'][i_elem])) self._metal_params[symbol] = params return params return None def calculate_valence (self, ion, donor, distance): """ Calculates the single valence contribution of one ion donor pair, separated by distance. ion and donor should be AtomGuess objects. 
Parameters ---------- ion : mmtbx.ions.metal_parameters donor : mmtbx.ions.metal_parameters distance : float Returns ------- float Examples -------- >>> from mmtbx.ions import server, metal_parameters >>> ion = server.get_metal_parameters("ZN") >>> donor = metal_parameters(element="N", charge="-3") >>> valence = server.calculate_valence(ion, donor, 2.20) >>> print round(valence, 2) 0.31 """ element = donor.element if (not self.is_supported_donor(element)) : return 0 r_0, b = self.get_valence_params(ion, donor) if (r_0 is None) : # Try again, this time using the default charge for the donor donor = metal_parameters( charge=self.get_charge(element), element=element) r_0, b = self.get_valence_params(ion, donor) if r_0 is None: return 0 return exp((r_0 - distance) / b) def calculate_valences (self, ion, nearby_atoms): """ Calculates all of the valence contributions between ion and each atom of nearby_atoms, each element of which should be a tuple of an atom and a vector from the ion's location. Parameters ---------- ion : mmtbx.ions.metal_parameters nearby_atoms : list of mmtbx.ions.environment.atom_contact Returns ------- list of scitbx.matrix.rec List of vectors, whose magnitudes are equal to the valence contributions from each donor atom. Examples -------- >>> from libtbx import group_args >>> from iotbx.pdb.hierarchy import atom >>> from mmtbx.ions import server >>> from mmtbx.ions.environment import atom_contact >>> from scitbx.matrix import rec >>> ion = server.get_metal_parameters("ZN") >>> vector_1 = rec([2.0, 0, 0], [1, 3]) >>> vector_2 = rec([-2.0, 0, 0], [1, 3]) >>> vector_3 = rec([0, 2.0, 0], [1, 3]) >>> vector_4 = rec([0, 0, 2.0], [1, 3]) >>> atom_dummy = atom() >>> atom_dummy.element = "N" >>> atom_dummy.charge = "-3" >>> atom_dummy.occ = 1 >>> atom_dummy.parent = lambda: group_args(atoms=lambda: []) >>> donors = [atom_contact(atom_dummy, vector_1, None, None), ... atom_contact(atom_dummy, vector_2, None, None), ... atom_contact(atom_dummy, vector_3, None, None), ... atom_contact(atom_dummy, vector_4, None, None)] >>> vectors = server.calculate_valences(ion, donors) >>> bvs = sum(abs(i) for i in vectors) >>> print round(bvs, 2) 2.15 """ vectors = [] for contact in nearby_atoms: donor = metal_parameters( element=contact.element, charge=contact.charge) distance = abs(contact.vector) valence = self.calculate_valence(ion, donor, distance) * contact.occ if valence == 0: if ((donor.element not in ["H", "C", "AX"]) and (not self.is_supported_donor(donor.element))) : pass elif distance != 0: vectors.append(contact.vector / distance * valence) return vectors def check_supported (elements): """ Checks if elements are supported by the ion identification process.
Parameters ---------- elements : list of str Returns ------- bool Raises ------ libtbx.utils.Sorry Examples -------- >>> from mmtbx.ions import check_supported >>> check_supported(["CA", "ZN", "FE"]) True """ if (elements is None) : raise Sorry("No elements specified for ion picking - must be either "+ "'Auto' or a comma-separated list of element symbols.") elif (elements is not Auto) : # XXX somehow comma-separation of phil strings fields doesn't work if isinstance(elements, str) or isinstance(elements, unicode) : elements = elements.replace(",", " ").split() elif (isinstance(elements, list)) and (len(elements) == 1) : elements = elements[0].split(",") if (elements == ['X']) : # XXX hack for testing - X is "dummy" element return True for elem in elements : if (not elem.strip().upper() in SUPPORTED) : raise Sorry( ("Identification of ions with element symbol '%s' is not supported! "+ "Choices are: %s") % (elem, " ".join(SUPPORTED))) return True # global parameter_server instance server = parameter_server() class atom_type_flags (object) : """ Simple container for information about the identity of an atom via a set of enumerated boolean flags. The responsibility for toggling these flags based on analysis of the site is left to external code. Parameters ---------- name: element symbol or HOH Examples -------- >>> flags = atom_type_flags("HOH") >>> if (fofc_map_value > 3.0) : ... flags.high_fofc = True """ flag_names_and_labels = [ # these flags usually indicate a heavier atom type ("low_b_iso", "Abnormally low B-factor"), ("high_occ", "Abnormally high occupancy"), ("high_fofc", "mFo-DFc peak"), ("high_two_fofc", "Abnormally high 2mFo-DFc map value"), ("high_anom", "Anomalous map peak"), ("high_fdp", "Abnormally high refined f''"), # the next set suggest a lighter element (or water, or nothing) ("high_b_iso", "Abnormally high B-factor"), ("low_occ", "Abnormally low occupancy"), ("low_two_fofc", "Low 2mFo-DFc map value"), ("low_fofc", "mFo-DFc hole"), ("low_anom", "Poor anomalous map value"), ("low_fdp", "Abnormally low refined f''"), # ("bad_geom", "Unexpected coordination geometry"), ("missing_geom", "No recognizable coordination geometry"), ("bad_vectors", "Bad coordination vectors"), ("bad_valence", "Bad bond valence sum"), ("too_few_non_waters", "Too few non-water coordinating atoms"), ("too_few_coord", "Too few coordinating atoms"), ("too_many_coord", "Too many coordinating atoms"), ("like_coord", "Coordinating atom of same charge"), ("bad_coord_atom", "Disallowed or unusual coordinating atom"), ("bad_coord_residue", "Disallowed or unusual coordinating residue"), ("very_bad_valence", "Very bad bond valence sum"), ("bad_halide", "Poor halide site"), ("coord_geom", "Appears to be coordinating another site with distinct geometry"), ("close_contact", "Unusually close contact to oxygen atom"), ] __slots__ = [ fl for (fl, label) in flag_names_and_labels ] + ["name"] def __init__ (self, name) : self.name = name for attr in self.__slots__[:-1] : setattr(self, attr, False) def get_flag_captions (self) : """ Retrieve a list of strings describing the issues that have been identified. These will have '+++' or '---' appended if they indicate that the actual element is heavier or lighter than the current refined scatterer type. 
""" captions = [] for i_attr, attr in enumerate(self.__slots__[:-1]) : if getattr(self, attr) : if (i_attr < 6) : # probably something heavier captions.append("%s (+++)" % self.flag_names_and_labels[i_attr][1]) elif (6 <= i_attr < 12) : # probably something lighter captions.append("%s (---)" % self.flag_names_and_labels[i_attr][1]) else : captions.append(self.flag_names_and_labels[i_attr][1]) return captions def show (self, out=sys.stdout, prefix="") : """ Print out a list of all issues that have been identified. """ captions = self.get_flag_captions() if (len(captions) == 0) : print >> out, prefix+"(No problems detected.)" else : print >> out, prefix+"The following problems were detected with %s:" %\ self.name have_plus = have_minus = False for caption in caption : print >> out, prefix+" %s" % caption if ("---" in caption) : have_minus = True elif ("+++" in caption) : have_plus = True if (have_plus or have_minus) : print >> out, prefix+\ "(+++ indicates a heavier element, --- indicates a lighter one)"
(param == ".") : return None return param.split(",")
newsletterNewForm.js
import React, { Component } from "react"; import { reduxForm, Field } from "redux-form"; import { FormTitle } from "../formTitle"; import { FormInput, FormButton, FormTextArea, FormImage } from "../formFields"; import TextLink from "../textLink"; class
extends Component { render() { const { handleSubmit } = this.props; return ( <form onSubmit = {handleSubmit} className="new-newsletter-form"> <FormTitle className="new-newsletter-form__title" text="New Newsletter" /> <Field className="new-newsletter-form__newsletter-title" placeholder="Newsletter Title" name="title" type="text" title="Newsletter Title" component={FormInput} /> <Field className="new-newsletter-form__body" placeholder="Newsletter Body" name="body" type="text" title="Body" component={FormTextArea} /> <Field className="new-newsletter-form__submit" small={true} danger={true} red={true} name="submit" type="submit" title="Submit" component={FormButton} /> <Field className="new-newsletter-form__cancel" small={true} name="cancel" type="button" title="Cancel" component={FormButton} onClick={this.props.onCancel} /> <Field className="new-newsletter-form__image" small={true} name="image" type="file" title="Image" component={FormImage} /> </form> ); } } NewNewsletterForm = reduxForm({ form: "newnewsletter" })(NewNewsletterForm); export default NewNewsletterForm;
NewNewsletterForm
handler.rs
pub async fn hello_world(_req: hyper::Request<hyper::Body>) -> Result<hyper::Response<hyper::Body>, std::convert::Infallible> { Ok(hyper::Response::new("Hello, World".into()))
}
proof_provider.rs
// This file is part of Substrate. // Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <https://www.gnu.org/licenses/>. //! Proof utilities use crate::{ChangesProof, StorageProof}; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_storage::{ChildInfo, PrefixedStorageKey, StorageKey}; /// Interface for providing block proving utilities. pub trait ProofProvider<Block: BlockT> { /// Reads storage value at a given block + key, returning read proof. fn read_proof( &self, id: &BlockId<Block>, keys: &mut dyn Iterator<Item = &[u8]>, ) -> sp_blockchain::Result<StorageProof>; /// Reads child storage value at a given block + storage_key + key, returning /// read proof. fn read_child_proof( &self, id: &BlockId<Block>, child_info: &ChildInfo, keys: &mut dyn Iterator<Item = &[u8]>, ) -> sp_blockchain::Result<StorageProof>; /// Execute a call to a contract on top of state in a block of given hash /// AND returning execution proof. /// /// No changes are made. fn execution_proof( &self, id: &BlockId<Block>, method: &str, call_data: &[u8], ) -> sp_blockchain::Result<(Vec<u8>, StorageProof)>; /// Reads given header and generates CHT-based header proof. fn header_proof( &self, id: &BlockId<Block>, ) -> sp_blockchain::Result<(Block::Header, StorageProof)>; /// Get proof for computation of (block, extrinsic) pairs where key has been changed at given /// blocks range. `min` is the hash of the first block, which changes trie root is known to the
/// we can't use changes tries from descendants of this block. /// Works only for runtimes that are supporting changes tries. fn key_changes_proof( &self, first: Block::Hash, last: Block::Hash, min: Block::Hash, max: Block::Hash, storage_key: Option<&PrefixedStorageKey>, key: &StorageKey, ) -> sp_blockchain::Result<ChangesProof<Block::Header>>; /// Given a `BlockId` iterate over all storage values starting at `start_key` exclusively, /// building proofs until size limit is reached. Returns combined proof and the number of /// collected keys. fn read_proof_collection( &self, id: &BlockId<Block>, start_key: &[u8], size_limit: usize, ) -> sp_blockchain::Result<(StorageProof, u32)>; /// Given a `BlockId` iterate over all storage values starting at `start_key`. /// Returns collected keys and values. fn storage_collection( &self, id: &BlockId<Block>, start_key: &[u8], size_limit: usize, ) -> sp_blockchain::Result<Vec<(Vec<u8>, Vec<u8>)>>; /// Verify read storage proof for a set of keys. /// Returns collected key-value pairs and a flag indicating if iteration is complete. fn verify_range_proof( &self, root: Block::Hash, proof: StorageProof, start_key: &[u8], ) -> sp_blockchain::Result<(Vec<(Vec<u8>, Vec<u8>)>, bool)>; }
/// requester - when we're using changes tries from ascendants of this block, we should provide /// proofs for changes tries roots `max` is the hash of the last block known to the requester -
dcmd.go
package dcmd import ( "github.com/osgochina/donkeygo/container/dvar" "github.com/osgochina/donkeygo/internal/command" "os" "strings" ) var ( defaultCommandFuncMap = make(map[string]func()) ) // Init initializes the command-line arguments. func Init(args ...string) { command.Init(args...) } // GetOpt returns the value of the named option. func GetOpt(name string, def ...string) string { Init() return command.GetOpt(name, def...) } // GetOptVar returns the named option as a *dvar.Var. func GetOptVar(name string, def ...string) *dvar.Var { Init() return dvar.New(GetOpt(name, def...)) } // GetOptAll returns all options. func GetOptAll() map[string]string { Init() return command.GetOptAll() } // ContainsOpt reports whether the option with the given name exists. func ContainsOpt(name string, def ...string) bool { Init() return command.ContainsOpt(name) } // GetArg returns the argument at the given index. func GetArg(index int, def ...string) string { Init() return command.GetArg(index, def...) } // GetArgVar returns the argument at the given index as a *dvar.Var. func GetArgVar(index int, def ...string) *dvar.Var { Init() return dvar.New(GetArg(index, def...)) } // GetArgAll returns all arguments. func GetArgAll() []string { Init() return command.GetArgAll() } // GetWithEnv returns the value for the given key from the command line, falling back to the environment variable when the option is absent. func GetWithEnv(key string, def ...interface{}) *dvar.Var { return GetOptWithEnv(key, def...) } // GetOptWithEnv returns the command line argument of the specified <key>. // If the argument does not exist, then it returns the environment variable with specified <key>. // It returns the default value <def> if none of them exists. // // Fetching Rules: // 1. Command line arguments are in lowercase format, eg: gf.<package name>.<variable name>; // 2. Environment arguments are in uppercase format, eg: GF_<package name>_<variable name>; func GetOptWithEnv(key string, def ...interface{}) *dvar.Var { cmdKey := strings.ToLower(strings.Replace(key, "_", ".", -1)) if ContainsOpt(cmdKey) { return dvar.New(GetOpt(cmdKey)) } else { envKey := strings.ToUpper(strings.Replace(key, ".", "_", -1)) if r, ok := os.LookupEnv
} else { if len(def) > 0 { return dvar.New(def[0]) } } } return dvar.New(nil) } // BuildOptions builds an options string from the given map. func BuildOptions(m map[string]string, prefix ...string) string { options := "" leadStr := "-" if len(prefix) > 0 { leadStr = prefix[0] } for k, v := range m { if len(options) > 0 { options += " " } options += leadStr + k if v != "" { options += "=" + v } } return options }
(envKey); ok { return dvar.New(r)
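// Editor's sketch: the option/environment fallback documented on GetOptWithEnv
// is easiest to see in a small program. The import path and the key "demo.path"
// are assumptions for illustration, not part of the package.
package main

import (
	"fmt"
	"os"

	"github.com/osgochina/donkeygo/os/dcmd" // assumed import path
)

func main() {
	// Suppose the binary was launched as: ./app --demo.path=/tmp/a
	// Options are looked up in the lowercase dotted form first.
	fmt.Println(dcmd.GetOpt("demo.path", "(unset)"))

	// When the option is absent, GetOptWithEnv falls back to the uppercase
	// underscored environment variable DEMO_PATH.
	os.Setenv("DEMO_PATH", "/tmp/b")
	fmt.Printf("%v\n", dcmd.GetOptWithEnv("demo.path"))
}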
subscribe_starttx.rs
#[doc = "Reader of register SUBSCRIBE_STARTTX"] pub type R = crate::R<u32, super::SUBSCRIBE_STARTTX>; #[doc = "Writer for register SUBSCRIBE_STARTTX"] pub type W = crate::W<u32, super::SUBSCRIBE_STARTTX>; #[doc = "Register SUBSCRIBE_STARTTX `reset()`'s with value 0"] impl crate::ResetValue for super::SUBSCRIBE_STARTTX { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `CHIDX`"] pub type CHIDX_R = crate::R<u8, u8>; #[doc = "Write proxy for field `CHIDX`"] pub struct CHIDX_W<'a> { w: &'a mut W, } impl<'a> CHIDX_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f); self.w } } #[doc = "\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum EN_A { #[doc = "0: Disable subscription"] DISABLED, #[doc = "1: Enable subscription"] ENABLED, } impl From<EN_A> for bool { #[inline(always)] fn from(variant: EN_A) -> Self { match variant { EN_A::DISABLED => false, EN_A::ENABLED => true, }
#[doc = "Reader of field `EN`"] pub type EN_R = crate::R<bool, EN_A>; impl EN_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> EN_A { match self.bits { false => EN_A::DISABLED, true => EN_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == EN_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == EN_A::ENABLED } } #[doc = "Write proxy for field `EN`"] pub struct EN_W<'a> { w: &'a mut W, } impl<'a> EN_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: EN_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Disable subscription"] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(EN_A::DISABLED) } #[doc = "Enable subscription"] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(EN_A::ENABLED) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31); self.w } } impl R { #[doc = "Bits 0:3 - Channel that task STARTTX will subscribe to"] #[inline(always)] pub fn chidx(&self) -> CHIDX_R { CHIDX_R::new((self.bits & 0x0f) as u8) } #[doc = "Bit 31"] #[inline(always)] pub fn en(&self) -> EN_R { EN_R::new(((self.bits >> 31) & 0x01) != 0) } } impl W { #[doc = "Bits 0:3 - Channel that task STARTTX will subscribe to"] #[inline(always)] pub fn chidx(&mut self) -> CHIDX_W { CHIDX_W { w: self } } #[doc = "Bit 31"] #[inline(always)] pub fn en(&mut self) -> EN_W { EN_W { w: self } } }
} }
referrals.tsx
import React from "react" import NewLayout from "../components/layout" import { NoPicNavCard } from "../components/content-cards" import { IndexCardGrid } from "../components/content-wrappers" import { ExternalButton } from "../components/buttons" export default function Home() { return ( <NewLayout titleTwo=" - referrals" description="Referral links to some of Mike Tarpey's most-used services." url="https://miketarpey.com/referrals" h1text="referrals" currentPage="referrals" > <IndexCardGrid> <NoPicNavCard internal={false} label="DollarShaveClub" gridrowcss="1fr 1.5fr 1fr" > <h5>Favorite razor since the UConn days.</h5> <ExternalButton label="visit dollarshaveclub.com" url="http://shaved.by/lfiQl" kind="light" width="100%" /> </NoPicNavCard> <NoPicNavCard internal={false} label="Second Nature" gridrowcss="1fr 1.5fr 1fr" > <h5>Subscription air filters for your home.</h5> <ExternalButton label="visit secondnature.com" url="http://fbuy.me/rCbJC" kind="light"
<NoPicNavCard internal={false} label="Personal Capital" gridrowcss="1fr 1.5fr 1fr" > <h5>Track all of your financial accounts in one place.</h5> <ExternalButton label="visit personalcapital.com" url="https://pcap.rocks/m31912" kind="light" width="100%" /> </NoPicNavCard> <NoPicNavCard internal={false} label="Binance.US" gridrowcss="1fr 1.5fr 1fr" > <h5> My favorite crypto exchange, with the lowest fees. (Do your own research before investing!) </h5> <ExternalButton label="visit binance.us" url="https://accounts.binance.us/en/register?ref=52573715" kind="light" width="100%" /> </NoPicNavCard> <NoPicNavCard internal={false} label="FanDuel" gridrowcss="1fr 1.5fr 1fr" > <h5>I place all sports bets here. (Please gamble responsibly!)</h5> <ExternalButton label="visit fanduel.com" url="https://account.sportsbook.fanduel.com/join/select-state#RAF=Michael_3702975" kind="light" width="100%" /> </NoPicNavCard> <NoPicNavCard internal={false} label="Coinbase Pro" gridrowcss="1fr 1.5fr 1fr" > <h5> Second favorite crypto exchange. (I recommend Coinbase Pro - lower fees than vanilla Coinbase!) </h5> <ExternalButton label="visit coinbase.com" url="https://www.coinbase.com/join/5a6eab9d8bf65d0545be6e9a" kind="light" width="100%" /> </NoPicNavCard> <NoPicNavCard internal={false} label="CoinTracker" gridrowcss="1fr 1.5fr 1fr" > <h5>Tax tracking for crypto.</h5> <ExternalButton label="visit cointracker.io" url="https://www.cointracker.io/i/NV7md8i4fbmJ" kind="light" width="100%" /> </NoPicNavCard> <NoPicNavCard internal={false} label="Fundrise" gridrowcss="1fr 1.5fr 1fr" > <h5> Platform for investing in real estate projects without having to be a mogul. </h5> <ExternalButton label="visit fundrise.com" url="https://fundrise.com/r/0o5p5" kind="light" width="100%" /> </NoPicNavCard> <NoPicNavCard internal={false} label="Backblaze" gridrowcss="1fr 1.5fr 1fr" > <h5> Painless backup of your computer's hard drive. Protect your digital life! </h5> <ExternalButton label="visit backblaze.com" url="https://secure.backblaze.com/r/01bqxp" kind="light" width="100%" /> </NoPicNavCard> <NoPicNavCard internal={false} label="Honey" gridrowcss="1fr 1.5fr 1fr"> <h5>Save money on every internet shopping trip.</h5> <ExternalButton label="visit joinhoney.com" url="https://www.joinhoney.com/ref/zyofyg" kind="light" width="100%" /> </NoPicNavCard> </IndexCardGrid> </NewLayout> ) }
width="100%" /> </NoPicNavCard>
glbs.go
package cisv1 import ( "fmt" "time" "github.com/IBM-Cloud/bluemix-go/client" ) type Glb struct { Id string `json:"id"` Name string `json:"name"` Desc string `json:"description"` FallbackPool string `json:"fallback_pool"` DefaultPools []string `json:"default_pools"` Ttl int `json:"ttl"` Proxied bool `json:"proxied"` CreatedOn *time.Time `json:"created_on,omitempty"` ModifiedOn *time.Time `json:"modified_on,omitempty"` SessionAffinity string `json:"session_affinity"` Enabled bool `json:"enabled,omitempty"` // Future support // RegionPools map[string][]string `json:"region_pools"` // PopPools map[string][]string `json:"pop_pools"` } type GlbResults struct { GlbList []Glb `json:"result"` ResultsInfo ResultsCount `json:"result_info"` Success bool `json:"success"` Errors []Error `json:"errors"` } type GlbResult struct { Glb Glb `json:"result"` Success bool `json:"success"` Errors []Error `json:"errors"` Messages []string `json:"messages"` } type GlbBody struct { Desc string `json:"description,omitempty"` Proxied bool `json:"proxied,omitempty"` Name string `json:"name"` FallbackPool string `json:"fallback_pool"` DefaultPools []string `json:"default_pools"` SessionAffinity string `json:"session_affinity,omitempty"` Ttl int `json:"ttl,omitempty"` Enabled bool `json:"enabled,omitempty"` } type GlbDelete struct { Result struct { GlbId string } `json:"result"` Success bool `json:"success"` Errors []Error `json:"errors"` Messages []string `json:"messages"` } type Glbs interface { ListGlbs(cisId string, zoneId string) ([]Glb, error) GetGlb(cisId string, zoneId string, glbId string) (*Glb, error) CreateGlb(cisId string, zoneId string, glbBody GlbBody) (*Glb, error) DeleteGlb(cisId string, zoneId string, glbId string) error UpdateGlb(cisId string, zoneId string, glbId string, glbBody GlbBody) (*Glb, error) } type glbs struct { client *client.Client } func
(c *client.Client) Glbs { return &glbs{ client: c, } } func (r *glbs) ListGlbs(cisId string, zoneId string) ([]Glb, error) { glbResults := GlbResults{} rawURL := fmt.Sprintf("/v1/%s/zones/%s/load_balancers", cisId, zoneId) _, err := r.client.Get(rawURL, &glbResults) if err != nil { return nil, err } return glbResults.GlbList, err } func (r *glbs) GetGlb(cisId string, zoneId string, glbId string) (*Glb, error) { glbResult := GlbResult{} rawURL := fmt.Sprintf("/v1/%s/zones/%s/load_balancers/%s", cisId, zoneId, glbId) _, err := r.client.Get(rawURL, &glbResult, nil) if err != nil { return nil, err } return &glbResult.Glb, nil } func (r *glbs) DeleteGlb(cisId string, zoneId string, glbId string) error { rawURL := fmt.Sprintf("/v1/%s/zones/%s/load_balancers/%s", cisId, zoneId, glbId) _, err := r.client.Delete(rawURL) if err != nil { return err } return nil } func (r *glbs) CreateGlb(cisId string, zoneId string, glbBody GlbBody) (*Glb, error) { glbResult := GlbResult{} rawURL := fmt.Sprintf("/v1/%s/zones/%s/load_balancers", cisId, zoneId) _, err := r.client.Post(rawURL, &glbBody, &glbResult) if err != nil { return nil, err } return &glbResult.Glb, nil } func (r *glbs) UpdateGlb(cisId string, zoneId string, glbId string, glbBody GlbBody) (*Glb, error) { glbResult := GlbResult{} rawURL := fmt.Sprintf("/v1/%s/zones/%s/load_balancers/%s", cisId, zoneId, glbId) _, err := r.client.Put(rawURL, &glbBody, &glbResult) if err != nil { return nil, err } return &glbResult.Glb, nil }
newGlbAPI
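// Editor's sketch: a round trip through the Glbs interface. Since newGlbAPI is
// unexported, this assumes same-package code (e.g. a test inside cisv1); the
// IDs, pool names, and client value are placeholders.
func exampleGlbRoundTrip(c *client.Client, cisID, zoneID string) error {
	api := newGlbAPI(c)

	// Create a balancer, then list and finally delete it again.
	created, err := api.CreateGlb(cisID, zoneID, GlbBody{
		Name:         "www.example.com",
		FallbackPool: "pool-1",
		DefaultPools: []string{"pool-1", "pool-2"},
	})
	if err != nil {
		return err
	}
	if _, err := api.ListGlbs(cisID, zoneID); err != nil {
		return err
	}
	return api.DeleteGlb(cisID, zoneID, created.Id)
}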
executor_unix.go
// +build !windows package buildkit import ( "os" "path/filepath" "strconv" "sync" "github.com/docker/libnetwork" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor/runcexecutor" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/network" specs "github.com/opencontainers/runtime-spec/specs-go" ) const networkName = "bridge" func
(root, netnsRoot string, net libnetwork.NetworkController) (executor.Executor, error) { networkProviders := map[pb.NetMode]network.Provider{ pb.NetMode_UNSET: &bridgeProvider{NetworkController: net, netnsRoot: netnsRoot}, pb.NetMode_HOST: network.NewHostProvider(), pb.NetMode_NONE: network.NewNoneProvider(), } return runcexecutor.New(runcexecutor.Opt{ Root: filepath.Join(root, "executor"), CommandCandidates: []string{"docker-runc", "runc"}, }, networkProviders) } type bridgeProvider struct { libnetwork.NetworkController netnsRoot string } func (p *bridgeProvider) New() (network.Namespace, error) { n, err := p.NetworkByName(networkName) if err != nil { return nil, err } iface := &lnInterface{ready: make(chan struct{}), provider: p} iface.Once.Do(func() { go iface.init(p.NetworkController, n) }) return iface, nil } type lnInterface struct { ep libnetwork.Endpoint sbx libnetwork.Sandbox sync.Once err error ready chan struct{} provider *bridgeProvider } func (iface *lnInterface) init(c libnetwork.NetworkController, n libnetwork.Network) { defer close(iface.ready) id := identity.NewID() ep, err := n.CreateEndpoint(id) if err != nil { iface.err = err return } sbx, err := c.NewSandbox(id) if err != nil { iface.err = err return } if err := ep.Join(sbx); err != nil { iface.err = err return } iface.sbx = sbx iface.ep = ep } func (iface *lnInterface) Set(s *specs.Spec) { <-iface.ready if iface.err != nil { return } // attach netns to bridge within the container namespace, using reexec in a prestart hook s.Hooks = &specs.Hooks{ Prestart: []specs.Hook{{ Path: filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe"), Args: []string{"libnetwork-setkey", iface.sbx.ContainerID(), iface.provider.NetworkController.ID()}, }}, } } func (iface *lnInterface) Close() error { <-iface.ready err := iface.sbx.Delete() if iface.err != nil { // iface.err takes precedence over cleanup errors return iface.err } return err }
newExecutor
htru2.py
"""Dataset for Predicting a Pulsar Star""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow_datasets.public_api as tfds import tensorflow as tf import os _CITATION = """\ @article{10.1093/mnras/stw656, author = {Lyon, R. J. and Stappers, B. W. and Cooper, S. and Brooke, J. M. and Knowles, J. D.}, title = "{Fifty years of pulsar candidate selection: from simple filters to a new principled real-time classification approach}", journal = {Monthly Notices of the Royal Astronomical Society}, volume = {459}, number = {1}, pages = {1104-1123}, year = {2016}, month = {04}, abstract = "{Improving survey specifications are causing an exponential rise in pulsar candidate numbers and data volumes. We study the candidate filters used to mitigate these problems during the past 50 years. We find that some existing methods such as applying constraints on the total number of candidates collected per observation, may have detrimental effects on the success of pulsar searches. Those methods immune to such effects are found to be ill-equipped to deal with the problems associated with increasing data volumes and candidate numbers, motivating the development of new approaches. We therefore present a new method designed for online operation. It selects promising candidates using a purpose-built tree-based machine learning classifier, the Gaussian Hellinger Very Fast Decision Tree, and a new set of features for describing candidates. The features have been chosen so as to (i) maximize the separation between candidates arising from noise and those of probable astrophysical origin, and (ii) be as survey-independent as possible. Using these features our new approach can process millions of candidates in seconds (∼1 million every 15 s), with high levels of pulsar recall (90 per cent+). This technique is therefore applicable to the large volumes of data expected to be produced by the Square Kilometre Array. Use of this approach has assisted in the discovery of 20 new pulsars in data obtained during the Low-Frequency Array Tied-Array All-Sky Survey.}", issn = {0035-8711}, doi = {10.1093/mnras/stw656}, url = {https://doi.org/10.1093/mnras/stw656}, eprint = {http://oup.prod.sis.lan/mnras/article-pdf/459/1/1104/8115310/stw656.pdf}, } """ _DESCRIPTION = """\ HTRU2 is a data set which describes a sample of pulsar candidates collected during the High Time Resolution Universe Survey (South). Pulsars are a rare type of Neutron star that produce radio emission detectable here on Earth. They are of considerable scientific interest as probes of space-time, the inter-stellar medium, and states of matter. As pulsars rotate, their emission beam sweeps across the sky, and when this crosses our line of sight, produces a detectable pattern of broadband radio emission. As pulsars rotate rapidly, this pattern repeats periodically. Thus, pulsar search involves looking for periodic radio signals with large radio telescopes. Each pulsar produces a slightly different emission pattern, which varies slightly with each rotation. Thus a potential signal detection known as a 'candidate', is averaged over many rotations of the pulsar, as determined by the length of an observation. In the absence of additional info, each candidate could potentially describe a real pulsar. However, in practice almost all detections are caused by radio frequency interference (RFI) and noise, making legitimate signals hard to find. 
Machine learning tools are now being used to automatically label pulsar candidates to facilitate rapid analysis. Classification systems in particular are being widely adopted, which treat the candidate data sets as binary classification problems. Here the legitimate pulsar examples are a minority positive class, and spurious examples the majority negative class. At present multi-class labels are unavailable, given the costs associated with data annotation. The data set shared here contains 16,259 spurious examples caused by RFI/noise, and 1,639 real pulsar examples. These examples have all been checked by human annotators. """ _URL = "http://archive.ics.uci.edu/ml/machine-learning-databases/00372/HTRU2.zip" class Htru2(tfds.core.GeneratorBasedBuilder): """Dataset for Predicting a Pulsar Star""" VERSION = tfds.core.Version('2.0.0', experiments={tfds.core.Experiment.S3: False}) def _info(self): return tfds.core.DatasetInfo( builder=self, description=_DESCRIPTION, features=tfds.features.FeaturesDict({ "Features" : tfds.features.FeaturesDict({ "Mean of the integrated profile" : tf.float64, "Standard deviation of the integrated profile" : tf.float64, "Excess kurtosis of the integrated profile" : tf.float64, "Skewness of the integrated profile" : tf.float64, "Mean of the DM-SNR curve" : tf.float64, "Standard deviation of the DM-SNR curve" : tf.float64, "Excess kurtosis of the DM-SNR curve" : tf.float64, "Skewness of the DM-SNR curve" : tf.float64, }), "Class" : tfds.features.ClassLabel(num_classes=2) }), supervised_keys=None, homepage="https://archive.ics.uci.edu/ml/datasets/HTRU2", citation=_CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" path = dl_manager.download_and_extract(_URL) return [ tfds.core.SplitGenerator( name=tfds.Split.TRAIN, num_shards=1, gen_kwargs={ 'file_path': path, }), ] def _generate_examples(self, file_path): """Yields examples.""" with tf.io.gfile.GFile(os.path.join(file_path, "HTRU_2.csv"), "r") as csvfile: features = [ "Mean of the integrated profile", "Standard deviation of the integrated profile", "Excess kurtosis of the integrated profile", "Skewness of the integrated profile", "Mean of the DM-SNR curve", "Standard deviation of the DM-SNR curve", "Excess kurtosis of the DM-SNR curve", "Skewness of the DM-SNR curve", "Class" # 0 for noise, 1 for pulsar ] lines = csvfile.readlines() for i in lines: feature_lst = i.split(",") length_increase = 0 for j in range(len(feature_lst)): if j % (len(features) - 1) == 0 and j != 0: temp = feature_lst[j + length_increase][0:] feature_lst[j + length_increase] = feature_lst[j + length_increase][0] feature_lst.insert(j + length_increase + 1, temp) length_increase += 1 feature_dict = {} for j in range(len(feature_lst)): if j % len(features) == 0: featur
elif j % len(features) < len(features) - 1: feature_dict[features[j % len(features)]] = float(feature_lst[j]) elif j % len(features) == len(features) - 1: yield j // len(features), {"Features" : feature_dict, "Class" : int(feature_lst[j])}
e_dict = {} feature_dict[features[j % len(features)]] = float(feature_lst[j])
logonpanic.go
// Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. package watchdog import ( "fmt" "runtime" "github.com/DataDog/datadog-agent/pkg/trace/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" ) const shortErrMsgLen = 17 // 20 char max with trailing "..." // shortErrMsg shortens the error message to avoid having high // cardinality on "err:" tags func shortErrMsg(msg string) string { if len(msg) <= shortErrMsgLen { return msg } return msg[:shortErrMsgLen] + "..." } // LogOnPanic catches panics and logs them on the fly. It also flushes // the log file, ensuring the message appears. Then it propagates the panic // so that the program flow remains unchanged. func LogOnPanic()
{ if err := recover(); err != nil { // Full print of the trace in the logs buf := make([]byte, 4096) length := runtime.Stack(buf, false) stacktrace := string(buf[:length]) errMsg := fmt.Sprintf("%v", err) logMsg := "Unexpected panic: " + errMsg + "\n" + stacktrace metrics.Gauge("datadog.trace_agent.panic", 1, []string{ "err:" + shortErrMsg(errMsg), }, 1) log.Error(logMsg) log.Flush() panic(err) } }
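// Editor's sketch: LogOnPanic only has an effect when called via defer, so its
// recover() can observe the panic; the import path is assumed from the
// repository layout shown above.
package main

import (
	"github.com/DataDog/datadog-agent/pkg/trace/watchdog" // assumed import path
)

func main() {
	// Log (and flush) any panic with its stack trace, then re-panic so the
	// process still crashes exactly as it would have without the watchdog.
	defer watchdog.LogOnPanic()
	riskyWork()
}

// riskyWork stands in for any code that might panic.
func riskyWork() {}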
forms.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from flask_wtf import FlaskForm from wtforms import StringField from wtforms import PasswordField from wtforms import BooleanField from wtforms import SubmitField from wtforms.validators import DataRequired from wtforms.validators import Length from wtforms.validators import Email from wtforms.validators import Regexp from wtforms.validators import EqualTo from wtforms import ValidationError from ..models import User class LoginForm(FlaskForm): """ User login form. """ user_email = StringField('电子邮箱', validators=[DataRequired(), Length(1, 64), Email()]) password = PasswordField('密码', validators=[DataRequired()]) remember_me = BooleanField('记住我') submit = SubmitField('登录') class RegistrationForm(FlaskForm): # The first argument is the label text shown on the page. user_email = StringField('电子邮箱', validators=[DataRequired(), Length(1, 64), Email()]) user_name = StringField('用户名', validators=[DataRequired(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0, 'User name must have only letters, numbers, dots or underscores')]) password = PasswordField('密码', validators=[DataRequired(), EqualTo('password2', message='两次输入的密码必须一致。')]) password2 = PasswordField('确认密码', validators=[DataRequired()]) submit = SubmitField('注册') """ This form also defines two custom validators, implemented as methods. When a form class defines a method named validate_ followed by a field name, that method is invoked together with the regular validators. """ def validate_user_email(self, field): if User.query.filter_by(user_email=field.data).first(): raise ValidationError('该邮件地址已经被注册。') def validate_user_name(self, field): if User.query.filter_by(user_name=field.data).first(): raise ValidationError('该用户名已经被使用。') class ChangePasswordForm(FlaskForm): """ Form for changing the password. """ old_password = PasswordField('旧密码', validators=[DataRequired()]) password = PasswordField('新密码', validators=[ DataRequired(), EqualTo('password2', message='两次输入的密码必须一致。')]) password2 = PasswordField('确认新密码', validators=[DataRequired()]) submit = SubmitField('更改密码') class PasswordResetRequestForm(FlaskForm): """ Form for requesting a password reset. """ user_email = StringField('电子邮箱', validators=[DataRequired(), Length(1, 64), Email()]) submit = SubmitField('重置密码') class PasswordResetForm(FlaskForm): """ Password reset form. """ password = PasswordField('新密码', validators=[ DataRequired(), EqualTo('password2', message='两次输入的密码不一致。')]) password2 = PasswordField('确认密码', validators=[DataRequired()]) submit = SubmitField('重置密码') class ChangeEmailForm(FlaskForm): user_email = StringField('新电子邮件地址', validators=[DataRequired(), Length(1, 64), Email()]) password = PasswordField('密码', validators=[DataRequired()]) submit = SubmitField('更改电子邮箱') def validate_user_email(self, field): if User.query.filter_by(user_email=field.data).first(): raise ValidationError('该邮箱已经注册。')
sum.rs
use crate::prelude::*; use ops::reduce::{Reduce, ReduceOp}; use std::ops::Add; pub type SumOp<Source, Item> = ReduceOp<Source, fn(Item, Item) -> Item, Item, Item>; pub trait Sum<Item> { /// Calculates the sum of numbers emitted by a source observable and emits /// this sum when the source completes. /// /// Emits zero when the source completes as an empty sequence. /// Emits an error when the source observable emits one. /// /// # Examples /// /// ``` /// use rxrust::prelude::*; /// use rxrust::ops::Sum; /// /// observable::from_iter(vec![1, 1, 1, 1, 1]) /// .sum() /// .subscribe(|v| println!("{}", v)); /// /// // print log: /// // 5 /// ``` /// fn sum(self) -> SumOp<Self, Item> where Self: Sized, Item: Copy + Default + Add<Item, Output = Item>, { self.reduce(|acc, v| acc + v) } } impl<O, Item> Sum<Item> for O {} #[cfg(test)] mod test { use crate::{ops::Sum, prelude::*}; #[test] fn sum() { let mut emitted = 0; observable::from_iter(vec![1, 1, 1, 1, 1]) .sum() .subscribe(|v| emitted = v); assert_eq!(5, emitted); } #[test] fn
() { let mut emitted = 0; observable::of(123).sum().subscribe(|v| emitted = v); assert_eq!(123, emitted); } #[test] fn sum_on_empty_observable() { let mut emitted = 0; observable::empty().sum().subscribe(|v| emitted = v); assert_eq!(0, emitted); } #[test] fn sum_on_mixed_sign_values() { let mut emitted = 0; observable::from_iter(vec![1, -1, 1, -1, -1]) .sum() .subscribe(|v| emitted = v); assert_eq!(-1, emitted); } #[test] fn sum_fork_and_shared() { // type to type can fork let m = observable::from_iter(0..100).sum(); m.fork() .sum() .fork() .to_shared() .fork() .to_shared() .subscribe(|_| {}); } }
sum_on_single_item
contract_event.rs
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ account_config::{ AdminTransactionEvent, BaseUrlRotationEvent, BurnEvent, CancelBurnEvent, ComplianceKeyRotationEvent, CreateAccountEvent, MintEvent, NewBlockEvent, NewEpochEvent, PreburnEvent, ReceivedMintEvent, ReceivedPaymentEvent, SentPaymentEvent, ToXDXExchangeRateUpdateEvent, }, event::EventKey, ledger_info::LedgerInfo, proof::EventProof, transaction::Version, }; use anyhow::{ensure, Error, Result}; use diem_crypto::hash::CryptoHash; use diem_crypto_derive::{BCSCryptoHash, CryptoHasher}; use move_core_types::{language_storage::TypeTag, move_resource::MoveResource}; #[cfg(any(test, feature = "fuzzing"))] use proptest_derive::Arbitrary; use serde::{Deserialize, Serialize}; use std::{convert::TryFrom, ops::Deref}; /// Support versioning of the data structure. #[derive(Hash, Clone, Eq, PartialEq, Serialize, Deserialize, CryptoHasher, BCSCryptoHash)] pub enum ContractEvent { V0(ContractEventV0), } impl ContractEvent { pub fn new( key: EventKey, sequence_number: u64, type_tag: TypeTag, event_data: Vec<u8>, ) -> Self { ContractEvent::V0(ContractEventV0::new( key, sequence_number, type_tag, event_data, )) } } // Temporary hack to avoid massive changes, it won't work when new variant comes and needs proper // dispatch at that time. impl Deref for ContractEvent { type Target = ContractEventV0; fn deref(&self) -> &Self::Target { match self { ContractEvent::V0(event) => event, } } } /// Entry produced via a call to the `emit_event` builtin. #[derive(Hash, Clone, Eq, PartialEq, Serialize, Deserialize, CryptoHasher)] pub struct ContractEventV0 { /// The unique key that the event was emitted to key: EventKey, /// The number of messages that have been emitted to the path previously sequence_number: u64, /// The type of the data type_tag: TypeTag, /// The data payload of the event #[serde(with = "serde_bytes")] event_data: Vec<u8>, } impl ContractEventV0 { pub fn new( key: EventKey, sequence_number: u64, type_tag: TypeTag, event_data: Vec<u8>, ) -> Self { Self { key, sequence_number, type_tag, event_data, } } pub fn key(&self) -> &EventKey { &self.key } pub fn sequence_number(&self) -> u64 { self.sequence_number } pub fn event_data(&self) -> &[u8] { &self.event_data } pub fn type_tag(&self) -> &TypeTag { &self.type_tag } } impl TryFrom<&ContractEvent> for SentPaymentEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(SentPaymentEvent::struct_tag()) { anyhow::bail!("Expected Sent Payment") } Self::try_from_bytes(&event.event_data) } } impl TryFrom<&ContractEvent> for ReceivedPaymentEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(ReceivedPaymentEvent::struct_tag()) { anyhow::bail!("Expected Received Payment") } Self::try_from_bytes(&event.event_data) } } impl TryFrom<&ContractEvent> for ToXDXExchangeRateUpdateEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(ToXDXExchangeRateUpdateEvent::struct_tag()) { anyhow::bail!("Expected ToXDXExchangeRateUpdateEvent") } Self::try_from_bytes(&event.event_data) } } impl TryFrom<&ContractEvent> for MintEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(MintEvent::struct_tag()) { anyhow::bail!("Expected MintEvent") } Self::try_from_bytes(&event.event_data) } } impl TryFrom<&ContractEvent> for 
ReceivedMintEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(ReceivedMintEvent::struct_tag()) { anyhow::bail!("Expected ReceivedMintEvent") } Self::try_from_bytes(&event.event_data) } } impl TryFrom<&ContractEvent> for BurnEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(BurnEvent::struct_tag()) { anyhow::bail!("Expected BurnEvent") } Self::try_from_bytes(&event.event_data) } } impl TryFrom<&ContractEvent> for PreburnEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(PreburnEvent::struct_tag())
Self::try_from_bytes(&event.event_data) } } impl TryFrom<&ContractEvent> for CancelBurnEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(CancelBurnEvent::struct_tag()) { anyhow::bail!("Expected CancelBurnEvent") } Self::try_from_bytes(&event.event_data) } } impl TryFrom<&ContractEvent> for AdminTransactionEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(Self::struct_tag()) { anyhow::bail!("Expected AdminTransactionEvent") } Self::try_from_bytes(&event.event_data) } } impl TryFrom<&ContractEvent> for NewBlockEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(Self::struct_tag()) { anyhow::bail!("Expected NewBlockEvent") } Self::try_from_bytes(&event.event_data) } } impl TryFrom<&ContractEvent> for NewEpochEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(Self::struct_tag()) { anyhow::bail!("Expected NewEpochEvent") } Self::try_from_bytes(&event.event_data) } } impl TryFrom<&ContractEvent> for ComplianceKeyRotationEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(Self::struct_tag()) { anyhow::bail!("Expected ComplianceKeyRotationEvent") } Self::try_from_bytes(&event.event_data) } } impl TryFrom<&ContractEvent> for BaseUrlRotationEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(Self::struct_tag()) { anyhow::bail!("Expected BaseUrlRotationEvent") } Self::try_from_bytes(&event.event_data) } } impl TryFrom<&ContractEvent> for CreateAccountEvent { type Error = Error; fn try_from(event: &ContractEvent) -> Result<Self> { if event.type_tag != TypeTag::Struct(Self::struct_tag()) { anyhow::bail!("Expected CreateAccountEvent") } Self::try_from_bytes(&event.event_data) } } impl std::fmt::Debug for ContractEvent { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "ContractEvent {{ key: {:?}, index: {:?}, type: {:?}, event_data: {:?} }}", self.key, self.sequence_number, self.type_tag, hex::encode(&self.event_data) ) } } impl std::fmt::Display for ContractEvent { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if let Ok(payload) = SentPaymentEvent::try_from(self) { write!( f, "ContractEvent {{ key: {}, index: {:?}, type: {:?}, event_data: {:?} }}", self.key, self.sequence_number, self.type_tag, payload, ) } else if let Ok(payload) = ReceivedPaymentEvent::try_from(self) { write!( f, "ContractEvent {{ key: {}, index: {:?}, type: {:?}, event_data: {:?} }}", self.key, self.sequence_number, self.type_tag, payload, ) } else { write!(f, "{:?}", self) } } } #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[cfg_attr(any(test, feature = "fuzzing"), derive(Arbitrary))] pub struct EventWithProof { pub transaction_version: u64, // Should be `Version` pub event_index: u64, pub event: ContractEvent, pub proof: EventProof, } impl std::fmt::Display for EventWithProof { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, "EventWithProof {{ \n\ttransaction_version: {}, \n\tevent_index: {}, \ \n\tevent: {}, \n\tproof: {:?} \n}}", self.transaction_version, self.event_index, self.event, self.proof ) } } impl EventWithProof { /// Constructor. 
pub fn new( transaction_version: Version, event_index: u64, event: ContractEvent, proof: EventProof, ) -> Self { Self { transaction_version, event_index, event, proof, } } /// Verifies the event with the proof, both carried by `self`. /// /// Two things are ensured if no error is raised: /// 1. This event exists in the ledger represented by `ledger_info`. /// 2. And this event has the same `event_key`, `sequence_number`, `transaction_version`, /// and `event_index` as indicated in the parameter list. If any of these parameter is unknown /// to the call site and is supposed to be informed by this struct, get it from the struct /// itself, such as: `event_with_proof.event.access_path()`, `event_with_proof.event_index()`, /// etc. pub fn verify( &self, ledger_info: &LedgerInfo, event_key: &EventKey, sequence_number: u64, transaction_version: Version, event_index: u64, ) -> Result<()> { ensure!( self.event.key() == event_key, "Event key ({}) not expected ({}).", self.event.key(), *event_key, ); ensure!( self.event.sequence_number == sequence_number, "Sequence number ({}) not expected ({}).", self.event.sequence_number(), sequence_number, ); ensure!( self.transaction_version == transaction_version, "Transaction version ({}) not expected ({}).", self.transaction_version, transaction_version, ); ensure!( self.event_index == event_index, "Event index ({}) not expected ({}).", self.event_index, event_index, ); self.proof.verify( ledger_info, self.event.hash(), transaction_version, event_index, )?; Ok(()) } }
{ anyhow::bail!("Expected PreburnEvent") }
traffic_clone_profile.go
package models // This file is auto-generated. // Please contact [email protected] for any change requests. // TrafficCloneProfile traffic clone profile // swagger:model TrafficCloneProfile type TrafficCloneProfile struct { // UNIX time since epoch in microseconds. Units(MICROSECONDS). // Read Only: true LastModified *string `json:"_last_modified,omitempty"` // Field introduced in 17.1.1. Maximum of 10 items allowed. CloneServers []*CloneServer `json:"clone_servers,omitempty"` // It is a reference to an object of type Cloud. Field introduced in 17.1.1. CloudRef *string `json:"cloud_ref,omitempty"` // Key value pairs for granular object access control. Also allows for classification and tagging of similar objects. Field introduced in 20.1.2. Maximum of 4 items allowed. Labels []*KeyValue `json:"labels,omitempty"` // Name for the Traffic Clone Profile. Field introduced in 17.1.1. // Required: true Name *string `json:"name"`
PreserveClientIP *bool `json:"preserve_client_ip,omitempty"` // It is a reference to an object of type Tenant. Field introduced in 17.1.1. TenantRef *string `json:"tenant_ref,omitempty"` // url // Read Only: true URL *string `json:"url,omitempty"` // UUID of the Traffic Clone Profile. Field introduced in 17.1.1. UUID *string `json:"uuid,omitempty"` }
// Specifies if client IP needs to be preserved to clone destination. Field introduced in 17.1.1.
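// Editor's sketch: every field on the generated model is a pointer, so
// constructing one takes small address-of helpers; the helper names and all
// values here are placeholders, not part of the generated API.
func strPtr(s string) *string { return &s }
func boolPtr(b bool) *bool    { return &b }

func exampleTrafficCloneProfile() *TrafficCloneProfile {
	return &TrafficCloneProfile{
		Name:             strPtr("clone-profile-1"), // the only required field
		CloudRef:         strPtr("/api/cloud?name=Default-Cloud"),
		PreserveClientIP: boolPtr(true),
	}
}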
headless_service.go
/* Copyright 2021 RadonDB. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */
"github.com/presslabs/controller-util/syncer" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/radondb/radondb-mysql-kubernetes/cluster" "github.com/radondb/radondb-mysql-kubernetes/utils" ) // NewHeadlessSVCSyncer returns headless service syncer. func NewHeadlessSVCSyncer(cli client.Client, c *cluster.Cluster) syncer.Interface { service := &corev1.Service{ TypeMeta: metav1.TypeMeta{ APIVersion: "v1", Kind: "Service", }, ObjectMeta: metav1.ObjectMeta{ Name: c.GetNameForResource(utils.HeadlessSVC), Namespace: c.Namespace, Labels: map[string]string{ "app.kubernetes.io/name": "mysql", "app.kubernetes.io/managed-by": "mysql.radondb.com", }, }, } return syncer.NewObjectSyncer("HeadlessSVC", c.Unwrap(), service, cli, func() error { service.Spec.Type = "ClusterIP" service.Spec.ClusterIP = "None" service.Spec.Selector = labels.Set{ "app.kubernetes.io/name": "mysql", "app.kubernetes.io/managed-by": "mysql.radondb.com", } // Use `publishNotReadyAddresses` to be able to access pods even if the pod is not ready. service.Spec.PublishNotReadyAddresses = true service.Spec.Ports = []corev1.ServicePort{ { Name: utils.MysqlPortName, Port: utils.MysqlPort, TargetPort: intstr.FromInt(utils.MysqlPort), }, } if c.Spec.MetricsOpts.Enabled { service.Spec.Ports = append(service.Spec.Ports, corev1.ServicePort{ Name: utils.MetricsPortName, Port: utils.MetricsPort, TargetPort: intstr.FromInt(utils.MetricsPort), }) } return nil }) }
package syncer import (
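// Editor's sketch: one way a reconcile step might apply this syncer. It assumes
// same-package code plus "context" and k8s.io/client-go/tools/record imports;
// syncer.Sync refers to the presslabs/controller-util helper imported above.
func syncHeadlessSVC(ctx context.Context, cli client.Client, c *cluster.Cluster, rec record.EventRecorder) error {
	// Create or update the headless Service owned by the cluster.
	return syncer.Sync(ctx, NewHeadlessSVCSyncer(cli, c), rec)
}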