file_name
stringlengths 3
137
| prefix
stringlengths 0
918k
| suffix
stringlengths 0
962k
| middle
stringlengths 0
812k
|
---|---|---|---|
index.test.js | // import React from 'react'
// import { shallow } from 'enzyme'
// import FileUploader from '../index'
describe('TODO <FileUploader />', () => {
it('Expect to have unit tests specified', () => {
expect(true).toEqual(true) | })
}) |
|
lstm.py |
"""
Based on chainer official example
https://github.com/pfnet/chainer/tree/master/examples/ptb
Modified by shi3z March 28,2016
"""
class RNNLM(chainer.Chain):
"""Recurrent neural net languabe model for penn tree bank corpus.
This is an example of deep LSTM network for infinite length input.
"""
def __init__(self, n_input_units=1000,n_vocab=100, n_units=100, train=True):
super(RNNLM, self).__init__(
inputVector= L.Linear(n_input_units, n_units),
embed=L.EmbedID(n_vocab, n_units),
l1=L.LSTM(n_units, n_units),
l2=L.LSTM(n_units, n_units),
l3=L.Linear(n_units, n_vocab),
)
self.train = train
def reset_state(self):
self.l1.reset_state()
self.l2.reset_state()
self.l3.reset_state()
def __call__(self, x,mode=0):
if mode == 1:
h0 = self.inputVector(x)
else:
h0 = self.embed(x)
h1 = self.l1(F.dropout(h0, train=self.train))
h2 = self.l2(F.dropout(h1, train=self.train))
y = self.l3(F.dropout(h2, train=self.train))
return y | import chainer
import chainer.functions as F
import chainer.links as L |
|
r2_chess_pgn.py | #!/usr/bin/env python
import sys, rospy, tf, moveit_commander, random
from geometry_msgs.msg import Pose, Point, Quaternion
import pgn
class R2ChessboardPGN:
def __init__(self):
self.left_arm = moveit_commander.MoveGroupCommander("left_arm")
self.left_hand = moveit_commander.MoveGroupCommander("left_hand")
def setGrasp(self, state):
if state == "pre-pinch":
vec = [ 0.3, 0, 1.57, 0, # index
-0.1, 0, 1.57, 0, # middle
0, 0, 0, # ring
0, 0, 0, # pinkie
0, 1.1, 0, 0] # thumb
elif state == "pinch":
vec = [ 0, 0, 1.57, 0,
0, 0, 1.57, 0,
0, 0, 0,
0, 0, 0,
0, 1.1, 0, 0]
elif state == "open":
vec = [0] * 18
else:
raise ValueError("unknown hand state: %s" % state)
self.left_hand.set_joint_value_target(vec)
self.left_hand.go(True)
def setPose(self, x, y, z, phi, theta, psi):
orient = \
Quaternion(*tf.transformations.quaternion_from_euler(phi, theta, psi))
pose = Pose(Point(x, y, z), orient)
self.left_arm.set_pose_target(pose)
self.left_arm.go(True)
def setSquare(self, square, height_above_board):
if len(square) != 2 or not square[1].isdigit():
raise ValueError(
"expected a chess rank and file like 'b3' but found %s instead" %
square)
print "going to %s" % square
rank_y = -0.24 - 0.05 * int(square[1])
file_x = 0.5 - 0.05 * (ord(square[0]) - ord('a'))
z = float(height_above_board) + 1.0
self.setPose(file_x, rank_y, z, 3.14, 0.3, -1.57)
def playGame(self, pgn_filename):
game = pgn.loads(open(pgn_filename).read())[0]
self.setGrasp("pre-pinch")
self.setSquare("a1", 0.15)
for move in game.moves:
self.setSquare(move[0:2], 0.10)
self.setSquare(move[0:2], 0.015)
self.setGrasp("pinch")
self.setSquare(move[0:2], 0.10)
self.setSquare(move[2:4], 0.10)
self.setSquare(move[2:4], 0.015)
self.setGrasp("pre-pinch")
self.setSquare(move[2:4], 0.10)
if __name__ == '__main__':
moveit_commander.roscpp_initialize(sys.argv)
rospy.init_node('r2_chess_pgn',anonymous=True)
argv = rospy.myargv(argv=sys.argv) # filter out any arguments used by ROS
if len(argv) != 2:
print "usage: r2_chess_pgn.py PGNFILE" | moveit_commander.roscpp_shutdown() | sys.exit(1)
print "playing %s" % argv[1]
r2pgn = R2ChessboardPGN()
r2pgn.playGame(argv[1]) |
serial.rs | use crate::parallel::Reducer;
#[cfg(not(feature = "parallel"))]
pub fn join<O1: Send, O2: Send>(left: impl FnOnce() -> O1 + Send, right: impl FnOnce() -> O2 + Send) -> (O1, O2) |
pub fn in_parallel<I, S, O, R>(
input: impl Iterator<Item = I> + Send,
_thread_limit: Option<usize>,
new_thread_state: impl Fn(usize) -> S + Send + Sync,
consume: impl Fn(I, &mut S) -> O + Send + Sync,
mut reducer: R,
) -> Result<<R as Reducer>::Output, <R as Reducer>::Error>
where
R: Reducer<Input = O>,
I: Send,
O: Send,
{
let mut state = new_thread_state(0);
for item in input {
reducer.feed(consume(item, &mut state))?;
}
reducer.finalize()
}
| {
(left(), right())
} |
statefulset_adapter.go | /*
Copyright 2020 The OpenYurt Authors.
Copyright 2019 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@CHANGELOG
OpenYurt Authors:
change statefulset adapter
*/
package adapter
import (
"context"
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
alpha1 "github.com/alibaba/openyurt/pkg/yurtappmanager/apis/apps/v1alpha1"
yurtctlutil "github.com/alibaba/openyurt/pkg/yurtappmanager/controller/util" |
type StatefulSetAdapter struct {
client.Client
Scheme *runtime.Scheme
}
var _ Adapter = &StatefulSetAdapter{}
// NewResourceObject creates a empty StatefulSet object.
func (a *StatefulSetAdapter) NewResourceObject() runtime.Object {
return &appsv1.StatefulSet{}
}
// NewResourceListObject creates a empty StatefulSetList object.
func (a *StatefulSetAdapter) NewResourceListObject() runtime.Object {
return &appsv1.StatefulSetList{}
}
// GetStatusObservedGeneration returns the observed generation of the pool.
func (a *StatefulSetAdapter) GetStatusObservedGeneration(obj metav1.Object) int64 {
return obj.(*appsv1.StatefulSet).Status.ObservedGeneration
}
// GetDetails returns the replicas detail the pool needs.
func (a *StatefulSetAdapter) GetDetails(obj metav1.Object) (ReplicasInfo, error) {
set := obj.(*appsv1.StatefulSet)
var specReplicas int32
if set.Spec.Replicas != nil {
specReplicas = *set.Spec.Replicas
}
replicasInfo := ReplicasInfo{
Replicas: specReplicas,
ReadyReplicas: set.Status.ReadyReplicas,
}
return replicasInfo, nil
}
// GetPoolFailure returns the failure information of the pool.
// StatefulSet has no condition.
func (a *StatefulSetAdapter) GetPoolFailure() *string {
return nil
}
// ApplyPoolTemplate updates the pool to the latest revision, depending on the StatefulSetTemplate.
func (a *StatefulSetAdapter) ApplyPoolTemplate(ud *alpha1.UnitedDeployment, poolName, revision string,
replicas int32, obj runtime.Object, config map[string]string) error {
set := obj.(*appsv1.StatefulSet)
var poolConfig *alpha1.Pool
for _, pool := range ud.Spec.Topology.Pools {
if pool.Name == poolName {
poolConfig = &pool
break
}
}
if poolConfig == nil {
return fmt.Errorf("fail to find pool config %s", poolName)
}
set.Namespace = ud.Namespace
if set.Labels == nil {
set.Labels = map[string]string{}
}
for k, v := range ud.Spec.WorkloadTemplate.StatefulSetTemplate.Labels {
set.Labels[k] = v
}
for k, v := range ud.Spec.Selector.MatchLabels {
set.Labels[k] = v
}
set.Labels[alpha1.ControllerRevisionHashLabelKey] = revision
// record the pool name as a label
set.Labels[alpha1.PoolNameLabelKey] = poolName
if set.Annotations == nil {
set.Annotations = map[string]string{}
}
for k, v := range ud.Spec.WorkloadTemplate.StatefulSetTemplate.Annotations {
set.Annotations[k] = v
}
set.GenerateName = getPoolPrefix(ud.Name, poolName)
selectors := ud.Spec.Selector.DeepCopy()
selectors.MatchLabels[alpha1.PoolNameLabelKey] = poolName
if err := controllerutil.SetControllerReference(ud, set, a.Scheme); err != nil {
return err
}
set.Spec.Selector = selectors
set.Spec.Replicas = &replicas
set.Spec.UpdateStrategy = *ud.Spec.WorkloadTemplate.StatefulSetTemplate.Spec.UpdateStrategy.DeepCopy()
set.Spec.Template = *ud.Spec.WorkloadTemplate.StatefulSetTemplate.Spec.Template.DeepCopy()
if set.Spec.Template.Labels == nil {
set.Spec.Template.Labels = map[string]string{}
}
set.Spec.Template.Labels[alpha1.PoolNameLabelKey] = poolName
set.Spec.Template.Labels[alpha1.ControllerRevisionHashLabelKey] = revision
set.Spec.RevisionHistoryLimit = ud.Spec.RevisionHistoryLimit
set.Spec.PodManagementPolicy = ud.Spec.WorkloadTemplate.StatefulSetTemplate.Spec.PodManagementPolicy
set.Spec.ServiceName = ud.Spec.WorkloadTemplate.StatefulSetTemplate.Spec.ServiceName
set.Spec.VolumeClaimTemplates = ud.Spec.WorkloadTemplate.StatefulSetTemplate.Spec.VolumeClaimTemplates
attachNodeAffinityAndTolerations(&set.Spec.Template.Spec, poolConfig)
return nil
}
// PostUpdate does some works after pool updated. StatefulSet will implement this method to clean stuck pods.
func (a *StatefulSetAdapter) PostUpdate(ud *alpha1.UnitedDeployment, obj runtime.Object, revision string) error {
/*
if strategy == nil {
return nil
}
set := obj.(*appsv1.StatefulSet)
if set.Spec.UpdateStrategy.Type == appsv1.OnDeleteStatefulSetStrategyType {
return nil
}
// If RollingUpdate, work around for issue https://github.com/kubernetes/kubernetes/issues/67250
return a.deleteStuckPods(set, revision, strategy.GetPartition())
*/
return nil
}
// IsExpected checks the pool is the expected revision or not.
// The revision label can tell the current pool revision.
func (a *StatefulSetAdapter) IsExpected(obj metav1.Object, revision string) bool {
return obj.GetLabels()[alpha1.ControllerRevisionHashLabelKey] != revision
}
func (a *StatefulSetAdapter) getStatefulSetPods(set *appsv1.StatefulSet) ([]*corev1.Pod, error) {
selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
if err != nil {
return nil, err
}
podList := &corev1.PodList{}
err = a.Client.List(context.TODO(), podList, &client.ListOptions{LabelSelector: selector})
if err != nil {
return nil, err
}
manager, err := refmanager.New(a.Client, set.Spec.Selector, set, a.Scheme)
if err != nil {
return nil, err
}
selected := make([]metav1.Object, len(podList.Items))
for i, pod := range podList.Items {
selected[i] = pod.DeepCopy()
}
claimed, err := manager.ClaimOwnedObjects(selected)
if err != nil {
return nil, err
}
claimedPods := make([]*corev1.Pod, len(claimed))
for i, pod := range claimed {
claimedPods[i] = pod.(*corev1.Pod)
}
return claimedPods, nil
}
// deleteStucckPods tries to work around the blocking issue https://github.com/kubernetes/kubernetes/issues/67250
func (a *StatefulSetAdapter) deleteStuckPods(set *appsv1.StatefulSet, revision string, partition int32) error {
pods, err := a.getStatefulSetPods(set)
if err != nil {
return err
}
for i := range pods {
pod := pods[i]
// If the pod is considered as stuck, delete it.
if isPodStuckForRollingUpdate(pod, revision, partition) {
klog.V(2).Infof("Delete pod %s/%s at stuck state", pod.Namespace, pod.Name)
err = a.Delete(context.TODO(), pod, client.PropagationPolicy(metav1.DeletePropagationBackground))
if err != nil {
return err
}
}
}
return nil
}
// isPodStuckForRollingUpdate checks whether the pod is stuck under strategy RollingUpdate.
// If a pod needs to upgrade (pod_ordinal >= partition && pod_revision != sts_revision)
// and its readiness is false, or worse status like Pending, ImagePullBackOff, it will be blocked.
func isPodStuckForRollingUpdate(pod *corev1.Pod, revision string, partition int32) bool {
if yurtctlutil.GetOrdinal(pod) < partition {
return false
}
if getRevision(pod) == revision {
return false
}
return !podutil.IsPodReadyConditionTrue(pod.Status)
} | "github.com/alibaba/openyurt/pkg/yurtappmanager/util/refmanager"
) |
get_run_resources_responses.go | // Copyright 2018-2021 Polyaxon, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package runs_v1
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/polyaxon/sdks/go/http_client/v1/service_model"
)
// GetRunResourcesReader is a Reader for the GetRunResources structure.
type GetRunResourcesReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *GetRunResourcesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetRunResourcesOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 204:
result := NewGetRunResourcesNoContent()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 403:
result := NewGetRunResourcesForbidden()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 404:
result := NewGetRunResourcesNotFound()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
result := NewGetRunResourcesDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewGetRunResourcesOK creates a GetRunResourcesOK with default headers values
func NewGetRunResourcesOK() *GetRunResourcesOK {
return &GetRunResourcesOK{}
}
/* GetRunResourcesOK describes a response with status code 200, with default header values.
A successful response.
*/
type GetRunResourcesOK struct {
Payload *service_model.V1EventsResponse
}
func (o *GetRunResourcesOK) Error() string {
return fmt.Sprintf("[GET /streams/v1/{namespace}/{owner}/{project}/runs/{uuid}/resources][%d] getRunResourcesOK %+v", 200, o.Payload)
}
func (o *GetRunResourcesOK) GetPayload() *service_model.V1EventsResponse {
return o.Payload
}
func (o *GetRunResourcesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(service_model.V1EventsResponse)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetRunResourcesNoContent creates a GetRunResourcesNoContent with default headers values
func | () *GetRunResourcesNoContent {
return &GetRunResourcesNoContent{}
}
/* GetRunResourcesNoContent describes a response with status code 204, with default header values.
No content.
*/
type GetRunResourcesNoContent struct {
Payload interface{}
}
func (o *GetRunResourcesNoContent) Error() string {
return fmt.Sprintf("[GET /streams/v1/{namespace}/{owner}/{project}/runs/{uuid}/resources][%d] getRunResourcesNoContent %+v", 204, o.Payload)
}
func (o *GetRunResourcesNoContent) GetPayload() interface{} {
return o.Payload
}
func (o *GetRunResourcesNoContent) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetRunResourcesForbidden creates a GetRunResourcesForbidden with default headers values
func NewGetRunResourcesForbidden() *GetRunResourcesForbidden {
return &GetRunResourcesForbidden{}
}
/* GetRunResourcesForbidden describes a response with status code 403, with default header values.
You don't have permission to access the resource.
*/
type GetRunResourcesForbidden struct {
Payload interface{}
}
func (o *GetRunResourcesForbidden) Error() string {
return fmt.Sprintf("[GET /streams/v1/{namespace}/{owner}/{project}/runs/{uuid}/resources][%d] getRunResourcesForbidden %+v", 403, o.Payload)
}
func (o *GetRunResourcesForbidden) GetPayload() interface{} {
return o.Payload
}
func (o *GetRunResourcesForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetRunResourcesNotFound creates a GetRunResourcesNotFound with default headers values
func NewGetRunResourcesNotFound() *GetRunResourcesNotFound {
return &GetRunResourcesNotFound{}
}
/* GetRunResourcesNotFound describes a response with status code 404, with default header values.
Resource does not exist.
*/
type GetRunResourcesNotFound struct {
Payload interface{}
}
func (o *GetRunResourcesNotFound) Error() string {
return fmt.Sprintf("[GET /streams/v1/{namespace}/{owner}/{project}/runs/{uuid}/resources][%d] getRunResourcesNotFound %+v", 404, o.Payload)
}
func (o *GetRunResourcesNotFound) GetPayload() interface{} {
return o.Payload
}
func (o *GetRunResourcesNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetRunResourcesDefault creates a GetRunResourcesDefault with default headers values
func NewGetRunResourcesDefault(code int) *GetRunResourcesDefault {
return &GetRunResourcesDefault{
_statusCode: code,
}
}
/* GetRunResourcesDefault describes a response with status code -1, with default header values.
An unexpected error response.
*/
type GetRunResourcesDefault struct {
_statusCode int
Payload *service_model.RuntimeError
}
// Code gets the status code for the get run resources default response
func (o *GetRunResourcesDefault) Code() int {
return o._statusCode
}
func (o *GetRunResourcesDefault) Error() string {
return fmt.Sprintf("[GET /streams/v1/{namespace}/{owner}/{project}/runs/{uuid}/resources][%d] GetRunResources default %+v", o._statusCode, o.Payload)
}
func (o *GetRunResourcesDefault) GetPayload() *service_model.RuntimeError {
return o.Payload
}
func (o *GetRunResourcesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(service_model.RuntimeError)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
| NewGetRunResourcesNoContent |
main.go | package main
func | () {
println("hello world tree")
}
| main |
gridmovers.py | #----------------------------------------------------------------------------
# Name: GridColMover.py
# Purpose: Grid Column Mover Extension
#
# Author: Gerrit van Dyk (email: [email protected])
#
# Version 0.1
# Date: Nov 19, 2002
# RCS-ID: $Id$
# Licence: wxWindows license
#----------------------------------------------------------------------------
# 12/07/2003 - Jeff Grimmett ([email protected])
#
# o 2.5 Compatability changes
#
# 12/18/2003 - Jeff Grimmett ([email protected])
#
# o wxGridColMoveEvent -> GridColMoveEvent
# o wxGridRowMoveEvent -> GridRowMoveEvent
# o wxGridColMover -> GridColMover
# o wxGridRowMover -> GridRowMover
#
import wx
import wx.grid
#----------------------------------------------------------------------------
# event class and macros
#
# New style 12/7/03
#
wxEVT_COMMAND_GRID_COL_MOVE = wx.NewEventType()
wxEVT_COMMAND_GRID_ROW_MOVE = wx.NewEventType()
EVT_GRID_COL_MOVE = wx.PyEventBinder(wxEVT_COMMAND_GRID_COL_MOVE, 1)
EVT_GRID_ROW_MOVE = wx.PyEventBinder(wxEVT_COMMAND_GRID_ROW_MOVE, 1)
#----------------------------------------------------------------------------
class GridColMoveEvent(wx.PyCommandEvent):
def __init__(self, id, dCol, bCol):
wx.PyCommandEvent.__init__(self, id = id)
self.SetEventType(wxEVT_COMMAND_GRID_COL_MOVE)
self.moveColumn = dCol
self.beforeColumn = bCol
def GetMoveColumn(self):
return self.moveColumn
def GetBeforeColumn(self):
return self.beforeColumn
class GridRowMoveEvent(wx.PyCommandEvent):
def __init__(self, id, dRow, bRow):
wx.PyCommandEvent.__init__(self,id = id)
self.SetEventType(wxEVT_COMMAND_GRID_ROW_MOVE)
self.moveRow = dRow
self.beforeRow = bRow
def GetMoveRow(self):
return self.moveRow
def GetBeforeRow(self):
return self.beforeRow
#----------------------------------------------------------------------------
# graft new methods into the wxGrid class
def _ColToRect(self,col):
if self.GetNumberRows() > 0:
rect = self.CellToRect(0,col)
else:
rect = wx.Rect()
rect.height = self.GetColLabelSize()
rect.width = self.GetColSize(col)
for cCol in range(0,col):
rect.x += self.GetColSize(cCol)
rect.y = self.GetGridColLabelWindow().GetPosition()[1]
return rect
wx.grid.Grid.ColToRect = _ColToRect
def _RowToRect(self,row):
if self.GetNumberCols() > 0:
rect = self.CellToRect(row,0)
else:
rect = wx.Rect()
rect.width = self.GetRowLabelSize()
rect.height = self.GetRowSize(row)
for cRow in range(0,row):
rect.y += self.GetRowSize(cRow)
rect.x = self.GetGridRowLabelWindow().GetPosition()[0]
return rect
wx.grid.Grid.RowToRect = _RowToRect
#----------------------------------------------------------------------------
class ColDragWindow(wx.Window):
def __init__(self,parent,image,dragCol):
wx.Window.__init__(self,parent,-1, style=wx.SIMPLE_BORDER)
self.image = image
self.SetSize((self.image.GetWidth(),self.image.GetHeight()))
self.ux = parent.GetScrollPixelsPerUnit()[0]
self.moveColumn = dragCol
self.Bind(wx.EVT_PAINT, self.OnPaint)
def DisplayAt(self,pos,y):
x = self.GetPositionTuple()[0]
if x == pos:
self.Refresh() # Need to display insertion point
else:
self.MoveXY(pos,y)
def GetMoveColumn(self):
return self.moveColumn
def _GetInsertionInfo(self):
parent = self.GetParent()
sx = parent.GetViewStart()[0] * self.ux
sx -= parent.GetRowLabelSize()
x = self.GetPosition()[0]
w = self.GetSize()[0]
sCol = parent.XToCol(x + sx)
eCol = parent.XToCol(x + w + sx)
iPos = xPos = xCol = 99999
centerPos = x + sx + (w / 2)
for col in range(sCol,eCol + 1):
cx = parent.ColToRect(col)[0]
if abs(cx - centerPos) < iPos:
iPos = abs(cx - centerPos)
xCol = col
xPos = cx
if xCol < 0 or xCol > parent.GetNumberCols():
xCol = parent.GetNumberCols()
return (xPos - sx - x,xCol)
def GetInsertionColumn(self):
return self._GetInsertionInfo()[1]
def GetInsertionPos(self):
return self._GetInsertionInfo()[0]
def OnPaint(self,evt):
dc = wx.PaintDC(self)
w,h = self.GetSize()
dc.DrawBitmap(self.image, 0,0)
dc.SetPen(wx.Pen(wx.BLACK,1,wx.SOLID))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(0,0, w,h)
iPos = self.GetInsertionPos()
dc.DrawLine(iPos,h - 10, iPos,h)
class RowDragWindow(wx.Window):
def __init__(self,parent,image,dragRow):
wx.Window.__init__(self,parent,-1, style=wx.SIMPLE_BORDER)
self.image = image
self.SetSize((self.image.GetWidth(),self.image.GetHeight()))
self.uy = parent.GetScrollPixelsPerUnit()[1]
self.moveRow = dragRow
self.Bind(wx.EVT_PAINT, self.OnPaint)
def DisplayAt(self,x,pos):
y = self.GetPosition()[1]
if y == pos:
self.Refresh() # Need to display insertion point
else:
self.MoveXY(x,pos)
def GetMoveRow(self):
return self.moveRow
def _GetInsertionInfo(self):
parent = self.GetParent()
sy = parent.GetViewStart()[1] * self.uy
sy -= parent.GetColLabelSize()
y = self.GetPosition()[1]
h = self.GetSize()[1]
sRow = parent.YToRow(y + sy)
eRow = parent.YToRow(y + h + sy)
iPos = yPos = yRow = 99999
centerPos = y + sy + (h / 2)
for row in range(sRow,eRow + 1):
cy = parent.RowToRect(row)[1]
if abs(cy - centerPos) < iPos:
iPos = abs(cy - centerPos)
yRow = row
yPos = cy
if yRow < 0 or yRow > parent.GetNumberRows():
yRow = parent.GetNumberRows()
return (yPos - sy - y,yRow)
def GetInsertionRow(self):
return self._GetInsertionInfo()[1]
def GetInsertionPos(self):
return self._GetInsertionInfo()[0]
def OnPaint(self,evt):
dc = wx.PaintDC(self)
w,h = self.GetSize()
dc.DrawBitmap(self.image, 0,0)
dc.SetPen(wx.Pen(wx.BLACK,1,wx.SOLID))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRectangle(0,0, w,h)
iPos = self.GetInsertionPos()
dc.DrawLine(w - 10,iPos, w,iPos)
#----------------------------------------------------------------------------
class GridColMover(wx.EvtHandler):
def __init__(self,grid):
wx.EvtHandler.__init__(self)
self.grid = grid
self.lwin = grid.GetGridColLabelWindow()
self.lwin.PushEventHandler(self)
self.colWin = None
self.ux = self.grid.GetScrollPixelsPerUnit()[0]
self.startX = -10
self.cellX = 0
self.didMove = False
self.isDragging = False
self.Bind(wx.EVT_MOTION, self.OnMouseMove)
self.Bind(wx.EVT_LEFT_DOWN, self.OnPress)
self.Bind(wx.EVT_LEFT_UP, self.OnRelease)
def OnMouseMove(self,evt):
if not self.isDragging:
evt.Skip()
else:
_rlSize = self.grid.GetRowLabelSize()
if abs(self.startX - evt.X) >= 3 \
and abs(evt.X - self.lastX) >= 3:
self.lastX = evt.X
self.didMove = True
sx,y = self.grid.GetViewStart()
w,h = self.lwin.GetClientSize()
x = sx * self.ux
if (evt.X + x) < x:
x = evt.X + x
elif evt.X > w:
x += evt.X - w
if x < 1: x = 0
else: x /= self.ux
if x != sx:
if wx.Platform == '__WXMSW__':
self.colWin.Show(False)
self.grid.Scroll(x,y)
x,y = self.lwin.ClientToScreenXY(evt.X,0)
x,y = self.grid.ScreenToClientXY(x,y)
if not self.colWin.IsShown():
self.colWin.Show(True)
px = x - self.cellX
if px < 0 + _rlSize: px = 0 + _rlSize
if px > w - self.colWin.GetSize()[0] + _rlSize:
px = w - self.colWin.GetSize()[0] + _rlSize
self.colWin.DisplayAt(px,y)
return
def OnPress(self,evt):
self.startX = self.lastX = evt.X
_rlSize = self.grid.GetRowLabelSize()
sx = self.grid.GetViewStart()[0] * self.ux
sx -= _rlSize
px,py = self.lwin.ClientToScreenXY(evt.X,evt.Y)
px,py = self.grid.ScreenToClientXY(px,py)
if self.grid.XToEdgeOfCol(px + sx) != wx.NOT_FOUND:
evt.Skip()
return
self.isDragging = True
self.didMove = False
col = self.grid.XToCol(px + sx)
rect = self.grid.ColToRect(col)
self.cellX = px + sx - rect.x
size = self.lwin.GetSize()
rect.y = 0
rect.x -= sx + _rlSize
rect.height = size[1]
colImg = self._CaptureImage(rect)
self.colWin = ColDragWindow(self.grid,colImg,col)
self.colWin.Show(False)
self.lwin.CaptureMouse()
evt.Skip()
def OnRelease(self,evt):
if self.isDragging:
self.lwin.ReleaseMouse()
self.colWin.Show(False)
self.isDragging = False
if not self.didMove:
px = self.lwin.ClientToScreenXY(self.startX,0)[0]
px = self.grid.ScreenToClientXY(px,0)[0]
sx = self.grid.GetViewStart()[0] * self.ux
sx -= self.grid.GetRowLabelSize()
col = self.grid.XToCol(px+sx)
if col != wx.NOT_FOUND:
self.grid.SelectCol(col,evt.ControlDown())
return
else:
bCol = self.colWin.GetInsertionColumn()
dCol = self.colWin.GetMoveColumn()
wx.PostEvent(self,
GridColMoveEvent(self.grid.GetId(), dCol, bCol))
self.colWin.Destroy()
evt.Skip()
def _CaptureImage(self,rect):
bmp = wx.EmptyBitmap(rect.width,rect.height)
memdc = wx.MemoryDC()
memdc.SelectObject(bmp)
dc = wx.WindowDC(self.lwin)
memdc.Blit(0,0, rect.width, rect.height, dc, rect.x, rect.y)
memdc.SelectObject(wx.NullBitmap)
return bmp
class GridRowMover(wx.EvtHandler):
def __init__(self,grid):
wx.EvtHandler.__init__(self)
self.grid = grid
self.lwin = grid.GetGridRowLabelWindow()
self.lwin.PushEventHandler(self)
self.rowWin = None
self.uy = self.grid.GetScrollPixelsPerUnit()[1]
self.startY = -10
self.cellY = 0
self.didMove = False
self.isDragging = False
self.Bind(wx.EVT_MOTION, self.OnMouseMove)
self.Bind(wx.EVT_LEFT_DOWN, self.OnPress)
self.Bind(wx.EVT_LEFT_UP, self.OnRelease)
def OnMouseMove(self,evt):
if not self.isDragging:
evt.Skip()
else:
_clSize = self.grid.GetColLabelSize()
if abs(self.startY - evt.Y) >= 3 \
and abs(evt.Y - self.lastY) >= 3:
self.lastY = evt.Y
self.didMove = True
x,sy = self.grid.GetViewStart()
w,h = self.lwin.GetClientSizeTuple()
y = sy * self.uy
if (evt.Y + y) < y:
y = evt.Y + y
elif evt.Y > h:
y += evt.Y - h
if y < 1:
y = 0
else:
y /= self.uy
if y != sy:
if wx.Platform == '__WXMSW__':
self.rowWin.Show(False)
self.grid.Scroll(x,y)
x,y = self.lwin.ClientToScreenXY(0,evt.Y)
x,y = self.grid.ScreenToClientXY(x,y)
if not self.rowWin.IsShown():
self.rowWin.Show(True)
py = y - self.cellY
if py < 0 + _clSize:
py = 0 + _clSize
if py > h - self.rowWin.GetSize()[1] + _clSize:
py = h - self.rowWin.GetSize()[1] + _clSize
self.rowWin.DisplayAt(x,py)
return
def OnPress(self,evt):
self.startY = self.lastY = evt.Y
_clSize = self.grid.GetColLabelSize()
sy = self.grid.GetViewStart()[1] * self.uy
sy -= _clSize
px,py = self.lwin.ClientToScreenXY(evt.X,evt.Y)
px,py = self.grid.ScreenToClientXY(px,py)
| row = self.grid.YToRow(py + sy)
if row == wx.NOT_FOUND:
evt.Skip()
return
self.isDragging = True
self.didMove = False
rect = self.grid.RowToRect(row)
self.cellY = py + sy - rect.y
size = self.lwin.GetSize()
rect.x = 0
rect.y -= sy + _clSize
rect.width = size[0]
rowImg = self._CaptureImage(rect)
self.rowWin = RowDragWindow(self.grid,rowImg,row)
self.rowWin.Show(False)
self.lwin.CaptureMouse()
evt.Skip()
def OnRelease(self,evt):
if self.isDragging:
self.lwin.ReleaseMouse()
self.rowWin.Show(False)
self.isDragging = False
if not self.didMove:
py = self.lwin.ClientToScreenXY(0,self.startY)[1]
py = self.grid.ScreenToClientXY(0,py)[1]
sy = self.grid.GetViewStart()[1] * self.uy
sy -= self.grid.GetColLabelSize()
row = self.grid.YToRow(py + sy)
if row != wx.NOT_FOUND:
self.grid.SelectRow(row,evt.ControlDown())
return
else:
bRow = self.rowWin.GetInsertionRow()
dRow = self.rowWin.GetMoveRow()
wx.PostEvent(self,
GridRowMoveEvent(self.grid.GetId(), dRow, bRow))
self.rowWin.Destroy()
evt.Skip()
def _CaptureImage(self,rect):
bmp = wx.EmptyBitmap(rect.width,rect.height)
memdc = wx.MemoryDC()
memdc.SelectObject(bmp)
dc = wx.WindowDC(self.lwin)
memdc.Blit(0,0, rect.width, rect.height, dc, rect.x, rect.y)
memdc.SelectObject(wx.NullBitmap)
return bmp
#---------------------------------------------------------------------------- | if self.grid.YToEdgeOfRow(py + sy) != wx.NOT_FOUND:
evt.Skip()
return
|
KatanaOfFire.js | const DrawCard = require('../../drawcard.js');
class | extends DrawCard {
setupCardAbilities(ability) {
this.whileAttached({
effect: ability.effects.modifyMilitarySkill(() => this.totalKatanaModifier())
});
}
canPlay(context) {
if(!this.controller.cardsInPlay.any(card => card.getType() === 'character' && card.hasTrait('shugenja'))) {
return false;
}
return super.canPlay(context);
}
// Helper methods for clarity
controllerHasFireRing() {
return this.game.rings.fire.isConsideredClaimed(this.controller);
}
numberOfFireCards() {
return this.controller.getNumberOfCardsInPlay(card => card.hasTrait('fire'));
}
totalKatanaModifier() {
var skillModifier = this.controllerHasFireRing() ? 2 : 0;
skillModifier += this.numberOfFireCards();
return skillModifier;
}
}
KatanaOfFire.id = 'katana-of-fire';
module.exports = KatanaOfFire;
| KatanaOfFire |
chromedb.go | // Copyright 2020 Michael J. Fromberger. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package chromedb supports reading and modifying a Chrome cookies database.
package chromedb
import (
"encoding/hex"
"fmt"
"runtime"
"time"
"crawshaw.io/sqlite"
"github.com/creachadair/cookies"
)
const (
readCookiesStmt = `
SELECT
rowid, name, value, encrypted_value, host_key, path,
expires_utc, creation_utc,
is_secure, is_httponly, samesite
FROM cookies;`
writeCookieStmt = `
UPDATE cookies SET
name = $name,
%[1]s = %[2]s,
host_key = $host,
path = $path,
expires_utc = $expires,
creation_utc = $created,
is_secure = $secure,
is_httponly = $httponly,
samesite = $samesite
WHERE rowid = $rowid;`
dropCookieStmt = `DELETE FROM cookies WHERE rowid = $rowid;`
// The Chrome timestamp epoch in seconds, 1601-01-01T00:00:00Z.
chromeEpoch = 11644473600
)
// Open opens the Chrome cookie database at the specified path.
// A nil *Options is valid and opens the database without an encryption key.
func Open(path string, opts *Options) (*Store, error) {
	db, err := sqlite.OpenConn(path, sqlite.SQLITE_OPEN_READWRITE)
	if err != nil {
		return nil, err
	}
	s := &Store{conn: db, key: opts.encryptionKey()}
	return s, nil
}
// Options provide optional settings for opening a Chrome cookie database.
// A nil *Options is ready for use, and provides empty values.
type Options struct {
	Passphrase string // the passphrase for encrypted values; empty disables decryption

	// The number of PBKDF2 iterations to use when converting the passphrase
	// into an encryption key. If ≤ 0, use a default based on runtime.GOOS.
	Iterations int
}
// encryptionKey returns the encryption key generated from o, or nil when o
// is nil or has no passphrase.
func (o *Options) encryptionKey() []byte {
	if o == nil || o.Passphrase == "" {
		return nil // no passphrase: encrypted values remain opaque
	}
	iter := o.Iterations
	if iter <= 0 {
		// Platform-specific defaults when the caller did not specify a count.
		if runtime.GOOS == "darwin" {
			iter = 1003
		} else {
			iter = 1
		}
	}
	return encryptionKey(o.Passphrase, iter)
}
// A Store connects to a collection of cookies stored in an SQLite database
// using the Google Chrome cookie schema.
type Store struct {
	conn *sqlite.Conn // open connection to the cookies database
	key  []byte       // encryption key, or nil
}
// Scan satisfies part of the cookies.Store interface. It reads every cookie,
// invokes f on each, and applies the requested action (keep, update, or
// discard) inside a single transaction that commits only if no error occurs.
func (s *Store) Scan(f cookies.ScanFunc) (err error) {
	all, err := s.readCookies()
	if err != nil {
		return err
	}
	defer s.begin(&err)() // commit on success, roll back on error
	for _, ck := range all {
		action, ferr := f(ck)
		if ferr != nil {
			return ferr
		}
		switch action {
		case cookies.Keep:
			// nothing to do for this cookie
		case cookies.Update:
			if werr := s.writeCookie(ck); werr != nil {
				return werr
			}
		case cookies.Discard:
			if derr := s.dropCookie(ck); derr != nil {
				return derr
			}
		default:
			return fmt.Errorf("unknown action %v", action)
		}
	}
	return nil
}
// Commit satisfies part of the cookies.Store interface.
// In this implementation it is a no-op without error, since Scan applies and
// commits its changes within its own transaction.
func (s *Store) Commit() error { return nil }
// readCookies reads all the cookies in the database.
//
// For each row the plain-text value column is preferred; when it is empty
// and encrypted_value is non-empty, the value is decrypted with the store's
// key, or reported as the placeholder "[ENCRYPTED]" when no key is set.
func (s *Store) readCookies() ([]*Cookie, error) {
	stmt, err := s.conn.Prepare(readCookiesStmt)
	if err != nil {
		return nil, err
	}
	stmt.Reset()
	var cs []*Cookie
	for {
		// Step reports ok == false with a nil error once all rows are read.
		ok, err := stmt.Step()
		if err != nil {
			return nil, err
		} else if !ok {
			break
		}
		value := stmt.GetText("value")
		// If the value is empty, check for an encrypted value.
		if value == "" && stmt.GetLen("encrypted_value") != 0 {
			// If we don't have an encryption key, mark the value.
			if len(s.key) == 0 {
				value = "[ENCRYPTED]"
			} else {
				buf := make([]byte, stmt.GetLen("encrypted_value"))
				stmt.GetBytes("encrypted_value", buf)
				dec, err := decryptValue(s.key, buf)
				if err != nil {
					return nil, fmt.Errorf("decrypting value: %w", err)
				}
				value = string(dec)
			}
		}
		cs = append(cs, &Cookie{
			C: cookies.C{
				Name:    stmt.GetText("name"),
				Value:   value,
				Domain:  stmt.GetText("host_key"),
				Path:    stmt.GetText("path"),
				Expires: timestampToTime(stmt.GetInt64("expires_utc")),
				Created: timestampToTime(stmt.GetInt64("creation_utc")),
				Flags: cookies.Flags{
					Secure:   stmt.GetInt64("is_secure") != 0,
					HTTPOnly: stmt.GetInt64("is_httponly") != 0,
				},
				SameSite: decodeSitePolicy(stmt.GetInt64("samesite")),
			},
			rowID: stmt.GetInt64("rowid"),
		})
	}
	return cs, nil
}
// begin begins a transaction and returns a function to finish that
// transaction. If *err == nil, the transaction is committed; otherwise the
// transaction is rolled back.
//
// Intended usage with a named return error:  defer s.begin(&err)()
// NOTE(review): the results of stepping BEGIN/COMMIT/ROLLBACK are discarded
// here, so a failed commit is silent — confirm this is acceptable.
func (s *Store) begin(err *error) func() {
	stmt := s.conn.Prep("BEGIN TRANSACTION;")
	stmt.Step()
	return func() {
		if *err == nil {
			s.conn.Prep("COMMIT;").Step()
		} else {
			s.conn.Prep("ROLLBACK;").Step()
		}
	}
}
// dropCookie deletes c from the database, keyed by its SQLite rowid.
func (s *Store) dropCookie(c *Cookie) error {
	stmt, err := s.conn.Prepare(dropCookieStmt)
	if err != nil {
		return err
	}
	stmt.Reset()
	stmt.SetInt64("$rowid", c.rowID)
	if _, serr := stmt.Step(); serr != nil {
		return serr
	}
	return nil
}
// hexString renders data as an SQLite hex blob literal of the form X'…'.
func hexString(data []byte) string {
	encoded := hex.EncodeToString(data)
	return "X'" + encoded + "'"
}
// writeCookie writes the current state of c to the store.
//
// When the store has an encryption key, the value is encrypted and spliced
// into the SQL text as a hex blob literal (see hexString) targeting the
// encrypted_value column; otherwise the plain value column is bound via
// the $value parameter.
func (s *Store) writeCookie(c *Cookie) error {
	var query string
	if len(s.key) == 0 {
		query = fmt.Sprintf(writeCookieStmt, "value", "$value")
	} else if enc, err := encryptValue(s.key, []byte(c.Value)); err != nil {
		return fmt.Errorf("encrypting value: %w", err)
	} else {
		query = fmt.Sprintf(writeCookieStmt, "encrypted_value", hexString(enc))
	}
	stmt, err := s.conn.Prepare(query)
	if err != nil {
		return err
	}
	stmt.Reset()
	stmt.SetInt64("$rowid", c.rowID)
	stmt.SetText("$name", c.Name)
	stmt.SetText("$host", c.Domain)
	stmt.SetText("$path", c.Path)
	stmt.SetInt64("$expires", timeToTimestamp(c.Expires))
	stmt.SetInt64("$created", timeToTimestamp(c.Created))
	stmt.SetInt64("$secure", boolToInt(c.Flags.Secure))
	stmt.SetInt64("$httponly", boolToInt(c.Flags.HTTPOnly))
	stmt.SetInt64("$samesite", encodeSitePolicy(c.SameSite))
	if len(s.key) == 0 {
		// $value only exists in the plain-text variant of the query.
		stmt.SetText("$value", c.Value)
	}
	_, err = stmt.Step()
	return err
}
// A Cookie represents a single cookie from a Chrome database.
//
// Values are automatically encrypted and decrypted if the store has an
// encryption key. If no decryption key is provided, encrypted values are
// represented by a Value with string "[ENCRYPTED]"; if an invalid decryption
// key is given, an error is reported.
type Cookie struct {
	cookies.C
	rowID int64 // SQLite rowid, used to target updates and deletes
}
// Get satisfies part of the cookies.Editor interface.
func (c *Cookie) Get() cookies.C { return c.C }

// Set satisfies part of the cookies.Editor interface. The rowID is
// untouched, so a subsequent write targets the same database row.
func (c *Cookie) Set(o cookies.C) error { c.C = o; return nil }
// decodeSitePolicy maps a Chrome SameSite policy to the generic enum.
// Values outside the Chrome range (0..2) map to cookies.Unknown.
func decodeSitePolicy(code int64) cookies.SameSite {
	known := [...]cookies.SameSite{cookies.None, cookies.Lax, cookies.Strict}
	if code >= 0 && code < int64(len(known)) {
		return known[code]
	}
	return cookies.Unknown
}
// encodeSitePoicy maps a generic SameSite policy to the Chrome enum.
func en | cookies.SameSite) int64 {
switch p {
case cookies.None:
return 0
case cookies.Lax:
return 1
case cookies.Strict:
return 2
default:
return -1 // unspecified
}
}
// timestampToTime converts a count of microseconds since the Chrome epoch
// (1601-01-01T00:00:00Z) into a time in UTC.
func timestampToTime(usec int64) time.Time {
	const usPerSec = 1e6
	unixSec := usec/usPerSec - chromeEpoch
	unixNano := (usec % usPerSec) * 1000
	return time.Unix(unixSec, unixNano).In(time.UTC)
}
// timeToTimestamp converts a time value to microseconds since the Chrome
// epoch (1601-01-01T00:00:00Z).
func timeToTimestamp(t time.Time) int64 {
	const usPerSec = 1e6
	secs := t.Unix() + chromeEpoch
	micros := int64(t.Nanosecond()) / 1000
	return secs*usPerSec + micros
}
// boolToInt converts a bool to an int64 for storage in SQLite
// (true -> 1, false -> 0).
func boolToInt(v bool) int64 {
	if !v {
		return 0
	}
	return 1
}
| codeSitePolicy(p |
84.189b9de2.js | (window.webpackJsonp=window.webpackJsonp||[]).push([[84],{595:function(s,t,a){"use strict";a.r(t);var n=a(3),r=Object(n.a)({},(function(){var s=this,t=s.$createElement,a=s._self._c||t;return a("ContentSlotsDistributor",{attrs:{"slot-key":s.$parent.slotKey}},[a("div",{staticClass:"language-js line-numbers-mode"},[a("pre",{pre:!0,attrs:{class:"language-js"}},[a("code",[a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 联合类型和类型保护")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("interface")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Dog")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n fly"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" Boolean\n "),a("span",{pre:!0,attrs:{class:"token function-variable function"}},[s._v("bark")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=>")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("interface")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Bird")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n fly"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" Boolean\n "),a("span",{pre:!0,attrs:{class:"token function-variable function"}},[s._v("sing")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" 
"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=>")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("trainAnial")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("animal"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" Dog "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("|")]),s._v(" Bird")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 使用 '|' 操作符表示联合类型")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("if")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("animal"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("fly"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("animal "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("as")]),s._v(" Bird"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("sing")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 使用 'as' 进行类型断言")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token 
punctuation"}},[s._v("}")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("else")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("animal "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("as")]),s._v(" Dog"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("bark")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 使用 'as' 进行类型断言")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n除了上面使用 "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'as'")]),s._v(" 类型断言的方式进行类型保护还可以使用 "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("in")]),s._v("、"),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("typeof")]),s._v("、"),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("instanceof")]),s._v("语法去做类型保护 "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// interface 不能使用 instanceof")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 枚举")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("enum")]),s._v(" Status "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("OFFLINE")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("1")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 不赋值默认从 0 开始")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("ONLINE")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("DELETE")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("getResult")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("status")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("if")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("status "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("===")]),s._v(" 
Status"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("OFFLINE")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("return")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'OFFLINE'")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("else")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("if")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("status "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("===")]),s._v(" Status"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("ONLINE")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("return")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'ONLINE'")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("else")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("if")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("status "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("===")]),s._v(" Status"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("DELETE")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token 
keyword"}},[s._v("return")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'DELETE'")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("return")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'ERROR'")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\nconsole"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("log")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("getResult")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("Status"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("OFFLINE")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// OFFLINE")]),s._v("\nconsole"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("log")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("getResult")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("1")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// OFFLINE")]),s._v("\n枚举和数组使用方式差不多"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 可以使用下标方式取值\nStatus"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("[")]),a("span",{pre:!0,attrs:{class:"token 
number"}},[s._v("1")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("]")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("===")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("OFFLINE")]),s._v("\nStatus"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("[")]),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("2")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("]")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("===")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("ONLINE")]),s._v("\nStatus"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("[")]),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("3")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("]")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("===")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("DELETE")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("--")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 泛型 generic 泛指的类型")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("1.")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function-variable function"}},[s._v("函数")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),s._v(" join"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("<")]),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("T")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("P")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(">")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("first"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("T")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" second"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" Array"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("<")]),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("P")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(">")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("T")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// Array<P> === P[]")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("return")]),s._v(" first\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("const")]),s._v(" join "),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("<")]),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("T")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("P")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(">")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("first"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("T")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" second"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" Array"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("<")]),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("P")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(">")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token parameter"}},[a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("T")])]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=>")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("return")]),s._v(" first\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n join"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("<")]),s._v("number"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" string"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(">")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("1")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token 
punctuation"}},[s._v("[")]),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'2'")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("]")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("T")]),s._v(" 指的是泛型的名称"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 随便定义"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 上面函数的意思是它接收一个任意类型的数据"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 返回值也是任意类型\n 在调用函数的时候可以显式的声明是 "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("<")]),s._v("number"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(">")]),s._v(" 类型"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 也可以让 ts 自行推断"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 即不声明\n 函数中可以写多个泛型"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 调用时可以写 "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'interface'")]),s._v("\n Array"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("<")]),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("P")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(">")]),s._v(" 表示接收数组"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 里面的每一项是泛型"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 也可以写成 "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("P")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("[")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("]")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("2.")]),s._v(" 类"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("interface")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Item")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token 
punctuation"}},[s._v("{")]),s._v("\n name"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("DataManager")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("<")]),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("T")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("extends")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Item")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(">")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// class DataManager<T extends number | string>")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("constructor")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("private")]),s._v(" data"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("T")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("[")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("]")])]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("getItem")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("index"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" number"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" 
string "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("return")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("this")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("data"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("[")]),s._v("index"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("]")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("name\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("const")]),s._v(" data "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("new")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("DataManager")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v(" name"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'chenj'")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("3.")]),s._v(" keyof"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" 相当于遍历\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("interface")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Person")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n name"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string\n age"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" number\n 
gender"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Teacher")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("constructor")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("private")]),s._v(" info"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" Person")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n getInfo"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("<")]),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("T")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("extends")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("keyof")]),s._v(" Person"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(">")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("key"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("T")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("return")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("this")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("info"),a("span",{pre:!0,attrs:{class:"token 
punctuation"}},[s._v("[")]),s._v("key"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("]")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 命名空间 namespace")]),s._v("\nnamespace components "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Header")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("constructor")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n 
console"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("log")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'Header'")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("export")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Footer")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("constructor")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n console"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("log")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'Footer'")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("///<reference path='./components.ts /> 使用三个斜杠表示引用关系结合第 3 点")]),s._v("\nnamespace Home "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("export")]),s._v(" 
"),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Page")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("constructor")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("new")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("components"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("Footer")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("1.")]),s._v(" 如果不使用 "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'namespace'")]),s._v(" 编译的后的代码会多出很多全局变量\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("2.")]),s._v(" 而使用了 "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'namespace'")]),s._v(" 只有加了 "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("export")]),s._v(" 的才能通过 Home"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("Footer 进行访问\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("3.")]),s._v(" 可以写多个命名空间"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 但是之间不需要在代码里声明引用关系\n 但是需要在 tsconfig"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("json 文件中配置 outFile 使得编译成一个输出文件而不是每个文件都输出\n 配置了 outFile 就不能使用 commonjs 和 "),a("span",{pre:!0,attrs:{class:"token 
constant"}},[s._v("ES6")]),s._v(" 模块规范\n"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// import 模块化")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("1.")]),s._v(" 和 js 的模块化一样"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 需要配合 webpack 进行 amd 或其他模块规范的解析\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("2.")]),s._v(" 使用命名空间会导致变量查找麻烦问题"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 可以使用模块化进行模块拆分\n"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// parcel 编译 ts")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("1.")]),s._v(" parcel 相当于是 webpack 的 devServer\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("2.")]),s._v(" npm install parcel@next\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("3.")]),s._v(" 在 "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("package")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("json 的 script 中写 "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v('"dev"')]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v('"parcel index.html"')]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("4.")]),s._v(" 在 html 中可以直接引用 ts 文件\n"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// .d.ts 理解")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("1.")]),s._v(" 在 ts 中引入 js 写的npm包需要安装 @type"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("/")]),s._v("包名 的依赖即 "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("d"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("ts 文件\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("2.")]),s._v(" 在 "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("d"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("ts 文件中使用 declare 关键字定义全局变量或函数或对象\n declare "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("var")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function-variable function"}},[s._v("$")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token function-variable function"}},[s._v("param")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=>")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("void")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=>")]),s._v(" 
"),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("void")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 全局变量")]),s._v("\n declare "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("$")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token function-variable function"}},[s._v("selector")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=>")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("void")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("void")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 全局函数")]),s._v("\n declare namespace $ "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 全局对象")]),s._v("\n namespace fn "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("init")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("3.")]),s._v(" 模块化\n declare module "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'jquery'")]),s._v(" 
"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("var")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function-variable function"}},[s._v("$")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token function-variable function"}},[s._v("param")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=>")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("void")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=>")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("void")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 全局变量")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("$")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token function-variable function"}},[s._v("selector")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=>")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("void")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("void")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token 
comment"}},[s._v("// 全局函数")]),s._v("\n namespace $ "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 全局对象")]),s._v("\n namespace fn "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("init")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("export")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" $\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("--")]),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("--")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 装饰器, 本身是一个函数, 通过 @ 符合使用, 可能会报错是实验性语法, 需要打开 tsconfig.json 的配置")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// @ 后的函数相当于是把类扩展了, 接收的参数就是装饰的类")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// @decorator class A {} 相当于 class A {} A = decorator(A) || A")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("1.")]),s._v(" 类的装饰器\n 普通写法\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("decorator1")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("constructor"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" any")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 类的装饰器接收的参数是构造函数")]),s._v("\n constructor"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("prototype"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function-variable function"}},[s._v("getName")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=>")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n console"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("log")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token 
string"}},[s._v("'chenj'")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n console"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("log")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'decorator1'")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("decorator2")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("constructor"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" any")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 类的装饰器接收的参数是构造函数")]),s._v("\n constructor"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("prototype"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function-variable function"}},[s._v("getName")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=>")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n console"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token 
function"}},[s._v("log")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'chenj'")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n console"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("log")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'decorator2'")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n @decorator1 @decorator2 "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Test")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n name"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string\n "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("constructor")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("name"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("this")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("name "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" name\n console"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("log")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token 
string"}},[s._v("'name'")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n 会先输出 "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'name'")]),s._v(" 在输出 "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'decorator2'")]),s._v(" 再输出 "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'decorator2'")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 执行顺序从右往左"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 从下往上\n 跟实例化没关系"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 没实例化也会执行"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 执行一次"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 在构造函数之后\n 高级写法\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("decorator")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("return")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("<")]),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("T")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("extends")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("new")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("...")]),s._v("args"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" 
any"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("[")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("]")])]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=>")]),s._v(" any"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(">")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("constructor"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token constant"}},[s._v("T")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("return")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("extends")]),s._v(" constructor "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n name "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'chenj'")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("const")]),s._v(" Test "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("decorator")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token 
punctuation"}},[s._v("{")]),s._v("\n name"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string\n "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("constructor")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("name"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("this")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("name "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" name\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("2.")]),s._v(" 类的方法装饰器\n 方法分为普通方法和静态方法"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 都会接收三个参数 target、key即方法名、descriptor相当于属性描述符\n 普通方法的第一个参数 target 对应的是类的 prototype\n 静态方法的第一个参数 target 对应的是类的构造函数\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("getNameDecorator")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("target"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" any"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" key"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" descriptor"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" 
PropertyDescriptor")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n descriptor"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("writable "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token boolean"}},[s._v("true")]),s._v("\n descriptor"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function-variable function"}},[s._v("value")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("return")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'chenj'")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Test")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n name"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string\n "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("constructor")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("name"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n 
"),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("this")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("name "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" name\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n @getNameDecorator "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("getName")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("return")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("this")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("name\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("3.")]),s._v(" 类的方法的参数装饰器\n 只接收两个参数"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 没有 descriptor\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("paramDecorator")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("target"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" any"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" key"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" paramIndex"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" number")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n 
console"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("log")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("target"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n console"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("log")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("key"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n console"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("log")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("paramIndex"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Test")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("getName")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("@paramDecorator name"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n console"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("log")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),s._v("name"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n 
"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("3.")]),s._v(" 类的getter、setter装饰器\n 和类的方法装饰器差不多"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 跟普通方法一样接收三个参数\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("visitDecorator")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("target"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" any"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" key"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" descriptor"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" PropertyDescriptor")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n descriptor"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("writable "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token boolean"}},[s._v("false")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Test")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("private")]),s._v(" _name"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string\n "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("constructor")]),a("span",{pre:!0,attrs:{class:"token 
punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("name"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("this")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("_name "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" name\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("get")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("name")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("return")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("this")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("_name\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n @visitDecorator\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("set")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("name")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("name"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("this")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(".")]),s._v("_name "),a("span",{pre:!0,attrs:{class:"token 
operator"}},[s._v("=")]),s._v(" name\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n"),a("span",{pre:!0,attrs:{class:"token number"}},[s._v("4.")]),s._v(" 类的属性装饰器\n 只接收两个参数"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" 没有 descriptor\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("function")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token function"}},[s._v("nameDecorator")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("(")]),a("span",{pre:!0,attrs:{class:"token parameter"}},[s._v("target"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" any"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(",")]),s._v(" key"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" string")]),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v(")")]),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" any "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n target"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("[")]),s._v("key"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("]")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'chenj2'")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token comment"}},[s._v("// 这样修改的是原型上的 name 而不是实例上的 name")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("const")]),s._v(" descriptor"),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v(":")]),s._v(" PropertyDescriptor "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n writable "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token boolean"}},[s._v("false")]),s._v("\n 
"),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("return")]),s._v(" descriptor\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token keyword"}},[s._v("class")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token class-name"}},[s._v("Test")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("{")]),s._v("\n @nameDecorator\n name "),a("span",{pre:!0,attrs:{class:"token operator"}},[s._v("=")]),s._v(" "),a("span",{pre:!0,attrs:{class:"token string"}},[s._v("'chenj'")]),s._v("\n "),a("span",{pre:!0,attrs:{class:"token punctuation"}},[s._v("}")]),s._v("\n")])]),s._v(" "),a("div",{staticClass:"line-numbers-wrapper"},[a("span",{staticClass:"line-number"},[s._v("1")]),a("br"),a("span",{staticClass:"line-number"},[s._v("2")]),a("br"),a("span",{staticClass:"line-number"},[s._v("3")]),a("br"),a("span",{staticClass:"line-number"},[s._v("4")]),a("br"),a("span",{staticClass:"line-number"},[s._v("5")]),a("br"),a("span",{staticClass:"line-number"},[s._v("6")]),a("br"),a("span",{staticClass:"line-number"},[s._v("7")]),a("br"),a("span",{staticClass:"line-number"},[s._v("8")]),a("br"),a("span",{staticClass:"line-number"},[s._v("9")]),a("br"),a("span",{staticClass:"line-number"},[s._v("10")]),a("br"),a("span",{staticClass:"line-number"},[s._v("11")]),a("br"),a("span",{staticClass:"line-number"},[s._v("12")]),a("br"),a("span",{staticClass:"line-number"},[s._v("13")]),a("br"),a("span",{staticClass:"line-number"},[s._v("14")]),a("br"),a("span",{staticClass:"line-number"},[s._v("15")]),a("br"),a("span",{staticClass:"line-number"},[s._v("16")]),a("br"),a("span",{staticClass:"line-number"},[s._v("17")]),a("br"),a("span",{staticClass:"line-number"},[s._v("18")]),a("br"),a("span",{staticClass:"line-number"},[s._v("19")]),a("br"),a("span",{staticClass:"line-number"},[s._v("20")]),a("br"),a("span",{staticClass:"line-number"},[s._v("21
")]),a("br"),a("span",{staticClass:"line-number"},[s._v("22")]),a("br"),a("span",{staticClass:"line-number"},[s._v("23")]),a("br"),a("span",{staticClass:"line-number"},[s._v("24")]),a("br"),a("span",{staticClass:"line-number"},[s._v("25")]),a("br"),a("span",{staticClass:"line-number"},[s._v("26")]),a("br"),a("span",{staticClass:"line-number"},[s._v("27")]),a("br"),a("span",{staticClass:"line-number"},[s._v("28")]),a("br"),a("span",{staticClass:"line-number"},[s._v("29")]),a("br"),a("span",{staticClass:"line-number"},[s._v("30")]),a("br"),a("span",{staticClass:"line-number"},[s._v("31")]),a("br"),a("span",{staticClass:"line-number"},[s._v("32")]),a("br"),a("span",{staticClass:"line-number"},[s._v("33")]),a("br"),a("span",{staticClass:"line-number"},[s._v("34")]),a("br"),a("span",{staticClass:"line-number"},[s._v("35")]),a("br"),a("span",{staticClass:"line-number"},[s._v("36")]),a("br"),a("span",{staticClass:"line-number"},[s._v("37")]),a("br"),a("span",{staticClass:"line-number"},[s._v("38")]),a("br"),a("span",{staticClass:"line-number"},[s._v("39")]),a("br"),a("span",{staticClass:"line-number"},[s._v("40")]),a("br"),a("span",{staticClass:"line-number"},[s._v("41")]),a("br"),a("span",{staticClass:"line-number"},[s._v("42")]),a("br"),a("span",{staticClass:"line-number"},[s._v("43")]),a("br"),a("span",{staticClass:"line-number"},[s._v("44")]),a("br"),a("span",{staticClass:"line-number"},[s._v("45")]),a("br"),a("span",{staticClass:"line-number"},[s._v("46")]),a("br"),a("span",{staticClass:"line-number"},[s._v("47")]),a("br"),a("span",{staticClass:"line-number"},[s._v("48")]),a("br"),a("span",{staticClass:"line-number"},[s._v("49")]),a("br"),a("span",{staticClass:"line-number"},[s._v("50")]),a("br"),a("span",{staticClass:"line-number"},[s._v("51")]),a("br"),a("span",{staticClass:"line-number"},[s._v("52")]),a("br"),a("span",{staticClass:"line-number"},[s._v("53")]),a("br"),a("span",{staticClass:"line-number"},[s._v("54")]),a("br"),a("span",{staticClass:"line-number"},[s.
_v("55")]),a("br"),a("span",{staticClass:"line-number"},[s._v("56")]),a("br"),a("span",{staticClass:"line-number"},[s._v("57")]),a("br"),a("span",{staticClass:"line-number"},[s._v("58")]),a("br"),a("span",{staticClass:"line-number"},[s._v("59")]),a("br"),a("span",{staticClass:"line-number"},[s._v("60")]),a("br"),a("span",{staticClass:"line-number"},[s._v("61")]),a("br"),a("span",{staticClass:"line-number"},[s._v("62")]),a("br"),a("span",{staticClass:"line-number"},[s._v("63")]),a("br"),a("span",{staticClass:"line-number"},[s._v("64")]),a("br"),a("span",{staticClass:"line-number"},[s._v("65")]),a("br"),a("span",{staticClass:"line-number"},[s._v("66")]),a("br"),a("span",{staticClass:"line-number"},[s._v("67")]),a("br"),a("span",{staticClass:"line-number"},[s._v("68")]),a("br"),a("span",{staticClass:"line-number"},[s._v("69")]),a("br"),a("span",{staticClass:"line-number"},[s._v("70")]),a("br"),a("span",{staticClass:"line-number"},[s._v("71")]),a("br"),a("span",{staticClass:"line-number"},[s._v("72")]),a("br"),a("span",{staticClass:"line-number"},[s._v("73")]),a("br"),a("span",{staticClass:"line-number"},[s._v("74")]),a("br"),a("span",{staticClass:"line-number"},[s._v("75")]),a("br"),a("span",{staticClass:"line-number"},[s._v("76")]),a("br"),a("span",{staticClass:"line-number"},[s._v("77")]),a("br"),a("span",{staticClass:"line-number"},[s._v("78")]),a("br"),a("span",{staticClass:"line-number"},[s._v("79")]),a("br"),a("span",{staticClass:"line-number"},[s._v("80")]),a("br"),a("span",{staticClass:"line-number"},[s._v("81")]),a("br"),a("span",{staticClass:"line-number"},[s._v("82")]),a("br"),a("span",{staticClass:"line-number"},[s._v("83")]),a("br"),a("span",{staticClass:"line-number"},[s._v("84")]),a("br"),a("span",{staticClass:"line-number"},[s._v("85")]),a("br"),a("span",{staticClass:"line-number"},[s._v("86")]),a("br"),a("span",{staticClass:"line-number"},[s._v("87")]),a("br"),a("span",{staticClass:"line-number"},[s._v("88")]),a("br"),a("span",{staticClass:"line-number
"},[s._v("89")]),a("br"),a("span",{staticClass:"line-number"},[s._v("90")]),a("br"),a("span",{staticClass:"line-number"},[s._v("91")]),a("br"),a("span",{staticClass:"line-number"},[s._v("92")]),a("br"),a("span",{staticClass:"line-number"},[s._v("93")]),a("br"),a("span",{staticClass:"line-number"},[s._v("94")]),a("br"),a("span",{staticClass:"line-number"},[s._v("95")]),a("br"),a("span",{staticClass:"line-number"},[s._v("96")]),a("br"),a("span",{staticClass:"line-number"},[s._v("97")]),a("br"),a("span",{staticClass:"line-number"},[s._v("98")]),a("br"),a("span",{staticClass:"line-number"},[s._v("99")]),a("br"),a("span",{staticClass:"line-number"},[s._v("100")]),a("br"),a("span",{staticClass:"line-number"},[s._v("101")]),a("br"),a("span",{staticClass:"line-number"},[s._v("102")]),a("br"),a("span",{staticClass:"line-number"},[s._v("103")]),a("br"),a("span",{staticClass:"line-number"},[s._v("104")]),a("br"),a("span",{staticClass:"line-number"},[s._v("105")]),a("br"),a("span",{staticClass:"line-number"},[s._v("106")]),a("br"),a("span",{staticClass:"line-number"},[s._v("107")]),a("br"),a("span",{staticClass:"line-number"},[s._v("108")]),a("br"),a("span",{staticClass:"line-number"},[s._v("109")]),a("br"),a("span",{staticClass:"line-number"},[s._v("110")]),a("br"),a("span",{staticClass:"line-number"},[s._v("111")]),a("br"),a("span",{staticClass:"line-number"},[s._v("112")]),a("br"),a("span",{staticClass:"line-number"},[s._v("113")]),a("br"),a("span",{staticClass:"line-number"},[s._v("114")]),a("br"),a("span",{staticClass:"line-number"},[s._v("115")]),a("br"),a("span",{staticClass:"line-number"},[s._v("116")]),a("br"),a("span",{staticClass:"line-number"},[s._v("117")]),a("br"),a("span",{staticClass:"line-number"},[s._v("118")]),a("br"),a("span",{staticClass:"line-number"},[s._v("119")]),a("br"),a("span",{staticClass:"line-number"},[s._v("120")]),a("br"),a("span",{staticClass:"line-number"},[s._v("121")]),a("br"),a("span",{staticClass:"line-number"},[s._v("122")]),a("br"),a("sp
an",{staticClass:"line-number"},[s._v("123")]),a("br"),a("span",{staticClass:"line-number"},[s._v("124")]),a("br"),a("span",{staticClass:"line-number"},[s._v("125")]),a("br"),a("span",{staticClass:"line-number"},[s._v("126")]),a("br"),a("span",{staticClass:"line-number"},[s._v("127")]),a("br"),a("span",{staticClass:"line-number"},[s._v("128")]),a("br"),a("span",{staticClass:"line-number"},[s._v("129")]),a("br"),a("span",{staticClass:"line-number"},[s._v("130")]),a("br"),a("span",{staticClass:"line-number"},[s._v("131")]),a("br"),a("span",{staticClass:"line-number"},[s._v("132")]),a("br"),a("span",{staticClass:"line-number"},[s._v("133")]),a("br"),a("span",{staticClass:"line-number"},[s._v("134")]),a("br"),a("span",{staticClass:"line-number"},[s._v("135")]),a("br"),a("span",{staticClass:"line-number"},[s._v("136")]),a("br"),a("span",{staticClass:"line-number"},[s._v("137")]),a("br"),a("span",{staticClass:"line-number"},[s._v("138")]),a("br"),a("span",{staticClass:"line-number"},[s._v("139")]),a("br"),a("span",{staticClass:"line-number"},[s._v("140")]),a("br"),a("span",{staticClass:"line-number"},[s._v("141")]),a("br"),a("span",{staticClass:"line-number"},[s._v("142")]),a("br"),a("span",{staticClass:"line-number"},[s._v("143")]),a("br"),a("span",{staticClass:"line-number"},[s._v("144")]),a("br"),a("span",{staticClass:"line-number"},[s._v("145")]),a("br"),a("span",{staticClass:"line-number"},[s._v("146")]),a("br"),a("span",{staticClass:"line-number"},[s._v("147")]),a("br"),a("span",{staticClass:"line-number"},[s._v("148")]),a("br"),a("span",{staticClass:"line-number"},[s._v("149")]),a("br"),a("span",{staticClass:"line-number"},[s._v("150")]),a("br"),a("span",{staticClass:"line-number"},[s._v("151")]),a("br"),a("span",{staticClass:"line-number"},[s._v("152")]),a("br"),a("span",{staticClass:"line-number"},[s._v("153")]),a("br"),a("span",{staticClass:"line-number"},[s._v("154")]),a("br"),a("span",{staticClass:"line-number"},[s._v("155")]),a("br"),a("span",{staticClass:"li
ne-number"},[s._v("156")]),a("br"),a("span",{staticClass:"line-number"},[s._v("157")]),a("br"),a("span",{staticClass:"line-number"},[s._v("158")]),a("br"),a("span",{staticClass:"line-number"},[s._v("159")]),a("br"),a("span",{staticClass:"line-number"},[s._v("160")]),a("br"),a("span",{staticClass:"line-number"},[s._v("161")]),a("br"),a("span",{staticClass:"line-number"},[s._v("162")]),a("br"),a("span",{staticClass:"line-number"},[s._v("163")]),a("br"),a("span",{staticClass:"line-number"},[s._v("164")]),a("br"),a("span",{staticClass:"line-number"},[s._v("165")]),a("br"),a("span",{staticClass:"line-number"},[s._v("166")]),a("br"),a("span",{staticClass:"line-number"},[s._v("167")]),a("br"),a("span",{staticClass:"line-number"},[s._v("168")]),a("br"),a("span",{staticClass:"line-number"},[s._v("169")]),a("br"),a("span",{staticClass:"line-number"},[s._v("170")]),a("br"),a("span",{staticClass:"line-number"},[s._v("171")]),a("br"),a("span",{staticClass:"line-number"},[s._v("172")]),a("br"),a("span",{staticClass:"line-number"},[s._v("173")]),a("br"),a("span",{staticClass:"line-number"},[s._v("174")]),a("br"),a("span",{staticClass:"line-number"},[s._v("175")]),a("br"),a("span",{staticClass:"line-number"},[s._v("176")]),a("br"),a("span",{staticClass:"line-number"},[s._v("177")]),a("br"),a("span",{staticClass:"line-number"},[s._v("178")]),a("br"),a("span",{staticClass:"line-number"},[s._v("179")]),a("br"),a("span",{staticClass:"line-number"},[s._v("180")]),a("br"),a("span",{staticClass:"line-number"},[s._v("181")]),a("br"),a("span",{staticClass:"line-number"},[s._v("182")]),a("br"),a("span",{staticClass:"line-number"},[s._v("183")]),a("br"),a("span",{staticClass:"line-number"},[s._v("184")]),a("br"),a("span",{staticClass:"line-number"},[s._v("185")]),a("br"),a("span",{staticClass:"line-number"},[s._v("186")]),a("br"),a("span",{staticClass:"line-number"},[s._v("187")]),a("br"),a("span",{staticClass:"line-number"},[s._v("188")]),a("br"),a("span",{staticClass:"line-number"},[s._v("1
89")]),a("br"),a("span",{staticClass:"line-number"},[s._v("190")]),a("br"),a("span",{staticClass:"line-number"},[s._v("191")]),a("br"),a("span",{staticClass:"line-number"},[s._v("192")]),a("br"),a("span",{staticClass:"line-number"},[s._v("193")]),a("br"),a("span",{staticClass:"line-number"},[s._v("194")]),a("br"),a("span",{staticClass:"line-number"},[s._v("195")]),a("br"),a("span",{staticClass:"line-number"},[s._v("196")]),a("br"),a("span",{staticClass:"line-number"},[s._v("197")]),a("br"),a("span",{staticClass:"line-number"},[s._v("198")]),a("br"),a("span",{staticClass:"line-number"},[s._v("199")]),a("br"),a("span",{staticClass:"line-number"},[s._v("200")]),a("br"),a("span",{staticClass:"line-number"},[s._v("201")]),a("br"),a("span",{staticClass:"line-number"},[s._v("202")]),a("br"),a("span",{staticClass:"line-number"},[s._v("203")]),a("br"),a("span",{staticClass:"line-number"},[s._v("204")]),a("br"),a("span",{staticClass:"line-number"},[s._v("205")]),a("br"),a("span",{staticClass:"line-number"},[s._v("206")]),a("br"),a("span",{staticClass:"line-number"},[s._v("207")]),a("br"),a("span",{staticClass:"line-number"},[s._v("208")]),a("br"),a("span",{staticClass:"line-number"},[s._v("209")]),a("br"),a("span",{staticClass:"line-number"},[s._v("210")]),a("br"),a("span",{staticClass:"line-number"},[s._v("211")]),a("br"),a("span",{staticClass:"line-number"},[s._v("212")]),a("br"),a("span",{staticClass:"line-number"},[s._v("213")]),a("br"),a("span",{staticClass:"line-number"},[s._v("214")]),a("br"),a("span",{staticClass:"line-number"},[s._v("215")]),a("br"),a("span",{staticClass:"line-number"},[s._v("216")]),a("br"),a("span",{staticClass:"line-number"},[s._v("217")]),a("br"),a("span",{staticClass:"line-number"},[s._v("218")]),a("br"),a("span",{staticClass:"line-number"},[s._v("219")]),a("br"),a("span",{staticClass:"line-number"},[s._v("220")]),a("br"),a("span",{staticClass:"line-number"},[s._v("221")]),a("br"),a("span",{staticClass:"line-number"},[s._v("222")]),a("br"),a("sp
an",{staticClass:"line-number"},[s._v("223")]),a("br"),a("span",{staticClass:"line-number"},[s._v("224")]),a("br"),a("span",{staticClass:"line-number"},[s._v("225")]),a("br"),a("span",{staticClass:"line-number"},[s._v("226")]),a("br"),a("span",{staticClass:"line-number"},[s._v("227")]),a("br"),a("span",{staticClass:"line-number"},[s._v("228")]),a("br"),a("span",{staticClass:"line-number"},[s._v("229")]),a("br"),a("span",{staticClass:"line-number"},[s._v("230")]),a("br"),a("span",{staticClass:"line-number"},[s._v("231")]),a("br"),a("span",{staticClass:"line-number"},[s._v("232")]),a("br"),a("span",{staticClass:"line-number"},[s._v("233")]),a("br"),a("span",{staticClass:"line-number"},[s._v("234")]),a("br"),a("span",{staticClass:"line-number"},[s._v("235")]),a("br"),a("span",{staticClass:"line-number"},[s._v("236")]),a("br"),a("span",{staticClass:"line-number"},[s._v("237")]),a("br"),a("span",{staticClass:"line-number"},[s._v("238")]),a("br"),a("span",{staticClass:"line-number"},[s._v("239")]),a("br"),a("span",{staticClass:"line-number"},[s._v("240")]),a("br"),a("span",{staticClass:"line-number"},[s._v("241")]),a("br")])])])}),[],!1,null,null,null);t.default=r.exports}}]); |
||
analyze_E017+020.py | import os
from distutils.dir_util import copy_tree
import warnings
import IPython
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
import torch
from context import utils
import utils.filesystem as fs
import utils.plotting as plot
from utils.data_analysis import invert_signs, load_stats
from utils.misc import get_equal_dicts, length_of_longest
def create_plots(stats_list, keys_to_plot, groups, result_dir, include_val=True):
    """Plot grouped mean/std time series for each monitored key.

    For every key in ``keys_to_plot``, the series that ran to completion
    (i.e. are as long as the longest series for that key) are selected
    across ``stats_list``, optionally extended with their validation
    counterparts, plotted grouped by label, and saved to
    ``<result_dir>/<key>-all-series-mean-sd.pdf``.

    Args:
        stats_list: List of pandas DataFrames, one per run; each is expected
            to contain a 'generations' column plus the monitored keys.
        keys_to_plot: Iterable of column names to plot.
        groups: np.ndarray of group labels, one per entry in ``stats_list``.
        result_dir: Directory where the PDF files are written.
        include_val: If True, also plot the matching '<key base>_val' series.
    """
    n_keys = len(keys_to_plot)
    n_chars = len(str(n_keys))
    progress_fmt = ' {:' + str(n_chars) + 'd}/{:' + str(n_chars) + 'd} monitored keys plotted'
    groups_org = groups.copy()
    for i_key, k in enumerate(keys_to_plot):
        # Get data and subset only those series that are done (or the one that is the longest)
        groups = groups_org.copy()
        list_of_series = [s[k].tolist() for s in stats_list if k in s]
        list_of_genera = [s['generations'].tolist() for s in stats_list if k in s]
        longest_len = length_of_longest(list_of_series)
        indices = [i for i, series in enumerate(list_of_series) if len(series) == longest_len]
        groups = groups[indices]
        list_of_series = [list_of_series[i] for i in indices]
        list_of_genera = [list_of_genera[i] for i in indices]
        # Validation series
        if include_val:
            # Assumes monitored keys end in a 4-char suffix such as '_unp'/'_avg' -- TODO confirm
            val_k = k[:-4] + '_val'
            # NOTE(review): `i in indices` mixes indices into stats_list with indices into
            # the k-filtered list; only equivalent when every frame contains k -- verify
            list_of_series_val = [s[val_k].tolist() for i, s in enumerate(stats_list)
                                  if val_k in s and i in indices]
            if list_of_series_val:
                # Validation is only logged on some generations; the rest are NaN.
                # Use the non-NaN positions as the x-axis and drop the NaNs from the data.
                list_of_genera_val = [np.where(~np.isnan(series))[0].tolist()
                                      for series in list_of_series_val]
                list_of_genera.extend(list_of_genera_val)
                list_of_series_val = [np.array(series) for series in list_of_series_val]
                list_of_series_val = [series[~np.isnan(series)].tolist()
                                      for series in list_of_series_val]
                list_of_series.extend(list_of_series_val)
                groups_val = np.array([g + ', validation' for g in groups])
                groups = np.append(groups, groups_val)
        # Sort all three collections consistently by group label
        list_of_genera = [x for _, x in sorted(zip(groups.tolist(), list_of_genera))]
        list_of_series = [x for _, x in sorted(zip(groups.tolist(), list_of_series))]
        groups.sort()
        # Plot
        plot.timeseries_mean_grouped(list_of_genera, list_of_series, groups,
                                     xlabel='generations', ylabel=k, map_labels='supervised')
        if 'return' in k:
            plt.gca().set_ylim(0, 1.5)
        elif 'accuracy' in k:
            plt.gca().set_ylim(0.4, 1)
        plt.savefig(os.path.join(result_dir, k + '-all-series-mean-sd' + '.pdf'),
                    bbox_inches='tight')
        plt.close()
        # Progress: overwrite the same console line until the final key, then newline
        line_end = '\n' if i_key + 1 == n_keys else '\r'
        print(progress_fmt.format(i_key + 1, n_keys), end=line_end)
def get_directories(experiment_id):
# Get directories to analyze
this_file_dir_local = os.path.dirname(os.path.abspath(__file__))
package_root_this_file = fs.get_parent(this_file_dir_local, 'es-rl')
d = os.path.join(package_root_this_file, 'experiments', 'checkpoints', experiment_id)
directories = [os.path.join(d, di) for di in os.listdir(d) if os.path.isdir(os.path.join(d, di))]
directories = [d for d in directories if 'monitoring' not in d and 'analysis' not in d]
# Create result directory
result_dir = os.path.join(d, str(experiment_id[:4]))
dst_dir = '/home/jakob/Dropbox/Apps/ShareLaTeX/Master\'s Thesis/graphics/' + experiment_id[:4]
if not os.path.exists(result_dir + '-bn-analysis'):
os.mkdir(result_dir + '-bn-analysis'),
if not os.path.exists(result_dir + '-init-analysis'):
os.mkdir(result_dir + '-init-analysis')
return directories, result_dir, dst_dir
def | (experiment_id, optimizer):
stats_init = []
stats_bn = []
groups_init = np.array([])
groups_bn = np.array([])
for d in directories:
try:
st = pd.read_csv(os.path.join(d, 'stats.csv'))
with open(os.path.join(d, 'init.log'), 'r') as f:
s = f.read()
if 'MNISTNetNoInit' in s:
groups_init = np.append(groups_init, 'Default init' + optimizer) # Has BN
stats_init.append(st)
elif 'MNISTNetNoBN' in s:
groups_bn = np.append(groups_bn, 'No Batchnorm' + optimizer) # Has Xavier Glorot
stats_bn.append(st)
else:
groups_bn = np.append(groups_bn, 'Batchnorm' + optimizer) # Has Xavier Glorot
groups_init = np.append(groups_init, 'Xavier-Glorot' + optimizer) # Has BN
stats_init.append(st)
stats_bn.append(st)
except:
print("None in: " + d)
return stats_init, stats_bn, groups_init, groups_bn
if __name__ == '__main__':
# Ignore warnings from matplotlib
warnings.filterwarnings("ignore", module="matplotlib")
# Font setting
matplotlib.rcParams.update({'font.size': 12})
# Experiment IDs
experiment_ids = ['E017-bn-init', 'E020-bn-init']
# Optimizer labels
# optimizers = [', SGD', ', ADAM']
optimizers = ['', '']
# Keys to analyze
keys_to_plot = {'return_unp', 'return_avg', 'accuracy_unp', 'accuracy_avg', 'sigma'}
# Analyze
for experiment_id, optimizer in zip(experiment_ids, optimizers):
# Get directories
directories, result_dir, dst_dir = get_directories(experiment_id)
if len(directories) == 0:
print('No results for {}'.format(experiment_id))
continue
# Load data
stats_init, stats_bn, groups_init, groups_bn = load(experiment_id, optimizer)
# Plot
invert_signs(stats_init)
invert_signs(stats_bn)
create_plots(stats_init, keys_to_plot, groups_init, result_dir + '-init-analysis', include_val=True)
create_plots(stats_bn, keys_to_plot, groups_bn, result_dir + '-bn-analysis', include_val=True)
copy_tree(result_dir + '-init-analysis', dst_dir + '-init-analysis')
copy_tree(result_dir + '-bn-analysis', dst_dir + '-bn-analysis')
| load |
mem.rs | //! Memory information.
pub use crate::data::mem::SystemMemory;
impl SystemMemory {
///Fetches system information, if not available returns with all members set to 0.
pub fn new() -> Self {
let (total_count, avail_count, size) = unsafe {
let total_count = libc::sysconf(libc::_SC_PHYS_PAGES);
let avail_count = libc::sysconf(libc::_SC_AVPHYS_PAGES);
let size = libc::sysconf(libc::_SC_PAGE_SIZE);
if total_count == -1 || avail_count == -1 || size == -1 |
(total_count as u64, avail_count as u64, size as u64)
};
Self {
total: total_count.saturating_mul(size),
avail: avail_count.saturating_mul(size),
}
}
}
| {
return Self {
total: 0,
avail: 0,
}
} |
consume-record.component.spec.ts | import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { ConsumeRecordComponent } from './consume-record.component';
describe('ConsumeRecordComponent', () => {
let component: ConsumeRecordComponent;
let fixture: ComponentFixture<ConsumeRecordComponent>;
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ ConsumeRecordComponent ] | .compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(ConsumeRecordComponent);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
});
}); | }) |
login_status.js | 'use strict';
function | (name, value, expire_days) {
let cookie_str = name + "=" + escape(value);
if (expire_days != null) {
cookie_str += ";path=/;max-age=" + expire_days * 24 * 3600;
}
document.cookie = cookie_str;
}
function getCookie(name) {
if (document.cookie.length > 0) {
let cookies = document.cookie.split(';');
for (let x of cookies) {
let key = x.split('=')[0], value = x.split('=')[1];
if (key == name) return value;
}
}
return "";
}
| setCookie |
KnxNetIpMessageCodec.go | //
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package knxnetip
import (
"errors"
"fmt"
"github.com/apache/plc4x/plc4go/internal/plc4go/knxnetip/readwrite/model"
"github.com/apache/plc4x/plc4go/internal/plc4go/spi"
"github.com/apache/plc4x/plc4go/internal/plc4go/spi/transports"
"github.com/apache/plc4x/plc4go/internal/plc4go/spi/utils"
"time"
)
type KnxNetIpExpectation struct {
expiration time.Time
acceptsMessage spi.AcceptsMessage
handleMessage spi.HandleMessage
}
type KnxNetIpMessageCodec struct {
sequenceCounter int32
transportInstance transports.TransportInstance
messageInterceptor func(message interface{})
defaultIncomingMessageChannel chan interface{}
expectations []KnxNetIpExpectation
}
func NewKnxNetIpMessageCodec(transportInstance transports.TransportInstance, messageInterceptor func(message interface{})) *KnxNetIpMessageCodec {
codec := &KnxNetIpMessageCodec{
sequenceCounter: 0,
transportInstance: transportInstance,
messageInterceptor: messageInterceptor,
defaultIncomingMessageChannel: make(chan interface{}),
expectations: []KnxNetIpExpectation{},
}
// Start a worker that handles processing of responses
go work(codec)
return codec
}
func (m *KnxNetIpMessageCodec) Connect() error {
// "connect" to the remote UDP server
return m.transportInstance.Connect()
}
func (m *KnxNetIpMessageCodec) Disconnect() error {
return m.transportInstance.Close()
}
func (m *KnxNetIpMessageCodec) Send(message interface{}) error {
// Cast the message to the correct type of struct
knxMessage := model.CastKnxNetIpMessage(message)
// Serialize the request
wb := utils.NewWriteBuffer()
err := knxMessage.Serialize(*wb)
if err != nil {
return errors.New("error serializing request " + err.Error())
}
// Send it to the PLC
err = m.transportInstance.Write(wb.GetBytes())
if err != nil {
return errors.New("error sending request " + err.Error())
}
return nil
}
func (m *KnxNetIpMessageCodec) Expect(acceptsMessage spi.AcceptsMessage, handleMessage spi.HandleMessage, ttl time.Duration) error {
expectation := KnxNetIpExpectation{
expiration: time.Now().Add(ttl),
acceptsMessage: acceptsMessage,
handleMessage: handleMessage,
}
m.expectations = append(m.expectations, expectation)
return nil
}
func (m *KnxNetIpMessageCodec) SendRequest(message interface{}, acceptsMessage spi.AcceptsMessage, handleMessage spi.HandleMessage, ttl time.Duration) error {
// Send the actual message
err := m.Send(message)
if err != nil {
return err
}
return m.Expect(acceptsMessage, handleMessage, ttl)
}
func (m *KnxNetIpMessageCodec) GetDefaultIncomingMessageChannel() chan interface{} {
return m.defaultIncomingMessageChannel
}
func (m *KnxNetIpMessageCodec) receive() (interface{}, error) {
// We need at least 6 bytes in order to know how big the packet is in total
if num, err := m.transportInstance.GetNumReadableBytes(); (err == nil) && (num >= 6) {
data, err := m.transportInstance.PeekReadableBytes(6)
if err != nil {
fmt.Printf("Got error reading: %s\n", err.Error())
// TODO: Possibly clean up ...
return nil, nil
}
// Get the size of the entire packet
packetSize := (uint32(data[4]) << 8) + uint32(data[5])
if num >= packetSize {
data, err = m.transportInstance.Read(packetSize)
if err != nil {
fmt.Printf("Got error reading: %s\n", err.Error())
// TODO: Possibly clean up ...
return nil, nil
}
rb := utils.NewReadBuffer(data)
knxMessage, err := model.KnxNetIpMessageParse(rb)
if err != nil {
// TODO: Possibly clean up ...
return nil, nil
}
return knxMessage, nil | } else {
fmt.Printf("Not enough bytes. Got: %d Need: %d\n", num, packetSize)
}
} else if err != nil {
fmt.Printf("Got error reading: %s\n", err.Error())
} else {
fmt.Printf("Only %d bytes available\n", num)
}
return nil, nil
}
func work(m *KnxNetIpMessageCodec) {
// Start an endless loop
// TODO: Provide some means to terminate this ...
for {
message, err := m.receive()
if err != nil {
fmt.Printf("got an error reading from transport %s", err.Error())
} else if message != nil {
// If this message is a simple KNXNet/IP UDP Ack, ignore it for now
// TODO: In the future use these to see if a packet needs to be received
tunnelingResponse := model.CastTunnelingResponse(message)
if tunnelingResponse != nil {
continue
}
// If this is an incoming KNXNet/IP UDP Packet, automatically send an ACK
tunnelingRequest := model.CastTunnelingRequest(message)
if tunnelingRequest != nil {
response := model.NewTunnelingResponse(
model.NewTunnelingResponseDataBlock(
tunnelingRequest.TunnelingRequestDataBlock.CommunicationChannelId,
tunnelingRequest.TunnelingRequestDataBlock.SequenceCounter,
model.Status_NO_ERROR),
)
_ = m.Send(response)
}
// Otherwise handle it
now := time.Now()
// Give a message interceptor a chance to intercept
if m.messageInterceptor != nil {
m.messageInterceptor(message)
}
// Go through all expectations
messageHandled := false
for index, expectation := range m.expectations {
// Check if this expectation has expired.
if now.After(expectation.expiration) {
// Remove this expectation from the list.
m.expectations = append(m.expectations[:index], m.expectations[index+1:]...)
break
}
// Check if the current message matches the expectations
// If it does, let it handle the message.
if accepts := expectation.acceptsMessage(message); accepts {
err = expectation.handleMessage(message)
if err == nil {
messageHandled = true
// Remove this expectation from the list.
m.expectations = append(m.expectations[:index], m.expectations[index+1:]...)
}
break
}
}
// If the message has not been handled and a default handler is provided, call this ...
if !messageHandled {
m.defaultIncomingMessageChannel <- message
}
} else {
// Sleep for 10ms
time.Sleep(10 * time.Millisecond)
}
}
}
func (m KnxNetIpMessageCodec) GetTransportInstance() transports.TransportInstance {
return m.transportInstance
} | |
data_test.go | package dmr
import (
"bytes"
"encoding/hex"
"fmt"
"sort"
"testing"
)
func TestDataBlock(t *testing.T) {
want := &DataBlock{
Serial: 123,
Data: []byte{0x17, 0x2a},
Length: 2,
}
data := want.Bytes(Rate34Data, true)
if data == nil {
t.Fatal("encode failed")
}
// Size is the user-data + two octets of serial/crc
size := int(dataBlockLength(Rate34Data, true)) + 2
if len(data) != size {
t.Fatalf("encode failed: expected %d bytes, got %d", size, len(data))
}
// Decoding is tested in the DataFragment test
}
func TestDataFragment(t *testing.T) {
msg, err := BuildMessageData("CQCQCQ PD0MZ", DDFormatUTF16, true)
if err != nil {
t.Fatalf("build message failed: %v", err)
}
want := &DataFragment{Data: msg}
blocks, err := want.DataBlocks(Rate34Data, true)
if err != nil {
t.Fatalf("encode failed: %v", err)
}
if blocks == nil |
if len(blocks) != 2 {
t.Fatalf("encode failed: expected 2 blocks, got %d", len(blocks))
}
for i, block := range blocks {
t.Log(fmt.Sprintf("block %02d:\n%s", i, hex.Dump(block.Data)))
}
test, err := CombineDataBlocks(blocks)
if err != nil {
t.Fatalf("decode failed: %v", err)
}
if !bytes.Equal(test.Data[:len(want.Data)], want.Data) {
t.Log(fmt.Sprintf("want:\n%s", hex.Dump(want.Data)))
t.Log(fmt.Sprintf("got:\n%s", hex.Dump(test.Data)))
t.Fatal("decode failed: data is wrong")
}
}
func TestMessage(t *testing.T) {
msg := "CQCQCQ PD0MZ"
var encodings = []int{}
for i := range encodingMap {
encodings = append(encodings, int(i))
}
sort.Sort(sort.IntSlice(encodings))
for _, i := range encodings {
e := encodingMap[uint8(i)]
n := DDFormatName[uint8(i)]
t.Logf("testing %s encoding", n)
enc := e.NewDecoder()
str, err := enc.String(msg)
if err != nil {
t.Fatalf("error encoding to %s: %v", n, err)
}
dec := e.NewDecoder()
out, err := dec.String(str)
if err != nil {
t.Fatalf("error decoding from %s: %v", n, err)
}
t.Log(fmt.Sprintf("encoder:\n%s", hex.Dump([]byte(str))))
t.Log(fmt.Sprintf("decoder:\n%s", hex.Dump([]byte(out))))
}
}
| {
t.Fatal("encode failed: blocks is nil")
} |
testable.rs | use std::fmt::Debug;
///! Contains Basic setup for testing, testable trait and its result type
use anyhow::{bail, Error, Result};
#[derive(Debug)]
/// Enum indicating result of the test. This is like an extended std::result,
/// which includes a Skip variant to indicate that a test was skipped, and the Ok variant has no associated value
pub enum | {
/// Test was ok
Passed,
/// Test needed to be skipped
Skipped,
/// Test was error
Failed(Error),
}
impl<T> From<Result<T>> for TestResult {
fn from(result: Result<T>) -> Self {
match result {
Ok(_) => TestResult::Passed,
Err(err) => TestResult::Failed(err),
}
}
}
/// This trait indicates that something can be run as a test, or is 'testable'
/// This forms the basis of the framework, as all places where tests are done,
/// expect structs which implement this
pub trait Testable<'a> {
fn get_name(&self) -> &'a str;
fn can_run(&self) -> bool {
true
}
fn run(&self) -> TestResult;
}
/// This trait indicates that something forms a group of tests.
/// Test groups are used to group tests in sensible manner as well as provide namespacing to tests
pub trait TestableGroup<'a> {
fn get_name(&self) -> &'a str;
fn run_all(&'a self) -> Vec<(&'a str, TestResult)>;
fn run_selected(&'a self, selected: &[&str]) -> Vec<(&'a str, TestResult)>;
}
#[macro_export]
macro_rules! test_result {
($e:expr $(,)?) => {
match $e {
core::result::Result::Ok(val) => val,
core::result::Result::Err(err) => {
return $crate::testable::TestResult::Failed(err);
}
}
};
}
#[macro_export]
macro_rules! assert_result_eq {
($expected:expr, $actual:expr $(,)?) => ({
match (&$expected, &$actual) {
(expected_val, actual_val) => {
if !(*expected_val == *actual_val) {
test_framework::testable::assert_failed(&*expected_val, &*actual_val, std::option::Option::None)
} else {
Ok(())
}
}
}
});
($expected:expr, $actual:expr, $($arg:tt)+) => ({
match (&$expected, &$actual) {
(expected_val, actual_val) => {
if !(*expected_val == *actual_val) {
test_framework::testable::assert_failed(&*expected_val, &*actual_val, std::option::Option::Some(format_args!($($arg)+)))
} else {
Ok(())
}
}
}
});
}
#[doc(hidden)]
pub fn assert_failed<T, U>(
expected: &T,
actual: &U,
args: Option<std::fmt::Arguments<'_>>,
) -> Result<()>
where
T: Debug + ?Sized,
U: Debug + ?Sized,
{
match args {
Some(args) => {
bail!(
r#"assertion failed:
expected: `{:?}`,
actual: `{:?}`: {}"#,
expected,
actual,
args
)
}
None => {
bail!(
r#"assertion failed:
expected: `{:?}`,
actual: `{:?}`"#,
expected,
actual
)
}
}
}
| TestResult |
tooltip.rs | //! Display a widget over another.
use crate::event;
use crate::layout;
use crate::mouse;
use crate::renderer;
use crate::text;
use crate::widget::container;
use crate::widget::text::Text;
use crate::{
Clipboard, Element, Event, Layout, Length, Padding, Point, Rectangle,
Shell, Size, Vector, Widget,
};
/// An element to display a widget over another.
#[allow(missing_debug_implementations)]
pub struct Tooltip<'a, Message, Renderer: text::Renderer> {
content: Element<'a, Message, Renderer>,
tooltip: Text<Renderer>,
position: Position,
style_sheet: Box<dyn container::StyleSheet + 'a>,
gap: u16,
padding: u16,
}
impl<'a, Message, Renderer> Tooltip<'a, Message, Renderer>
where
Renderer: text::Renderer,
{
/// The default padding of a [`Tooltip`] drawn by this renderer.
const DEFAULT_PADDING: u16 = 5;
/// Creates an empty [`Tooltip`].
///
/// [`Tooltip`]: struct.Tooltip.html
pub fn new(
content: impl Into<Element<'a, Message, Renderer>>,
tooltip: impl ToString,
position: Position,
) -> Self {
Tooltip {
content: content.into(),
tooltip: Text::new(tooltip.to_string()),
position,
style_sheet: Default::default(),
gap: 0,
padding: Self::DEFAULT_PADDING,
}
}
/// Sets the size of the text of the [`Tooltip`].
pub fn size(mut self, size: u16) -> Self {
self.tooltip = self.tooltip.size(size);
self
}
/// Sets the font of the [`Tooltip`].
///
/// [`Font`]: Renderer::Font
pub fn | (mut self, font: impl Into<Renderer::Font>) -> Self {
self.tooltip = self.tooltip.font(font);
self
}
/// Sets the gap between the content and its [`Tooltip`].
pub fn gap(mut self, gap: u16) -> Self {
self.gap = gap;
self
}
/// Sets the padding of the [`Tooltip`].
pub fn padding(mut self, padding: u16) -> Self {
self.padding = padding;
self
}
/// Sets the style of the [`Tooltip`].
pub fn style(
mut self,
style_sheet: impl Into<Box<dyn container::StyleSheet + 'a>>,
) -> Self {
self.style_sheet = style_sheet.into();
self
}
}
/// The position of the tooltip. Defaults to following the cursor.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Position {
/// The tooltip will follow the cursor.
FollowCursor,
/// The tooltip will appear on the top of the widget.
Top,
/// The tooltip will appear on the bottom of the widget.
Bottom,
/// The tooltip will appear on the left of the widget.
Left,
/// The tooltip will appear on the right of the widget.
Right,
}
impl<'a, Message, Renderer> Widget<Message, Renderer>
for Tooltip<'a, Message, Renderer>
where
Renderer: text::Renderer,
{
fn width(&self) -> Length {
self.content.width()
}
fn height(&self) -> Length {
self.content.height()
}
fn layout(
&self,
renderer: &Renderer,
limits: &layout::Limits,
) -> layout::Node {
self.content.layout(renderer, limits)
}
fn on_event(
&mut self,
event: Event,
layout: Layout<'_>,
cursor_position: Point,
renderer: &Renderer,
clipboard: &mut dyn Clipboard,
shell: &mut Shell<'_, Message>,
) -> event::Status {
self.content.widget.on_event(
event,
layout,
cursor_position,
renderer,
clipboard,
shell,
)
}
fn mouse_interaction(
&self,
layout: Layout<'_>,
cursor_position: Point,
viewport: &Rectangle,
renderer: &Renderer,
) -> mouse::Interaction {
self.content.mouse_interaction(
layout,
cursor_position,
viewport,
renderer,
)
}
fn draw(
&self,
renderer: &mut Renderer,
inherited_style: &renderer::Style,
layout: Layout<'_>,
cursor_position: Point,
viewport: &Rectangle,
) {
self.content.draw(
renderer,
inherited_style,
layout,
cursor_position,
viewport,
);
let bounds = layout.bounds();
if bounds.contains(cursor_position) {
let gap = f32::from(self.gap);
let style = self.style_sheet.style();
let defaults = renderer::Style {
text_color: style
.text_color
.unwrap_or(inherited_style.text_color),
};
let text_layout = Widget::<(), Renderer>::layout(
&self.tooltip,
renderer,
&layout::Limits::new(Size::ZERO, viewport.size())
.pad(Padding::new(self.padding)),
);
let padding = f32::from(self.padding);
let text_bounds = text_layout.bounds();
let x_center = bounds.x + (bounds.width - text_bounds.width) / 2.0;
let y_center =
bounds.y + (bounds.height - text_bounds.height) / 2.0;
let mut tooltip_bounds = {
let offset = match self.position {
Position::Top => Vector::new(
x_center,
bounds.y - text_bounds.height - gap - padding,
),
Position::Bottom => Vector::new(
x_center,
bounds.y + bounds.height + gap + padding,
),
Position::Left => Vector::new(
bounds.x - text_bounds.width - gap - padding,
y_center,
),
Position::Right => Vector::new(
bounds.x + bounds.width + gap + padding,
y_center,
),
Position::FollowCursor => Vector::new(
cursor_position.x,
cursor_position.y - text_bounds.height,
),
};
Rectangle {
x: offset.x - padding,
y: offset.y - padding,
width: text_bounds.width + padding * 2.0,
height: text_bounds.height + padding * 2.0,
}
};
if tooltip_bounds.x < viewport.x {
tooltip_bounds.x = viewport.x;
} else if viewport.x + viewport.width
< tooltip_bounds.x + tooltip_bounds.width
{
tooltip_bounds.x =
viewport.x + viewport.width - tooltip_bounds.width;
}
if tooltip_bounds.y < viewport.y {
tooltip_bounds.y = viewport.y;
} else if viewport.y + viewport.height
< tooltip_bounds.y + tooltip_bounds.height
{
tooltip_bounds.y =
viewport.y + viewport.height - tooltip_bounds.height;
}
renderer.with_layer(*viewport, |renderer| {
container::draw_background(renderer, &style, tooltip_bounds);
Widget::<(), Renderer>::draw(
&self.tooltip,
renderer,
&defaults,
Layout::with_offset(
Vector::new(
tooltip_bounds.x + padding,
tooltip_bounds.y + padding,
),
&text_layout,
),
cursor_position,
viewport,
);
});
}
}
}
impl<'a, Message, Renderer> From<Tooltip<'a, Message, Renderer>>
for Element<'a, Message, Renderer>
where
Renderer: 'a + text::Renderer,
Message: 'a,
{
fn from(
column: Tooltip<'a, Message, Renderer>,
) -> Element<'a, Message, Renderer> {
Element::new(column)
}
}
| font |
outpoint.go | package util
import (
"bytes"
"io"
"github.com/elastos/Elastos.ELA/common"
)
type OutPoint struct {
TxID common.Uint256
Index uint16
}
func (op *OutPoint) IsEqual(o OutPoint) bool {
if !op.TxID.IsEqual(o.TxID) |
if op.Index != o.Index {
return false
}
return true
}
func (op *OutPoint) Serialize(w io.Writer) error {
return common.WriteElements(w, &op.TxID, op.Index)
}
func (op *OutPoint) Deserialize(r io.Reader) error {
return common.ReadElements(r, &op.TxID, &op.Index)
}
func (op *OutPoint) Bytes() []byte {
buf := new(bytes.Buffer)
op.Serialize(buf)
return buf.Bytes()
}
func NewOutPoint(txId common.Uint256, index uint16) *OutPoint {
return &OutPoint{
TxID: txId,
Index: index,
}
}
func OutPointFromBytes(value []byte) (*OutPoint, error) {
outPoint := new(OutPoint)
err := outPoint.Deserialize(bytes.NewReader(value))
if err != nil {
return nil, err
}
return outPoint, nil
}
| {
return false
} |
v1beta1_event.py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1Event(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'action': 'str',
'api_version': 'str',
'deprecated_count': 'int',
'deprecated_first_timestamp': 'datetime',
'deprecated_last_timestamp': 'datetime',
'deprecated_source': 'V1EventSource',
'event_time': 'datetime',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'note': 'str',
'reason': 'str',
'regarding': 'V1ObjectReference',
'related': 'V1ObjectReference',
'reporting_controller': 'str',
'reporting_instance': 'str',
'series': 'V1beta1EventSeries',
'type': 'str'
}
attribute_map = {
'action': 'action',
'api_version': 'apiVersion',
'deprecated_count': 'deprecatedCount',
'deprecated_first_timestamp': 'deprecatedFirstTimestamp',
'deprecated_last_timestamp': 'deprecatedLastTimestamp',
'deprecated_source': 'deprecatedSource',
'event_time': 'eventTime',
'kind': 'kind',
'metadata': 'metadata',
'note': 'note',
'reason': 'reason',
'regarding': 'regarding',
'related': 'related',
'reporting_controller': 'reportingController',
'reporting_instance': 'reportingInstance',
'series': 'series',
'type': 'type'
}
def __init__(self, action=None, api_version=None, deprecated_count=None, deprecated_first_timestamp=None, deprecated_last_timestamp=None, deprecated_source=None, event_time=None, kind=None, metadata=None, note=None, reason=None, regarding=None, related=None, reporting_controller=None, reporting_instance=None, series=None, type=None):
"""
V1beta1Event - a model defined in Swagger
"""
self._action = None
self._api_version = None
self._deprecated_count = None
self._deprecated_first_timestamp = None
self._deprecated_last_timestamp = None
self._deprecated_source = None
self._event_time = None
self._kind = None
self._metadata = None
self._note = None
self._reason = None
self._regarding = None
self._related = None
self._reporting_controller = None
self._reporting_instance = None
self._series = None
self._type = None
self.discriminator = None
if action is not None:
self.action = action
if api_version is not None:
self.api_version = api_version
if deprecated_count is not None:
self.deprecated_count = deprecated_count
if deprecated_first_timestamp is not None:
self.deprecated_first_timestamp = deprecated_first_timestamp
if deprecated_last_timestamp is not None:
self.deprecated_last_timestamp = deprecated_last_timestamp
if deprecated_source is not None:
self.deprecated_source = deprecated_source
self.event_time = event_time
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if note is not None:
self.note = note
if reason is not None:
self.reason = reason
if regarding is not None:
self.regarding = regarding
if related is not None:
self.related = related
if reporting_controller is not None:
self.reporting_controller = reporting_controller
if reporting_instance is not None:
self.reporting_instance = reporting_instance
if series is not None:
self.series = series
if type is not None:
self.type = type
@property
def action(self):
"""
Gets the action of this V1beta1Event.
What action was taken/failed regarding to the regarding object.
:return: The action of this V1beta1Event.
:rtype: str
"""
return self._action
@action.setter
def action(self, action):
"""
Sets the action of this V1beta1Event.
What action was taken/failed regarding to the regarding object.
:param action: The action of this V1beta1Event.
:type: str
"""
self._action = action
@property
def api_version(self):
"""
Gets the api_version of this V1beta1Event.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta1Event.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1Event.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1Event.
:type: str
"""
self._api_version = api_version
@property
def deprecated_count(self):
"""
Gets the deprecated_count of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:return: The deprecated_count of this V1beta1Event.
:rtype: int
"""
return self._deprecated_count
@deprecated_count.setter
def deprecated_count(self, deprecated_count):
"""
Sets the deprecated_count of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:param deprecated_count: The deprecated_count of this V1beta1Event.
:type: int
"""
self._deprecated_count = deprecated_count
@property
def deprecated_first_timestamp(self):
"""
Gets the deprecated_first_timestamp of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:return: The deprecated_first_timestamp of this V1beta1Event.
:rtype: datetime
"""
return self._deprecated_first_timestamp
@deprecated_first_timestamp.setter
def deprecated_first_timestamp(self, deprecated_first_timestamp):
"""
Sets the deprecated_first_timestamp of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:param deprecated_first_timestamp: The deprecated_first_timestamp of this V1beta1Event.
:type: datetime
"""
self._deprecated_first_timestamp = deprecated_first_timestamp
@property
def deprecated_last_timestamp(self):
"""
Gets the deprecated_last_timestamp of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:return: The deprecated_last_timestamp of this V1beta1Event.
:rtype: datetime
"""
return self._deprecated_last_timestamp
@deprecated_last_timestamp.setter
def deprecated_last_timestamp(self, deprecated_last_timestamp):
"""
Sets the deprecated_last_timestamp of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:param deprecated_last_timestamp: The deprecated_last_timestamp of this V1beta1Event.
:type: datetime
"""
self._deprecated_last_timestamp = deprecated_last_timestamp
@property
def deprecated_source(self):
"""
Gets the deprecated_source of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:return: The deprecated_source of this V1beta1Event.
:rtype: V1EventSource
"""
return self._deprecated_source
@deprecated_source.setter
def deprecated_source(self, deprecated_source):
"""
Sets the deprecated_source of this V1beta1Event.
Deprecated field assuring backward compatibility with core.v1 Event type
:param deprecated_source: The deprecated_source of this V1beta1Event.
:type: V1EventSource
"""
self._deprecated_source = deprecated_source
@property
def event_time(self):
"""
Gets the event_time of this V1beta1Event.
Required. Time when this Event was first observed.
:return: The event_time of this V1beta1Event.
:rtype: datetime
"""
return self._event_time
@event_time.setter
def event_time(self, event_time):
"""
Sets the event_time of this V1beta1Event.
Required. Time when this Event was first observed.
:param event_time: The event_time of this V1beta1Event.
:type: datetime
"""
if event_time is None:
raise ValueError("Invalid value for `event_time`, must not be `None`")
self._event_time = event_time
@property
def kind(self):
"""
Gets the kind of this V1beta1Event.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1Event.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1Event.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1Event.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1Event.
:return: The metadata of this V1beta1Event.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1Event.
:param metadata: The metadata of this V1beta1Event.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def note(self):
"""
Gets the note of this V1beta1Event.
Optional. A human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.
:return: The note of this V1beta1Event.
:rtype: str
"""
return self._note
@note.setter
def note(self, note):
"""
Sets the note of this V1beta1Event.
Optional. A human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.
:param note: The note of this V1beta1Event.
:type: str
"""
self._note = note
@property
def reason(self):
"""
Gets the reason of this V1beta1Event.
Why the action was taken.
:return: The reason of this V1beta1Event.
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""
Sets the reason of this V1beta1Event.
Why the action was taken.
:param reason: The reason of this V1beta1Event.
:type: str
"""
self._reason = reason
@property
def regarding(self):
"""
Gets the regarding of this V1beta1Event.
The object this Event is about. In most cases it's an Object reporting controller implements. E.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.
:return: The regarding of this V1beta1Event.
:rtype: V1ObjectReference
"""
return self._regarding
@regarding.setter
def regarding(self, regarding):
"""
Sets the regarding of this V1beta1Event.
The object this Event is about. In most cases it's an Object reporting controller implements. E.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.
:param regarding: The regarding of this V1beta1Event.
:type: V1ObjectReference
"""
self._regarding = regarding
@property
def related(self):
"""
Gets the related of this V1beta1Event.
Optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.
:return: The related of this V1beta1Event.
:rtype: V1ObjectReference
"""
return self._related
@related.setter
def related(self, related):
"""
Sets the related of this V1beta1Event.
Optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.
:param related: The related of this V1beta1Event.
:type: V1ObjectReference
"""
self._related = related
@property
def reporting_controller(self):
"""
Gets the reporting_controller of this V1beta1Event.
Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
:return: The reporting_controller of this V1beta1Event.
:rtype: str
"""
return self._reporting_controller
@reporting_controller.setter
def reporting_controller(self, reporting_controller):
"""
Sets the reporting_controller of this V1beta1Event.
Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
:param reporting_controller: The reporting_controller of this V1beta1Event.
:type: str
"""
self._reporting_controller = reporting_controller
@property
def reporting_instance(self):
"""
Gets the reporting_instance of this V1beta1Event.
ID of the controller instance, e.g. `kubelet-xyzf`.
:return: The reporting_instance of this V1beta1Event.
:rtype: str
"""
return self._reporting_instance
@reporting_instance.setter
def reporting_instance(self, reporting_instance):
"""
Sets the reporting_instance of this V1beta1Event.
ID of the controller instance, e.g. `kubelet-xyzf`.
:param reporting_instance: The reporting_instance of this V1beta1Event.
:type: str
"""
self._reporting_instance = reporting_instance
@property
def series(self):
"""
Gets the series of this V1beta1Event.
Data about the Event series this event represents or nil if it's a singleton Event.
:return: The series of this V1beta1Event.
:rtype: V1beta1EventSeries
"""
return self._series
@series.setter
def series(self, series):
"""
Sets the series of this V1beta1Event.
Data about the Event series this event represents or nil if it's a singleton Event.
:param series: The series of this V1beta1Event.
:type: V1beta1EventSeries
"""
self._series = series
@property
def type(self):
"""
Gets the type of this V1beta1Event.
Type of this event (Normal, Warning), new types could be added in the future.
:return: The type of this V1beta1Event.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this V1beta1Event.
Type of this event (Normal, Warning), new types could be added in the future.
:param type: The type of this V1beta1Event.
:type: str
"""
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def | (self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1Event):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| __eq__ |
actions.go | package main
import (
"bytes"
"regexp"
)
type Action func([]byte) []byte
type Actioner func(...string) (Action, error)
type ActionDef struct {
NArgs int
Actioner Actioner
}
var ActionDefs = map[string]ActionDef{
"g": {1, grep},
"s": {2, sed},
"gs": {2, grepsed},
"r": {2, replace},
}
func withPattern(pattern string, f func(*regexp.Regexp, []byte) []byte) (Action, error) {
rx, err := regexp.Compile(pattern)
if err != nil {
return nil, err
}
return func(b []byte) []byte {
return f(rx, b)
}, nil
}
func grep(args ...string) (Action, error) {
return withPattern(args[0], func(rx *regexp.Regexp, b []byte) []byte {
matches := rx.FindAll(b, -1)
if len(matches) > 0 {
matches = append(matches, []byte{})
}
return bytes.Join(matches, []byte{'\n'})
})
}
func sed(args ...string) (Action, error) {
replacement := []byte(args[1])
return withPattern(args[0], func(rx *regexp.Regexp, b []byte) []byte {
return rx.ReplaceAll(b, replacement)
})
}
func grepsed(args ...string) (Action, error) {
replacement := []byte(args[1] + "\n")
return withPattern(args[0], func(rx *regexp.Regexp, b []byte) []byte {
var output []byte
for _, match := range rx.FindAllSubmatchIndex(b, -1) {
output = rx.Expand(output, replacement, b, match)
}
return output
})
}
func replace(args ...string) (Action, error) { | old, new := []byte(args[0]), []byte(args[1])
return func(b []byte) []byte {
return bytes.Replace(b, old, new, -1)
}, nil
} | |
OtherUsers.js | import React, { useEffect, useState } from "react";
import axios from "axios";
const OtherUsers = () => {
const [userData, setUserData] = useState([]);
useEffect(() => {
axios
.get("https://wunderlistdb.herokuapp.com/api/auth/users")
.then(res => setUserData(res.data))
.catch(err => console.log(err));
}, []);
return (
<div className="other-users">
<span>Sign up to join users </span>
{userData.map((user, index) => {
let userSpanContent = user.username[0].toUpperCase() + user.username.slice(1);
if (index < 3) {
index === 2 ? userSpanContent += " " : userSpanContent += ", ";
return <span key={user.id} className="other-user">{userSpanContent}</span>
}
return null;
})}
<span> and many more!</span> | </div>
)
}
export default OtherUsers; | |
doi_10_1093_nar_gks1019.py | import pandas as pd
from datanator.util import rna_halflife_util
import datetime
import datanator.config.core
import datetime
from pymongo.collation import Collation, CollationStrength
class Halflife(rna_halflife_util.RnaHLUtil):
def __init__(self, cache_dir=None, server=None, src_db=None, protein_col=None,
authDB=None, readPreference=None, username=None, password=None,
verbose=None, max_entries=None, des_db=None, rna_col=None):
"""Init
Args:
cache_dir (:obj:`str`, optional): Cache directory for logs. Defaults to None.
server (:obj:`str`, optional): MongoDB server address. Defaults to None.
db (:obj:`str`, optional): Database where initial uniprot collection resides. Defaults to None.
collection_str (:obj:`str`, optional): name of collection. Defaults to None.
authDB (:obj:`str`, optional): MongoDB authentication database. Defaults to None.
readPreference (:obj:`str`, optional): MongoDB read preference. Defaults to None.
username (:obj:`str`, optional): MongoDB username. Defaults to None.
password (:obj:`str`, optional): MongoDB password. Defaults to None.
verbose (:obj:`bool`, optional): Wheter to display verbose messages. Defaults to None.
max_entries (:obj:`int`, optional): Number of records to be processed. Defaults to None.
uniprot_col_db (:obj:`int`, optional): Database to which new uniprot records will be inserted. Defaults to None.
"""
super().__init__(server=server, username=username, password=password, src_db=src_db,
des_db=des_db, protein_col=protein_col, rna_col=rna_col, authDB=authDB, readPreference=readPreference,
max_entries=max_entries, verbose=verbose)
self.collation = Collation('en', strength=CollationStrength.SECONDARY)
self.max_entries = max_entries
self.verbose = verbose
def fill_uniprot(self, url, sheet_name, usercols='B:D', skiprows=[0,1,2],
insertion=True):
"""Fill uniprot colleciton with ordered_locus_name
from excel sheet
Args:
url (:obj:`str`): URL for Excel sheet.
sheet_name (:obj:`str`): sheet name within Excel.
usecols (:obj:`int` or :obj:`list` or :obj:`str`): Return a subset of the columns.
skiprows (:obj:`list`): rows to skip (0-indexed)
insertion (:obj:`bool`): whether to insert new records to uniprot collection.
Return:
(:obj:`pandas.DataFrame`): Dataframe
"""
df = self.make_df(url, sheet_name, usecols=usercols, skiprows=skiprows,
names=['ordered_locus_name', 'half_life', 'r_squared'])
row_count = len(df.index)
if insertion:
for index, row in df.iterrows():
if index == self.max_entries:
break
if index % 10 == 0 and self.verbose:
print("Inserting locus {} out of {} into uniprot collection.".format(index, row_count))
oln = row['ordered_locus_name']
self.fill_uniprot_by_oln(oln)
return df
def fill_rna_halflife(self, df, species):
|
def main():
src_db = 'datanator'
des_db = 'datanator'
rna_col = 'rna_halflife'
protein_col = 'uniprot'
username = datanator.config.core.get_config()[
'datanator']['mongodb']['user']
password = datanator.config.core.get_config(
)['datanator']['mongodb']['password']
server = datanator.config.core.get_config(
)['datanator']['mongodb']['server']
src = Halflife(server=server, src_db=src_db,
protein_col=protein_col, authDB='admin', readPreference='nearest',
username=username, password=password, verbose=True, max_entries=float('inf'),
des_db=des_db, rna_col=rna_col)
url = 'https://oup.silverchair-cdn.com/oup/backfile/Content_public/Journal/nar/41/1/10.1093/nar/gks1019/2/gks1019-nar-00676-a-2012-File003.xlsx?Expires=1578425844&Signature=ZRFUxLdn4-vaBt5gQci~0o56KqyR9nJj9i32ig5X6YcfqiJeV3obEq8leHGdDxx6w~KABgewiQ66HTB7gmuG~2GL-YgxPKYSjt17WrYMkc-0ibw6TMlTvWZZfvw-lPe~wvpmVfNEXnTbP7jHyNLu9jeJ6yhoXvgIyQtzA5PbEI1fyXEgeZzOKMltmITqL3g3APsPsagCTC66rwrBT23Aghh6D314uilT2DZHCc68MH2nyV~qAhFqIQiOj-7VTEKqkDPvPYvuE2KNKXdvW23gk100YV~58ozbt8ijRz5Gr5gPtE~f1Ab5l260EIbWHJNabMRleInJQqUIDPFN4C38PQ__&Key-Pair-Id=APKAIE5G5CRDK6RD3PGA'
# df = src.fill_uniprot(url, 'Supplementary Table 1', insertion=False)
# src.fill_rna_halflife(df, ['Mycobacterium tuberculosis H37Rv', 83332])
df = src.fill_uniprot(url, 'Supplementary Table 2', skiprows=list(range(0,6)))
src.fill_rna_halflife(df, ['Mycolicibacterium smegmatis MC2 155', 246196])
if __name__ == '__main__':
main() | """load data into rna_halflife collection
Args:
df (:obj:`pandas.DataFrame`): dataframe to be loaded into the database
species (:obj:`list`): species name and ncbi_id
"""
row_count = len(df.index)
for i, row in df.iterrows():
if i == self.max_entries:
break
if i % 10 == 0 and self.verbose:
print("Processing locus {} out {}".format(i, row_count))
halflives = {}
oln = row['ordered_locus_name']
halflives['halflife'] = row['half_life'] * 60
halflives['r_squared'] = row['r_squared']
halflives['unit'] = 's'
halflives['reference'] = [{'doi': '10.1093/nar/gks1019', 'pubmed_id': '23125364'}]
halflives['growth_medium'] = 'Middlebrook 7H9 with the ADC supplement (Difco) and 0.05% Tween80, at 37 degree celcius.'
halflives['ordered_locus_name'] = oln
halflives['species'] = species[0]
halflives['ncbi_taxonomy_id'] = species[1]
gene_name, protein_name = self.uniprot_query_manager.get_gene_protein_name_by_oln(oln)
if gene_name is not None: # record exists in uniprot collection with gene_name
self.rna_hl_collection.update_one({'gene_name': gene_name},
{'$set': {'modified': datetime.datetime.utcnow()},
'$addToSet': {'halflives': halflives,
'protein_synonyms': protein_name}},
collation=self.collation, upsert=True)
elif (gene_name is None and protein_name is not None and
protein_name != 'Uncharacterized protein'): # record exists in uniprot collection with non-filler protein_name
self.rna_hl_collection.update_one({'protein_name': protein_name},
{'$set': {'modified': datetime.datetime.utcnow(),
'gene_name': gene_name},
'$addToSet': {'halflives': halflives,
'protein_synonyms': protein_name}},
collation=self.collation, upsert=True)
else:
query = {'halflives.ordered_locus_name': oln}
doc = self.rna_hl_collection.find_one(filter=query, collation=self.collation)
if doc is not None:
self.rna_hl_collection.update_one({'halflives.ordered_locus_name': oln},
{'$set': {'modified': datetime.datetime.utcnow(),
'gene_name': gene_name},
'$addToSet': {'halflives': halflives,
'protein_synonyms': protein_name}},
collation=self.collation, upsert=True)
else:
doc = {'halflives': [halflives], 'modified': datetime.datetime.utcnow(),
'gene_name': gene_name, 'protein_name': protein_name}
self.rna_hl_collection.insert_one(doc) |
solution121.py | class Solution:
|
if __name__ == "__main__":
print(Solution().maxProfit([7, 1, 5, 3, 6, 4]))
print(Solution().maxProfit([7, 6, 4, 3, 1]))
| def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if len(prices) == 0:
return 0
min_price = prices[0]
result = 0
for price in prices[1:]:
result = max(price - min_price, result)
min_price = min(price, min_price)
return result |
actions.js | /* eslint-disable no-undef */
/* eslint-disable no-unused-vars */
/* eslint-disable no-console */
import React, { useState, useEffect } from "react";
import axios from "axios";
import { getUserToken } from "../../../utils/authUtils";
import { AuthUrls } from "../../../constants/urls";
// get list of employees of current user (the manager)
export function retrieveEmployers(setEmployerList) {
axios
// .get("http://localhost:8000/api/employers/", {
.get(AuthUrls.API_EMPLOYERS, {
headers: {
authorization: "Token " + getUserToken(),
},
})
.then((response) => {
setEmployerList(response.data);
})
.catch((error) => {
console.log(error);
console.log(error.response);
});
}
// delete employer
export function deleteEmployer(employer, onClose) {
if (employer.id) {
axios
// .delete(`http://localhost:8000/api/employers/${employer.id}/`, employer, {
.delete(AuthUrls.API_EMPLOYERS + employer.id + "/", employer, {
headers: {
authorization: "Token " + getUserToken(),
},
})
.then((response) => {
onClose();
});
return;
}
}
export function updateEmployer(employer, onSave) {
if (employer.id) {
axios
// .put(`http://localhost:8000/api/employers/${employer.id}/`, employer, {
.put(AuthUrls.API_EMPLOYERS + employer.id + "/", employer, {
headers: {
authorization: "Token " + getUserToken(),
},
})
.then((response) => {
onSave();
})
.catch((error) => {
console.log(error.response);
});
return;
}
}
export function | (
employer,
onClose,
setEmailError,
setPasswordError
) {
axios
.post(AuthUrls.API_EMPLOYERS, employer, {
headers: {
authorization: "Token " + getUserToken(),
},
})
.then((response) => {
console.log("handle response");
console.log(response.data);
onClose();
})
.catch((error) => {
const response = error.response;
console.log(response.data);
if (response.data.email) {
setEmailError(response.data.email);
} else {
setEmailError(null);
}
if (response.data.password) {
setPasswordError(response.data.password);
} else {
setPasswordError(null);
}
});
}
| submitEmployer |
ex096.py | def area(c, la):
|
# Programa principal
print(f'{"Controle de Terrenos" :^30}\n'
f'{"-" * 30}')
comp = float(input('Comprimento (m)): '))
larg = float(input('Largura (m): '))
area(comp, larg)
| print(f'A area de um terreno {c :.2f}m x {la :.2f}m é de {c * la :.2f}m².')
|
test_postgresql_psycopg2.py | # -*- coding: utf8 -*-
from __future__ import unicode_literals
import locale
import os
from django.db.backends.postgresql.client import DatabaseClient
from django.test import SimpleTestCase, mock
from django.utils import six
from django.utils.encoding import force_bytes, force_str
class PostgreSqlDbshellCommandTestCase(SimpleTestCase):
def _run_it(self, dbinfo):
"""
That function invokes the runshell command, while mocking
subprocess.call. It returns a 2-tuple with:
- The command line list
- The binary content of file pointed by environment PGPASSFILE, or
None.
"""
def _mock_subprocess_call(*args):
self.subprocess_args = list(*args)
if 'PGPASSFILE' in os.environ:
with open(os.environ['PGPASSFILE'], 'rb') as f:
self.pgpass = f.read().strip() # ignore line endings
else:
self.pgpass = None
return 0
self.subprocess_args = None
self.pgpass = None
with mock.patch('subprocess.call', new=_mock_subprocess_call):
DatabaseClient.runshell_db(dbinfo)
return self.subprocess_args, self.pgpass
def test_basic(self):
self.assertEqual(
self._run_it({
'NAME': 'dbname',
'USER': 'someuser',
'PASSWORD': 'somepassword',
'HOST': 'somehost',
'PORT': 444,
}), (
['psql', '-U', 'someuser', '-h', 'somehost', '-p', '444', 'dbname'],
b'somehost:444:dbname:someuser:somepassword',
)
)
def test_nopass(self):
self.assertEqual(
self._run_it({
'NAME': 'dbname',
'USER': 'someuser',
'HOST': 'somehost',
'PORT': 444,
}), (
['psql', '-U', 'someuser', '-h', 'somehost', '-p', '444', 'dbname'],
None,
)
)
def test_column(self):
|
def test_escape_characters(self):
self.assertEqual(
self._run_it({
'NAME': 'dbname',
'USER': 'some\\user',
'PASSWORD': 'some\\password',
'HOST': 'somehost',
'PORT': 444,
}), (
['psql', '-U', 'some\\user', '-h', 'somehost', '-p', '444', 'dbname'],
b'somehost:444:dbname:some\\\\user:some\\\\password',
)
)
def test_accent(self):
# The pgpass temporary file needs to be encoded using the system locale.
encoding = locale.getpreferredencoding()
username = 'rôle'
password = 'sésame'
try:
username_str = force_str(username, encoding)
password_str = force_str(password, encoding)
pgpass_bytes = force_bytes(
'somehost:444:dbname:%s:%s' % (username, password),
encoding=encoding,
)
except UnicodeEncodeError:
if six.PY2:
self.skipTest("Your locale can't run this test.")
self.assertEqual(
self._run_it({
'NAME': 'dbname',
'USER': username_str,
'PASSWORD': password_str,
'HOST': 'somehost',
'PORT': 444,
}), (
['psql', '-U', username_str, '-h', 'somehost', '-p', '444', 'dbname'],
pgpass_bytes,
)
)
| self.assertEqual(
self._run_it({
'NAME': 'dbname',
'USER': 'some:user',
'PASSWORD': 'some:password',
'HOST': '::1',
'PORT': 444,
}), (
['psql', '-U', 'some:user', '-h', '::1', '-p', '444', 'dbname'],
b'\\:\\:1:444:dbname:some\\:user:some\\:password',
)
) |
asyncore.py | # -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <[email protected]>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. it's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.
If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import select
import socket
import sys
import time
import warnings
import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
errorcode
_DISCONNECTED = frozenset((ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
EBADF))
try:
socket_map
except NameError:
socket_map = {}
def _strerror(err):
try:
return os.strerror(err)
except (ValueError, OverflowError, NameError):
if err in errorcode:
return errorcode[err]
return "Unknown error %s" % err
class ExitNow(Exception):
pass
_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)
def read(obj):
try:
obj.handle_read_event()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def write(obj):
try:
obj.handle_write_event()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def _exception(obj):
try:
obj.handle_expt_event()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def readwrite(obj, flags):
try:
if flags & select.POLLIN:
obj.handle_read_event()
if flags & select.POLLOUT:
obj.handle_write_event()
if flags & select.POLLPRI:
obj.handle_expt_event()
if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
obj.handle_close()
except socket.error as e:
if e.args[0] not in _DISCONNECTED:
obj.handle_error()
else:
obj.handle_close()
except _reraised_exceptions:
raise
except:
obj.handle_error()
def poll(timeout=0.0, map=None):
if map is None:
map = socket_map
if map:
r = []
w = []
e = []
for fd, obj in map.items():
is_r = obj.readable()
is_w = obj.writable()
if is_r:
r.append(fd)
if is_w:
w.append(fd)
if is_r or is_w:
e.append(fd)
if [] == r == w == e:
time.sleep(timeout)
return
try:
r, w, e = select.select(r, w, e, timeout)
except select.error as err:
if err.args[0] != EINTR:
raise
else:
return
for fd in r:
obj = map.get(fd)
if obj is None:
continue
read(obj)
for fd in w:
obj = map.get(fd)
if obj is None:
continue
write(obj)
for fd in e:
obj = map.get(fd)
if obj is None:
continue
_exception(obj)
def poll2(timeout=0.0, map=None):
# Use the poll() support added to the select module in Python 2.0
if map is None:
map = socket_map
if timeout is not None:
# timeout is in milliseconds
timeout = int(timeout * 1000)
pollster = select.poll()
if map:
for fd, obj in map.items():
flags = 0
if obj.readable():
flags |= select.POLLIN | select.POLLPRI
if obj.writable():
flags |= select.POLLOUT
if flags:
# Only check for exceptions if object was either readable
# or writable.
flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
pollster.register(fd, flags)
try:
r = pollster.poll(timeout)
except select.error as err:
if err.args[0] != EINTR:
raise
r = []
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
readwrite(obj, flags)
poll3 = poll2 # Alias for backward compatibility
def loop(timeout=30.0, use_poll=False, map=None, count=None):
if map is None:
map = socket_map
if use_poll and hasattr(select, 'poll'):
poll_fun = poll2
else:
poll_fun = poll
if count is None:
while map:
poll_fun(timeout, map)
else:
while map and count > 0:
poll_fun(timeout, map)
count = count - 1
class dispatcher:
debug = False
connected = False
accepting = False
closing = False
addr = None
ignore_log_types = frozenset(['warning'])
def __init__(self, sock=None, map=None):
if map is None:
self._map = socket_map
else:
self._map = map
self._fileno = None
if sock:
# Set to nonblocking just to make sure for cases where we
# get a socket from a blocking source.
sock.setblocking(0)
self.set_socket(sock, map)
self.connected = True
# The constructor no longer requires that the socket
# passed be connected.
try:
self.addr = sock.getpeername()
except socket.error as err:
if err.args[0] == ENOTCONN:
# To handle the case where we got an unconnected
# socket.
self.connected = False
else:
# The socket is broken in some unknown way, alert
# the user and remove it from the map (to prevent
# polling of broken sockets).
self.del_channel(map)
raise
else:
self.socket = None
def __repr__(self):
status = [self.__class__.__module__ + "." + self.__class__.__name__]
if self.accepting and self.addr:
status.append('listening')
elif self.connected:
status.append('connected')
if self.addr is not None:
try:
status.append('%s:%d' % self.addr)
except TypeError:
status.append(repr(self.addr))
return '<%s at %#x>' % (' '.join(status), id(self))
__str__ = __repr__
def add_channel(self, map=None):
#self.log_info('adding channel %s' % self)
if map is None:
map = self._map
map[self._fileno] = self
def del_channel(self, map=None):
fd = self._fileno
if map is None:
map = self._map
if fd in map:
#self.log_info('closing channel %d:%s' % (fd, self))
del map[fd]
self._fileno = None
def create_socket(self, family, type):
self.family_and_type = family, type
sock = socket.socket(family, type)
sock.setblocking(0)
self.set_socket(sock)
def set_socket(self, sock, map=None):
self.socket = sock
## self.__dict__['socket'] = sock
self._fileno = sock.fileno()
self.add_channel(map)
def set_reuse_addr(self):
# try to re-use a server port if possible
try:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR,
self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR) | 1
)
except socket.error:
pass
# ==================================================
# predicates for select()
# these are used as filters for the lists of sockets
# to pass to select().
# ==================================================
def readable(self):
return True
def writable(self):
return True
# ==================================================
# socket object methods.
# ==================================================
def listen(self, num):
self.accepting = True
if os.name == 'nt' and num > 5:
num = 5
return self.socket.listen(num)
def bind(self, addr):
self.addr = addr
return self.socket.bind(addr)
def connect(self, address):
self.connected = False
err = self.socket.connect_ex(address)
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
or err == EINVAL and os.name in ('nt', 'ce'):
return
if err in (0, EISCONN):
self.addr = address
self.handle_connect_event()
else:
raise socket.error(err, errorcode[err])
def accept(self):
# XXX can return either an address pair or None
try:
conn, addr = self.socket.accept()
except TypeError:
return None
except socket.error as why:
if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
return None
else:
raise
else:
return conn, addr
def send(self, data):
try:
result = self.socket.send(data)
return result
except socket.error as why:
if why.args[0] == EWOULDBLOCK:
return 0
elif why.args[0] in _DISCONNECTED:
self.handle_close()
return 0
else:
raise
def recv(self, buffer_size):
try:
data = self.socket.recv(buffer_size)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
self.handle_close()
return ''
else:
return data
except socket.error as why:
# winsock sometimes throws ENOTCONN
if why.args[0] in _DISCONNECTED:
self.handle_close()
return ''
else:
raise
def close(self):
self.connected = False
self.accepting = False
self.del_channel()
try:
self.socket.close()
except socket.error as why:
if why.args[0] not in (ENOTCONN, EBADF):
raise
# cheap inheritance, used to pass all other attribute
# references to the underlying socket object.
# def __getattr__(self, attr):
# try:
# print "attr:%s"%attr
# retattr = getattr(self.socket, attr)
# except AttributeError:
# raise AttributeError("%s instance has no attribute '%s'"
# %(self.__class__.__name__, attr))
# else:
# msg = "%(me)s.%(attr)s is deprecated. Use %(me)s.socket.%(attr)s " \
# "instead." % {'me': self.__class__.__name__, 'attr':attr}
# warnings.warn(msg, DeprecationWarning, stacklevel=2)
# return retattr
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
def log(self, message):
sys.stderr.write('log: %s\n' % str(message))
def log_info(self, message, type='info'):
if type not in self.ignore_log_types:
print '%s: %s' % (type, message)
def handle_read_event(self):
if self.accepting:
# accepting sockets are never connected, they "spawn" new
# sockets that are connected
self.handle_accept()
elif not self.connected:
self.handle_connect_event()
self.handle_read()
else:
self.handle_read()
def handle_connect_event(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
raise socket.error(err, _strerror(err))
self.handle_connect()
self.connected = True
def handle_write_event(self):
if self.accepting:
# Accepting sockets shouldn't get a write event.
# We will pretend it didn't happen.
return
if not self.connected:
# check for errors
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
raise socket.error(err, _strerror(err))
self.handle_connect_event()
self.handle_write()
def handle_expt_event(self):
# handle_expt_event() is called if there might be an error on the
# socket, or if there is OOB data
# check for the error condition first
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
# we can get here when select.select() says that there is an
# exceptional condition on the socket
# since there is an error, we'll go ahead and close the socket
# like we would in a subclassed handle_read() that received no
# data
self.handle_close()
else:
self.handle_expt()
def handle_error(self):
nil, t, v, tbinfo = compact_traceback()
# sometimes a user repr method will crash.
try:
self_repr = repr(self)
except:
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
self.log_info(
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
self_repr,
t,
v,
tbinfo
),
'error'
)
self.handle_close()
def handle_expt(self):
self.log_info('unhandled incoming priority event', 'warning')
def handle_read(self):
self.log_info('unhandled read event', 'warning')
def handle_write(self):
self.log_info('unhandled write event', 'warning')
def handle_connect(self):
self.log_info('unhandled connect event', 'warning')
def handle_accept(self):
self.log_info('unhandled accept event', 'warning')
def handle_close(self):
self.log_info('unhandled close event', 'warning')
self.close()
# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------
class dispatcher_with_send(dispatcher):
def __init__(self, sock=None, map=None):
dispatcher.__init__(self, sock, map)
self.out_buffer = ''
def initiate_send(self):
num_sent = 0
num_sent = dispatcher.send(self, self.out_buffer[:512])
self.out_buffer = self.out_buffer[num_sent:]
def handle_write(self):
self.initiate_send()
def writable(self):
return (not self.connected) or len(self.out_buffer)
def send(self, data):
if self.debug:
self.log_info('sending %s' % repr(data))
self.out_buffer = self.out_buffer + data
self.initiate_send()
# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback():
t, v, tb = sys.exc_info()
tbinfo = []
if not tb: # Must have a traceback
raise AssertionError("traceback does not exist")
while tb:
tbinfo.append((
tb.tb_frame.f_code.co_filename,
tb.tb_frame.f_code.co_name,
str(tb.tb_lineno)
))
tb = tb.tb_next
# just to be safe
del tb
file, function, line = tbinfo[-1]
info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
return (file, function, line), t, v, info
def close_all(map=None, ignore_all=False):
if map is None:
map = socket_map
for x in map.values():
try:
x.close()
except OSError as x:
if x.args[0] == EBADF:
pass
elif not ignore_all:
raise
except _reraised_exceptions:
raise
except:
if not ignore_all:
raise
map.clear()
# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
if os.name == 'posix':
import fcntl
class file_wrapper:
# Here we override just enough to make a file
# look like a socket for the purposes of asyncore.
# The passed fd is automatically os.dup()'d
def __init__(self, fd):
self.fd = os.dup(fd)
def recv(self, *args):
return os.read(self.fd, *args)
def send(self, *args):
return os.write(self.fd, *args)
def getsockopt(self, level, optname, buflen=None):
if (level == socket.SOL_SOCKET and
optname == socket.SO_ERROR and
not buflen):
return 0
raise NotImplementedError("Only asyncore specific behaviour "
"implemented.")
read = recv
write = send
def close(self):
os.close(self.fd)
def fileno(self):
return self.fd
class | (dispatcher):
def __init__(self, fd, map=None):
dispatcher.__init__(self, None, map)
self.connected = True
try:
fd = fd.fileno()
except AttributeError:
pass
self.set_file(fd)
# set it to non-blocking mode
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def set_file(self, fd):
self.socket = file_wrapper(fd)
self._fileno = self.socket.fileno()
self.add_channel()
| file_dispatcher |
test_summary_collector.py | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test SummaryCollector."""
import os
import re
import shutil
import tempfile
from collections import Counter
import pytest
from mindspore import nn, Tensor, context
from mindspore.common.initializer import Normal
from mindspore.nn.metrics import Loss
from mindspore.nn.optim import Momentum
from mindspore.ops import operations as P
from mindspore.train import Model
from mindspore.train.callback import SummaryCollector
from tests.st.summary.dataset import create_mnist_dataset
from tests.summary_utils import SummaryReader
from tests.security_utils import security_off_wrap
class LeNet5(nn.Cell):
"""
Lenet network
Args:
num_class (int): Number of classes. Default: 10.
num_channel (int): Number of channels. Default: 1.
Returns:
| Examples:
>>> LeNet(num_class=10)
"""
def __init__(self, num_class=10, num_channel=1, include_top=True):
super(LeNet5, self).__init__()
self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')
self.relu = nn.ReLU()
self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
self.include_top = include_top
if self.include_top:
self.flatten = nn.Flatten()
self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))
self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))
self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))
self.scalar_summary = P.ScalarSummary()
self.image_summary = P.ImageSummary()
self.histogram_summary = P.HistogramSummary()
self.tensor_summary = P.TensorSummary()
self.channel = Tensor(num_channel)
def construct(self, x):
"""construct."""
self.image_summary('image', x)
x = self.conv1(x)
self.histogram_summary('histogram', x)
x = self.relu(x)
self.tensor_summary('tensor', x)
x = self.relu(x)
x = self.max_pool2d(x)
self.scalar_summary('scalar', self.channel)
x = self.conv2(x)
x = self.relu(x)
x = self.max_pool2d(x)
if not self.include_top:
return x
x = self.flatten(x)
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return x
class TestSummary:
"""Test summary collector the basic function."""
base_summary_dir = ''
@classmethod
def setup_class(cls):
"""Run before test this class."""
device_id = int(os.getenv('DEVICE_ID')) if os.getenv('DEVICE_ID') else 0
context.set_context(mode=context.GRAPH_MODE, device_id=device_id)
cls.base_summary_dir = tempfile.mkdtemp(suffix='summary')
@classmethod
def teardown_class(cls):
"""Run after test this class."""
if os.path.exists(cls.base_summary_dir):
shutil.rmtree(cls.base_summary_dir)
def _run_network(self, dataset_sink_mode=False, num_samples=2, **kwargs):
"""run network."""
lenet = LeNet5()
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
optim = Momentum(lenet.trainable_params(), learning_rate=0.1, momentum=0.9)
model = Model(lenet, loss_fn=loss, optimizer=optim, metrics={'loss': Loss()})
summary_dir = tempfile.mkdtemp(dir=self.base_summary_dir)
summary_collector = SummaryCollector(summary_dir=summary_dir, collect_freq=2, **kwargs)
ds_train = create_mnist_dataset("train", num_samples=num_samples)
model.train(1, ds_train, callbacks=[summary_collector], dataset_sink_mode=dataset_sink_mode)
ds_eval = create_mnist_dataset("test")
model.eval(ds_eval, dataset_sink_mode=dataset_sink_mode, callbacks=[summary_collector])
return summary_dir
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@security_off_wrap
def test_summary_with_sink_mode_false(self):
"""Test summary with sink mode false, and num samples is 64."""
summary_dir = self._run_network(num_samples=10)
tag_list = self._list_summary_tags(summary_dir)
expected_tag_set = {'conv1.weight/auto', 'conv2.weight/auto', 'fc1.weight/auto', 'fc1.bias/auto',
'fc2.weight/auto', 'input_data/auto', 'loss/auto',
'histogram', 'image', 'scalar', 'tensor'}
assert set(expected_tag_set) == set(tag_list)
# num samples is 10, batch size is 2, so step is 5, collect freq is 2,
# SummaryCollector will collect the first step and 2th, 4th step
tag_count = 3
for value in Counter(tag_list).values():
assert value == tag_count
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@security_off_wrap
def test_summary_with_sink_mode_true(self):
"""Test summary with sink mode true, and num samples is 64."""
summary_dir = self._run_network(dataset_sink_mode=True, num_samples=10)
tag_list = self._list_summary_tags(summary_dir)
# There will not record input data when dataset sink mode is True
expected_tags = {'conv1.weight/auto', 'conv2.weight/auto', 'fc1.weight/auto', 'fc1.bias/auto',
'fc2.weight/auto', 'loss/auto', 'histogram', 'image', 'scalar', 'tensor'}
assert set(expected_tags) == set(tag_list)
tag_count = 1
for value in Counter(tag_list).values():
assert value == tag_count
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
@security_off_wrap
def test_summarycollector_user_defind(self):
"""Test SummaryCollector with user-defined."""
summary_dir = self._run_network(dataset_sink_mode=True, num_samples=2,
custom_lineage_data={'test': 'self test'},
export_options={'tensor_format': 'npy'})
tag_list = self._list_summary_tags(summary_dir)
file_list = self._list_tensor_files(summary_dir)
# There will not record input data when dataset sink mode is True
expected_tags = {'conv1.weight/auto', 'conv2.weight/auto', 'fc1.weight/auto', 'fc1.bias/auto',
'fc2.weight/auto', 'loss/auto', 'histogram', 'image', 'scalar', 'tensor'}
assert set(expected_tags) == set(tag_list)
expected_files = {'tensor_1.npy'}
assert set(expected_files) == set(file_list)
@staticmethod
def _list_summary_tags(summary_dir):
"""list summary tags."""
summary_file_path = ''
for file in os.listdir(summary_dir):
if re.search("_MS", file):
summary_file_path = os.path.join(summary_dir, file)
break
assert summary_file_path
tags = list()
with SummaryReader(summary_file_path) as summary_reader:
while True:
summary_event = summary_reader.read_event()
if not summary_event:
break
for value in summary_event.summary.value:
tags.append(value.tag)
return tags
@staticmethod
def _list_tensor_files(summary_dir):
"""list tensor tags."""
export_file_path = ''
for file in os.listdir(summary_dir):
if re.search("export_", file):
export_file_path = os.path.join(summary_dir, file)
break
assert export_file_path
tensor_file_path = os.path.join(export_file_path, "tensor")
assert tensor_file_path
tensors = list()
for file in os.listdir(tensor_file_path):
tensors.append(file)
return tensors | Tensor, output tensor
|
signal.go | //go:build linux || darwin || freebsd
// +build linux darwin freebsd
package kcpclient
import (
"log"
"os"
"os/signal"
"syscall"
kcp "github.com/xtaci/kcp-go/v5"
)
func | () {
go sigHandler()
}
func sigHandler() {
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGUSR1)
signal.Ignore(syscall.SIGPIPE)
for {
switch <-ch {
case syscall.SIGUSR1:
log.Printf("KCP SNMP:%+v", kcp.DefaultSnmp.Copy())
}
}
}
| init |
issue-79099.rs | struct Bug {
V1: [(); {
let f: impl core::future::Future<Output = u8> = async { 1 };
//~^ `impl Trait` not allowed outside of function and method return types
//~| expected identifier
1
}], | }
fn main() {} | |
test_gini.py | import torch
from torch_geometric.nn.functional import gini
def test_gini():
| w = torch.tensor(
[
[0., 0., 0., 0.],
[0., 0., 0., 1000.0]
]
)
assert torch.isclose(gini(w), torch.tensor(0.5)) |
|
pages_tests.py | # -*- coding: utf-8 -*-
"""Django page CMS test suite module"""
import django
from django.conf import settings
from django.test.client import Client
from django.template import Template, RequestContext, TemplateDoesNotExist
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from pages.models import Page, Content, PageAlias
from pages.tests.testcase import TestCase
class PagesTestCase(TestCase):
"""Django page CMS test suite class"""
def test_01_add_page(self):
"""Test that the add admin page could be displayed via the
admin"""
c = Client()
c.login(username= 'batiste', password='b')
response = c.get('/admin/pages/page/add/')
self.assertEqual(response.status_code, 200)
def test_02_create_page(self):
"""Test that a page can be created via the admin."""
#setattr(settings, "SITE_ID", 2)
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
response = c.post('/admin/pages/page/add/', page_data)
self.assertRedirects(response, '/admin/pages/page/')
slug_content = Content.objects.get_content_slug_by_slug(
page_data['slug']
)
assert(slug_content is not None)
page = slug_content.page
self.assertEqual(page.title(), page_data['title'])
self.assertEqual(page.slug(), page_data['slug'])
self.assertNotEqual(page.last_modification_date, None)
def test_03_slug_collision(self):
"""Test a slug collision."""
setattr(settings, "PAGE_UNIQUE_SLUG_REQUIRED", True)
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
response = c.post('/admin/pages/page/add/', page_data)
self.assertRedirects(response, '/admin/pages/page/')
setattr(settings, "PAGE_UNIQUE_SLUG_REQUIRED", False)
response = c.post('/admin/pages/page/add/', page_data)
self.assertEqual(response.status_code, 200)
page1 = Content.objects.get_content_slug_by_slug(page_data['slug']).page
page_data['position'] = 'first-child'
page_data['target'] = page1.id
response = c.post('/admin/pages/page/add/', page_data)
self.assertRedirects(response, '/admin/pages/page/')
page2 = Content.objects.get_content_slug_by_slug(page_data['slug']).page
self.assertNotEqual(page1.id, page2.id)
def test_04_details_view(self):
"""Test the details view"""
c = Client()
c.login(username= 'batiste', password='b')
try:
response = c.get('/pages/')
except TemplateDoesNotExist, e:
if e.args != ('404.html',):
raise
page_data = self.get_new_page_data()
page_data['status'] = Page.DRAFT
response = c.post('/admin/pages/page/add/', page_data)
try:
response = c.get('/pages/')
except TemplateDoesNotExist, e:
if e.args != ('404.html',):
raise
page_data = self.get_new_page_data()
page_data['status'] = Page.PUBLISHED
page_data['slug'] = 'test-page-2'
page_data['template'] = 'pages/index.html'
response = c.post('/admin/pages/page/add/', page_data)
self.assertRedirects(response, '/admin/pages/page/')
response = c.get('/pages/test-page-2/')
self.assertEqual(response.status_code, 200)
def test_05_edit_page(self):
"""Test that a page can edited via the admin"""
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
response = c.post('/admin/pages/page/add/', page_data)
self.assertRedirects(response, '/admin/pages/page/')
page = Page.objects.all()[0]
response = c.get('/admin/pages/page/%d/' % page.id)
self.assertEqual(response.status_code, 200)
page_data['title'] = 'changed title'
page_data['body'] = 'changed body'
response = c.post('/admin/pages/page/%d/' % page.id, page_data)
self.assertRedirects(response, '/admin/pages/page/')
page = Page.objects.get(id=page.id)
self.assertEqual(page.title(), 'changed title')
body = Content.objects.get_content(page, 'en-us', 'body')
self.assertEqual(body, 'changed body')
def test_06_site_framework(self):
"""Test the site framework, and test if it's possible to
disable it"""
# this is necessary to make the test pass
from pages import settings as pages_settings
setattr(pages_settings, "SITE_ID", 2)
setattr(pages_settings, "PAGE_USE_SITE_ID", True)
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
page_data["sites"] = [2]
response = c.post('/admin/pages/page/add/', page_data)
self.assertRedirects(response, '/admin/pages/page/')
page = Content.objects.get_content_slug_by_slug(page_data['slug']).page
self.assertEqual(page.sites.count(), 1)
self.assertEqual(page.sites.all()[0].id, 2)
page_data = self.get_new_page_data()
page_data["sites"] = [3]
response = c.post('/admin/pages/page/add/', page_data)
self.assertRedirects(response, '/admin/pages/page/')
# we cannot get a slug that doesn't exist
content = Content.objects.get_content_slug_by_slug("this doesn't exist")
self.assertEqual(content, None)
# we cannot get the data posted on another site
content = Content.objects.get_content_slug_by_slug(page_data['slug'])
self.assertEqual(content, None)
setattr(pages_settings, "SITE_ID", 3)
page = Content.objects.get_content_slug_by_slug(page_data['slug']).page
self.assertEqual(page.sites.count(), 1)
self.assertEqual(page.sites.all()[0].id, 3)
| # with param
self.assertEqual(Page.objects.on_site(2).count(), 1)
self.assertEqual(Page.objects.on_site(3).count(), 1)
# without param
self.assertEqual(Page.objects.on_site().count(), 1)
setattr(pages_settings, "SITE_ID", 2)
self.assertEqual(Page.objects.on_site().count(), 1)
page_data = self.get_new_page_data()
page_data["sites"] = [2, 3]
response = c.post('/admin/pages/page/add/', page_data)
self.assertRedirects(response, '/admin/pages/page/')
self.assertEqual(Page.objects.on_site(3).count(), 2)
self.assertEqual(Page.objects.on_site(2).count(), 2)
self.assertEqual(Page.objects.on_site().count(), 2)
setattr(pages_settings, "PAGE_USE_SITE_ID", False)
# we should get everything
self.assertEqual(Page.objects.on_site().count(), 3)
def test_07_languages(self):
"""Test post a page with different languages
and test that the admin views works correctly."""
c = Client()
user = c.login(username= 'batiste', password='b')
# test that the client language setting is used in add page admin
c.cookies["django_language"] = 'de'
response = c.get('/admin/pages/page/add/')
self.assertContains(response, 'value="de" selected="selected"')
c.cookies["django_language"] = 'fr-ch'
response = c.get('/admin/pages/page/add/')
self.assertContains(response, 'value="fr-ch" selected="selected"')
page_data = self.get_new_page_data()
page_data["title"] = 'english title'
response = c.post('/admin/pages/page/add/', page_data)
self.assertRedirects(response, '/admin/pages/page/')
page = Page.objects.all()[0]
self.assertEqual(page.get_languages(), ['en-us'])
# this test only works in version superior of 1.0.2
django_version = django.get_version().rsplit()[0].split('.')
if len(django_version) > 2:
major, middle, minor = [int(v) for v in django_version]
else:
major, middle = [int(v) for v in django_version]
if major >= 1 and middle > 0:
response = c.get('/admin/pages/page/%d/?language=de' % page.id)
self.assertContains(response, 'value="de" selected="selected"')
# add a french version of the same page
page_data["language"] = 'fr-ch'
page_data["title"] = 'french title'
response = c.post('/admin/pages/page/%d/' % page.id, page_data)
self.assertRedirects(response, '/admin/pages/page/')
#setattr(settings, "PAGE_DEFAULT_LANGUAGE", 'en-us')
# test that the frontend view use the good parameters
# I cannot find a way of setting the accept-language HTTP
# header so I used django_language cookie instead
c = Client()
c.cookies["django_language"] = 'en-us'
response = c.get('/pages/')
self.assertContains(response, 'english title')
self.assertContains(response, 'lang="en-us"')
self.assertNotContains(response, 'french title')
c = Client()
c.cookies["django_language"] = 'fr-ch'
response = c.get('/pages/')
self.assertContains(response, 'french title')
self.assertContains(response, 'lang="fr-ch"')
self.assertNotContains(response, 'english title')
# this should be mapped to the fr-ch content
c = Client()
c.cookies["django_language"] = 'fr-fr'
response = c.get('/pages/')
self.assertContains(response, 'french title')
self.assertContains(response, 'lang="fr-ch"')
def test_08_revision(self):
"""Test that a page can edited several times."""
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
response = c.post('/admin/pages/page/add/', page_data)
page = Page.objects.all()[0]
page_data['body'] = 'changed body'
response = c.post('/admin/pages/page/%d/' % page.id, page_data)
self.assertEqual(Content.objects.get_content(page, 'en-us', 'body'), 'changed body')
page_data['body'] = 'changed body 2'
response = c.post('/admin/pages/page/%d/' % page.id, page_data)
self.assertEqual(Content.objects.get_content(page, 'en-us', 'body'), 'changed body 2')
response = c.get('/pages/')
self.assertContains(response, 'changed body 2', 1)
setattr(settings, "PAGE_CONTENT_REVISION", False)
self.assertEqual(Content.objects.get_content(page, 'en-us', 'body'), 'changed body 2')
def test_09_placeholder(self):
"""
Test that the placeholder is correctly displayed in
the admin
"""
setattr(settings, "SITE_ID", 2)
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
page_data['template'] = 'pages/nice.html'
response = c.post('/admin/pages/page/add/', page_data)
page = Page.objects.all()[0]
response = c.get('/admin/pages/page/%d/' % page.id)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'name="right-column"', 1)
def test_10_directory_slug(self):
"""
Test diretory slugs
"""
setattr(settings, "PAGE_UNIQUE_SLUG_REQUIRED", False)
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
page_data['title'] = 'parent title'
page_data['slug'] = 'same-slug'
response = c.post('/admin/pages/page/add/', page_data)
# the redirect tell that the page has been create correctly
self.assertRedirects(response, '/admin/pages/page/')
response = c.get('/pages/same-slug/')
self.assertEqual(response.status_code, 200)
page = Page.objects.all()[0]
response = c.post('/admin/pages/page/add/', page_data)
# we cannot create 2 root page with the same slug
# this assert test that the creation fails as wanted
self.assertEqual(response.status_code, 200)
page1 = Content.objects.get_content_slug_by_slug(page_data['slug']).page
self.assertEqual(page1.id, page.id)
page_data['title'] = 'children title'
page_data['target'] = page1.id
page_data['position'] = 'first-child'
response = c.post('/admin/pages/page/add/', page_data)
self.assertRedirects(response, '/admin/pages/page/')
# finaly test that we can get every page according the path
response = c.get('/pages/same-slug')
self.assertContains(response, "parent title", 2)
response = c.get('/pages/same-slug/same-slug')
self.assertContains(response, "children title", 2)
def test_11_show_content_tag(self):
"""
Test the {% show_content %} template tag
"""
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
response = c.post('/admin/pages/page/add/', page_data)
page = Page.objects.all()[0]
class request:
REQUEST = {'language': 'en'}
GET = {}
context = RequestContext(request, {'page': page, 'lang':'en-us',
'path':'/page-1/'})
template = Template('{% load pages_tags %}'
'{% show_content page "title" "en-us" %}')
self.assertEqual(template.render(context), page_data['title'])
template = Template('{% load pages_tags %}'
'{% show_content page "title" %}')
self.assertEqual(template.render(context), page_data['title'])
def test_12_get_content_tag(self):
"""
Test the {% get_content %} template tag
"""
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
response = c.post('/admin/pages/page/add/', page_data)
page = Page.objects.all()[0]
class request:
REQUEST = {'language': 'en'}
GET = {}
context = RequestContext(request, {'page': page})
template = Template('{% load pages_tags %}'
'{% get_content page "title" "en-us" as content %}'
'{{ content }}')
self.assertEqual(template.render(context), page_data['title'])
template = Template('{% load pages_tags %}'
'{% get_content page "title" as content %}'
'{{ content }}')
self.assertEqual(template.render(context), page_data['title'])
def test_17_request_mockup(self):
from pages.utils import get_request_mock
request = get_request_mock()
self.assertEqual(hasattr(request, 'session'), True)
def test_18_tree_admin_interface(self):
"""
Test that moving/creating page in the tree is working properly
using the admin interface
"""
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
page_data['slug'] = 'root'
response = c.post('/admin/pages/page/add/', page_data)
root_page = Content.objects.get_content_slug_by_slug('root').page
self.assertTrue(root_page.is_first_root())
page_data['position'] = 'first-child'
page_data['target'] = root_page.id
page_data['slug'] = 'child-1'
response = c.post('/admin/pages/page/add/', page_data)
child_1 = Content.objects.get_content_slug_by_slug('child-1').page
self.assertFalse(child_1.is_first_root())
page_data['slug'] = 'child-2'
response = c.post('/admin/pages/page/add/', page_data)
child_2 = Content.objects.get_content_slug_by_slug('child-2').page
self.assertEqual(str(Page.objects.all()),
"[<Page: root>, <Page: child-2>, <Page: child-1>]")
# move page 1 in the first position
response = c.post('/admin/pages/page/%d/move-page/' % child_1.id,
{'position':'first-child', 'target':root_page.id})
self.assertEqual(str(Page.objects.all()),
"[<Page: root>, <Page: child-1>, <Page: child-2>]")
# move page 2 in the first position
response = c.post('/admin/pages/page/%d/move-page/' % child_2.id,
{'position': 'left', 'target': child_1.id})
self.assertEqual(str(Page.objects.all()),
"[<Page: root>, <Page: child-2>, <Page: child-1>]")
# try to create a sibling with the same slug, via left, right
from pages import settings as pages_settings
setattr(pages_settings, "PAGE_UNIQUE_SLUG_REQUIRED", False)
page_data['target'] = child_2.id
page_data['position'] = 'left'
response = c.post('/admin/pages/page/add/', page_data)
self.assertEqual(response.status_code, 200)
# try to create a sibling with the same slug, via first-child
page_data['target'] = root_page.id
page_data['position'] = 'first-child'
response = c.post('/admin/pages/page/add/', page_data)
self.assertEqual(response.status_code, 200)
# try to create a second page 2 in root
del page_data['target']
del page_data['position']
setattr(pages_settings, "PAGE_UNIQUE_SLUG_REQUIRED", True)
# cannot create because slug exists
response = c.post('/admin/pages/page/add/', page_data)
self.assertEqual(response.status_code, 200)
# Now it should work beause the page is not a sibling
setattr(pages_settings, "PAGE_UNIQUE_SLUG_REQUIRED", False)
response = c.post('/admin/pages/page/add/', page_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 4)
# Should not work because we already have sibling at the same level
response = c.post('/admin/pages/page/add/', page_data)
self.assertEqual(response.status_code, 200)
# try to change the page 2 slug into page 1
page_data['slug'] = 'child-1'
response = c.post('/admin/pages/page/%d/' % child_2.id, page_data)
self.assertEqual(response.status_code, 200)
setattr(pages_settings, "PAGE_UNIQUE_SLUG_REQUIRED", True)
response = c.post('/admin/pages/page/%d/' % child_2.id, page_data)
self.assertEqual(response.status_code, 200)
def test_19_tree(self):
"""
Test that the navigation tree works properly with mptt
"""
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
page_data['slug'] = 'page1'
response = c.post('/admin/pages/page/add/', page_data)
page_data['slug'] = 'page2'
response = c.post('/admin/pages/page/add/', page_data)
page_data['slug'] = 'page3'
response = c.post('/admin/pages/page/add/', page_data)
self.assertEqual(str(Page.objects.navigation()),
"[<Page: page1>, <Page: page2>, <Page: page3>]")
p1 = Content.objects.get_content_slug_by_slug('page1').page
p2 = Content.objects.get_content_slug_by_slug('page2').page
p3 = Content.objects.get_content_slug_by_slug('page3').page
p2.move_to(p1, 'left')
p2.save()
self.assertEqual(str(Page.objects.navigation()),
"[<Page: page2>, <Page: page1>, <Page: page3>]")
p3.move_to(p2, 'left')
p3.save()
self.assertEqual(str(Page.objects.navigation()),
"[<Page: page3>, <Page: page2>, <Page: page1>]")
p1 = Content.objects.get_content_slug_by_slug('page1').page
p2 = Content.objects.get_content_slug_by_slug('page2').page
p3 = Content.objects.get_content_slug_by_slug('page3').page
p3.move_to(p1, 'first-child')
p2.move_to(p1, 'first-child')
self.assertEqual(str(Page.objects.navigation()),
"[<Page: page1>]")
p3 = Content.objects.get_content_slug_by_slug('page3').page
p3.move_to(p1, 'left')
self.assertEqual(str(Page.objects.navigation()),
"[<Page: page3>, <Page: page1>]")
def test_20_ajax_language(self):
"""Test that language is working properly"""
c = Client()
c.login(username= 'batiste', password='b')
# Activate a language other than settings.LANGUAGE_CODE
response = c.post('/i18n/setlang/', {'language':'fr-ch' })
self.assertEqual(c.session.get('django_language', False), 'fr-ch')
# Make sure we're in french
response = c.get('/admin/pages/page/')
self.assertEqual(response.status_code, 200)
self.assertTrue('Auteur' in response.content)
# Create some pages (taken from test_18_tree_admin_interface)
page_data = self.get_new_page_data()
page_data['slug'] = 'root'
response = c.post('/admin/pages/page/add/', page_data)
root_page = Content.objects.get_content_slug_by_slug('root').page
page_data['position'] = 'first-child'
page_data['target'] = root_page.id
page_data['slug'] = 'child-1'
response = c.post('/admin/pages/page/add/', page_data)
child_1 = Content.objects.get_content_slug_by_slug('child-1').page
page_data['slug'] = 'child-2'
response = c.post('/admin/pages/page/add/', page_data)
child_2 = Content.objects.get_content_slug_by_slug('child-2').page
self.assertEqual(str(Page.objects.all()),
"[<Page: root>, <Page: child-2>, <Page: child-1>]")
"""
The relevant bit, fixed by rev 501: the response issued by a move
command returns content localized in settings.LANGUAGE_CODE (i.e. 'en´)
even though the original AJAX request passed in a the correct
session ID localizing this client as fr-ch
This is probably because the LocaleMiddleware gets instantiated
with a couple request_mocks which have no real connection to the
AJAX request *but* django.utils.translation caches the active
language on a per thread basis.
This means that the first "bogus" call to LocaleMiddleware.process_request
will "kill" the localization data for the AJAX request.
Rev. 501 fixes this by passing in the language code from the original request.
"""
response = c.post('/admin/pages/page/%d/move-page/' % child_1.id,
{'position':'first-child', 'target':root_page.id})
# Make sure the content response we got was in french
self.assertTrue('Auteur' in response.content)
def test_21_view_context(self):
"""
Test that the default view can only return the context
"""
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
page_data['slug'] = 'page1'
# create a page for the example otherwise you will get a Http404 error
response = c.post('/admin/pages/page/add/', page_data)
page1 = Content.objects.get_content_slug_by_slug('page1').page
from pages.views import details
from pages.utils import get_request_mock
request = get_request_mock()
context = details(request, only_context=True)
self.assertEqual(context['current_page'], page1)
def test_24_page_valid_targets(self):
"""Test page valid_targets method"""
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
page_data['slug'] = 'root'
response = c.post('/admin/pages/page/add/', page_data)
root_page = Content.objects.get_content_slug_by_slug('root').page
page_data['position'] = 'first-child'
page_data['target'] = root_page.id
page_data['slug'] = 'child-1'
response = c.post('/admin/pages/page/add/', page_data)
self.assertEqual(response.status_code, 302)
c1 = Content.objects.get_content_slug_by_slug('child-1').page
root_page = Content.objects.get_content_slug_by_slug('root').page
self.assertEqual(len(root_page.valid_targets()), 0)
self.assertEqual(str(c1.valid_targets()),
"[<Page: root>]")
def test_25_page_admin_view(self):
"""Test page admin view"""
c = Client()
c.login(username= 'batiste', password='b')
page_data = self.get_new_page_data()
page_data['slug'] = 'page-1'
response = c.post('/admin/pages/page/add/', page_data)
page = Content.objects.get_content_slug_by_slug('page-1').page
self.assertEqual(page.status, 1)
response = c.post('/admin/pages/page/%d/change-status/' %
page.id, {'status':Page.DRAFT})
page = Content.objects.get_content_slug_by_slug('page-1').page
self.assertEqual(page.status, Page.DRAFT)
url = '/admin/pages/page/%d/modify-content/title/en-us/' % page.id
response = c.post(url, {'content': 'test content'})
self.assertEqual(page.title(), 'test content')
# TODO: realy test these methods
url = '/admin/pages/page/%d/traduction/en-us/' % page.id
response = c.get(url)
self.assertEqual(response.status_code, 200)
url = '/admin/pages/page/%d/sub-menu/' % page.id
response = c.get(url)
self.assertEqual(response.status_code, 200)
url = '/admin/pages/page/%d/get-content/1/' % page.id
response = c.get(url)
self.assertEqual(response.status_code, 200)
def test_26_page_alias(self):
"""Test page aliasing system"""
c = Client()
c.login(username= 'batiste', password='b')
# create some pages
page_data = self.get_new_page_data()
page_data['title'] = 'home-page-title'
page_data['slug'] = 'home-page'
response = c.post('/admin/pages/page/add/', page_data)
self.assertRedirects(response, '/admin/pages/page/')
page_data['title'] = 'downloads-page-title'
page_data['slug'] = 'downloads-page'
response = c.post('/admin/pages/page/add/', page_data)
self.assertRedirects(response, '/admin/pages/page/')
# create aliases for the pages
page = Page.objects.from_path('home-page', None)
self.assertTrue(page)
p = PageAlias(page=page, url='/index.php')
p.save()
page = Page.objects.from_path('downloads-page', None)
self.assertTrue(page)
p = PageAlias(page=page, url='index.php?page=downloads')
p.save()
# now check whether we can retrieve the pages.
# is the homepage available from is alias
response = c.get('/pages/index.php')
self.assertRedirects(response, '/pages/home-page', 301)
# for the download page, the slug is canonical
response = c.get('/pages/downloads-page/')
self.assertContains(response, "downloads-page-title", 2)
# calling via its alias must cause redirect
response = c.get('/pages/index.php?page=downloads')
self.assertRedirects(response, '/pages/downloads-page', 301)
def test_27_page_redirect_to(self):
"""Test page redirected to an other page."""
client = Client()
client.login(username= 'batiste', password='b')
# create some pages
page1 = self.create_new_page(client)
page2 = self.create_new_page(client)
page1.redirect_to = page2
page1.save()
# now check whether you go to the target page.
response = client.get(page1.get_absolute_url())
self.assertRedirects(response, page2.get_absolute_url(), 301)
def test_28_page_redirect_to_url(self):
"""Test page redirected to external url."""
client = Client()
client.login(username= 'batiste', password='b')
page1 = self.create_new_page(client)
url = 'http://code.google.com/p/django-page-cms/'
page1.redirect_to_url = url
page1.save()
# now check whether we can retrieve the page.
response = client.get(page1.get_absolute_url())
self.assertTrue(response.status_code == 301)
self.assertTrue(response['Location'] == url) | |
test_dumps.py | import json
from decimal import Decimal
from django.db import models
from django.utils.timezone import now
import pytest
from django_unicorn import serializer
from django_unicorn.utils import dicts_equal
from example.coffee.models import Flavor
class SimpleTestModel(models.Model):
name = models.CharField(max_length=10)
class Meta:
app_label = "tests"
class ComplicatedTestModel(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey("self", blank=True, null=True, on_delete=models.SET_NULL)
class Meta:
app_label = "tests"
def test_int():
expected = '{"name":123}'
actual = serializer.dumps({"name": 123})
assert expected == actual
def test_decimal():
expected = '{"name":"123.1"}'
actual = serializer.dumps({"name": Decimal("123.1")})
assert expected == actual
def test_string():
expected = '{"name":"abc"}'
actual = serializer.dumps({"name": "abc"})
assert expected == actual
def test_list():
expected = '{"name":["abc","def"]}'
actual = serializer.dumps({"name": ["abc", "def",]})
assert expected == actual
def test_simple_model():
simple_test_model = SimpleTestModel(id=1, name="abc")
expected = '{"simple_test_model":{"name":"abc","pk":1}}'
actual = serializer.dumps({"simple_test_model": simple_test_model})
assert expected == actual
def test_model_with_datetime(db):
datetime = now()
flavor = Flavor(name="name1", datetime=datetime)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": datetime.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3],
"time": None,
"duration": None,
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def | (db):
datetime = now().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
flavor = Flavor(name="name1", datetime=datetime)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": datetime,
"time": None,
"duration": None,
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_with_time_as_string(db):
time = now().strftime("%H:%M:%S.%f")[:-3]
flavor = Flavor(name="name1", time=time)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": None,
"time": time,
"duration": None,
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_with_duration_as_string(db):
duration = "-1 day, 19:00:00"
flavor = Flavor(name="name1", duration=duration)
expected = {
"flavor": {
"name": "name1",
"label": "",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor.uuid),
"date": None,
"datetime": None,
"time": None,
"duration": "-1 19:00:00",
"pk": None,
}
}
actual = serializer.dumps({"flavor": flavor})
assert dicts_equal(expected, json.loads(actual))
def test_model_foreign_key():
test_model_one = ComplicatedTestModel(id=1, name="abc")
test_model_two = ComplicatedTestModel(id=2, name="def", parent=test_model_one)
expected = '{"test_model_two":{"name":"def","parent":1,"pk":2}}'
actual = serializer.dumps({"test_model_two": test_model_two})
assert expected == actual
def test_model_foreign_key_recursive_parent():
test_model_one = ComplicatedTestModel(id=1, name="abc")
test_model_two = ComplicatedTestModel(id=2, name="def", parent=test_model_one)
test_model_one.parent = test_model_two
expected = '{"test_model_two":{"name":"def","parent":1,"pk":2}}'
actual = serializer.dumps({"test_model_two": test_model_two})
assert expected == actual
@pytest.mark.django_db
def test_dumps_queryset(db):
flavor_one = Flavor(name="name1", label="label1")
flavor_one.save()
flavor_two = Flavor(name="name2", label="label2", parent=flavor_one)
flavor_two.save()
flavors = Flavor.objects.all()
expected_data = {
"flavors": [
{
"name": "name1",
"label": "label1",
"parent": None,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor_one.uuid),
"date": None,
"datetime": None,
"time": None,
"duration": None,
"pk": 1,
},
{
"name": "name2",
"label": "label2",
"parent": 1,
"float_value": None,
"decimal_value": None,
"uuid": str(flavor_two.uuid),
"date": None,
"datetime": None,
"time": None,
"duration": None,
"pk": 2,
},
]
}
actual = serializer.dumps({"flavors": flavors})
assert expected_data == json.loads(actual)
def test_get_model_dict():
flavor_one = Flavor(name="name1", label="label1")
actual = serializer._get_model_dict(flavor_one)
expected = {
"pk": None,
"name": "name1",
"label": "label1",
"parent": None,
"decimal_value": None,
"float_value": None,
"uuid": str(flavor_one.uuid),
"date": None,
"datetime": None,
"time": None,
"duration": None,
}
assert expected == actual
def test_float():
expected = '{"name":"0.0"}'
actual = serializer.dumps({"name": 0.0})
assert expected == actual
def test_dict_float():
expected = '{"name":{"another":"0.0"}}'
actual = serializer.dumps({"name": {"another": 0.0}})
assert expected == actual
def test_list_float():
expected = '{"name":[1,2,"0.0"]}'
actual = serializer.dumps({"name": [1, 2, 0.0]})
assert expected == actual
def test_nested_list_float():
expected = '{"name":{"blob":[1,2,"0.0"]}}'
actual = serializer.dumps({"name": {"blob": [1, 2, 0.0]}})
assert expected == actual
def test_nested_list_float_complicated():
expected = '{"name":{"blob":[1,2,"0.0"]},"more":["1.9",2,5],"another":[{"great":"1.0","ok":["1.6","0.0",4]}]}'
actual = serializer.dumps(
{
"name": {"blob": [1, 2, 0.0]},
"more": [1.9, 2, 5],
"another": [{"great": 1.0, "ok": [1.6, 0.0, 4]}],
}
)
assert expected == actual
def test_nested_list_float_less_complicated():
expected = '{"another":[{"great":"1.0","ok":["1.6","0.0",4]}]}'
actual = serializer.dumps({"another": [{"great": 1.0, "ok": [1.6, 0.0, 4]}],})
assert expected == actual
def test_pydantic():
from pydantic import BaseModel
class Book(BaseModel):
title = "The Grapes of Wrath"
author = "John Steinbeck"
expected = '{"title":"The Grapes of Wrath","author":"John Steinbeck"}'
actual = serializer.dumps(Book())
assert expected == actual
| test_model_with_datetime_as_string |
environment.py | import gym
from gym import spaces
from gym.envs.registration import EnvSpec
import numpy as np
from mpe.multi_discrete import MultiDiscrete
import copy
# environment for all agents in the multiagent world
# currently code assumes that no agents will be created/destroyed at runtime!
class MultiAgentEnv(gym.Env):
metadata = {
'render.modes' : ['human', 'rgb_array']
}
def __init__(self, world, reset_callback=None, reward_callback=None,
observation_callback=None, info_callback=None,
done_callback=None, shared_viewer=True):
world = copy.deepcopy(world)
self.world = world
self.agents = self.world.policy_agents
# set required vectorized gym env property
self.n = len(world.policy_agents)
# scenario callbacks
self.reset_callback = reset_callback
self.reward_callback = reward_callback
self.observation_callback = observation_callback
self.info_callback = info_callback
self.done_callback = done_callback
# environment parameters
self.discrete_action_space = True
# if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector
self.discrete_action_input = False
# if true, even the action is continuous, action will be performed discretely
self.force_discrete_action = world.discrete_action if hasattr(world, 'discrete_action') else False
# if true, every agent has the same reward
self.shared_reward = world.collaborative if hasattr(world, 'collaborative') else False
self.time = 0
# configure spaces
self.action_space = []
self.observation_space = []
for agent in self.agents:
total_action_space = []
# physical action space
if self.discrete_action_space:
u_action_space = spaces.Discrete(world.dim_p * 2 + 1)
else:
u_action_space = spaces.Box(low=-agent.u_range, high=+agent.u_range, shape=(world.dim_p,), dtype=np.float32)
if agent.movable:
total_action_space.append(u_action_space)
# communication action space
if self.discrete_action_space:
c_action_space = spaces.Discrete(world.dim_c)
else:
c_action_space = spaces.Box(low=0.0, high=1.0, shape=(world.dim_c,), dtype=np.float32)
if not agent.silent:
total_action_space.append(c_action_space)
# total action space
if len(total_action_space) > 1:
# all action spaces are discrete, so simplify to MultiDiscrete action space
if all([isinstance(act_space, spaces.Discrete) for act_space in total_action_space]):
act_space = MultiDiscrete([[0, act_space.n - 1] for act_space in total_action_space])
else:
act_space = spaces.Tuple(total_action_space)
self.action_space.append(act_space)
else:
self.action_space.append(total_action_space[0])
# observation space
obs_dim = len(observation_callback(agent, self.world))
self.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32))
agent.action.c = np.zeros(self.world.dim_c)
self.action_space = spaces.Tuple(tuple(self.action_space))
self.observation_space = spaces.Tuple(tuple(self.observation_space))
self.n_agents = self.n
# rendering
self.shared_viewer = shared_viewer
if self.shared_viewer:
self.viewers = [None]
else:
self.viewers = [None] * self.n
self._reset_render()
def seed(self, seed):
self.world.seed(seed)
def step(self, action_n):
one_hot_actions = []
for act, acsp in zip(action_n, self.action_space):
one_hot = np.zeros(acsp.n)
one_hot[act] = 1.0
one_hot_actions.append(one_hot)
action_n = one_hot_actions
obs_n = []
reward_n = []
done_n = []
info_n = {'n': []}
self.agents = self.world.policy_agents
# set action for each agent
for i, agent in enumerate(self.agents):
self._set_action(action_n[i], agent, self.action_space[i])
# advance world state
self.world.step()
# record observation for each agent
for agent in self.agents:
obs_n.append(self._get_obs(agent))
reward_n.append(self._get_reward(agent))
done_n.append(self._get_done(agent))
info_n['n'].append(self._get_info(agent))
# all agents get total reward in cooperative case
reward = np.sum(reward_n)
if self.shared_reward:
reward_n = [reward] * self.n
return tuple(obs_n), reward_n, done_n, info_n
def reset(self):
# reset world
self.reset_callback(self.world)
# reset renderer
self._reset_render()
# record observations for each agent
obs_n = []
self.agents = self.world.policy_agents
for agent in self.agents:
obs_n.append(self._get_obs(agent))
return tuple(obs_n)
# get info used for benchmarking
def _get_info(self, agent):
if self.info_callback is None:
return {}
return self.info_callback(agent, self.world)
# get observation for a particular agent
def _get_obs(self, agent):
if self.observation_callback is None:
return np.zeros(0)
return self.observation_callback(agent, self.world).astype(np.float32)
# get dones for a particular agent
# unused right now -- agents are allowed to go beyond the viewing screen
def _get_done(self, agent):
if self.done_callback is None:
return False
return self.done_callback(agent, self.world)
# get reward for a particular agent
def _get_reward(self, agent):
if self.reward_callback is None:
return 0.0
return self.reward_callback(agent, self.world)
# set env action for a particular agent
def _set_action(self, action, agent, action_space, time=None):
agent.action.u = np.zeros(self.world.dim_p)
agent.action.c = np.zeros(self.world.dim_c)
# process action
if isinstance(action_space, MultiDiscrete):
act = []
size = action_space.high - action_space.low + 1
index = 0
for s in size:
act.append(action[index:(index+s)])
index += s
action = act
else:
action = [action]
if agent.movable:
# physical action
if self.discrete_action_input:
agent.action.u = np.zeros(self.world.dim_p)
# process discrete action
if action[0] == 1: agent.action.u[0] = -1.0
if action[0] == 2: agent.action.u[0] = +1.0
if action[0] == 3: agent.action.u[1] = -1.0
if action[0] == 4: agent.action.u[1] = +1.0
else:
if self.force_discrete_action:
d = np.argmax(action[0])
action[0][:] = 0.0
action[0][d] = 1.0
if self.discrete_action_space:
agent.action.u[0] += action[0][1] - action[0][2]
agent.action.u[1] += action[0][3] - action[0][4]
else:
agent.action.u = action[0]
sensitivity = 5.0
if agent.accel is not None:
sensitivity = agent.accel
agent.action.u *= sensitivity
action = action[1:]
if not agent.silent:
# communication action
if self.discrete_action_input:
agent.action.c = np.zeros(self.world.dim_c)
agent.action.c[action[0]] = 1.0
else:
agent.action.c = action[0]
action = action[1:]
# make sure we used all elements of action
assert len(action) == 0
# reset rendering assets
def | (self):
self.render_geoms = None
self.render_geoms_xform = None
# render environment
def render(self, mode='human'):
if mode == 'human':
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
message = ''
for agent in self.world.agents:
comm = []
for other in self.world.agents:
if other is agent: continue
if np.all(other.state.c == 0):
word = '_'
else:
word = alphabet[np.argmax(other.state.c)]
message += (other.name + ' to ' + agent.name + ': ' + word + ' ')
print(message)
for i in range(len(self.viewers)):
# create viewers (if necessary)
if self.viewers[i] is None:
# import rendering only if we need it (and don't import for headless machines)
#from gym.envs.classic_control import rendering
from mpe import rendering
self.viewers[i] = rendering.Viewer(700,700)
# create rendering geometry
if self.render_geoms is None:
# import rendering only if we need it (and don't import for headless machines)
#from gym.envs.classic_control import rendering
from mpe import rendering
self.render_geoms = []
self.render_geoms_xform = []
for entity in self.world.entities:
geom = rendering.make_circle(entity.size)
xform = rendering.Transform()
if 'agent' in entity.name:
geom.set_color(*entity.color, alpha=0.5)
else:
geom.set_color(*entity.color)
geom.add_attr(xform)
self.render_geoms.append(geom)
self.render_geoms_xform.append(xform)
# add geoms to viewer
for viewer in self.viewers:
viewer.geoms = []
for geom in self.render_geoms:
viewer.add_geom(geom)
results = []
for i in range(len(self.viewers)):
from mpe import rendering
# update bounds to center around agent
cam_range = 1
if self.shared_viewer:
pos = np.zeros(self.world.dim_p)
else:
pos = self.agents[i].state.p_pos
self.viewers[i].set_bounds(pos[0]-cam_range,pos[0]+cam_range,pos[1]-cam_range,pos[1]+cam_range)
# update geometry positions
for e, entity in enumerate(self.world.entities):
self.render_geoms_xform[e].set_translation(*entity.state.p_pos)
# render to display or array
results.append(self.viewers[i].render(return_rgb_array = mode=='rgb_array'))
if self.shared_viewer:
assert len(results) == 1
return results[0]
return results
# create receptor field locations in local coordinate frame
def _make_receptor_locations(self, agent):
receptor_type = 'polar'
range_min = 0.05 * 2.0
range_max = 1.00
dx = []
# circular receptive field
if receptor_type == 'polar':
for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):
for distance in np.linspace(range_min, range_max, 3):
dx.append(distance * np.array([np.cos(angle), np.sin(angle)]))
# add origin
dx.append(np.array([0.0, 0.0]))
# grid receptive field
if receptor_type == 'grid':
for x in np.linspace(-range_max, +range_max, 5):
for y in np.linspace(-range_max, +range_max, 5):
dx.append(np.array([x,y]))
return dx
def close(self):
for viewer in self.viewers:
if viewer:
viewer.close()
# vectorized wrapper for a batch of multi-agent environments
# assumes all environments have the same observation and action space
class BatchMultiAgentEnv(gym.Env):
metadata = {
'runtime.vectorized': True,
'render.modes' : ['human', 'rgb_array']
}
def __init__(self, env_batch):
self.env_batch = env_batch
@property
def n(self):
return np.sum([env.n for env in self.env_batch])
@property
def action_space(self):
return self.env_batch[0].action_space
@property
def observation_space(self):
return self.env_batch[0].observation_space
def step(self, action_n, time):
obs_n = []
reward_n = []
done_n = []
info_n = {'n': []}
i = 0
for env in self.env_batch:
obs, reward, done, _ = env.step(action_n[i:(i+env.n)], time)
i += env.n
obs_n += obs
# reward = [r / len(self.env_batch) for r in reward]
reward_n += reward
done_n += done
return obs_n, reward_n, done_n, info_n
def reset(self):
obs_n = []
for env in self.env_batch:
obs_n += env.reset()
return obs_n
# render environment
def render(self, mode='human', close=True):
results_n = []
for env in self.env_batch:
results_n += env.render(mode, close)
return results_n
| _reset_render |
id_sg.go | package cliaws
import (
"context"
"fmt"
"github.com/alexflint/go-arg"
"github.com/nathants/cli-aws/lib"
)
func init() {
lib.Commands["ec2-id-sg"] = ec2IdSg
lib.Args["ec2-id-sg"] = ec2IdSgArgs{}
}
type ec2IdSgArgs struct {
VpcName string `arg:"positional,required"`
SgName string `arg:"positional,required"`
}
func (ec2IdSgArgs) Description() string {
return "\nsg id\n"
}
func | () {
var args ec2IdSgArgs
arg.MustParse(&args)
ctx := context.Background()
id, err := lib.EC2SgID(ctx, args.VpcName, args.SgName)
if err != nil {
lib.Logger.Fatal("error: ", err)
}
fmt.Println(id)
}
| ec2IdSg |
simple.py | # -*- coding: utf-8 -*-
import datetime
import numpy as np
from ..base import Property
from ..models.measurement import MeasurementModel
from ..models.transition import TransitionModel
from ..reader import GroundTruthReader
from ..types.detection import TrueDetection, Clutter
from ..types.groundtruth import GroundTruthPath, GroundTruthState
from ..types.numeric import Probability
from ..types.state import GaussianState, State
from .base import DetectionSimulator, GroundTruthSimulator
from stonesoup.buffered_generator import BufferedGenerator
class SingleTargetGroundTruthSimulator(GroundTruthSimulator):
"""Target simulator that produces a single target"""
transition_model = Property(
TransitionModel, doc="Transition Model used as propagator for track.")
initial_state = Property(
State,
doc="Initial state to use to generate ground truth")
timestep = Property(
datetime.timedelta,
default=datetime.timedelta(seconds=1),
doc="Time step between each state. Default one second.")
number_steps = Property(
int, default=100, doc="Number of time steps to run for")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.index = 0
@BufferedGenerator.generator_method
def groundtruth_paths_gen(self):
time = self.initial_state.timestamp or datetime.datetime.now()
gttrack = GroundTruthPath([
GroundTruthState(self.initial_state.state_vector, timestamp=time,
metadata={"index": self.index})])
yield time, {gttrack}
for _ in range(self.number_steps - 1):
time += self.timestep
# Move track forward
trans_state_vector = self.transition_model.function(
gttrack[-1].state_vector,
time_interval=self.timestep)
gttrack.append(GroundTruthState(
trans_state_vector, timestamp=time,
metadata={"index": self.index}))
yield time, {gttrack}
class SwitchOneTargetGroundTruthSimulator(SingleTargetGroundTruthSimulator):
"""Target simulator that produces a single target. This target switches
between multiple transition models based on a markov matrix
(:attr:`model_probs`)"""
transition_models = Property(
[TransitionModel], doc="List of transition models to be used, ensure\
that they all have the same dimensions.")
model_probs = Property([float], doc="A matrix of probabilities.\
The element in the ith row and the jth column is the probability of\
switching from the ith transition model in :attr:`transition_models`\
to the jth")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.index = 0
@property
def transition_model(self):
self.index = np.random.choice(range(0, len(self.transition_models)),
p=self.model_probs[self.index])
return self.transition_models[self.index]
class MultiTargetGroundTruthSimulator(SingleTargetGroundTruthSimulator):
"""Target simulator that produces multiple targets.
Targets are created and destroyed randomly, as defined by the birth rate
and death probability."""
transition_model = Property(
TransitionModel, doc="Transition Model used as propagator for track.")
initial_state = Property(
GaussianState,
doc="Initial state to use to generate states")
birth_rate = Property(
float, default=1.0, doc="Rate at which tracks are born. Expected "
"number of occurrences (λ) in Poisson distribution. Default 1.0.")
death_probability = Property(
Probability, default=0.1,
doc="Probability of track dying in each time step. Default 0.1.")
@BufferedGenerator.generator_method
def g | self):
groundtruth_paths = set()
time = self.initial_state.timestamp or datetime.datetime.now()
for _ in range(self.number_steps):
# Random drop tracks
groundtruth_paths.difference_update(
gttrack
for gttrack in groundtruth_paths.copy()
if np.random.rand() <= self.death_probability)
# Move tracks forward
for gttrack in groundtruth_paths:
self.index = gttrack[-1].metadata.get("index")
trans_state_vector = self.transition_model.function(
gttrack[-1].state_vector,
time_interval=self.timestep)
gttrack.append(GroundTruthState(
trans_state_vector, timestamp=time,
metadata={"index": self.index}))
# Random create
for _ in range(np.random.poisson(self.birth_rate)):
self.index = 0
gttrack = GroundTruthPath()
gttrack.append(GroundTruthState(
self.initial_state.state_vector +
np.sqrt(self.initial_state.covar) @
np.random.randn(self.initial_state.ndim, 1),
timestamp=time, metadata={"index": self.index}))
groundtruth_paths.add(gttrack)
yield time, groundtruth_paths
time += self.timestep
class SwitchMultiTargetGroundTruthSimulator(MultiTargetGroundTruthSimulator):
"""Functions identically to :class:`~.MultiTargetGroundTruthSimulator`,
but has the transition model switching ability from
:class:`.SwitchOneTargetGroundTruthSimulator`"""
transition_models = Property(
[TransitionModel], doc="List of transition models to be used, ensure\
that they all have the same dimensions.")
model_probs = Property([float], doc="A matrix of probabilities.\
The element in the ith row and the jth column is the probability of\
switching from the ith transition model in :attr:`transition_models`\
to the jth")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.index = 0
@property
def transition_model(self):
self.index = np.random.choice(range(0, len(self.transition_models)),
p=self.model_probs[self.index])
return self.transition_models[self.index]
class SimpleDetectionSimulator(DetectionSimulator):
"""A simple detection simulator.
Parameters
----------
groundtruth : GroundTruthReader
Source of ground truth tracks used to generate detections for.
measurement_model : MeasurementModel
Measurement model used in generating detections.
"""
groundtruth = Property(GroundTruthReader)
measurement_model = Property(MeasurementModel)
meas_range = Property(np.ndarray)
detection_probability = Property(Probability, default=0.9)
clutter_rate = Property(float, default=2.0)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.real_detections = set()
self.clutter_detections = set()
self.index = 0
@property
def clutter_spatial_density(self):
"""returns the clutter spatial density of the measurement space - num
clutter detections per unit volume per timestep"""
return self.clutter_rate/np.prod(np.diff(self.meas_range))
@BufferedGenerator.generator_method
def detections_gen(self):
for time, tracks in self.groundtruth:
self.real_detections.clear()
self.clutter_detections.clear()
for track in tracks:
self.index = track[-1].metadata.get("index")
if np.random.rand() < self.detection_probability:
detection = TrueDetection(
self.measurement_model.function(
track[-1].state_vector),
timestamp=track[-1].timestamp,
groundtruth_path=track)
detection.clutter = False
self.real_detections.add(detection)
# generate clutter
for _ in range(np.random.poisson(self.clutter_rate)):
detection = Clutter(
np.random.rand(self.measurement_model.ndim_meas, 1) *
np.diff(self.meas_range) + self.meas_range[:, :1],
timestamp=time)
self.clutter_detections.add(detection)
yield time, self.real_detections | self.clutter_detections
class SwitchDetectionSimulator(SimpleDetectionSimulator):
"""Functions identically as the :class:`SimpleDetectionSimulator`, but for
ground truth paths formed using multiple transition models it allows the
user to assign a detection probability to each transition models.
For example, if you wanted a higher detection probability when the
simulated object makes a turn"""
detection_probabilities = Property([Probability], doc="List of\
probabilities that correspond to the detection probability of the\
simulated object while undergoing each transition model")
@property
def detection_probability(self):
return self.detection_probabilities[self.index]
| roundtruth_paths_gen( |
lvm.py | # Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
LVM class for performing LVM operations.
"""
import math
import os
import re
from os_brick import executor
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import excutils
from six import moves
from cinder import exception
from cinder import utils
LOG = logging.getLogger(__name__)
class LVM(executor.Executor):
"""LVM object to enable various LVM related operations."""
LVM_CMD_PREFIX = ['env', 'LC_ALL=C']
_supports_pvs_ignoreskippedcluster = None
def __init__(self, vg_name, root_helper, create_vg=False,
physical_volumes=None, lvm_type='default',
executor=putils.execute, lvm_conf=None,
suppress_fd_warn=False):
"""Initialize the LVM object.
The LVM object is based on an LVM VolumeGroup, one instantiation
for each VolumeGroup you have/use.
:param vg_name: Name of existing VG or VG to create
:param root_helper: Execution root_helper method to use
:param create_vg: Indicates the VG doesn't exist
and we want to create it
:param physical_volumes: List of PVs to build VG on
:param lvm_type: VG and Volume type (default, or thin)
:param executor: Execute method to use, None uses common/processutils
:param suppress_fd_warn: Add suppress FD Warn to LVM env
"""
super(LVM, self).__init__(execute=executor, root_helper=root_helper)
self.vg_name = vg_name
self.pv_list = []
self.vg_size = 0.0
self.vg_free_space = 0.0
self.vg_lv_count = 0
self.vg_uuid = None
self.vg_thin_pool = None
self.vg_thin_pool_size = 0.0
self.vg_thin_pool_free_space = 0.0
self._supports_snapshot_lv_activation = None
self._supports_lvchange_ignoreskipactivation = None
self.vg_provisioned_capacity = 0.0
# Ensure LVM_SYSTEM_DIR has been added to LVM.LVM_CMD_PREFIX
# before the first LVM command is executed, and use the directory
# where the specified lvm_conf file is located as the value.
# NOTE(jdg): We use the temp var here because LVM_CMD_PREFIX is a
# class global and if you use append here, you'll literally just keep
# appending values to the global.
_lvm_cmd_prefix = ['env', 'LC_ALL=C']
if lvm_conf and os.path.isfile(lvm_conf):
lvm_sys_dir = os.path.dirname(lvm_conf)
_lvm_cmd_prefix.append('LVM_SYSTEM_DIR=' + lvm_sys_dir)
if suppress_fd_warn:
_lvm_cmd_prefix.append('LVM_SUPPRESS_FD_WARNINGS=1')
LVM.LVM_CMD_PREFIX = _lvm_cmd_prefix
if create_vg and physical_volumes is not None:
self.pv_list = physical_volumes
try:
self._create_vg(physical_volumes)
except putils.ProcessExecutionError as err:
LOG.exception('Error creating Volume Group')
LOG.error('Cmd :%s', err.cmd)
LOG.error('StdOut :%s', err.stdout)
LOG.error('StdErr :%s', err.stderr)
raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)
if self._vg_exists() is False:
LOG.error('Unable to locate Volume Group %s', vg_name)
raise exception.VolumeGroupNotFound(vg_name=vg_name)
# NOTE: we assume that the VG has been activated outside of Cinder
if lvm_type == 'thin':
pool_name = "%s-pool" % self.vg_name
if self.get_volume(pool_name) is None:
try:
self.create_thin_pool(pool_name)
except putils.ProcessExecutionError:
# Maybe we just lost the race against another copy of
# this driver being in init in parallel - e.g.
# cinder-volume and cinder-backup starting in parallel
if self.get_volume(pool_name) is None:
raise
self.vg_thin_pool = pool_name
self.activate_lv(self.vg_thin_pool)
self.pv_list = self.get_all_physical_volumes(root_helper, vg_name)
def _vg_exists(self):
"""Simple check to see if VG exists.
:returns: True if vg specified in object exists, else False
"""
exists = False
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'-o', 'name', self.vg_name]
(out, _err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
volume_groups = out.split()
if self.vg_name in volume_groups:
exists = True
return exists
def _create_vg(self, pv_list):
|
    def _get_thin_pool_free_space(self, vg_name, thin_pool_name):
        """Returns available thin pool free space.
        :param vg_name: the vg where the pool is placed
        :param thin_pool_name: the thin pool to gather info for
        :returns: Free space in GB (float), calculated using data_percent
        """
        cmd = LVM.LVM_CMD_PREFIX +\
            ['lvs', '--noheadings', '--unit=g',
             '-o', 'size,data_percent', '--separator',
             ':', '--nosuffix']
        # NOTE(gfidente): data_percent only applies to some types of LV so we
        # make sure to append the actual thin pool name
        cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name))
        # Any failure below falls through and reports 0.0 free rather than
        # raising to the caller.
        free_space = 0.0
        try:
            (out, err) = self._execute(*cmd,
                                       root_helper=self._root_helper,
                                       run_as_root=True)
            if out is not None:
                out = out.strip()
                # Output is "<size>:<data_percent>"; free = size minus the
                # percentage of the pool already consumed.
                data = out.split(':')
                pool_size = float(data[0])
                data_percent = float(data[1])
                consumed_space = pool_size / 100 * data_percent
                free_space = pool_size - consumed_space
                free_space = round(free_space, 2)
        except putils.ProcessExecutionError as err:
            LOG.exception('Error querying thin pool about data_percent')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)
        return free_space
    @staticmethod
    def get_lvm_version(root_helper):
        """Static method to get LVM version from system.
        :param root_helper: root_helper to use for execute
        :returns: version 3-tuple
        """
        cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--version']
        (out, _err) = putils.execute(*cmd,
                                     root_helper=root_helper,
                                     run_as_root=True)
        lines = out.split('\n')
        for line in lines:
            if 'LVM version' in line:
                version_list = line.split()
                # NOTE(gfidente): version is formatted as follows:
                # major.minor.patchlevel(library API version)[-customisation]
                version = version_list[2]
                version_filter = r"(\d+)\.(\d+)\.(\d+).*"
                r = re.search(version_filter, version)
                version_tuple = tuple(map(int, r.group(1, 2, 3)))
                return version_tuple
        # NOTE(review): implicitly returns None if no 'LVM version' line is
        # found, and raises AttributeError if the regex fails — callers that
        # compare against tuples would then error; confirm output is stable.
    @staticmethod
    def supports_thin_provisioning(root_helper):
        """Static method to check for thin LVM support on a system.
        :param root_helper: root_helper to use for execute
        :returns: True if supported, False otherwise
        """
        # Thin provisioning requires LVM >= 2.02.95.
        return LVM.get_lvm_version(root_helper) >= (2, 2, 95)
@property
def supports_snapshot_lv_activation(self):
"""Property indicating whether snap activation changes are supported.
Check for LVM version >= 2.02.91.
(LVM2 git: e8a40f6 Allow to activate snapshot)
:returns: True/False indicating support
"""
if self._supports_snapshot_lv_activation is not None:
return self._supports_snapshot_lv_activation
self._supports_snapshot_lv_activation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 91))
return self._supports_snapshot_lv_activation
@property
def supports_lvchange_ignoreskipactivation(self):
"""Property indicating whether lvchange can ignore skip activation.
Check for LVM version >= 2.02.99.
(LVM2 git: ab789c1bc add --ignoreactivationskip to lvchange)
"""
if self._supports_lvchange_ignoreskipactivation is not None:
return self._supports_lvchange_ignoreskipactivation
self._supports_lvchange_ignoreskipactivation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 99))
return self._supports_lvchange_ignoreskipactivation
@staticmethod
def supports_pvs_ignoreskippedcluster(root_helper):
"""Property indicating whether pvs supports --ignoreskippedcluster
Check for LVM version >= 2.02.103.
(LVM2 git: baf95bbff cmdline: Add --ignoreskippedcluster.
"""
if LVM._supports_pvs_ignoreskippedcluster is not None:
return LVM._supports_pvs_ignoreskippedcluster
LVM._supports_pvs_ignoreskippedcluster = (
LVM.get_lvm_version(root_helper) >= (2, 2, 103))
return LVM._supports_pvs_ignoreskippedcluster
    @staticmethod
    def get_lv_info(root_helper, vg_name=None, lv_name=None):
        """Retrieve info about LVs (all, in a VG, or a single LV).
        :param root_helper: root_helper to use for execute
        :param vg_name: optional, gathers info for only the specified VG
        :param lv_name: optional, gathers info for only the specified LV
        :returns: List of Dictionaries with LV info
        """
        cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g',
                                    '-o', 'vg_name,name,size', '--nosuffix']
        # lv_name alone is ambiguous, so a single-LV query needs both names.
        if lv_name is not None and vg_name is not None:
            cmd.append("%s/%s" % (vg_name, lv_name))
        elif vg_name is not None:
            cmd.append(vg_name)
        try:
            (out, _err) = putils.execute(*cmd,
                                         root_helper=root_helper,
                                         run_as_root=True)
        except putils.ProcessExecutionError as err:
            # A missing LV is an expected condition: swallow it and return an
            # empty list; any other execution error is re-raised.
            with excutils.save_and_reraise_exception(reraise=True) as ctx:
                if "not found" in err.stderr or "Failed to find" in err.stderr:
                    ctx.reraise = False
                    LOG.info("Logical Volume not found when querying "
                             "LVM info. (vg_name=%(vg)s, lv_name=%(lv)s",
                             {'vg': vg_name, 'lv': lv_name})
                    out = None
        lv_list = []
        if out is not None:
            # Output is a whitespace-separated stream of "vg name size"
            # triples; zip over the same iterator three times to regroup.
            # NOTE(review): this assumes LV/VG names contain no whitespace.
            volumes = out.split()
            iterator = moves.zip(*[iter(volumes)] * 3)  # pylint: disable=E1101
            for vg, name, size in iterator:
                lv_list.append({"vg": vg, "name": name, "size": size})
        return lv_list
    def get_volumes(self, lv_name=None):
        """Get all LV's associated with this instantiation (VG).
        :param lv_name: optional, limits results to the named LV
        :returns: List of Dictionaries with LV info
        """
        # Thin delegation: scopes the static query to this object's VG.
        return self.get_lv_info(self._root_helper,
                                self.vg_name,
                                lv_name)
def get_volume(self, name):
"""Get reference object of volume specified by name.
:returns: dict representation of Logical Volume if exists
"""
ref_list = self.get_volumes(name)
for r in ref_list:
if r['name'] == name:
return r
return None
    @staticmethod
    def get_all_physical_volumes(root_helper, vg_name=None):
        """Static method to get all PVs on a system.
        :param root_helper: root_helper to use for execute
        :param vg_name: optional, gathers info for only the specified VG
        :returns: List of Dictionaries with PV info
        """
        # Use an explicit separator so fields can be split reliably even
        # though each PV record is whitespace-delimited from the next.
        field_sep = '|'
        cmd = LVM.LVM_CMD_PREFIX + ['pvs', '--noheadings',
                                    '--unit=g',
                                    '-o', 'vg_name,name,size,free',
                                    '--separator', field_sep,
                                    '--nosuffix']
        if LVM.supports_pvs_ignoreskippedcluster(root_helper):
            cmd.append('--ignoreskippedcluster')
        (out, _err) = putils.execute(*cmd,
                                     root_helper=root_helper,
                                     run_as_root=True)
        pvs = out.split()
        # Filter client-side on the vg_name field rather than passing the VG
        # to 'pvs', which expects PV paths as positional args.
        if vg_name is not None:
            pvs = [pv for pv in pvs if vg_name == pv.split(field_sep)[0]]
        pv_list = []
        for pv in pvs:
            fields = pv.split(field_sep)
            pv_list.append({'vg': fields[0],
                            'name': fields[1],
                            'size': float(fields[2]),
                            'available': float(fields[3])})
        return pv_list
    @staticmethod
    def get_all_volume_groups(root_helper, vg_name=None):
        """Static method to get all VGs on a system.
        :param root_helper: root_helper to use for execute
        :param vg_name: optional, gathers info for only the specified VG
        :returns: List of Dictionaries with VG info
        """
        cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
                                    '--unit=g', '-o',
                                    'name,size,free,lv_count,uuid',
                                    '--separator', ':',
                                    '--nosuffix']
        if vg_name is not None:
            cmd.append(vg_name)
        (out, _err) = putils.execute(*cmd,
                                     root_helper=root_helper,
                                     run_as_root=True)
        vg_list = []
        if out is not None:
            # One colon-separated record per VG.
            vgs = out.split()
            for vg in vgs:
                fields = vg.split(':')
                vg_list.append({'name': fields[0],
                                'size': float(fields[1]),
                                'available': float(fields[2]),
                                'lv_count': int(fields[3]),
                                'uuid': fields[4]})
        return vg_list
    def update_volume_group_info(self):
        """Update VG info for this instantiation.
        Used to update member fields of object and
        provide a dict of info for caller.
        :raises VolumeGroupNotFound: if the VG cannot be uniquely located
        :returns: Dictionaries of VG info
        """
        vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)
        if len(vg_list) != 1:
            LOG.error('Unable to find VG: %s', self.vg_name)
            raise exception.VolumeGroupNotFound(vg_name=self.vg_name)
        # Refresh cached VG-wide stats.
        self.vg_size = float(vg_list[0]['size'])
        self.vg_free_space = float(vg_list[0]['available'])
        self.vg_lv_count = int(vg_list[0]['lv_count'])
        self.vg_uuid = vg_list[0]['uuid']
        total_vols_size = 0.0
        if self.vg_thin_pool is not None:
            # NOTE(xyang): If providing only self.vg_name,
            # get_lv_info will output info on the thin pool and all
            # individual volumes.
            # get_lv_info(self._root_helper, 'stack-vg')
            # sudo lvs --noheadings --unit=g -o vg_name,name,size
            # --nosuffix stack-vg
            # stack-vg stack-pool 9.51
            # stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1 1.00
            # stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892 1.00
            # stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c 1.00
            #
            # If providing both self.vg_name and self.vg_thin_pool,
            # get_lv_info will output only info on the thin pool, but not
            # individual volumes.
            # get_lv_info(self._root_helper, 'stack-vg', 'stack-pool')
            # sudo lvs --noheadings --unit=g -o vg_name,name,size
            # --nosuffix stack-vg/stack-pool
            # stack-vg stack-pool 9.51
            #
            # We need info on both the thin pool and the volumes,
            # therefore we should provide only self.vg_name, but not
            # self.vg_thin_pool here.
            for lv in self.get_lv_info(self._root_helper,
                                       self.vg_name):
                lvsize = lv['size']
                # get_lv_info runs "lvs" command with "--nosuffix".
                # This removes "g" from "1.00g" and only outputs "1.00".
                # Running "lvs" command without "--nosuffix" will output
                # "1.00g" if "g" is the unit.
                # Remove the unit if it is in lv['size'].
                if not lv['size'][-1].isdigit():
                    lvsize = lvsize[:-1]
                if lv['name'] == self.vg_thin_pool:
                    # Track the pool's own size/free space separately from
                    # the provisioned total of the regular volumes.
                    self.vg_thin_pool_size = lvsize
                    tpfs = self._get_thin_pool_free_space(self.vg_name,
                                                          self.vg_thin_pool)
                    self.vg_thin_pool_free_space = tpfs
                else:
                    total_vols_size = total_vols_size + float(lvsize)
            total_vols_size = round(total_vols_size, 2)
        self.vg_provisioned_capacity = total_vols_size
def _calculate_thin_pool_size(self):
"""Calculates the correct size for a thin pool.
Ideally we would use 100% of the containing volume group and be done.
But the 100%VG notation to lvcreate is not implemented and thus cannot
be used. See https://bugzilla.redhat.com/show_bug.cgi?id=998347
Further, some amount of free space must remain in the volume group for
metadata for the contained logical volumes. The exact amount depends
on how much volume sharing you expect.
:returns: An lvcreate-ready string for the number of calculated bytes.
"""
# make sure volume group information is current
self.update_volume_group_info()
# leave 5% free for metadata
return "%sg" % (self.vg_free_space * 0.95)
    def create_thin_pool(self, name=None, size_str=None):
        """Creates a thin provisioning pool for this VG.
        The syntax here is slightly different than the default
        lvcreate -T, so we'll just write a custom cmd here
        and do it.
        :param name: Name to use for pool, default is "<vg-name>-pool"
        :param size_str: Size to allocate for pool, default is entire VG
        :returns: The size string passed to the lvcreate command
        """
        # Bail out (returning None) rather than failing the lvcreate when
        # the host LVM is too old for thin provisioning.
        if not self.supports_thin_provisioning(self._root_helper):
            LOG.error('Requested to setup thin provisioning, '
                      'however current LVM version does not '
                      'support it.')
            return None
        if name is None:
            name = '%s-pool' % self.vg_name
        vg_pool_name = '%s/%s' % (self.vg_name, name)
        if not size_str:
            size_str = self._calculate_thin_pool_size()
        cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-L', size_str,
                                    vg_pool_name]
        LOG.debug("Creating thin pool '%(pool)s' with size %(size)s of "
                  "total %(free)sg", {'pool': vg_pool_name,
                                      'size': size_str,
                                      'free': self.vg_free_space})
        self._execute(*cmd,
                      root_helper=self._root_helper,
                      run_as_root=True)
        # Remember the pool so subsequent thin LV creation can target it.
        self.vg_thin_pool = name
        return size_str
    def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
        """Creates a logical volume on the object's VG.
        :param name: Name to use when creating Logical Volume
        :param size_str: Size to use when creating Logical Volume
        :param lv_type: Type of Volume (default or thin)
        :param mirror_count: Use LVM mirroring with specified count
        """
        if lv_type == 'thin':
            # Thin LVs are carved out of the previously created pool.
            pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool)
            cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-V', size_str, '-n',
                                        name, pool_path]
        else:
            cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-n', name, self.vg_name,
                                        '-L', size_str]
        if mirror_count > 0:
            cmd.extend(['-m', mirror_count, '--nosync',
                        '--mirrorlog', 'mirrored'])
            # size_str is assumed to end in a unit suffix (e.g. "10g");
            # strip it and convert to terabytes for the region-size check.
            terras = int(size_str[:-1]) / 1024.0
            if terras >= 1.5:
                rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
                # NOTE(vish): Next power of two for region size. See:
                #             http://red.ht/U2BPOD
                cmd.extend(['-R', str(rsize)])
        try:
            self._execute(*cmd,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception('Error creating Volume')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)
            # Log VG state to aid diagnosing out-of-space style failures.
            LOG.error('Current state: %s',
                      self.get_all_volume_groups(self._root_helper))
            raise
    @utils.retry(putils.ProcessExecutionError)
    def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
        """Creates a snapshot of a logical volume.
        :param name: Name to assign to new snapshot
        :param source_lv_name: Name of Logical Volume to snapshot
        :param lv_type: Type of LV (default or thin)
        :raises VolumeDeviceNotFound: if the source LV does not exist
        """
        source_lvref = self.get_volume(source_lv_name)
        if source_lvref is None:
            LOG.error("Trying to create snapshot by non-existent LV: %s",
                      source_lv_name)
            raise exception.VolumeDeviceNotFound(device=source_lv_name)
        cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot',
                                    '%s/%s' % (self.vg_name, source_lv_name)]
        # Thin snapshots allocate from the pool; only thick snapshots need
        # an explicit size (matched to the origin LV).
        if lv_type != 'thin':
            size = source_lvref['size']
            cmd.extend(['-L', '%sg' % (size)])
        try:
            self._execute(*cmd,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception('Error creating snapshot')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)
            raise
def _mangle_lv_name(self, name):
# Linux LVM reserves name that starts with snapshot, so that
# such volume name can't be created. Mangle it.
if not name.startswith('snapshot'):
return name
return '_' + name
    def _lv_is_active(self, name):
        """Return True if the named LV is currently active.

        Inspects the lvdisplay 'Attr' column; position 4 is the LV state
        field, where 'a' means active.
        """
        cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
                                    'Attr', '%s/%s' % (self.vg_name, name)]
        out, _err = self._execute(*cmd,
                                  root_helper=self._root_helper,
                                  run_as_root=True)
        if out:
            out = out.strip()
            if (out[4] == 'a'):
                return True
        return False
    def deactivate_lv(self, name):
        """Deactivate the named LV and wait until it is really inactive.

        :param name: name of the LV (mangled if it starts with 'snapshot')
        :raises putils.ProcessExecutionError: if lvchange fails
        """
        lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
        # 'lvchange -a n' requests deactivation.
        cmd = ['lvchange', '-a', 'n']
        cmd.append(lv_path)
        try:
            self._execute(*cmd,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception('Error deactivating LV')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)
            raise
        # Wait until lv is deactivated to return in
        # order to prevent a race condition.
        self._wait_for_volume_deactivation(name)
@utils.retry(exceptions=exception.VolumeNotDeactivated, retries=5,
backoff_rate=2)
def _wait_for_volume_deactivation(self, name):
LOG.debug("Checking to see if volume %s has been deactivated.",
name)
if self._lv_is_active(name):
LOG.debug("Volume %s is still active.", name)
raise exception.VolumeNotDeactivated(name=name)
else:
LOG.debug("Volume %s has been deactivated.", name)
    def activate_lv(self, name, is_snapshot=False, permanent=False):
        """Ensure that logical volume/snapshot logical volume is activated.
        :param name: Name of LV to activate
        :param is_snapshot: whether LV is a snapshot
        :param permanent: whether we should drop skipactivation flag
        :raises putils.ProcessExecutionError:
        """
        # This is a no-op if requested for a snapshot on a version
        # of LVM that doesn't support snapshot activation.
        # (Assume snapshot LV is always active.)
        if is_snapshot and not self.supports_snapshot_lv_activation:
            return
        lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
        # Must pass --yes to activate both the snap LV and its origin LV.
        # Otherwise lvchange asks if you would like to do this interactively,
        # and fails.
        cmd = ['lvchange', '-a', 'y', '--yes']
        # '-K' tells lvchange to ignore the activation-skip flag.
        if self.supports_lvchange_ignoreskipactivation:
            cmd.append('-K')
        # If permanent=True is specified, drop the skipactivation flag in
        # order to make this LV automatically activated after next reboot.
        if permanent:
            cmd += ['-k', 'n']
        cmd.append(lv_path)
        try:
            self._execute(*cmd,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception('Error activating LV')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)
            raise
    @utils.retry(putils.ProcessExecutionError)
    def delete(self, name):
        """Delete logical volume or snapshot.
        :param name: Name of LV to delete
        """
        def run_udevadm_settle():
            # Best-effort: let udev finish processing device events before
            # we retry the removal (exit code deliberately ignored).
            self._execute('udevadm', 'settle',
                          root_helper=self._root_helper, run_as_root=True,
                          check_exit_code=False)
        # LV removal seems to be a race with other writers or udev in
        # some cases (see LP #1270192), so we enable retry deactivation
        LVM_CONFIG = 'activation { retry_deactivation = 1} '
        try:
            self._execute(
                'lvremove',
                '--config', LVM_CONFIG,
                '-f',
                '%s/%s' % (self.vg_name, name),
                root_helper=self._root_helper, run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.debug('Error reported running lvremove: CMD: %(command)s, '
                      'RESPONSE: %(response)s',
                      {'command': err.cmd, 'response': err.stderr})
            LOG.debug('Attempting udev settle and retry of lvremove...')
            run_udevadm_settle()
            # The previous failing lvremove -f might leave behind
            # suspended devices; when lvmetad is not available, any
            # further lvm command will block forever.
            # Therefore we need to skip suspended devices on retry.
            LVM_CONFIG += 'devices { ignore_suspended_devices = 1}'
            # If this second attempt also fails, the @utils.retry decorator
            # re-runs the whole method.
            self._execute(
                'lvremove',
                '--config', LVM_CONFIG,
                '-f',
                '%s/%s' % (self.vg_name, name),
                root_helper=self._root_helper, run_as_root=True)
            LOG.debug('Successfully deleted volume: %s after '
                      'udev settle.', name)
    def revert(self, snapshot_name):
        """Revert an LV from snapshot.
        :param snapshot_name: Name of snapshot to revert
        """
        # Merges the snapshot back into its origin LV.
        # NOTE(review): lvconvert --merge may defer the merge until the next
        # origin activation if the origin is in use — confirm callers handle
        # that.
        self._execute('lvconvert', '--merge',
                      snapshot_name, root_helper=self._root_helper,
                      run_as_root=True)
def lv_has_snapshot(self, name):
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '%s/%s' % (self.vg_name, name)]
out, _err = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out:
out = out.strip()
if (out[0] == 'o') or (out[0] == 'O'):
return True
return False
def lv_is_snapshot(self, name):
"""Return True if LV is a snapshot, False otherwise."""
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '%s/%s' % (self.vg_name, name)]
out, _err = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
out = out.strip()
if out:
if (out[0] == 's'):
return True
return False
def lv_is_open(self, name):
"""Return True if LV is currently open, False otherwise."""
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '%s/%s' % (self.vg_name, name)]
out, _err = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
out = out.strip()
if out:
if (out[5] == 'o'):
return True
return False
def lv_get_origin(self, name):
"""Return the origin of an LV that is a snapshot, None otherwise."""
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Origin', '%s/%s' % (self.vg_name, name)]
out, _err = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
out = out.strip()
if out:
return out
return None
    def extend_volume(self, lv_name, new_size):
        """Extend the size of an existing volume.

        :param lv_name: name of the LV to extend
        :param new_size: new size string passed to 'lvextend -L'
        :raises putils.ProcessExecutionError: if lvextend fails
        """
        # Volumes with snaps have attributes 'o' or 'O' and will be
        # deactivated, but Thin Volumes with snaps have attribute 'V'
        # and won't be deactivated because the lv_has_snapshot method looks
        # for 'o' or 'O'
        has_snapshot = self.lv_has_snapshot(lv_name)
        if has_snapshot:
            self.deactivate_lv(lv_name)
        try:
            cmd = LVM.LVM_CMD_PREFIX + ['lvextend', '-L', new_size,
                                        '%s/%s' % (self.vg_name, lv_name)]
            self._execute(*cmd, root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception('Error extending Volume')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)
            raise
        # Re-activate only what we deactivated above.
        if has_snapshot:
            self.activate_lv(lv_name)
def vg_mirror_free_space(self, mirror_count):
free_capacity = 0.0
disks = []
for pv in self.pv_list:
disks.append(float(pv['available']))
while True:
disks = sorted([a for a in disks if a > 0.0], reverse=True)
if len(disks) <= mirror_count:
break
# consume the smallest disk
disk = disks[-1]
disks = disks[:-1]
# match extents for each mirror on the largest disks
for index in list(range(mirror_count)):
disks[index] -= disk
free_capacity += disk
return free_capacity
    def vg_mirror_size(self, mirror_count):
        """Usable size per mirrored LV: VG free space split across the
        original plus mirror_count copies."""
        return (self.vg_free_space / (mirror_count + 1))
    def rename_volume(self, lv_name, new_name):
        """Change the name of an existing volume.

        :param lv_name: current LV name
        :param new_name: new LV name
        :raises putils.ProcessExecutionError: if lvrename fails
        """
        try:
            self._execute('lvrename', self.vg_name, lv_name, new_name,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception('Error renaming logical volume')
            LOG.error('Cmd :%s', err.cmd)
            LOG.error('StdOut :%s', err.stdout)
            LOG.error('StdErr :%s', err.stderr)
            raise
| cmd = ['vgcreate', self.vg_name, ','.join(pv_list)]
self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) |
base_task_runner.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base task runner"""
import os
import subprocess
import threading
from pwd import getpwnam
from tempfile import NamedTemporaryFile
from typing import Optional, Union
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException
from airflow.models.taskinstance import load_error_file
from airflow.utils.configuration import tmp_configuration_copy
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
from airflow.utils.platform import getuser
PYTHONPATH_VAR = 'PYTHONPATH'
class BaseTaskRunner(LoggingMixin):
"""
Runs Airflow task instances by invoking the `airflow tasks run` command with raw
mode enabled in a subprocess.
:param local_task_job: The local task job associated with running the
associated task instance.
:type local_task_job: airflow.jobs.local_task_job.LocalTaskJob
"""
    def __init__(self, local_task_job):
        """Prepare the command line and support files for running the task.

        :param local_task_job: the LocalTaskJob whose task instance will be
            run; supplies the task instance, pickle id, pool, etc.
        """
        # Pass task instance context into log handlers to setup the logger.
        super().__init__(local_task_job.task_instance)
        self._task_instance = local_task_job.task_instance
        popen_prepend = []
        # Impersonation target: per-task run_as_user wins, otherwise the
        # configured core.default_impersonation (None when unset).
        if self._task_instance.run_as_user:
            self.run_as_user = self._task_instance.run_as_user
        else:
            try:
                self.run_as_user = conf.get('core', 'default_impersonation')
            except AirflowConfigException:
                self.run_as_user = None
        # Add sudo commands to change user if we need to. Needed to handle SubDagOperator
        # case using a SequentialExecutor.
        self.log.debug("Planning to run as the %s user", self.run_as_user)
        if self.run_as_user and (self.run_as_user != getuser()):
            # We want to include any environment variables now, as we won't
            # want to have to specify them in the sudo call - they would show
            # up in `ps` that way! And run commands now, as the other user
            # might not be able to run the cmds to get credentials
            cfg_path = tmp_configuration_copy(chmod=0o600)
            # Give ownership of file to user; only they can read and write
            subprocess.call(['sudo', 'chown', self.run_as_user, cfg_path], close_fds=True)
            # propagate PYTHONPATH environment variable
            pythonpath_value = os.environ.get(PYTHONPATH_VAR, '')
            popen_prepend = ['sudo', '-E', '-H', '-u', self.run_as_user]
            if pythonpath_value:
                popen_prepend.append(f'{PYTHONPATH_VAR}={pythonpath_value}')
        else:
            # Always provide a copy of the configuration file settings. Since
            # we are running as the same user, and can pass through environment
            # variables then we don't need to include those in the config copy
            # - the runner can read/execute those values as it needs
            cfg_path = tmp_configuration_copy(chmod=0o600)
        # Temp file the subprocess writes its runtime error into; read back
        # via deserialize_run_error().
        self._error_file = NamedTemporaryFile(delete=True)
        if self.run_as_user:
            try:
                os.chown(self._error_file.name, getpwnam(self.run_as_user).pw_uid, -1)
            except KeyError:
                # No user `run_as_user` found
                pass
        self._cfg_path = cfg_path
        # Full "airflow tasks run --raw ..." argv, with any sudo prefix.
        self._command = (
            popen_prepend
            + self._task_instance.command_as_list(
                raw=True,
                pickle_id=local_task_job.pickle_id,
                mark_success=local_task_job.mark_success,
                job_id=local_task_job.id,
                pool=local_task_job.pool,
                cfg_path=cfg_path,
            )
            + ["--error-file", self._error_file.name]
        )
        # Subprocess handle; set by subclasses when start() is called.
        self.process = None
    def deserialize_run_error(self) -> Optional[Union[str, Exception]]:
        """Return the task runtime error if one was written to the error
        file, else None."""
        return load_error_file(self._error_file)
def | (self, stream):
while True:
line = stream.readline()
if isinstance(line, bytes):
line = line.decode('utf-8')
if not line:
break
self.log.info(
'Job %s: Subtask %s %s',
self._task_instance.job_id,
self._task_instance.task_id,
line.rstrip('\n'),
)
    def run_command(self, run_with=None):
        """
        Run the task command.

        :param run_with: list of tokens to run the task command with e.g. ``['bash', '-c']``
        :type run_with: list
        :return: the process that was run
        :rtype: subprocess.Popen
        """
        run_with = run_with or []
        full_cmd = run_with + self._command
        self.log.info("Running on host: %s", get_hostname())
        self.log.info('Running: %s', full_cmd)
        proc = subprocess.Popen(
            full_cmd,
            stdout=subprocess.PIPE,
            # Merge stderr into stdout so one reader thread captures both.
            stderr=subprocess.STDOUT,
            universal_newlines=True,
            close_fds=True,
            env=os.environ.copy(),
            # Run the child in its own session/process group.
            preexec_fn=os.setsid,
        )
        # Start daemon thread to read subprocess logging output
        log_reader = threading.Thread(
            target=self._read_task_logs,
            args=(proc.stdout,),
        )
        log_reader.daemon = True
        log_reader.start()
        return proc
    def start(self):
        """Start running the task instance in a subprocess.

        Abstract: concrete runners must implement this.
        """
        raise NotImplementedError()

    def return_code(self) -> Optional[int]:
        """
        :return: The return code associated with running the task instance or
            None if the task is not yet done.
        :rtype: int
        """
        raise NotImplementedError()

    def terminate(self) -> None:
        """Force kill the running task instance.

        Abstract: concrete runners must implement this.
        """
        raise NotImplementedError()
    def on_finish(self) -> None:
        """A callback that should be called when this is done running.

        Removes the temporary config copy (via sudo when it is owned by the
        impersonated user) and closes the error temp file.
        """
        if self._cfg_path and os.path.isfile(self._cfg_path):
            if self.run_as_user:
                subprocess.call(['sudo', 'rm', self._cfg_path], close_fds=True)
            else:
                os.remove(self._cfg_path)
        try:
            # Closing the NamedTemporaryFile also deletes it (delete=True).
            self._error_file.close()
        except FileNotFoundError:
            # The subprocess has deleted this file before we do
            # so we ignore
            pass
| _read_task_logs |
data.py | class Result(object):
    def __init__(self, result):
        # Raw result line, e.g. "Team_A 3, Team_B 1".
        self._result = result
        # Team names and scores, index-aligned; populated by parse().
        self._teams = []
        self._scores = []
def parse(self):
"""
Parse a results file entry
Result format is Team_Name Score, Team_Name Score
Parameters:
self.result (str): line of text in result entry format
Returns:
None
"""
for team_pair in self._result.split(','):
name, score = self.team_data(team_pair)
self._teams.append(name)
self._scores.append(score)
    def team_data(self, team_score):
        """
        Extract team name and score

        Parameters:
            team_score (str): text containing a team score pair
                (e.g. Team_Name Score)

        Returns:
            tuple: team_name (str), score (int)
        """
        *name, score = team_score.split()
        return ' '.join(name), int(score)
def draw(self):
"""
Determine if match was a draw
Returns:
bool
"""
return self._scores.count(self._scores[0]) == 2
def winning_team(self):
"""
Find winning team name
Returns:
str: winning team name
"""
return self._teams[self._scores.index(max(self._scores))]
    def teams(self):
        """
        Return extracted team names (empty until parse() has run)
        Returns:
            list[str]: team names
        """
        return self._teams
class ResultsParser(object):
    """Iterator over a results file, yielding one parsed Result per line.

    Iteration stops at the first empty/blank line (including EOF).
    """

    def __init__(self, infile):
        self._infile = infile

    def __iter__(self):
        return self

    # Python 3 protocol delegates to the Python 2-style next().
    def __next__(self):
        return self.next()

    def next(self):
        line = self._infile.readline().strip()
        if not line:
            raise StopIteration()
        parsed = Result(line)
        parsed.parse()
        return parsed
Returns:
tuple: team_name, score |
file.go | package filewatcher
import (
"context"
"fmt"
"os"
"path/filepath"
"sync"
"time"
"github.com/pkg/errors"
)
// State is the outcome of a single check of the watched file.
type State int

const (
	Unknown State = iota
	Error
	Skipped
	Unchanged
	Changed
)

// stateNames maps check outcomes to the strings published in notifications.
var stateNames = map[State]string{
	Unknown: "unknown",
	Error: "error",
	Skipped: "skipped",
	Unchanged: "unchanged",
	Changed: "changed",
}

// Machine is the subset of the autonomous-agent state machine this watcher
// needs: identity/metadata accessors, transitions, notifications and logging.
type Machine interface {
	State() string
	Name() string
	Identity() string
	InstanceID() string
	Version() string
	TimeStampSeconds() int64
	Directory() string
	Transition(t string, args ...interface{}) error
	NotifyWatcherState(string, interface{})
	Debugf(name string, format string, args ...interface{})
	Infof(name string, format string, args ...interface{})
	Errorf(name string, format string, args ...interface{})
}

// Watcher polls a file's mtime and fires machine transitions on change.
// The embedded Mutex guards previous (see CurrentState/setPreviousState).
type Watcher struct {
	name string
	states []string
	failEvent string
	successEvent string
	path string
	machine Machine
	mtime time.Time
	initial bool
	interval time.Duration
	announceInterval time.Duration
	statechg chan struct{}
	previous State

	sync.Mutex
}
func | (machine Machine, name string, states []string, failEvent string, successEvent string, interval string, ai time.Duration, properties map[string]interface{}) (watcher *Watcher, err error) {
w := &Watcher{
name: name,
successEvent: successEvent,
failEvent: failEvent,
states: states,
machine: machine,
interval: 5 * time.Second,
announceInterval: ai,
statechg: make(chan struct{}, 1),
}
err = w.setProperties(properties)
if err != nil {
return nil, errors.Wrap(err, "could not set properties")
}
if interval != "" {
w.interval, err = time.ParseDuration(interval)
if err != nil {
return nil, errors.Wrap(err, "invalid interval")
}
}
if w.interval < 500*time.Millisecond {
return nil, errors.Errorf("interval %v is too small", w.interval)
}
return w, err
}
// Type identifies this watcher implementation.
func (w *Watcher) Type() string {
	return "file"
}

// AnnounceInterval is how often the current state should be re-announced.
func (w *Watcher) AnnounceInterval() time.Duration {
	return w.announceInterval
}

// Name returns the watcher's configured name.
func (w *Watcher) Name() string {
	return w.name
}

// NotifyStateChance requests an immediate out-of-band check.
// NOTE(review): name looks like a typo for "...Change" but is kept since it
// is part of the exported interface.
func (w *Watcher) NotifyStateChance() {
	// Drop the signal if one is already pending — never block the caller.
	if len(w.statechg) < cap(w.statechg) {
		w.statechg <- struct{}{}
	}
}
// Run polls the file every interval (and on NotifyStateChance signals)
// until ctx is cancelled. Call as a goroutine; wg is released on exit.
func (w *Watcher) Run(ctx context.Context, wg *sync.WaitGroup) {
	defer wg.Done()

	w.machine.Infof(w.name, "file watcher for %s starting", w.path)

	tick := time.NewTicker(w.interval)

	// When gather_initial_state is set, prime mtime with the current stamp
	// so the first tick only reports future changes.
	if w.initial {
		stat, err := os.Stat(w.path)
		if err == nil {
			w.mtime = stat.ModTime()
		}
	}

	for {
		select {
		case <-tick.C:
			w.performWatch(ctx)

		case <-w.statechg:
			w.performWatch(ctx)

		case <-ctx.Done():
			tick.Stop()
			w.machine.Infof(w.name, "Stopping on context interrupt")
			return
		}
	}
}
// performWatch runs one check and routes the outcome into the machine.
func (w *Watcher) performWatch(ctx context.Context) {
	if err := w.handleCheck(w.watch()); err != nil {
		w.machine.Errorf(w.name, "could not handle watcher event: %s", err)
	}
}
// CurrentState builds the notification document describing the last check.
// Locks the watcher so previous is read consistently with setPreviousState.
func (w *Watcher) CurrentState() interface{} {
	w.Lock()
	defer w.Unlock()

	s := &StateNotification{
		Protocol: "io.choria.machine.watcher.file.v1.state",
		Type: "file",
		Name: w.name,
		Identity: w.machine.Identity(),
		ID: w.machine.InstanceID(),
		Version: w.machine.Version(),
		Timestamp: w.machine.TimeStampSeconds(),
		Machine: w.machine.Name(),
		Path: w.path,
		PreviousOutcome: stateNames[w.previous],
	}

	return s
}
// setPreviousState records the last check outcome under the watcher lock.
func (w *Watcher) setPreviousState(s State) {
	w.Lock()
	defer w.Unlock()

	w.previous = s
}
// handleCheck turns a check outcome into machine transitions/notifications:
// Error fires failEvent, Changed fires successEvent, Unchanged is silent.
func (w *Watcher) handleCheck(s State, err error) error {
	w.machine.Debugf(w.name, "handling check for %s %v %v", w.path, s, err)

	w.setPreviousState(s)

	switch s {
	case Error:
		w.machine.NotifyWatcherState(w.name, w.CurrentState())
		return w.machine.Transition(w.failEvent)

	case Changed:
		w.machine.NotifyWatcherState(w.name, w.CurrentState())
		return w.machine.Transition(w.successEvent)

	case Unchanged:
		// not notifying, regular announces happen

	case Skipped, Unknown:
		// clear the time so that next time after once being skipped or unknown
		// it will treat the file as not seen before and detect changes, but if
		// its set to do initial check it specifically will not do that because
		// the behavior of the first run in that case would be to only wait for
		// future changes, this retains that behavior on becoming valid again
		if !w.initial {
			w.mtime = time.Time{}
		}
	}

	return nil
}
func (w *Watcher) shouldCheck() bool {
if len(w.states) == 0 {
return true
}
for _, e := range w.states {
if e == w.machine.State() {
return true
}
}
return false
}
func (w *Watcher) watch() (state State, err error) {
if !w.shouldCheck() {
return Skipped, nil
}
stat, err := os.Stat(w.path)
if err != nil {
w.mtime = time.Time{}
return Error, fmt.Errorf("does not exist")
}
if stat.ModTime().After(w.mtime) {
w.mtime = stat.ModTime()
return Changed, nil
}
return Unchanged, err
}
func (w *Watcher) setProperties(p map[string]interface{}) error {
path, ok := p["path"]
if !ok {
return fmt.Errorf("path is required")
}
w.path, ok = path.(string)
if !ok {
return fmt.Errorf("path should be a string")
}
if !filepath.IsAbs(w.path) {
w.path = filepath.Join(w.machine.Directory(), w.path)
}
initial, ok := p["gather_initial_state"]
if ok {
w.initial, ok = initial.(bool)
if !ok {
return fmt.Errorf("gather_initial_state should be bool")
}
}
return nil
}
| New |
future.rs | /*!
* A type representing values that may be computed concurrently and
* operations for working with them.
*
* # Example
*
* ~~~
* let delayed_fib = future::spawn {|| fib(5000) };
* make_a_sandwich();
* io::println(#fmt("fib(5000) = %?", delayed_fib.get()))
* ~~~
*/
import either::either;
import pipes::recv;
export future;
export extensions;
export from_value;
export from_port;
export from_fn;
export get;
export with;
export spawn;
// for task.rs
export future_pipe;
#[doc = "The future type"]
enum future<A> = {
mut v: either<@A, fn@() -> A>
};
/// Methods on the `future` type
impl extensions<A:copy send> for future<A> {
fn get() -> A {
//! Get the value of the future
get(self)
}
fn with<B>(blk: fn(A) -> B) -> B {
//! Work with the value without copying it
with(self, blk)
}
}
fn from_value<A>(+val: A) -> future<A> {
/*!
* Create a future from a value
*
* The value is immediately available and calling `get` later will
* not block.
*/
future({
mut v: either::left(@val)
})
}
fn macros() {
#macro[
[#move[x],
unsafe { let y <- *ptr::addr_of(x); y }]
];
}
fn from_port<A:send>(-port: future_pipe::client::waiting<A>) -> future<A> {
#[doc = "
Create a future from a port
The first time that the value is requested the task will block
waiting for the result to be received on the port.
"];
let port = ~mut some(port);
do from_fn |move port| {
let mut port_ = none;
port_ <-> *port;
let port = option::unwrap(port_);
alt recv(port) {
future_pipe::completed(data) { #move(data) }
}
}
}
fn from_fn<A>(f: fn@() -> A) -> future<A> {
/*!
* Create a future from a function.
*
* The first time that the value is requested it will be retreived by
* calling the function. Note that this function is a local
* function. It is not spawned into another task.
*/
future({
mut v: either::right(f)
})
}
fn spawn<A:send>(+blk: fn~() -> A) -> future<A> {
/*!
* Create a future from a unique closure.
*
* The closure will be run in a new task and its result used as the
* value of the future.
*/
from_port(pipes::spawn_service_recv(future_pipe::init, |ch| {
future_pipe::server::completed(ch, blk());
}))
}
fn get<A:copy>(future: future<A>) -> A {
//! Get the value of the future
do with(future) |v| { v }
}
fn with<A,B>(future: future<A>, blk: fn(A) -> B) -> B {
//! Work with the value without copying it
let v = alt copy future.v {
either::left(v) { v }
either::right(f) {
let v = @f();
future.v = either::left(v);
v
}
};
blk(*v)
}
proto! future_pipe {
waiting:recv<T:send> {
completed(T) -> !
}
}
#[test]
fn test_from_value() {
let f = from_value(~"snail");
assert get(f) == ~"snail";
}
#[test]
fn test_from_port() {
let (po, ch) = future_pipe::init();
future_pipe::server::completed(ch, ~"whale");
let f = from_port(po);
assert get(f) == ~"whale";
}
#[test]
fn test_from_fn() {
let f = fn@() -> ~str { ~"brail" };
let f = from_fn(f);
assert get(f) == ~"brail";
}
#[test]
fn test_iface_get() {
let f = from_value(~"fail");
assert f.get() == ~"fail";
}
#[test]
fn test_with() {
let f = from_value(~"nail");
assert with(f, |v| v) == ~"nail";
}
#[test]
fn test_iface_with() {
let f = from_value(~"kale");
assert f.with(|v| v) == ~"kale";
}
#[test]
fn test_spawn() { | let f = spawn(|| ~"bale");
assert get(f) == ~"bale";
}
#[test]
#[should_fail]
#[ignore(cfg(target_os = "win32"))]
fn test_futurefail() {
let f = spawn(|| fail);
let _x: ~str = get(f);
} | |
Hla.py | import sys
sys.path.insert(0, "../") # our fake sigrokdecode lives one dir upper
from pd import Decoder
class DS1307():
def __init__(self):
self.sigrokDecoder = Decoder()
def get_capabilities(self):
settings = {}
for option in self.sigrokDecoder.options :
settingType = ''
choices = []
if ("values" not in option) :
# TODO sigrok docs does not mention that default is mandatory
if (isinstance(option['default'], str)) :
|
elif (isinstance(option['default'], int) or isinstance(option['default'], float)) :
settingType = 'number'
else :
print("Cannot determine the type of the " + option['desc'] + " parameter from it's default value: " + option['default'])
settings[option["desc"]] = {
'type': settingType
}
if ("values" in option) :
settings[option["desc"]]['choices'] = option["values"]
return {
'settings': settings
}
def set_settings(self, settings):
# TODO handle the settings
# convert sigrok's
# annotations = (
# ('warning', 'Warning'),
# ....
#
# format annotations to Logic's format
self.sigrokDecoder.reset()
resultTypes = {}
for annotation in self.sigrokDecoder.annotations :
resultTypes[annotation[0]] = annotation[1] + "{{data.data}}"
return {
"result_types": resultTypes
}
def decode(self, data):
self.sigrokDecoder.processI2C(data)
if (not self.packet == {}) :
ret = self.generate_logic_result()
self.packet = {}
return ret | settingType = 'string' |
index.stories.js | // @flow
import * as React from 'react'
import * as Sb from '../../stories/storybook'
import * as Kb from '../../common-adapters'
import * as Styles from '../../styles'
import EditProfile from '.'
const props = {
bio: 'Co-founder of Keybase, OkCupid, SparkNotes, and some random other junk. I like making things.',
bioLengthLeft: 200,
fullname: 'Chris Coyne',
location: 'NYC & Maine',
onBack: () => Sb.action('onBack'),
onBioChange: () => Sb.action('onBioChange'),
onCancel: () => Sb.action('onCancel'),
onEditAvatarClick: () => Sb.action('onEditAvatarClick'),
onEditProfile: () => Sb.action('onEditProfile'),
onFullnameChange: () => Sb.action('onFullnameChange'),
onLocationChange: () => Sb.action('onLocationChange'),
onSubmit: () => Sb.action('onSubmit'),
title: 'Edit profile',
}
const Wrapper = ({children}) => (
<Kb.Box style={{display: 'flex', height: 580, minWidth: Styles.isMobile ? undefined : 640}}>
{children} | </Kb.Box>
)
const load = () => {
Sb.storiesOf('Profile/EditProfile', module)
.add('Normal', () => (
<Wrapper>
<EditProfile {...props} />
</Wrapper>
))
.add('Too long', () => (
<Wrapper>
<EditProfile
{...props}
bio={
'Over 256 characters for this bioaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
}
bioLengthLeft={-2}
/>
</Wrapper>
))
}
export default load | |
dump.js | /*
YUI 3.18.1 (build f7e7bcb)
Copyright 2014 Yahoo! Inc. All rights reserved.
Licensed under the BSD License.
http://yuilibrary.com/license/
*/
YUI.add('dump', function (Y, NAME) {
/**
* Returns a simple string representation of the object or array.
* Other types of objects will be returned unprocessed. Arrays
* are expected to be indexed. Use object notation for
* associative arrays.
*
* If included, the dump method is added to the YUI instance.
*
* @module dump
*/
var L = Y.Lang,
OBJ = '{...}',
FUN = 'f(){...}',
COMMA = ', ',
ARROW = ' => ',
/**
* Returns a simple string representation of the object or array.
* Other types of objects will be returned unprocessed. Arrays
* are expected to be indexed.
*
* @method dump
* @param {Object} o The object to dump.
* @param {Number} d How deep to recurse child objects, default 3.
* @return {String} the dump result.
* @for YUI
*/
dump = function(o, d) {
var i, len, s = [], type = L.type(o);
// Cast non-objects to string
// Skip dates because the std toString is what we want
// Skip HTMLElement-like objects because trying to dump
// an element will cause an unhandled exception in FF 2.x
if (!L.isObject(o)) {
return o + '';
} else if (type == 'date') {
return o;
} else if (o.nodeType && o.tagName) {
return o.tagName + '#' + o.id;
} else if (o.document && o.navigator) {
return 'window';
} else if (o.location && o.body) {
return 'document';
} else if (type == 'function') {
return FUN;
}
// dig into child objects the depth specifed. Default 3
d = (L.isNumber(d)) ? d : 3;
// arrays [1, 2, 3]
if (type == 'array') {
s.push('[');
for (i = 0, len = o.length; i < len; i = i + 1) {
if (L.isObject(o[i])) {
s.push((d > 0) ? L.dump(o[i], d - 1) : OBJ);
} else {
s.push(o[i]);
}
s.push(COMMA);
}
if (s.length > 1) {
s.pop();
}
s.push(']');
// regexp /foo/
} else if (type == 'regexp') {
s.push(o.toString()); | // objects {k1 => v1, k2 => v2}
} else {
s.push('{');
for (i in o) {
if (o.hasOwnProperty(i)) {
try {
s.push(i + ARROW);
if (L.isObject(o[i])) {
s.push((d > 0) ? L.dump(o[i], d - 1) : OBJ);
} else {
s.push(o[i]);
}
s.push(COMMA);
} catch (e) {
s.push('Error: ' + e.message);
}
}
}
if (s.length > 1) {
s.pop();
}
s.push('}');
}
return s.join('');
};
Y.dump = dump;
L.dump = dump;
}, '3.18.1', {"requires": ["yui-base"]}); | |
gh.py | """Base Github use case."""
from __future__ import annotations
import traceback
from typing import Any
import git_portfolio.config_manager as cm
import git_portfolio.github_service as gs
import git_portfolio.responses as res
class GhUseCase:
"""Github use case."""
def | (
self,
config_manager: cm.ConfigManager,
github_service: gs.AbstractGithubService,
github_repo: str = "",
) -> None:
"""Initializer."""
self.config_manager = config_manager
self.github_service = github_service
self.github_repo = github_repo
self.responses: list[res.Response] = []
def call_github_service(
self, method: str, *args: Any, **kwargs: Any
) -> res.Response:
"""Handle error from github_service and return response."""
response: res.Response
try:
method_to_call = getattr(self.github_service, method)
output = method_to_call(*args, **kwargs)
response = res.ResponseSuccess(output)
except gs.GithubServiceError as gse:
response = res.ResponseFailure(res.ResponseTypes.RESOURCE_ERROR, str(gse))
except Exception:
error_msg = (
"An unexpected error occured. Please report at "
"https://github.com/staticdev/git-portfolio/issues/new "
f"with the following info:\n{traceback.format_exc()}"
)
response = res.ResponseFailure(res.ResponseTypes.SYSTEM_ERROR, error_msg)
self.responses.append(response)
return response
def action(self, github_repo: str, *args: Any, **kwargs: Any) -> None:
"""Execute some action in a repo."""
raise NotImplementedError # pragma: no cover
def execute(self, *args: Any, **kwargs: Any) -> list[res.Response]:
"""Execute GitHubUseCase."""
if self.github_repo:
self.action(self.github_repo, *args, **kwargs)
else:
for github_repo in self.config_manager.config.github_selected_repos:
self.action(github_repo, *args, **kwargs)
return self.responses
| __init__ |
utils.js | const { THEMENAME } = require("../env.config");
const fs = require("fs-extra");
const readline = require("readline");
const fileHound = require("filehound");
module.exports = {
addMainCss,
getEnv,
getFilesByExtension,
getScreenshot
};
async function addMainCss() {
const ENV = getEnv();
const rl = readline.createInterface({
input: await fs.createReadStream("./style.css")
});
let modifiedData = "";
// This switch a theme name to test and deploy built theme easily.
rl.on("line", line => {
let regExp = /\Theme Name:/;
if (regExp.exec(line) !== null && ENV == "development") {
modifiedData += `Theme Name: ${THEMENAME}-DEV\n`;
} else if (regExp.exec(line) !== null && ENV == "production") {
modifiedData += `Theme Name: ${THEMENAME}\n`;
} else {
modifiedData += `${line}\n`;
}
});
rl.on("close", async () => {
if (ENV == "production") await fs.copy("./style.css", "./style.tmp");
if (ENV == "development")
await fs.copy("./style.css", "./compiled/main.css");
await fs.writeFile("./style.css", modifiedData, "utf8");
});
}
function getEnv() {
const target = process.env.npm_lifecycle_event; | case "start":
return "development";
case "build":
return "production";
default:
return "development";
}
}
function getFilesByExtension(path, ext) {
return fileHound
.create()
.paths(path)
.discard("node_modules")
.discard("build")
.ext(ext)
.depth(99)
.find();
}
function getScreenshot(path) {
return fileHound
.create()
.paths(path)
.depth(0)
.glob("screenshot.png")
.find();
} |
switch (target) { |
pca.py | import hail as hl
from hail.typecheck import typecheck
from hail.expr.expressions import expr_call, expr_numeric, expr_array, \
check_entry_indexed, check_row_indexed
@typecheck(call_expr=expr_call,
loadings_expr=expr_array(expr_numeric),
af_expr=expr_numeric)
def | (call_expr, loadings_expr, af_expr):
"""Projects genotypes onto pre-computed PCs. Requires loadings and
allele-frequency from a reference dataset (see example). Note that
`loadings_expr` must have no missing data and reflect the rows
from the original PCA run for this method to be accurate.
Example
-------
>>> # Compute loadings and allele frequency for reference dataset
>>> _, _, loadings_ht = hl.hwe_normalized_pca(mt.GT, k=10, compute_loadings=True) # doctest: +SKIP
>>> mt = mt.annotate_rows(af=hl.agg.mean(mt.GT.n_alt_alleles()) / 2) # doctest: +SKIP
>>> loadings_ht = loadings_ht.annotate(af=mt.rows()[loadings_ht.key].af) # doctest: +SKIP
>>> # Project new genotypes onto loadings
>>> ht = pc_project(mt_to_project.GT, loadings_ht.loadings, loadings_ht.af) # doctest: +SKIP
Parameters
----------
call_expr : :class:`.CallExpression`
Entry-indexed call expression for genotypes
to project onto loadings.
loadings_expr : :class:`.ArrayNumericExpression`
Location of expression for loadings
af_expr : :class:`.Float64Expression`
Location of expression for allele frequency
Returns
-------
:class:`.Table`
Table with scores calculated from loadings in column `scores`
"""
check_entry_indexed('pc_project', call_expr)
check_row_indexed('pc_project', loadings_expr)
check_row_indexed('pc_project', af_expr)
gt_source = call_expr._indices.source
loadings_source = loadings_expr._indices.source
af_source = af_expr._indices.source
loadings_expr = _get_expr_or_join(loadings_expr, loadings_source, gt_source, '_loadings')
af_expr = _get_expr_or_join(af_expr, af_source, gt_source, '_af')
mt = gt_source._annotate_all(row_exprs={'_loadings': loadings_expr, '_af': af_expr},
entry_exprs={'_call': call_expr})
if isinstance(loadings_source, hl.MatrixTable):
n_variants = loadings_source.count_rows()
else:
n_variants = loadings_source.count()
mt = mt.filter_rows(hl.is_defined(mt._loadings) & hl.is_defined(mt._af) & (mt._af > 0) & (mt._af < 1))
gt_norm = (mt._call.n_alt_alleles() - 2 * mt._af) / hl.sqrt(n_variants * 2 * mt._af * (1 - mt._af))
return mt.select_cols(scores=hl.agg.array_sum(mt._loadings * gt_norm)).cols()
def _get_expr_or_join(expr, source, other_source, loc):
if source != other_source:
if isinstance(source, hl.MatrixTable):
source = source.annotate_rows(**{loc: expr})
else:
source = source.annotate(**{loc: expr})
expr = source[other_source.row_key][loc]
return expr
| pc_project |
pcap_gen.py | from scapy.all import *
def basic_flows():
flow_numbers = [
#1,
#100,
#5000,
10000,
50000,
75000,
85000,
95000,
#100000
]
for f_n in flow_numbers:
pkts = []
rules = []
for i in range(f_n):
a, b, c = ((i >> 16) & 0xff, (i >> 8) & 0xff, i & 0xff) | r = f"in_port=1,ip,nw_dst={dst},nw_src={src},tcp,tp_src=1,tp_dst=1,actions=output:2"
rules.append(r)
wrpcap(f'test_data/flows_{f_n}.pcap', pkts)
with open(f"test_data/rules_{f_n}.txt", "w") as f:
for r in rules:
f.write(r + "\n")
print(f"done {f_n}")
def lpm_flows():
for i in range(1, 32 + 32 + 16 + 16 + 1):
pass
basic_flows() | src = f"2.{a}.{b}.{c}"
dst = f"1.{a}.{b}.{c}"
pkt = Ether(dst="FF:FF:FF:FF:FF:FF") / IP(dst=dst, src=src) / TCP(sport=1, dport=1) / "0000"
pkts.append(pkt) |
ordersqueue.go | package hftorderbook
// Doubly linked orders queue
// TODO: this should be compared with ring buffer queue performance
type ordersQueue struct {
head *Order
tail *Order
size int
}
func NewOrdersQueue() ordersQueue |
func (this *ordersQueue) Size() int {
return this.size
}
func (this *ordersQueue) IsEmpty() bool {
return this.size == 0
}
func (this *ordersQueue) Enqueue(o *Order) {
tail := this.tail
this.tail = o
if tail != nil {
tail.Next = o
}
if this.head == nil {
this.head = o
}
this.size++
}
func (this *ordersQueue) Dequeue() *Order {
if this.size == 0 {
return nil
}
head := this.head
if this.tail == this.head {
this.tail = nil
}
this.head = this.head.Next
this.size--
return head
}
func (this *ordersQueue) Delete(o *Order) {
prev := o.Prev
next := o.Next
if prev != nil {
prev.Next = next
}
if next != nil {
next.Prev = prev
}
o.Next = nil
o.Prev = nil
this.size--
if this.head == o {
this.head = next
}
if this.tail == o {
this.tail = prev
}
}
| {
return ordersQueue{}
} |
mod.rs | pub mod moss_seeded_tiles;
pub use moss_seeded_tiles::MOSS_SEEDED_TILES;
pub mod moss_tiles;
pub use moss_tiles::MOSS_TILES;
pub mod occupied_tiles;
pub use occupied_tiles::OCCUPIED_TILES;
pub mod revealed_tiles;
pub use revealed_tiles::REVEALED_TILES;
pub mod tile_backgrounds;
pub use tile_backgrounds::TILE_BACKGROUNDS;
pub mod tile_entities;
pub use tile_entities::TILE_ENTITIES;
pub mod tile_lighting;
pub use tile_lighting::TILE_LIGHTING;
pub mod tile_occupants;
pub use tile_occupants::TILE_OCCUPANTS;
pub fn set_spatial_index_dimensions(width: usize, length: usize) | {
moss_seeded_tiles::set_dimensions(width, length);
moss_tiles::set_dimensions(width, length);
occupied_tiles::set_dimensions(width, length);
revealed_tiles::set_dimensions(width, length);
tile_backgrounds::set_dimensions(width, length);
tile_entities::set_dimensions(width, length);
tile_lighting::set_dimensions(width, length);
tile_occupants::set_dimensions(width, length);
} |
|
elements.py | # Protean
from protean.core.field.basic import String
from protean.utils.container import BaseContainer
class CustomBaseContainer(BaseContainer):
def __new__(cls, *args, **kwargs): |
class CustomContainer(CustomBaseContainer):
foo = String(max_length=50, required=True)
bar = String(max_length=50, required=True) | if cls is CustomBaseContainer:
raise TypeError("CustomBaseContainer cannot be instantiated")
return super().__new__(cls) |
lock-simple-thin.js | import { h } from 'vue'
export default {
name: "LockSimpleThin",
vendor: "Ph", | return h(
"svg",
{"xmlns":"http://www.w3.org/2000/svg","viewBox":"0 0 256 256","class":"v-icon","fill":"currentColor","data-name":"ph-lock-simple-thin","innerHTML":" <rect width='256' height='256' fill='none'/> <rect x='39.99414' y='88' width='176' height='128' rx='8' stroke-width='8' stroke='#000' stroke-linecap='round' stroke-linejoin='round' fill='none'/> <path d='M91.99414,88V52a36,36,0,1,1,72,0V88' fill='none' stroke='#000' stroke-linecap='round' stroke-linejoin='round' stroke-width='8'/>"},
)
}
} | type: "",
tags: ["lock","simple","thin"],
render() { |
ripplenet_data_loader.py | # -*- coding: utf-8 -*-
# DISCLAIMER
# This code file is forked and adapted from https://github.com/tezignlab/RippleNet-TF2/blob/master/tools/load_data.py, which is under an MIT license.
""" Utilities for data loading for RippleNet. """
# import libraries
import os
import numpy as np
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
# import custom code
from src.util.logger import setup_logging
from src.util.caching import create_cache, load_cache
from src.config import FILENAME_RATINGS_FINAL_TXT, FILENAME_RATINGS_FINAL_NPY, FILENAME_KG_FINAL_TXT, FILENAME_KG_FINAL_NPY, FILENAME_TRAIN_RATINGS, FILENAME_USER_HISTORY_DICT
from src.config import FILENAME_TEST_RATINGS, FILENAME_TEST_RATINGS_RANDOM, FILENAME_TEST_RATINGS_NO_TFIDF, FILENAME_TEST_RATINGS_NO_WORD2VEC, FILENAME_TEST_RATINGS_NO_TRANSFORMER
class LoadData:
def __init__(self, args):
self.args = args
self.logger = setup_logging(name=__file__, log_level='info')
def | (self) -> Tuple[np.ndarray, np.ndarray, int, int, Dict[int, List[Tuple[int, int, int]]]]:
"""
Loads and returns the data needed in RippleNet.
Returns:
- :obj:`np.ndarray`:
Training set of ratings.
- :obj:`np.ndarray`:
Test set of ratings.
- :obj:`int`:
Number of entities.
- :obj:`int`:
Number of relations.
- :obj:`Dict[int, List[Tuple[int, int, int]]]`:
Ripple sets of each user.
"""
train_data, test_data, user_history_dict = self.load_rating()
n_entity, n_relation, kg = self.load_kg()
ripple_set = self.get_ripple_set(kg, user_history_dict)
return train_data, test_data, n_entity, n_relation, ripple_set
def get_test_file(self, test_set_type: str) -> Path:
"""
Retrieves the filepath of a test set given its type.
Args:
test_set_type (:obj:`str`):
The type of test set.
Returns:
:obj:`Path`:
The filepath of the test set.
"""
test_set_type2file = {
'complete': FILENAME_TEST_RATINGS,
'random': FILENAME_TEST_RATINGS_RANDOM,
'no_tfidf_ratings': FILENAME_TEST_RATINGS_NO_TFIDF,
'no_word2vec_ratings': FILENAME_TEST_RATINGS_NO_WORD2VEC,
'no_transformer_ratings': FILENAME_TEST_RATINGS_NO_TRANSFORMER
}
return test_set_type2file[test_set_type]
def load_rating(self) -> Tuple[np.ndarray, np.ndarray, Dict[int, List[int]]]:
"""
It loads the training and test data, and the user history, if they exist.
Otherwise, it loads the user ratings, processes them to construct the training and test sets, and user history, and caches them to disk.
Returns:
- :obj:`np.ndarray`:
Training set of ratings.
- :obj:`np.ndarray`:
Test set of ratings.
- :obj:`Dict[int, List[int]]`:
User history dictionary.
"""
self.logger.info('Reading rating file.')
test_file = self.get_test_file(self.args.test_set)
if os.path.exists(FILENAME_TRAIN_RATINGS) and os.path.exists(test_file) and os.path.exists(FILENAME_USER_HISTORY_DICT):
self.logger.info('Loading training and test data.')
train_data = np.load(FILENAME_TRAIN_RATINGS)
test_data = np.load(test_file)
user_history_dict = load_cache(FILENAME_USER_HISTORY_DICT)
self.logger.info(f'Size training data: {train_data.shape}.')
self.logger.info(f'Size test data: {test_data.shape}.')
else:
# Read rating file
if os.path.exists(FILENAME_RATINGS_FINAL_NPY):
rating_np = np.load(FILENAME_RATINGS_FINAL_NPY)
else:
rating_np = np.loadtxt(FILENAME_RATINGS_FINAL_TXT, dtype=np.int32)
np.save(FILENAME_RATINGS_FINAL_NPY, rating_np)
# Split dataset
self.logger.info('Splitting dataset.')
test_ratio = 0.2
n_ratings = rating_np.shape[0]
test_indices = np.random.choice(n_ratings, size=int(n_ratings * test_ratio), replace=False)
train_indices = set(range(n_ratings)) - set(test_indices)
# Traverse training data, only keeping the users with positive ratings
user_history_dict = dict()
for i in train_indices:
user = rating_np[i][0]
item = rating_np[i][1]
rating = rating_np[i][2]
if rating == 1:
if user not in user_history_dict:
user_history_dict[user] = []
user_history_dict[user].append(item)
train_indices = [i for i in train_indices if rating_np[i][0] in user_history_dict]
test_indices = [i for i in test_indices if rating_np[i][0] in user_history_dict]
train_data = rating_np[train_indices]
test_data = rating_np[test_indices]
self.logger.info(f'Size training data: {train_data.shape}.')
self.logger.info(f'Size test data: {test_data.shape}.')
# Cache test and train data
np.save(FILENAME_TRAIN_RATINGS, train_data)
np.save(FILENAME_TEST_RATINGS, test_data)
create_cache(user_history_dict, FILENAME_USER_HISTORY_DICT)
self.logger.info('Finished.\n')
return train_data, test_data, user_history_dict
def load_kg(self) -> Tuple[int, int, Dict[int, List[Tuple[int, int]]]]:
"""
Loads the knowledge graph if already cached as :obj:`np.ndarray`, otherwise it constructs it from the text file.
Returns:
- :obj:`int`:
Number of entities.
- :obj:`int`:
Number of relations.
- :obj:`Dict[int, List[Tuple[int, int]]]`:
The knowledge graph as a dictionary which maps each head entity to a tuple of the form (tail, relation).
"""
self.logger.info('Reading KG file.')
# Reading KG file
if os.path.exists(FILENAME_KG_FINAL_NPY):
kg_np = np.load(FILENAME_KG_FINAL_NPY)
else:
kg_np = np.loadtxt(FILENAME_KG_FINAL_TXT, dtype=np.int32)
np.save(FILENAME_KG_FINAL_NPY, kg_np)
n_entity = len(set(kg_np[:, 0]) | set(kg_np[:, 2]))
n_relation = len(set(kg_np[:, 1]))
self.logger.info('Constructing knowledge graph.')
kg = defaultdict(list)
for head, relation, tail in kg_np:
kg[head].append((tail, relation))
self.logger.info('Finished.\n')
return n_entity, n_relation, kg
def get_ripple_set(self, kg: Dict[int, List[Tuple[int, int]]], user_history_dict: Dict[int, List[int]]) -> Dict[int, List[Tuple[int, int, int]]]:
"""
Creates the ripple set for each user.
Args:
kg (:obj:`Dict[int, List[Tuple[int, int]]]`):
The knowledge graph as a dictionary which maps each head entity to a tuple of the form (tail, relation).
user_history_dict (:obj:`Dict[int, List[int]]`):
User history dictionary.
Returns:
:obj:`Dict[int, List[Tuple[int, int, int]]]`:
Ripple sets of each user.
"""
self.logger.info('Constructing ripple set.')
# user -> [(hop_0_heads, hop_0_relations, hop_0_tails), (hop_1_heads, hop_1_relations, hop_1_tails), ...]
ripple_set = defaultdict(list)
for user in user_history_dict:
for h in range(self.args.n_hop):
memories_h = []
memories_r = []
memories_t = []
if h == 0:
tails_of_last_hop = user_history_dict[user]
else:
tails_of_last_hop = ripple_set[user][-1][2]
for entity in tails_of_last_hop:
for tail_and_relation in kg[entity]:
memories_h.append(entity)
memories_r.append(tail_and_relation[1])
memories_t.append(tail_and_relation[0])
"""
If the current ripple set of the given user is empty, we simply copy the ripple set of the last hop here
This won't happen for h = 0, because only the items that appear in the KG have been selected.
"""
if len(memories_h) == 0:
ripple_set[user].append(ripple_set[user][-1])
else:
# Sample a fixed-size 1-hop memory for each user
replace = len(memories_h) < self.args.n_memory
indices = np.random.choice(len(memories_h), size=self.args.n_memory, replace=replace)
memories_h = [memories_h[i] for i in indices]
memories_r = [memories_r[i] for i in indices]
memories_t = [memories_t[i] for i in indices]
ripple_set[user].append((memories_h, memories_r, memories_t))
self.logger.info('Finished.\n')
return ripple_set
| load_data |
notifier.py | from direct.directnotify.DirectNotifyGlobal import directNotify
class Notifier:
def __init__(self, name):
"""
@param name: The name of the notifier. Be sure to add it to your config/Config.prc!
@type name: str | self.notify = directNotify.newCategory(name) | """ |
bundles.go | package sbctl
import (
"encoding/json"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"github.com/foxboron/sbctl/logging"
)
type Bundle struct {
Output string `json:"output"`
IntelMicrocode string `json:"intel_microcode"`
AMDMicrocode string `json:"amd_microcode"`
KernelImage string `json:"kernel_image"`
Initramfs string `json:"initramfs"`
Cmdline string `json:"cmdline"`
Splash string `json:"splash"`
OSRelease string `json:"os_release"`
EFIStub string `json:"efi_stub"`
ESP string `json:"esp"`
}
type Bundles map[string]*Bundle
var BundleDBPath = filepath.Join(DatabasePath, "bundles.db")
func ReadBundleDatabase(dbpath string) (Bundles, error) {
f, err := ReadOrCreateFile(dbpath)
if err != nil {
return nil, err
}
bundles := make(Bundles)
json.Unmarshal(f, &bundles)
return bundles, nil
}
func WriteBundleDatabase(dbpath string, bundles Bundles) error {
data, err := json.MarshalIndent(bundles, "", " ")
if err != nil {
return err
}
err = os.WriteFile(dbpath, data, 0644)
if err != nil {
return err
}
return nil
}
func BundleIter(fn func(s *Bundle) error) error {
files, err := ReadBundleDatabase(BundleDBPath)
if err != nil {
return err
}
for _, s := range files {
if err := fn(s); err != nil {
return err
}
}
return nil
}
func GetEfistub() string {
candidates := []string{
"/lib/systemd/boot/efi/linuxx64.efi.stub",
"/lib/gummiboot/linuxx64.efi.stub",
}
for _, f := range candidates {
if _, err := os.Stat(f); err == nil {
return f
}
}
return ""
}
func NewBundle() *Bundle |
func GenerateBundle(bundle *Bundle) (bool, error) {
args := []string{
"--add-section", fmt.Sprintf(".osrel=%s", bundle.OSRelease), "--change-section-vma", ".osrel=0x20000",
"--add-section", fmt.Sprintf(".cmdline=%s", bundle.Cmdline), "--change-section-vma", ".cmdline=0x30000",
"--add-section", fmt.Sprintf(".linux=%s", bundle.KernelImage), "--change-section-vma", ".linux=0x2000000",
"--add-section", fmt.Sprintf(".initrd=%s", bundle.Initramfs), "--change-section-vma", ".initrd=0x3000000",
}
if bundle.Splash != "" {
args = append(args, "--add-section", fmt.Sprintf(".splash=%s", bundle.Splash), "--change-section-vma", ".splash=0x40000")
}
args = append(args, bundle.EFIStub, bundle.Output)
cmd := exec.Command("objcopy", args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
if errors.Is(err, exec.ErrNotFound) {
return false, err
}
if exitError, ok := err.(*exec.ExitError); ok {
return exitError.ExitCode() == 0, nil
}
}
logging.Print("Wrote EFI bundle %s\n", bundle.Output)
return true, nil
}
| {
esp := GetESP()
stub := GetEfistub()
if stub == "" {
panic("No EFISTUB file found. Please install systemd-boot or gummiboot!")
}
return &Bundle{
Output: "",
IntelMicrocode: "",
AMDMicrocode: "",
KernelImage: "/boot/vmlinuz-linux",
Initramfs: "/boot/initramfs-linux.img",
Cmdline: "/etc/kernel/cmdline",
Splash: "",
OSRelease: "/usr/lib/os-release",
EFIStub: stub,
ESP: esp,
}
} |
menu.selectors.ts | import { createFeatureSelector, createSelector } from "@ngrx/store";
import { AppState } from "../app.state";
import { MenuState } from "./menu.state";
export const menuFeatureName = "menu";
export const selectMenuFeature = createFeatureSelector<AppState, MenuState>( | menuFeatureName
);
export const selectIsMenuDrawerOpened = createSelector(
selectMenuFeature,
(state: MenuState) => state.isMenuDrawerOpened
); | |
test_model.py | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from bokeh.core.properties import Int, List, String
from bokeh.models import * # NOQA
from bokeh.models import CustomJS
from bokeh.plotting import * # NOQA
from bokeh.document import document # isort:skip
# Module under test
from bokeh.model import Model # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class SomeModel(Model):
a = Int(12)
b = String("hello")
c = List(Int, [1, 2, 3])
class Test_js_on_change:
def test_exception_for_no_callbacks(self) -> None:
m = SomeModel()
with pytest.raises(ValueError):
m.js_on_change('foo')
def test_exception_for_bad_callbacks(self) -> None:
m = SomeModel()
for val in [10, "bar", None, [1], {}, 10.2]:
with pytest.raises(ValueError):
m.js_on_change('foo', val)
def test_with_propname(self) -> None:
cb = CustomJS(code="")
m0 = SomeModel()
for name in m0.properties():
m = SomeModel()
m.js_on_change(name, cb)
assert m.js_property_callbacks == {"change:%s" % name: [cb]}
def test_with_non_propname(self) -> None:
cb = CustomJS(code="")
m1 = SomeModel()
m1.js_on_change('foo', cb)
assert m1.js_property_callbacks == {"foo": [cb]}
m2 = SomeModel()
m2.js_on_change('change:b', cb)
assert m2.js_property_callbacks == {"change:b": [cb]}
def test_with_multple_callbacks(self) -> None:
cb1 = CustomJS(code="")
cb2 = CustomJS(code="")
m = SomeModel()
m.js_on_change('foo', cb1, cb2)
assert m.js_property_callbacks == {"foo": [cb1, cb2]}
def test_with_multple_callbacks_separately(self) -> None:
cb1 = CustomJS(code="")
cb2 = CustomJS(code="")
m = SomeModel()
m.js_on_change('foo', cb1)
assert m.js_property_callbacks == {"foo": [cb1]}
m.js_on_change('foo', cb2)
assert m.js_property_callbacks == {"foo": [cb1, cb2]}
def test_ignores_dupe_callbacks(self) -> None:
cb = CustomJS(code="")
m = SomeModel()
m.js_on_change('foo', cb, cb)
assert m.js_property_callbacks == {"foo": [cb]}
class Test_js_on_event:
def test_with_multple_callbacks(self) -> None:
cb1 = CustomJS(code="foo")
cb2 = CustomJS(code="bar")
m = SomeModel()
m.js_on_event("some", cb1, cb2)
assert m.js_event_callbacks == {"some": [cb1, cb2]}
def test_with_multple_callbacks_separately(self) -> None:
cb1 = CustomJS(code="foo")
cb2 = CustomJS(code="bar")
m = SomeModel()
m.js_on_event("some", cb1)
assert m.js_event_callbacks == {"some": [cb1]}
m.js_on_event("some", cb2)
assert m.js_event_callbacks == {"some": [cb1, cb2]}
def test_ignores_dupe_callbacks(self) -> None:
cb = CustomJS(code="foo")
m = SomeModel()
m.js_on_event("some", cb, cb)
assert m.js_event_callbacks == {"some": [cb]}
def | (self) -> None:
cb = CustomJS(code="foo")
m = SomeModel()
m.js_on_event("some", cb)
assert m.js_event_callbacks == {"some": [cb]}
m.js_on_event("some", cb)
assert m.js_event_callbacks == {"some": [cb]}
class Test_js_link:
def test_value_error_on_bad_attr(self) -> None:
m1 = SomeModel()
m2 = SomeModel()
with pytest.raises(ValueError) as e:
m1.js_link('junk', m2, 'b')
assert str(e.value).endswith("%r is not a property of self (%r)" % ("junk", m1))
def test_value_error_on_bad_other(self) -> None:
m1 = SomeModel()
with pytest.raises(ValueError) as e:
m1.js_link('a', 'junk', 'b')
assert str(e.value).endswith("'other' is not a Bokeh model: %r" % "junk")
def test_value_error_on_bad_other_attr(self) -> None:
m1 = SomeModel()
m2 = SomeModel()
with pytest.raises(ValueError) as e:
m1.js_link('a', m2, 'junk')
assert str(e.value).endswith("%r is not a property of other (%r)" % ("junk", m2))
def test_creates_customjs(self) -> None:
m1 = SomeModel()
m2 = SomeModel()
assert len(m1.js_property_callbacks) == 0
m1.js_link('a', m2, 'b')
assert len(m1.js_property_callbacks) == 1
assert "change:a" in m1.js_property_callbacks
cbs = m1.js_property_callbacks["change:a"]
assert len(cbs) == 1
cb = cbs[0]
assert isinstance(cb, CustomJS)
assert cb.args == dict(other=m2)
assert cb.code == "other.b = this.a"
def test_attr_selector_creates_customjs_int(self) -> None:
m1 = SomeModel()
m2 = SomeModel()
assert len(m1.js_property_callbacks) == 0
m1.js_link('a', m2, 'b', 1)
assert len(m1.js_property_callbacks) == 1
assert "change:a" in m1.js_property_callbacks
cbs = m1.js_property_callbacks["change:a"]
assert len(cbs) == 1
cb = cbs[0]
assert isinstance(cb, CustomJS)
assert cb.args == dict(other=m2)
assert cb.code == "other.b = this.a[1]"
def test_attr_selector_creates_customjs_with_zero(self) -> None:
m1 = SomeModel()
m2 = SomeModel()
assert len(m1.js_property_callbacks) == 0
m1.js_link('a', m2, 'b', 0)
assert len(m1.js_property_callbacks) == 1
assert "change:a" in m1.js_property_callbacks
cbs = m1.js_property_callbacks["change:a"]
assert len(cbs) == 1
cb = cbs[0]
assert isinstance(cb, CustomJS)
assert cb.args == dict(other=m2)
assert cb.code == "other.b = this.a[0]"
def test_attr_selector_creates_customjs_str(self) -> None:
m1 = SomeModel()
m2 = SomeModel()
assert len(m1.js_property_callbacks) == 0
m1.js_link('a', m2, 'b', "test")
assert len(m1.js_property_callbacks) == 1
assert "change:a" in m1.js_property_callbacks
cbs = m1.js_property_callbacks["change:a"]
assert len(cbs) == 1
cb = cbs[0]
assert isinstance(cb, CustomJS)
assert cb.args == dict(other=m2)
assert cb.code == "other.b = this.a['test']"
def test_all_builtin_models_default_constructible() -> None:
bad = []
for name, cls in Model.model_class_reverse_map.items():
try:
cls()
except Exception:
bad.append(name)
assert bad == []
def test_select() -> None:
# we aren't trying to replace test_query here, only test
# our wrappers around it, so no need to try every kind of
# query
d = document.Document()
root1 = SomeModel(a=42, name='a')
root2 = SomeModel(a=43, name='c')
root3 = SomeModel(a=44, name='d')
root4 = SomeModel(a=45, name='d')
d.add_root(root1)
d.add_root(root2)
d.add_root(root3)
d.add_root(root4)
# select()
assert {root1} == set(root1.select(dict(a=42)))
assert {root1} == set(root1.select(dict(name="a")))
assert {root2} == set(root2.select(dict(name="c")))
assert set() == set(root1.select(dict(name="nope")))
# select() on object
assert set() == set(root3.select(dict(name='a')))
assert {root3} == set(root3.select(dict(a=44)))
# select_one()
assert root3 == root3.select_one(dict(name='d'))
assert root1.select_one(dict(name='nope')) is None
with pytest.raises(ValueError) as e:
d.select_one(dict(name='d'))
assert 'Found more than one' in repr(e)
# select_one() on object
assert root3.select_one(dict(name='a')) is None
assert root3.select_one(dict(name='c')) is None
# set_select()
root1.set_select(dict(a=42), dict(name="c", a=44))
assert {root1} == set(root1.select(dict(name="c")))
assert {root1} == set(root1.select(dict(a=44)))
# set_select() on object
root3.set_select(dict(name='d'), dict(a=57))
assert {root3} == set(root3.select(dict(a=57)))
# set_select() on class
root2.set_select(SomeModel, dict(name='new_name'))
assert {root2} == set(root2.select(dict(name="new_name")))
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| test_ignores_dupe_callbacks_separately |
hello.rs | extern crate pancurses;
use pancurses::{initscr, endwin};
fn | () {
let window = initscr();
window.printw("Hello Rust");
window.refresh();
window.getch();
endwin();
}
| main |
wallet_balance.py | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The BitRub Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet balance RPC methods."""
from decimal import Decimal
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE as ADDRESS_WATCHONLY
from test_framework.test_framework import BitRubTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
sync_blocks,
)
def create_transactions(node, address, amt, fees):
# Create and sign raw transactions from node to address for amt.
# Creates a transaction for each fee and returns an array
# of the raw transactions.
utxos = [u for u in node.listunspent(0) if u['spendable']]
# Create transactions
inputs = []
ins_total = 0
for utxo in utxos:
inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]})
ins_total += utxo['amount']
if ins_total >= amt + max(fees):
break
# make sure there was enough utxos
assert ins_total >= amt + max(fees)
txs = []
for fee in fees:
outputs = {address: amt}
# prevent 0 change output
if ins_total > amt + fee:
outputs[node.getrawchangeaddress()] = ins_total - amt - fee
raw_tx = node.createrawtransaction(inputs, outputs, 0, True)
raw_tx = node.signrawtransactionwithwallet(raw_tx)
assert_equal(raw_tx['complete'], True)
txs.append(raw_tx)
return txs
class WalletTest(BitRubTestFramework):
def | (self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [
['-limitdescendantcount=3'], # Limit mempool descendants as a hack to have wallet txs rejected from the mempool
[],
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].importaddress(ADDRESS_WATCHONLY)
# Check that nodes don't own any UTXOs
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
self.log.info("Check that only node 0 is watching an address")
assert 'watchonly' in self.nodes[0].getbalances()
assert 'watchonly' not in self.nodes[1].getbalances()
self.log.info("Mining blocks ...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[1].generate(1)
self.nodes[1].generatetoaddress(101, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalances()['mine']['trusted'], 50)
assert_equal(self.nodes[0].getwalletinfo()['balance'], 50)
assert_equal(self.nodes[1].getbalances()['mine']['trusted'], 50)
assert_equal(self.nodes[0].getbalances()['watchonly']['immature'], 5000)
assert 'watchonly' not in self.nodes[1].getbalances()
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
self.log.info("Test getbalance with different arguments")
assert_equal(self.nodes[0].getbalance("*"), 50)
assert_equal(self.nodes[0].getbalance("*", 1), 50)
assert_equal(self.nodes[0].getbalance("*", 1, True), 100)
assert_equal(self.nodes[0].getbalance(minconf=1), 50)
assert_equal(self.nodes[0].getbalance(minconf=0, include_watchonly=True), 100)
assert_equal(self.nodes[1].getbalance(minconf=0, include_watchonly=True), 50)
# Send 40 BTR from 0 to 1 and 60 BTR from 1 to 0.
txs = create_transactions(self.nodes[0], self.nodes[1].getnewaddress(), 40, [Decimal('0.01')])
self.nodes[0].sendrawtransaction(txs[0]['hex'])
self.nodes[1].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), 60, [Decimal('0.01'), Decimal('0.02')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[0].sendrawtransaction(txs[0]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
# First argument of getbalance must be set to "*"
assert_raises_rpc_error(-32, "dummy first argument must be excluded or set to \"*\"", self.nodes[1].getbalance, "")
self.log.info("Test getbalance and getunconfirmedbalance with unconfirmed inputs")
# Before `test_balance()`, we have had two nodes with a balance of 50
# each and then we:
#
# 1) Sent 40 from node A to node B with fee 0.01
# 2) Sent 60 from node B to node A with fee 0.01
#
# Then we check the balances:
#
# 1) As is
# 2) With transaction 2 from above with 2x the fee
#
# Prior to #16766, in this situation, the node would immediately report
# a balance of 30 on node B as unconfirmed and trusted.
#
# After #16766, we show that balance as unconfirmed.
#
# The balance is indeed "trusted" and "confirmed" insofar as removing
# the mempool transactions would return at least that much money. But
# the algorithm after #16766 marks it as unconfirmed because the 'taint'
# tracking of transaction trust for summing balances doesn't consider
# which inputs belong to a user. In this case, the change output in
# question could be "destroyed" by replace the 1st transaction above.
#
# The post #16766 behavior is correct; we shouldn't be treating those
# funds as confirmed. If you want to rely on that specific UTXO existing
# which has given you that balance, you cannot, as a third party
# spending the other input would destroy that unconfirmed.
#
# For example, if the test transactions were:
#
# 1) Sent 40 from node A to node B with fee 0.01
# 2) Sent 10 from node B to node A with fee 0.01
#
# Then our node would report a confirmed balance of 40 + 50 - 10 = 80
# BTR, which is more than would be available if transaction 1 were
# replaced.
def test_balances(*, fee_node_1=0):
# getbalance without any arguments includes unconfirmed transactions, but not untrusted transactions
assert_equal(self.nodes[0].getbalance(), Decimal('9.99')) # change from node 0's send
assert_equal(self.nodes[1].getbalance(), Decimal('0')) # node 1's send had an unsafe input
# Same with minconf=0
assert_equal(self.nodes[0].getbalance(minconf=0), Decimal('9.99'))
assert_equal(self.nodes[1].getbalance(minconf=0), Decimal('0'))
# getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
# TODO: fix getbalance tracking of coin spentness depth
assert_equal(self.nodes[0].getbalance(minconf=1), Decimal('0'))
assert_equal(self.nodes[1].getbalance(minconf=1), Decimal('0'))
# getunconfirmedbalance
assert_equal(self.nodes[0].getunconfirmedbalance(), Decimal('60')) # output of node 1's spend
assert_equal(self.nodes[0].getbalances()['mine']['untrusted_pending'], Decimal('60'))
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], Decimal('60'))
assert_equal(self.nodes[1].getunconfirmedbalance(), Decimal('30') - fee_node_1) # Doesn't include output of node 0's send since it was spent
assert_equal(self.nodes[1].getbalances()['mine']['untrusted_pending'], Decimal('30') - fee_node_1)
assert_equal(self.nodes[1].getwalletinfo()["unconfirmed_balance"], Decimal('30') - fee_node_1)
test_balances(fee_node_1=Decimal('0.01'))
# Node 1 bumps the transaction fee and resends
self.nodes[1].sendrawtransaction(txs[1]['hex'])
self.nodes[0].sendrawtransaction(txs[1]['hex']) # sending on both nodes is faster than waiting for propagation
self.sync_all()
self.log.info("Test getbalance and getunconfirmedbalance with conflicted unconfirmed inputs")
test_balances(fee_node_1=Decimal('0.02'))
self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
self.sync_all()
# balances are correct after the transactions are confirmed
assert_equal(self.nodes[0].getbalance(), Decimal('69.99')) # node 1's send plus change from node 0's send
assert_equal(self.nodes[1].getbalance(), Decimal('29.98')) # change from node 0's send
# Send total balance away from node 1
txs = create_transactions(self.nodes[1], self.nodes[0].getnewaddress(), Decimal('29.97'), [Decimal('0.01')])
self.nodes[1].sendrawtransaction(txs[0]['hex'])
self.nodes[1].generatetoaddress(2, ADDRESS_WATCHONLY)
self.sync_all()
# getbalance with a minconf incorrectly excludes coins that have been spent more recently than the minconf blocks ago
# TODO: fix getbalance tracking of coin spentness depth
# getbalance with minconf=3 should still show the old balance
assert_equal(self.nodes[1].getbalance(minconf=3), Decimal('0'))
# getbalance with minconf=2 will show the new balance.
assert_equal(self.nodes[1].getbalance(minconf=2), Decimal('0'))
# check mempool transactions count for wallet unconfirmed balance after
# dynamically loading the wallet.
before = self.nodes[1].getunconfirmedbalance()
dst = self.nodes[1].getnewaddress()
self.nodes[1].unloadwallet('')
self.nodes[0].sendtoaddress(dst, 0.1)
self.sync_all()
self.nodes[1].loadwallet('')
after = self.nodes[1].getunconfirmedbalance()
assert_equal(before + Decimal('0.1'), after)
# Create 3 more wallet txs, where the last is not accepted to the
# mempool because it is the third descendant of the tx above
for _ in range(3):
# Set amount high enough such that all coins are spent by each tx
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 99)
self.log.info('Check that wallet txs not in the mempool are untrusted')
assert txid not in self.nodes[0].getrawmempool()
assert_equal(self.nodes[0].gettransaction(txid)['trusted'], False)
assert_equal(self.nodes[0].getbalance(minconf=0), 0)
self.log.info("Test replacement and reorg of non-mempool tx")
tx_orig = self.nodes[0].gettransaction(txid)['hex']
# Increase fee by 1 coin
tx_replace = tx_orig.replace(
struct.pack("<q", 99 * 10**8).hex(),
struct.pack("<q", 98 * 10**8).hex(),
)
tx_replace = self.nodes[0].signrawtransactionwithwallet(tx_replace)['hex']
# Total balance is given by the sum of outputs of the tx
total_amount = sum([o['value'] for o in self.nodes[0].decoderawtransaction(tx_replace)['vout']])
self.sync_all()
self.nodes[1].sendrawtransaction(hexstring=tx_replace, maxfeerate=0)
# Now confirm tx_replace
block_reorg = self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)[0]
self.sync_all()
assert_equal(self.nodes[0].getbalance(minconf=0), total_amount)
self.log.info('Put txs back into mempool of node 1 (not node 0)')
self.nodes[0].invalidateblock(block_reorg)
self.nodes[1].invalidateblock(block_reorg)
self.sync_blocks()
self.nodes[0].syncwithvalidationinterfacequeue()
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
self.nodes[0].generatetoaddress(1, ADDRESS_WATCHONLY)
assert_equal(self.nodes[0].getbalance(minconf=0), 0) # wallet txs not in the mempool are untrusted
# Now confirm tx_orig
self.restart_node(1, ['-persistmempool=0'])
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
self.nodes[1].sendrawtransaction(tx_orig)
self.nodes[1].generatetoaddress(1, ADDRESS_WATCHONLY)
self.sync_all()
assert_equal(self.nodes[0].getbalance(minconf=0), total_amount + 1) # The reorg recovered our fee of 1 coin
if __name__ == '__main__':
WalletTest().main()
| set_test_params |
stores.go | package db
var (
AccessTokens = &accessTokens{}
ExternalServices = &ExternalServicesStore{}
DefaultRepos = &defaultRepos{} | OrgMembers = &orgMembers{}
SavedSearches = &savedSearches{}
Settings = &settings{}
Users = &users{}
UserEmails = &userEmails{}
EventLogs = &eventLogs{}
SurveyResponses = &surveyResponses{}
ExternalAccounts = &userExternalAccounts{}
OrgInvitations = &orgInvitations{}
Authz AuthzStore = &authzStore{}
Secrets = &secrets{}
) | Repos = &repos{}
Phabricator = &phabricator{}
QueryRunnerState = &queryRunnerState{}
Orgs = &orgs{} |
flags.rs | // Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
use clap::App;
use clap::AppSettings;
use clap::Arg;
use clap::ArgMatches;
use clap::ArgSettings;
use clap::SubCommand;
use deno_core::serde::Deserialize;
use deno_core::serde::Serialize;
use deno_core::url::Url;
use deno_runtime::permissions::PermissionsOptions;
use log::debug;
use log::Level;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::str::FromStr;
lazy_static::lazy_static! {
static ref LONG_VERSION: String = format!(
"{} ({}, {})\nv8 {}\ntypescript {}",
crate::version::deno(),
if crate::version::is_canary() {
"canary"
} else {
env!("PROFILE")
},
env!("TARGET"),
deno_core::v8_version(),
crate::version::TYPESCRIPT
);
}
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub enum DenoSubcommand {
Bundle {
source_file: String,
out_file: Option<PathBuf>,
},
Cache {
files: Vec<String>,
},
Compile {
source_file: String,
output: Option<PathBuf>,
args: Vec<String>,
target: Option<String>,
},
Completions {
buf: Box<[u8]>,
},
Coverage {
files: Vec<PathBuf>,
ignore: Vec<PathBuf>,
include: Vec<String>,
exclude: Vec<String>,
lcov: bool,
},
Doc {
private: bool,
json: bool,
source_file: Option<String>,
filter: Option<String>,
},
Eval {
print: bool,
code: String,
ext: String,
},
Fmt {
check: bool,
files: Vec<PathBuf>,
ignore: Vec<PathBuf>,
ext: String,
},
Info {
json: bool,
file: Option<String>,
},
Install {
module_url: String,
args: Vec<String>,
name: Option<String>,
root: Option<PathBuf>,
force: bool,
},
Lsp,
Lint {
files: Vec<PathBuf>,
ignore: Vec<PathBuf>,
rules: bool,
json: bool,
},
Repl,
Run {
script: String,
},
Test {
doc: bool,
no_run: bool,
fail_fast: Option<usize>,
quiet: bool,
allow_none: bool,
include: Option<Vec<String>>,
filter: Option<String>,
shuffle: Option<u64>,
concurrent_jobs: usize,
},
Types,
Upgrade {
dry_run: bool,
force: bool,
canary: bool,
version: Option<String>,
output: Option<PathBuf>,
ca_file: Option<String>,
},
}
impl Default for DenoSubcommand {
fn default() -> DenoSubcommand {
DenoSubcommand::Repl
}
}
#[derive(Clone, Debug, PartialEq, Default)]
pub struct Flags {
/// Vector of CLI arguments - these are user script arguments, all Deno
/// specific flags are removed.
pub argv: Vec<String>,
pub subcommand: DenoSubcommand,
pub allow_env: Option<Vec<String>>,
pub allow_hrtime: bool,
pub allow_net: Option<Vec<String>>,
pub allow_plugin: bool,
pub allow_read: Option<Vec<PathBuf>>,
pub allow_run: Option<Vec<String>>,
pub allow_write: Option<Vec<PathBuf>>,
pub location: Option<Url>,
pub cache_blocklist: Vec<String>,
pub ca_file: Option<String>,
pub cached_only: bool,
pub config_path: Option<String>,
pub coverage_dir: Option<String>,
pub ignore: Vec<PathBuf>,
pub import_map_path: Option<String>,
pub inspect: Option<SocketAddr>,
pub inspect_brk: Option<SocketAddr>,
pub lock: Option<PathBuf>,
pub lock_write: bool,
pub log_level: Option<Level>,
pub no_check: bool,
pub prompt: bool,
pub no_remote: bool,
pub reload: bool,
pub repl: bool,
pub seed: Option<u64>,
pub unstable: bool,
pub v8_flags: Vec<String>,
pub version: bool,
pub watch: bool,
}
fn join_paths(allowlist: &[PathBuf], d: &str) -> String {
allowlist
.iter()
.map(|path| path.to_str().unwrap().to_string())
.collect::<Vec<String>>()
.join(d)
}
impl Flags {
/// Return list of permission arguments that are equivalent
/// to the ones used to create `self`.
pub fn to_permission_args(&self) -> Vec<String> {
let mut args = vec![];
match &self.allow_read {
Some(read_allowlist) if read_allowlist.is_empty() => {
args.push("--allow-read".to_string());
}
Some(read_allowlist) => {
let s = format!("--allow-read={}", join_paths(read_allowlist, ","));
args.push(s);
}
_ => {}
}
match &self.allow_write {
Some(write_allowlist) if write_allowlist.is_empty() => {
args.push("--allow-write".to_string());
}
Some(write_allowlist) => {
let s = format!("--allow-write={}", join_paths(write_allowlist, ","));
args.push(s);
}
_ => {}
}
match &self.allow_net {
Some(net_allowlist) if net_allowlist.is_empty() => {
args.push("--allow-net".to_string());
}
Some(net_allowlist) => {
let s = format!("--allow-net={}", net_allowlist.join(","));
args.push(s);
}
_ => {}
}
match &self.allow_env {
Some(env_allowlist) if env_allowlist.is_empty() => {
args.push("--allow-env".to_string());
}
Some(env_allowlist) => {
let s = format!("--allow-env={}", env_allowlist.join(","));
args.push(s);
}
_ => {}
}
match &self.allow_run {
Some(run_allowlist) if run_allowlist.is_empty() => {
args.push("--allow-run".to_string());
}
Some(run_allowlist) => {
let s = format!("--allow-run={}", run_allowlist.join(","));
args.push(s);
}
_ => {}
}
if self.allow_plugin {
args.push("--allow-plugin".to_string());
}
if self.allow_hrtime {
args.push("--allow-hrtime".to_string());
}
args
}
}
impl From<Flags> for PermissionsOptions {
fn from(flags: Flags) -> Self {
Self {
allow_env: flags.allow_env,
allow_hrtime: flags.allow_hrtime,
allow_net: flags.allow_net,
allow_plugin: flags.allow_plugin,
allow_read: flags.allow_read,
allow_run: flags.allow_run,
allow_write: flags.allow_write,
prompt: flags.prompt,
}
}
}
static ENV_VARIABLES_HELP: &str = r#"ENVIRONMENT VARIABLES:
DENO_AUTH_TOKENS A semi-colon separated list of bearer tokens and
hostnames to use when fetching remote modules from
private repositories
(e.g. "[email protected];[email protected]")
DENO_CERT Load certificate authority from PEM encoded file
DENO_DIR Set the cache directory
DENO_INSTALL_ROOT Set deno install's output directory
(defaults to $HOME/.deno/bin)
DENO_WEBGPU_TRACE Directory to use for wgpu traces
HTTP_PROXY Proxy address for HTTP requests
(module downloads, fetch)
HTTPS_PROXY Proxy address for HTTPS requests
(module downloads, fetch)
NO_COLOR Set to disable color
NO_PROXY Comma-separated list of hosts which do not use a proxy
(module downloads, fetch)"#;
static DENO_HELP: &str = "A secure JavaScript and TypeScript runtime
Docs: https://deno.land/manual
Modules: https://deno.land/std/ https://deno.land/x/
Bugs: https://github.com/denoland/deno/issues
To start the REPL:
deno
To execute a script:
deno run https://deno.land/std/examples/welcome.ts
To evaluate code in the shell:
deno eval \"console.log(30933 + 404)\"
";
/// Main entry point for parsing deno's command line flags.
pub fn flags_from_vec(args: Vec<String>) -> clap::Result<Flags> {
let version = crate::version::deno();
let app = clap_root(&*version);
let matches = app.get_matches_from_safe(args).map_err(|e| clap::Error {
message: e.message.trim_start_matches("error: ").to_string(),
..e
})?;
let mut flags = Flags::default();
if matches.is_present("unstable") {
flags.unstable = true;
}
if matches.is_present("log-level") {
flags.log_level = match matches.value_of("log-level").unwrap() {
"debug" => Some(Level::Debug),
"info" => Some(Level::Info),
_ => unreachable!(),
};
}
if matches.is_present("quiet") {
flags.log_level = Some(Level::Error);
}
if let Some(m) = matches.subcommand_matches("run") {
run_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("fmt") {
fmt_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("types") {
types_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("cache") {
cache_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("coverage") {
coverage_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("info") {
info_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("eval") {
eval_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("repl") {
repl_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("bundle") {
bundle_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("install") {
install_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("completions") {
completions_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("test") {
test_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("upgrade") {
upgrade_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("doc") {
doc_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("lint") {
lint_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("compile") {
compile_parse(&mut flags, m);
} else if let Some(m) = matches.subcommand_matches("lsp") {
lsp_parse(&mut flags, m);
} else {
repl_parse(&mut flags, &matches);
}
Ok(flags)
}
fn clap_root<'a, 'b>(version: &'b str) -> App<'a, 'b> {
clap::App::new("deno")
.bin_name("deno")
.global_settings(&[
AppSettings::UnifiedHelpMessage,
AppSettings::ColorNever,
AppSettings::VersionlessSubcommands,
])
// Disable clap's auto-detection of terminal width
.set_term_width(0)
// Disable each subcommand having its own version.
.version(version)
.long_version(LONG_VERSION.as_str())
.arg(
Arg::with_name("unstable")
.long("unstable")
.help("Enable unstable features and APIs")
.global(true),
)
.arg(
Arg::with_name("log-level")
.short("L")
.long("log-level")
.help("Set log level")
.takes_value(true)
.possible_values(&["debug", "info"])
.global(true),
)
.arg(
Arg::with_name("quiet")
.short("q")
.long("quiet")
.help("Suppress diagnostic output")
.long_help(
"Suppress diagnostic output
By default, subcommands print human-readable diagnostic messages to stderr.
If the flag is set, restrict these messages to errors.",
)
.global(true),
)
.subcommand(bundle_subcommand())
.subcommand(cache_subcommand())
.subcommand(compile_subcommand())
.subcommand(completions_subcommand())
.subcommand(coverage_subcommand())
.subcommand(doc_subcommand())
.subcommand(eval_subcommand())
.subcommand(fmt_subcommand())
.subcommand(info_subcommand())
.subcommand(install_subcommand())
.subcommand(lsp_subcommand())
.subcommand(lint_subcommand())
.subcommand(repl_subcommand())
.subcommand(run_subcommand())
.subcommand(test_subcommand())
.subcommand(types_subcommand())
.subcommand(upgrade_subcommand())
.long_about(DENO_HELP)
.after_help(ENV_VARIABLES_HELP)
}
fn bundle_subcommand<'a, 'b>() -> App<'a, 'b> {
compile_args(SubCommand::with_name("bundle"))
.arg(
Arg::with_name("source_file")
.takes_value(true)
.required(true),
)
.arg(Arg::with_name("out_file").takes_value(true).required(false))
.arg(watch_arg())
.about("Bundle module and dependencies into single file")
.long_about(
"Output a single JavaScript file with all dependencies.
deno bundle https://deno.land/std/examples/colors.ts colors.bundle.js
If no output file is given, the output is written to standard output:
deno bundle https://deno.land/std/examples/colors.ts",
)
}
fn cache_subcommand<'a, 'b>() -> App<'a, 'b> {
compile_args(SubCommand::with_name("cache"))
.arg(
Arg::with_name("file")
.takes_value(true)
.required(true)
.min_values(1),
)
.about("Cache the dependencies")
.long_about(
"Cache and compile remote dependencies recursively.
Download and compile a module with all of its static dependencies and save them
in the local cache, without running any code:
deno cache https://deno.land/std/http/file_server.ts
Future runs of this module will trigger no downloads or compilation unless
--reload is specified.",
)
}
fn compile_subcommand<'a, 'b>() -> App<'a, 'b> {
runtime_args(SubCommand::with_name("compile"), true, false)
.setting(AppSettings::TrailingVarArg)
.arg(
script_arg().required(true),
)
.arg(
Arg::with_name("output")
.long("output")
.short("o")
.help("Output file (defaults to $PWD/<inferred-name>)")
.takes_value(true)
)
.arg(
Arg::with_name("target")
.long("target")
.help("Target OS architecture")
.takes_value(true)
.possible_values(&["x86_64-unknown-linux-gnu", "x86_64-pc-windows-msvc", "x86_64-apple-darwin", "aarch64-apple-darwin"])
)
.about("UNSTABLE: Compile the script into a self contained executable")
.long_about(
"UNSTABLE: Compiles the given script into a self contained executable.
deno compile -A https://deno.land/std/http/file_server.ts
deno compile --output /usr/local/bin/color_util https://deno.land/std/examples/colors.ts
Any flags passed which affect runtime behavior, such as '--unstable',
'--allow-*', '--v8-flags', etc. are encoded into the output executable and used
at runtime as if they were passed to a similar 'deno run' command.
The executable name is inferred by default:
- Attempt to take the file stem of the URL path. The above example would
become 'file_server'.
- If the file stem is something generic like 'main', 'mod', 'index' or 'cli',
and the path has no parent, take the file name of the parent path. Otherwise
settle with the generic name.
- If the resulting name has an '@...' suffix, strip it.
This commands supports cross-compiling to different target architectures using `--target` flag.
On the first invocation with deno will download proper binary and cache it in $DENO_DIR. The
aarch64-apple-darwin target is not supported in canary.
",
)
}
fn completions_subcommand<'a, 'b>() -> App<'a, 'b> {
SubCommand::with_name("completions")
.setting(AppSettings::DisableHelpSubcommand)
.arg(
Arg::with_name("shell")
.possible_values(&clap::Shell::variants())
.required(true),
)
.about("Generate shell completions")
.long_about(
"Output shell completion script to standard output.
deno completions bash > /usr/local/etc/bash_completion.d/deno.bash
source /usr/local/etc/bash_completion.d/deno.bash",
)
}
/// Builds the `deno coverage` subcommand, which turns coverage profiles
/// collected by `deno test --coverage` into textual or lcov reports.
fn coverage_subcommand<'a, 'b>() -> App<'a, 'b> {
  SubCommand::with_name("coverage")
    .about("Print coverage reports")
    .long_about(
      "Print coverage reports from coverage profiles.
Collect a coverage profile with deno test:
deno test --coverage=cov_profile
Print a report to stdout:
deno coverage cov_profile
Include urls that start with the file schema:
deno coverage --include=\"^file:\" cov_profile
Exclude urls ending with test.ts and test.js:
deno coverage --exclude=\"test\\.(ts|js)\" cov_profile
Include urls that start with the file schema and exclude files ending with test.ts and test.js, for
an url to match it must match the include pattern and not match the exclude pattern:
deno coverage --include=\"^file:\" --exclude=\"test\\.(ts|js)\" cov_profile
Write a report using the lcov format:
deno coverage --lcov cov_profile > cov.lcov
Generate html reports from lcov:
genhtml -o html_cov cov.lcov
",
    )
    .arg(
      Arg::with_name("ignore")
        .long("ignore")
        .takes_value(true)
        .use_delimiter(true)
        .require_equals(true)
        .help("Ignore coverage files"),
    )
    // `include`/`exclude` take regex patterns matched against file URLs.
    .arg(
      Arg::with_name("include")
        .long("include")
        .takes_value(true)
        .value_name("regex")
        .multiple(true)
        .require_equals(true)
        .default_value(r"^file:")
        .help("Include source files in the report"),
    )
    .arg(
      Arg::with_name("exclude")
        .long("exclude")
        .takes_value(true)
        .value_name("regex")
        .multiple(true)
        .require_equals(true)
        .default_value(r"test\.(js|mjs|ts|jsx|tsx)$")
        .help("Exclude source files from the report"),
    )
    .arg(
      Arg::with_name("lcov")
        .long("lcov")
        .help("Output coverage report in lcov format")
        .takes_value(false),
    )
    // Positional: one or more coverage profile directories/files.
    .arg(
      Arg::with_name("files")
        .takes_value(true)
        .multiple(true)
        .required(true),
    )
}
/// Builds the `deno doc` subcommand for rendering module documentation.
fn doc_subcommand<'a, 'b>() -> App<'a, 'b> {
  SubCommand::with_name("doc")
    .about("Show documentation for a module")
    .long_about(
      "Show documentation for a module.
Output documentation to standard output:
deno doc ./path/to/module.ts
Output private documentation to standard output:
deno doc --private ./path/to/module.ts
Output documentation in JSON format:
deno doc --json ./path/to/module.ts
Target a specific symbol:
deno doc ./path/to/module.ts MyClass.someField
Show documentation for runtime built-ins:
deno doc
deno doc --builtin Deno.Listener",
    )
    .arg(import_map_arg())
    .arg(reload_arg())
    .arg(
      Arg::with_name("json")
        .long("json")
        .help("Output documentation in JSON format")
        .takes_value(false),
    )
    .arg(
      Arg::with_name("private")
        .long("private")
        .help("Output private documentation")
        .takes_value(false),
    )
    // TODO(nayeemrmn): Make `--builtin` a proper option. Blocked by
    // https://github.com/clap-rs/clap/issues/1794. Currently `--builtin` is
    // just a possible value of `source_file` so leading hyphens must be
    // enabled.
    .setting(clap::AppSettings::AllowLeadingHyphen)
    .arg(Arg::with_name("source_file").takes_value(true))
    .arg(
      Arg::with_name("filter")
        // e.g. `MyClass.someField`; only usable with human-readable output.
        .help("Dot separated path to symbol")
        .takes_value(true)
        .required(false)
        .conflicts_with("json")
        .conflicts_with("pretty"),
    )
}
/// Builds the `deno eval` subcommand (evaluate code passed on the CLI).
fn eval_subcommand<'a, 'b>() -> App<'a, 'b> {
  runtime_args(SubCommand::with_name("eval"), false, true)
    .about("Eval script")
    .long_about(
      "Evaluate JavaScript from the command line.
deno eval \"console.log('hello world')\"
To evaluate as TypeScript:
deno eval --ext=ts \"const v: string = 'hello'; console.log(v)\"
This command has implicit access to all permissions (--allow-all).",
    )
    .arg(
      // TODO(@satyarohith): remove this argument in 2.0.
      // Deprecated in favor of `--ext=ts`, hence hidden from help output.
      Arg::with_name("ts")
        .long("ts")
        .short("T")
        .help("Treat eval input as TypeScript")
        .takes_value(false)
        .multiple(false)
        .hidden(true),
    )
    .arg(
      Arg::with_name("ext")
        .long("ext")
        .help("Set standard input (stdin) content type")
        .takes_value(true)
        .default_value("js")
        .possible_values(&["ts", "tsx", "js", "jsx"]),
    )
    .arg(
      Arg::with_name("print")
        .long("print")
        .short("p")
        .help("print result to stdout")
        .takes_value(false)
        .multiple(false),
    )
    // Positional: the code itself, followed by argv for the script.
    .arg(
      Arg::with_name("code_arg")
        .multiple(true)
        .help("Code arg")
        .value_name("CODE_ARG")
        .required(true),
    )
}
/// Builds the `deno fmt` subcommand (code formatter).
fn fmt_subcommand<'a, 'b>() -> App<'a, 'b> {
  SubCommand::with_name("fmt")
    .about("Format source files")
    .long_about(
      "Auto-format JavaScript, TypeScript, Markdown, and JSON files.
deno fmt
deno fmt myfile1.ts myfile2.ts
deno fmt --check
Format stdin and write to stdout:
cat file.ts | deno fmt -
Ignore formatting code by preceding it with an ignore comment:
// deno-fmt-ignore
Ignore formatting a file by adding an ignore comment at the top of the file:
// deno-fmt-ignore-file",
    )
    .arg(
      Arg::with_name("check")
        .long("check")
        .help("Check if the source files are formatted")
        .takes_value(false),
    )
    .arg(
      // Only relevant when formatting stdin (`deno fmt -`).
      Arg::with_name("ext")
        .long("ext")
        .help("Set standard input (stdin) content type")
        .takes_value(true)
        .default_value("ts")
        .possible_values(&["ts", "tsx", "js", "jsx", "md", "json", "jsonc"]),
    )
    .arg(
      Arg::with_name("ignore")
        .long("ignore")
        .takes_value(true)
        .use_delimiter(true)
        .require_equals(true)
        .help("Ignore formatting particular source files"),
    )
    .arg(
      Arg::with_name("files")
        .takes_value(true)
        .multiple(true)
        .required(false),
    )
    .arg(watch_arg())
}
/// Builds the `deno info` subcommand (cache / dependency inspection).
fn info_subcommand<'a, 'b>() -> App<'a, 'b> {
  SubCommand::with_name("info")
    .about("Show info about cache or info related to source file")
    .long_about(
      "Information about a module or the cache directories.
Get information about a module:
deno info https://deno.land/std/http/file_server.ts
The following information is shown:
local: Local path of the file.
type: JavaScript, TypeScript, or JSON.
compiled: Local path of compiled source code. (TypeScript only.)
map: Local path of source map. (TypeScript only.)
deps: Dependency tree of the source file.
Without any additional arguments, 'deno info' shows:
DENO_DIR: Directory containing Deno-managed files.
Remote modules cache: Subdirectory containing downloaded remote modules.
TypeScript compiler cache: Subdirectory containing TS compiler output.",
    )
    .arg(Arg::with_name("file").takes_value(true).required(false))
    // `--reload` only makes sense when inspecting a specific module.
    .arg(reload_arg().requires("file"))
    .arg(ca_file_arg())
    .arg(
      location_arg()
        .conflicts_with("file")
        .help("Show files used for origin bound APIs like the Web Storage API when running a script with '--location=<HREF>'")
    )
    // TODO(lucacasonato): remove for 2.0
    .arg(no_check_arg().hidden(true))
    .arg(import_map_arg())
    .arg(
      Arg::with_name("json")
        .long("json")
        .help("UNSTABLE: Outputs the information in JSON format")
        .takes_value(false),
    )
}
/// Builds the `deno install` subcommand (create executable shims).
fn install_subcommand<'a, 'b>() -> App<'a, 'b> {
  runtime_args(SubCommand::with_name("install"), true, true)
    // Everything after the module URL is forwarded to the script.
    .setting(AppSettings::TrailingVarArg)
    .arg(
      // Positional: module URL plus trailing script arguments.
      Arg::with_name("cmd")
        .required(true)
        .multiple(true)
        .allow_hyphen_values(true))
    .arg(
      Arg::with_name("name")
        .long("name")
        .short("n")
        .help("Executable file name")
        .takes_value(true)
        .required(false))
    .arg(
      Arg::with_name("root")
        .long("root")
        .help("Installation root")
        .takes_value(true)
        .multiple(false))
    .arg(
      Arg::with_name("force")
        .long("force")
        .short("f")
        .help("Forcefully overwrite existing installation")
        .takes_value(false))
    .about("Install script as an executable")
    .long_about(
      "Installs a script as an executable in the installation root's bin directory.
deno install --allow-net --allow-read https://deno.land/std/http/file_server.ts
deno install https://deno.land/std/examples/colors.ts
To change the executable name, use -n/--name:
deno install --allow-net --allow-read -n serve https://deno.land/std/http/file_server.ts
The executable name is inferred by default:
- Attempt to take the file stem of the URL path. The above example would
become 'file_server'.
- If the file stem is something generic like 'main', 'mod', 'index' or 'cli',
and the path has no parent, take the file name of the parent path. Otherwise
settle with the generic name.
- If the resulting name has an '@...' suffix, strip it.
To change the installation root, use --root:
deno install --allow-net --allow-read --root /usr/local https://deno.land/std/http/file_server.ts
The installation root is determined, in order of precedence:
- --root option
- DENO_INSTALL_ROOT environment variable
- $HOME/.deno
These must be added to the path manually if required.")
}
/// Builds the `deno lsp` subcommand (Language Server Protocol endpoint).
/// Takes no arguments of its own; see `lsp_parse`.
fn lsp_subcommand<'a, 'b>() -> App<'a, 'b> {
  SubCommand::with_name("lsp")
    .about("Start the language server")
    .long_about(
      "The 'deno lsp' subcommand provides a way for code editors and IDEs to
interact with Deno using the Language Server Protocol. Usually humans do not
use this subcommand directly. For example, 'deno lsp' can provide IDEs with
go-to-definition support and automatic code formatting.
How to connect various editors and IDEs to 'deno lsp':
https://deno.land/manual/getting_started/setup_your_environment#editors-and-ides")
}
/// Builds the `deno lint` subcommand.
fn lint_subcommand<'a, 'b>() -> App<'a, 'b> {
  SubCommand::with_name("lint")
    .about("Lint source files")
    .long_about(
      "Lint JavaScript/TypeScript source code.
deno lint
deno lint myfile1.ts myfile2.js
Print result as JSON:
deno lint --json
Read from stdin:
cat file.ts | deno lint -
cat file.ts | deno lint --json -
List available rules:
deno lint --rules
Ignore diagnostics on the next line by preceding it with an ignore comment and
rule name:
// deno-lint-ignore no-explicit-any
// deno-lint-ignore require-await no-empty
Names of rules to ignore must be specified after ignore comment.
Ignore linting a file by adding an ignore comment at the top of the file:
// deno-lint-ignore-file
",
    )
    .arg(
      Arg::with_name("rules")
        .long("rules")
        .help("List available rules"),
    )
    .arg(
      Arg::with_name("ignore")
        .long("ignore")
        .takes_value(true)
        .use_delimiter(true)
        .require_equals(true)
        .help("Ignore linting particular source files"),
    )
    .arg(
      Arg::with_name("json")
        .long("json")
        .help("Output lint result in JSON format")
        .takes_value(false),
    )
    .arg(
      Arg::with_name("files")
        .takes_value(true)
        .multiple(true)
        .required(false),
    )
}
/// Builds the `deno repl` subcommand (interactive Read Eval Print Loop).
fn repl_subcommand<'a, 'b>() -> App<'a, 'b> {
  let repl = SubCommand::with_name("repl");
  // No permission flags (the REPL grants all implicitly), inspector enabled.
  runtime_args(repl, false, true).about("Read Eval Print Loop")
}
/// Builds the `deno run` subcommand.
fn run_subcommand<'a, 'b>() -> App<'a, 'b> {
  runtime_args(SubCommand::with_name("run"), true, true)
    .arg(
      // File watching and the inspector cannot be combined.
      watch_arg()
        .conflicts_with("inspect")
        .conflicts_with("inspect-brk"),
    )
    // Everything after the script path is forwarded to the script as argv.
    .setting(AppSettings::TrailingVarArg)
    .arg(script_arg().required(true))
    .about("Run a JavaScript or TypeScript program")
    .long_about(
      "Run a JavaScript or TypeScript program
By default all programs are run in sandbox without access to disk, network or
ability to spawn subprocesses.
deno run https://deno.land/std/examples/welcome.ts
Grant all permissions:
deno run -A https://deno.land/std/http/file_server.ts
Grant permission to read from disk and listen to network:
deno run --allow-read --allow-net https://deno.land/std/http/file_server.ts
Grant permission to read allow-listed files from disk:
deno run --allow-read=/etc https://deno.land/std/http/file_server.ts
Deno allows specifying the filename '-' to read the file from stdin.
curl https://deno.land/std/examples/welcome.ts | deno run -",
    )
}
/// Builds the `deno test` subcommand.
///
/// Registers the full set of runtime flags (permissions, inspector, etc.)
/// plus the test-runner specific options consumed by `test_parse`.
fn test_subcommand<'a, 'b>() -> App<'a, 'b> {
  runtime_args(SubCommand::with_name("test"), true, true)
    // Allow `deno test files... -- <script args>` pass-through.
    .setting(AppSettings::TrailingVarArg)
    .arg(
      Arg::with_name("no-run")
        .long("no-run")
        .help("Cache test modules, but don't run tests")
        .takes_value(false),
    )
    .arg(
      Arg::with_name("doc")
        .long("doc")
        .help("UNSTABLE: type check code blocks")
        .takes_value(false),
    )
    .arg(
      Arg::with_name("fail-fast")
        .long("fail-fast")
        .alias("failfast")
        .help("Stop after N errors. Defaults to stopping after first failure.")
        .min_values(0)
        .required(false)
        .takes_value(true)
        .require_equals(true)
        .value_name("N")
        .validator(|val: String| match val.parse::<usize>() {
          Ok(val) => {
            if val == 0 {
              // Fixed grammar in the user-facing message ("an number").
              return Err(
                "fail-fast should be a number greater than 0".to_string(),
              );
            }
            Ok(())
          }
          Err(_) => Err("fail-fast should be a number".to_string()),
        }),
    )
    .arg(
      Arg::with_name("allow-none")
        .long("allow-none")
        .help("Don't return error code if no test files are found")
        .takes_value(false),
    )
    .arg(
      Arg::with_name("filter")
        // Filter patterns may begin with '-'.
        .set(ArgSettings::AllowLeadingHyphen)
        .long("filter")
        .takes_value(true)
        .help("Run tests with this string or pattern in the test name"),
    )
    .arg(
      Arg::with_name("shuffle")
        .long("shuffle")
        .value_name("NUMBER")
        .help("(UNSTABLE): Shuffle the order in which the tests are run")
        .min_values(0)
        .max_values(1)
        .require_equals(true)
        .takes_value(true)
        .validator(|val: String| match val.parse::<u64>() {
          Ok(_) => Ok(()),
          Err(_) => Err("Shuffle seed should be a number".to_string()),
        }),
    )
    .arg(
      Arg::with_name("coverage")
        .long("coverage")
        .require_equals(true)
        .takes_value(true)
        // Coverage collection and the inspector are mutually exclusive.
        .conflicts_with("inspect")
        .conflicts_with("inspect-brk")
        .help("UNSTABLE: Collect coverage profile data"),
    )
    .arg(
      Arg::with_name("jobs")
        .short("j")
        .long("jobs")
        .help("Number of parallel workers, defaults to # of CPUs when no value is provided. Defaults to 1 when the option is not present.")
        .min_values(0)
        .max_values(1)
        .takes_value(true)
        .validator(|val: String| match val.parse::<usize>() {
          Ok(_) => Ok(()),
          Err(_) => Err("jobs should be a number".to_string()),
        }),
    )
    .arg(
      Arg::with_name("files")
        .help("List of file names to run")
        .takes_value(true)
        .multiple(true),
    )
    .arg(
      watch_arg()
        .conflicts_with("no-run")
        .conflicts_with("coverage"),
    )
    // Values after `--` become argv for the tests.
    .arg(script_arg().last(true))
    .about("Run tests")
    .long_about(
      "Run tests using Deno's built-in test runner.
Evaluate the given modules, run all tests declared with 'Deno.test()' and
report results to standard output:
deno test src/fetch_test.ts src/signal_test.ts
Directory arguments are expanded to all contained files matching the glob
{*_,*.,}test.{js,mjs,ts,jsx,tsx}:
deno test src/",
    )
}
/// Builds the `deno types` subcommand (prints the runtime `.d.ts`).
fn types_subcommand<'a, 'b>() -> App<'a, 'b> {
  SubCommand::with_name("types")
    .about("Print runtime TypeScript declarations")
    .long_about(
      "Print runtime TypeScript declarations.
deno types > lib.deno.d.ts
The declaration file could be saved and used for typing information.",
    )
}
/// Builds the `deno upgrade` subcommand (self-update of the executable).
fn upgrade_subcommand<'a, 'b>() -> App<'a, 'b> {
  SubCommand::with_name("upgrade")
    .about("Upgrade deno executable to given version")
    .long_about(
      "Upgrade deno executable to the given version.
Defaults to latest.
The version is downloaded from
https://github.com/denoland/deno/releases
and is used to replace the current executable.
If you want to not replace the current Deno executable but instead download an
update to a different location, use the --output flag
deno upgrade --output $HOME/my_deno",
    )
    .arg(
      Arg::with_name("version")
        .long("version")
        .help("The version to upgrade to")
        .takes_value(true),
    )
    .arg(
      Arg::with_name("output")
        .long("output")
        .help("The path to output the updated version to")
        .takes_value(true),
    )
    .arg(
      Arg::with_name("dry-run")
        .long("dry-run")
        .help("Perform all checks without replacing old exe"),
    )
    .arg(
      Arg::with_name("force")
        .long("force")
        .short("f")
        .help("Replace current exe even if not out-of-date"),
    )
    .arg(
      Arg::with_name("canary")
        .long("canary")
        .help("Upgrade to canary builds"),
    )
    // Custom CA bundle for the download connection.
    .arg(ca_file_arg())
}
/// Registers the flags shared by every subcommand that compiles modules
/// (import map, lock file, reload, certificate authority, etc.).
fn compile_args<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> {
  // Same flags, registered in the same order as individual `.arg` calls.
  app.args(&[
    import_map_arg(),
    no_remote_arg(),
    config_arg(),
    no_check_arg(),
    reload_arg(),
    lock_arg(),
    lock_write_arg(),
    ca_file_arg(),
  ])
}
/// Registers the `--allow-*` permission flags on `app`.
///
/// The list-taking flags use `require_equals` plus `min_values(0)` so a
/// bare flag (e.g. `--allow-read`) means an empty allowlist, i.e. allow
/// everything for that permission.
fn permission_args<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> {
  app
    .arg(
      Arg::with_name("allow-read")
        .long("allow-read")
        .min_values(0)
        .takes_value(true)
        .use_delimiter(true)
        .require_equals(true)
        .help("Allow file system read access"),
    )
    .arg(
      Arg::with_name("allow-write")
        .long("allow-write")
        .min_values(0)
        .takes_value(true)
        .use_delimiter(true)
        .require_equals(true)
        .help("Allow file system write access"),
    )
    .arg(
      Arg::with_name("allow-net")
        .long("allow-net")
        .min_values(0)
        .takes_value(true)
        .use_delimiter(true)
        .require_equals(true)
        .help("Allow network access")
        // host[:port] entries are validated by the shared helper module.
        .validator(crate::flags_allow_net::validator),
    )
    .arg(
      Arg::with_name("allow-env")
        .long("allow-env")
        .min_values(0)
        .takes_value(true)
        .use_delimiter(true)
        .require_equals(true)
        .help("Allow environment access")
        // Env var names must be non-empty and free of '=' and NUL.
        .validator(|keys| {
          for key in keys.split(',') {
            if key.is_empty() || key.contains(&['=', '\0'] as &[char]) {
              return Err(format!("invalid key \"{}\"", key));
            }
          }
          Ok(())
        }),
    )
    .arg(
      Arg::with_name("allow-run")
        .long("allow-run")
        .min_values(0)
        .takes_value(true)
        .use_delimiter(true)
        .require_equals(true)
        .help("Allow running subprocesses"),
    )
    .arg(
      Arg::with_name("allow-plugin")
        .long("allow-plugin")
        .help("Allow loading plugins"),
    )
    .arg(
      Arg::with_name("allow-hrtime")
        .long("allow-hrtime")
        .help("Allow high resolution time measurement"),
    )
    .arg(
      Arg::with_name("allow-all")
        .short("A")
        .long("allow-all")
        .help("Allow all permissions"),
    )
    .arg(
      Arg::with_name("prompt")
        .long("prompt")
        .help("Fallback to prompt if required permission wasn't passed"),
    )
}
/// Registers the runtime flags common to `run`-like subcommands.
///
/// Permission and inspector flags are opt-in because not every subcommand
/// supports them (e.g. `eval` skips permissions).
fn runtime_args<'a, 'b>(
  app: App<'a, 'b>,
  include_perms: bool,
  include_inspector: bool,
) -> App<'a, 'b> {
  let mut app = compile_args(app);
  if include_perms {
    app = permission_args(app);
  }
  if include_inspector {
    app = inspect_args(app);
  }
  app
    .arg(cached_only_arg())
    .arg(location_arg())
    .arg(v8_flags_arg())
    .arg(seed_arg())
}
/// Registers `--inspect` and `--inspect-brk`; both take an optional
/// HOST:PORT value checked by `inspect_arg_validate`.
fn inspect_args<'a, 'b>(app: App<'a, 'b>) -> App<'a, 'b> {
  app
    .arg(
      Arg::with_name("inspect")
        .long("inspect")
        .value_name("HOST:PORT")
        .help("Activate inspector on host:port (default: 127.0.0.1:9229)")
        // Bare `--inspect` is allowed; the default address is filled in
        // later by `inspect_arg_parse`.
        .min_values(0)
        .max_values(1)
        .require_equals(true)
        .takes_value(true)
        .validator(inspect_arg_validate),
    )
    .arg(
      Arg::with_name("inspect-brk")
        .long("inspect-brk")
        .value_name("HOST:PORT")
        .help(
          "Activate inspector on host:port and break at start of user script",
        )
        .min_values(0)
        .max_values(1)
        .require_equals(true)
        .takes_value(true)
        .validator(inspect_arg_validate),
    )
}
/// `--import-map <FILE>`: import map used for bare-specifier resolution.
fn import_map_arg<'a, 'b>() -> Arg<'a, 'b> {
  Arg::with_name("import-map")
    .long("import-map")
    // Historical spelling kept for backwards compatibility.
    .alias("importmap")
    .value_name("FILE")
    .help("Load import map file")
    .long_help(
      "Load import map file from local file or remote URL.
Docs: https://deno.land/manual/linking_to_external_code/import_maps
Specification: https://wicg.github.io/import-maps/
Examples: https://github.com/WICG/import-maps#the-import-map",
    )
    .takes_value(true)
}
/// `-r, --reload[=<CACHE_BLOCKLIST>]`: recompile cached sources, either
/// everything (bare flag) or only the listed module URLs.
fn reload_arg<'a, 'b>() -> Arg<'a, 'b> {
  Arg::with_name("reload")
    .short("r")
    .min_values(0)
    .takes_value(true)
    .use_delimiter(true)
    .require_equals(true)
    .long("reload")
    .help("Reload source code cache (recompile TypeScript)")
    .value_name("CACHE_BLOCKLIST")
    .long_help(
      "Reload source code cache (recompile TypeScript)
--reload
Reload everything
--reload=https://deno.land/std
Reload only standard modules
--reload=https://deno.land/std/fs/utils.ts,https://deno.land/std/fmt/colors.ts
Reloads specific modules",
    )
}
/// `--cert <FILE>`: PEM file supplying extra certificate authorities.
fn ca_file_arg<'a, 'b>() -> Arg<'a, 'b> {
  let cert = Arg::with_name("cert").long("cert");
  cert
    .value_name("FILE")
    .takes_value(true)
    .help("Load certificate authority from PEM encoded file")
}
/// `--cached-only`: refuse to download remote dependencies at runtime.
fn cached_only_arg<'a, 'b>() -> Arg<'a, 'b> {
  let arg = Arg::with_name("cached-only");
  arg
    .help("Require that remote dependencies are already cached")
    .long("cached-only")
}
/// `--location <HREF>`: value used for `globalThis.location` by some
/// web APIs in the runtime.
///
/// The validator requires an http(s) URL and verifies that credentials
/// can be cleared from it.
fn location_arg<'a, 'b>() -> Arg<'a, 'b> {
  Arg::with_name("location")
    .long("location")
    .takes_value(true)
    .value_name("HREF")
    .validator(|href| {
      // Idiom fix: propagate the parse failure with `?` instead of the
      // `is_err()` check followed by `unwrap()`.
      let mut url =
        Url::parse(&href).map_err(|_| "Failed to parse URL".to_string())?;
      if !["http", "https"].contains(&url.scheme()) {
        return Err("Expected protocol \"http\" or \"https\"".to_string());
      }
      url.set_username("").unwrap();
      url.set_password(None).unwrap();
      Ok(())
    })
    .help("Value of 'globalThis.location' used by some web APIs")
}
/// `--v8-flags=<flags>`: comma separated options forwarded to V8.
fn v8_flags_arg<'a, 'b>() -> Arg<'a, 'b> {
  let v8 = Arg::with_name("v8-flags").long("v8-flags");
  v8.require_equals(true)
    .use_delimiter(true)
    .takes_value(true)
    .help("Set V8 command line options (for help: --v8-flags=--help)")
}
/// `--seed <NUMBER>`: deterministic seed for `Math.random()`.
fn seed_arg<'a, 'b>() -> Arg<'a, 'b> {
  // Named validator instead of an inline closure; same check and message.
  fn check_seed(val: String) -> Result<(), String> {
    if val.parse::<u64>().is_ok() {
      Ok(())
    } else {
      Err("Seed should be a number".to_string())
    }
  }
  Arg::with_name("seed")
    .long("seed")
    .value_name("NUMBER")
    .takes_value(true)
    .help("Seed Math.random()")
    .validator(check_seed)
}
/// `--watch`: restart the process on file changes (unstable).
fn watch_arg<'a, 'b>() -> Arg<'a, 'b> {
  Arg::with_name("watch")
    .long("watch")
    .help("UNSTABLE: Watch for file changes and restart process automatically")
    .long_help(
      "UNSTABLE: Watch for file changes and restart process automatically.
Only local files from entry point module graph are watched.",
    )
}
/// `--no-check`: skip TypeScript type checking.
fn no_check_arg<'a, 'b>() -> Arg<'a, 'b> {
  let arg = Arg::with_name("no-check");
  arg.help("Skip type checking modules").long("no-check")
}
/// Trailing positional: the script to run plus its argv.
fn script_arg<'a, 'b>() -> Arg<'a, 'b> {
  Arg::with_name("script_arg")
    .multiple(true)
    // NOTE: these defaults are provided
    // so `deno run --v8-flags=--help` works
    // without specifying file to run.
    .default_value_ifs(&[
      ("v8-flags", Some("--help"), "_"),
      ("v8-flags", Some("-help"), "_"),
    ])
    .help("Script arg")
    .value_name("SCRIPT_ARG")
}
/// `--lock <FILE>`: lock file to validate module integrity against.
fn lock_arg<'a, 'b>() -> Arg<'a, 'b> {
  let lock = Arg::with_name("lock").long("lock");
  lock
    .takes_value(true)
    .value_name("FILE")
    .help("Check the specified lock file")
}
/// `--lock-write`: write the lock file; only valid together with `--lock`.
fn lock_write_arg<'a, 'b>() -> Arg<'a, 'b> {
  let arg = Arg::with_name("lock-write").requires("lock");
  arg.long("lock-write").help("Write lock file (use with --lock)")
}
/// `-c, --config <FILE>`: tsconfig.json applied during compilation.
fn config_arg<'a, 'b>() -> Arg<'a, 'b> {
  let cfg = Arg::with_name("config").long("config").short("c");
  cfg
    .takes_value(true)
    .value_name("FILE")
    .help("Load tsconfig.json configuration file")
}
/// `--no-remote`: forbid resolution of remote modules.
fn no_remote_arg<'a, 'b>() -> Arg<'a, 'b> {
  let arg = Arg::with_name("no-remote");
  arg.help("Do not resolve remote modules").long("no-remote")
}
/// Parses `deno bundle` matches into `DenoSubcommand::Bundle`.
fn bundle_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  compile_args_parse(flags, matches);
  // `source_file` is required, so unwrap is safe.
  let source_file = String::from(matches.value_of("source_file").unwrap());
  // Writing the bundle to disk implies write permission for the output.
  let out_file = matches.value_of("out_file").map(|out_file| {
    flags.allow_write = Some(vec![]);
    PathBuf::from(out_file)
  });
  flags.watch = matches.is_present("watch");
  flags.subcommand = DenoSubcommand::Bundle {
    source_file,
    out_file,
  };
}
/// Parses `deno cache` matches into `DenoSubcommand::Cache`.
fn cache_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  compile_args_parse(flags, matches);
  // `file` is required and multiple, so unwrap is safe.
  let files: Vec<String> = matches
    .values_of("file")
    .unwrap()
    .map(ToOwned::to_owned)
    .collect();
  flags.subcommand = DenoSubcommand::Cache { files };
}
/// Parses `deno compile` matches into `DenoSubcommand::Compile`.
fn compile_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  runtime_args_parse(flags, matches, true, false);
  let mut script_values = matches.values_of("script_arg").unwrap();
  // `script_arg` is required, so the first value always exists; the rest
  // become the embedded program's argv.
  let source_file = script_values.next().unwrap().to_string();
  let args: Vec<String> = script_values.map(String::from).collect();
  let output = matches.value_of("output").map(PathBuf::from);
  let target = matches.value_of("target").map(String::from);
  flags.subcommand = DenoSubcommand::Compile {
    source_file,
    output,
    args,
    target,
  };
}
/// Generates the completion script for the requested shell into an
/// in-memory buffer stored on the subcommand.
fn completions_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  // `shell` is required and restricted to `clap::Shell::variants()`, so
  // both unwraps below cannot fail.
  let shell: &str = matches.value_of("shell").unwrap();
  let mut buf: Vec<u8> = vec![];
  clap_root(&*crate::version::deno()).gen_completions_to(
    "deno",
    clap::Shell::from_str(shell).unwrap(),
    &mut buf,
  );
  flags.subcommand = DenoSubcommand::Completions {
    buf: buf.into_boxed_slice(),
  };
}
/// Parses `deno coverage` matches into `DenoSubcommand::Coverage`.
fn coverage_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  // Idiom: absent multi-value options collapse to an empty Vec via
  // `map(...).unwrap_or_default()` instead of four explicit matches.
  let files = matches
    .values_of("files")
    .map(|f| f.map(PathBuf::from).collect())
    .unwrap_or_default();
  let ignore = matches
    .values_of("ignore")
    .map(|f| f.map(PathBuf::from).collect())
    .unwrap_or_default();
  let include = matches
    .values_of("include")
    .map(|f| f.map(String::from).collect())
    .unwrap_or_default();
  let exclude = matches
    .values_of("exclude")
    .map(|f| f.map(String::from).collect())
    .unwrap_or_default();
  let lcov = matches.is_present("lcov");
  flags.subcommand = DenoSubcommand::Coverage {
    files,
    ignore,
    include,
    exclude,
    lcov,
  };
}
/// Parses `deno doc` matches into `DenoSubcommand::Doc`.
fn doc_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  import_map_arg_parse(flags, matches);
  reload_arg_parse(flags, matches);
  // All options are optional; read them directly into the subcommand.
  flags.subcommand = DenoSubcommand::Doc {
    source_file: matches.value_of("source_file").map(String::from),
    json: matches.is_present("json"),
    filter: matches.value_of("filter").map(String::from),
    private: matches.is_present("private"),
  };
}
/// Parses `deno eval` matches into `DenoSubcommand::Eval`.
///
/// `deno eval` implicitly runs with every permission enabled.
fn eval_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  runtime_args_parse(flags, matches, false, true);
  flags.allow_net = Some(vec![]);
  flags.allow_env = Some(vec![]);
  flags.allow_run = Some(vec![]);
  flags.allow_read = Some(vec![]);
  flags.allow_write = Some(vec![]);
  flags.allow_plugin = true;
  flags.allow_hrtime = true;
  // TODO(@satyarohith): remove this flag in 2.0.
  // Legacy `--ts` forces TypeScript; otherwise honor `--ext` (default "js").
  let ext = if matches.is_present("ts") {
    "ts".to_string()
  } else {
    matches.value_of("ext").unwrap().to_string()
  };
  let print = matches.is_present("print");
  let mut code: Vec<String> = matches
    .values_of("code_arg")
    .unwrap()
    .map(String::from)
    .collect();
  // `code_arg` is required, so there is always at least the source string.
  assert!(!code.is_empty());
  // Idiom: extend argv in one call instead of a push loop; everything
  // after the first value is forwarded to the evaluated code as argv.
  flags.argv.extend(code.split_off(1));
  let code = code.remove(0);
  flags.subcommand = DenoSubcommand::Eval { print, code, ext };
}
/// Parses `deno fmt` matches into `DenoSubcommand::Fmt`.
fn fmt_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  flags.watch = matches.is_present("watch");
  // Missing path lists become empty vectors.
  let files = matches
    .values_of("files")
    .map(|f| f.map(PathBuf::from).collect())
    .unwrap_or_else(Vec::new);
  let ignore = matches
    .values_of("ignore")
    .map(|f| f.map(PathBuf::from).collect())
    .unwrap_or_else(Vec::new);
  flags.subcommand = DenoSubcommand::Fmt {
    check: matches.is_present("check"),
    // `ext` has a default value, so unwrap is safe.
    ext: matches.value_of("ext").unwrap().to_string(),
    files,
    ignore,
  }
}
/// Parses `deno info` matches into `DenoSubcommand::Info`.
fn info_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  reload_arg_parse(flags, matches);
  import_map_arg_parse(flags, matches);
  location_arg_parse(flags, matches);
  ca_file_arg_parse(flags, matches);
  flags.subcommand = DenoSubcommand::Info {
    file: matches.value_of("file").map(String::from),
    json: matches.is_present("json"),
  };
}
/// Parses `deno install` matches into `DenoSubcommand::Install`.
fn install_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  runtime_args_parse(flags, matches, true, true);
  // Idiom: `Option::map` replaces the is_present/unwrap dance.
  let root = matches.value_of("root").map(PathBuf::from);
  let force = matches.is_present("force");
  let name = matches.value_of("name").map(ToOwned::to_owned);
  // `cmd` is required: first value is the module URL, the rest become the
  // installed script's argv.
  let mut cmd: Vec<String> = matches
    .values_of("cmd")
    .unwrap()
    .map(String::from)
    .collect();
  let args = cmd.split_off(1);
  let module_url = cmd.remove(0);
  flags.subcommand = DenoSubcommand::Install {
    name,
    module_url,
    args,
    root,
    force,
  };
}
/// Parses `deno lsp`; the subcommand takes no options.
fn lsp_parse(flags: &mut Flags, _matches: &clap::ArgMatches) {
  flags.subcommand = DenoSubcommand::Lsp;
}
/// Parses `deno lint` matches into `DenoSubcommand::Lint`.
fn lint_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  // Both path lists share the same "absent means empty" handling.
  let collect_paths = |name: &str| -> Vec<PathBuf> {
    match matches.values_of(name) {
      Some(f) => f.map(PathBuf::from).collect(),
      None => Vec::new(),
    }
  };
  flags.subcommand = DenoSubcommand::Lint {
    files: collect_paths("files"),
    rules: matches.is_present("rules"),
    ignore: collect_paths("ignore"),
    json: matches.is_present("json"),
  };
}
/// Parses `deno repl` matches; the REPL runs with all permissions.
fn repl_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  runtime_args_parse(flags, matches, false, true);
  flags.repl = true;
  flags.subcommand = DenoSubcommand::Repl;
  // Empty allowlists mean "allow everything" for each permission.
  flags.allow_net = Some(Vec::new());
  flags.allow_env = Some(Vec::new());
  flags.allow_run = Some(Vec::new());
  flags.allow_read = Some(Vec::new());
  flags.allow_write = Some(Vec::new());
  flags.allow_plugin = true;
  flags.allow_hrtime = true;
}
/// Parses `deno run` matches into `DenoSubcommand::Run`.
fn run_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  runtime_args_parse(flags, matches, true, true);
  let mut script_values = matches.values_of("script_arg").unwrap();
  // `script_arg` is required: the first value is the module to run and
  // every following value becomes the script's argv.
  let script = script_values.next().unwrap().to_string();
  flags.argv.extend(script_values.map(String::from));
  flags.watch = matches.is_present("watch");
  flags.subcommand = DenoSubcommand::Run { script };
}
/// Parses `deno test` matches into `DenoSubcommand::Test`.
fn test_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  runtime_args_parse(flags, matches, true, true);
  let no_run = matches.is_present("no-run");
  let doc = matches.is_present("doc");
  let allow_none = matches.is_present("allow-none");
  // NOTE(review): "quiet" is not registered by `test_subcommand` in this
  // file chunk — presumably a global flag on the root app; confirm.
  let quiet = matches.is_present("quiet");
  let filter = matches.value_of("filter").map(String::from);
  // Bare `--fail-fast` means stop after the first failure (N = 1). The
  // value was validated as usize > 0, so `parse().unwrap()` cannot fail.
  let fail_fast = if matches.is_present("fail-fast") {
    if let Some(value) = matches.value_of("fail-fast") {
      Some(value.parse().unwrap())
    } else {
      Some(1)
    }
  } else {
    None
  };
  // Bare `--shuffle` picks a random seed.
  let shuffle = if matches.is_present("shuffle") {
    let value = if let Some(value) = matches.value_of("shuffle") {
      value.parse::<u64>().unwrap()
    } else {
      rand::random::<u64>()
    };
    Some(value)
  } else {
    None
  };
  // Values captured after `--` are forwarded to the tests as argv.
  if matches.is_present("script_arg") {
    let script_arg: Vec<String> = matches
      .values_of("script_arg")
      .unwrap()
      .map(String::from)
      .collect();
    for v in script_arg {
      flags.argv.push(v);
    }
  }
  // Bare `-j/--jobs` means one worker per CPU; absent means 1 worker.
  let concurrent_jobs = if matches.is_present("jobs") {
    if let Some(value) = matches.value_of("jobs") {
      value.parse().unwrap()
    } else {
      // TODO(caspervonb) drop the dependency on num_cpus when https://doc.rust-lang.org/std/thread/fn.available_concurrency.html becomes stable.
      num_cpus::get()
    }
  } else {
    1
  };
  let include = if matches.is_present("files") {
    let files: Vec<String> = matches
      .values_of("files")
      .unwrap()
      .map(String::from)
      .collect();
    Some(files)
  } else {
    None
  };
  flags.coverage_dir = matches.value_of("coverage").map(String::from);
  flags.watch = matches.is_present("watch");
  flags.subcommand = DenoSubcommand::Test {
    no_run,
    doc,
    fail_fast,
    quiet,
    include,
    filter,
    shuffle,
    allow_none,
    concurrent_jobs,
  };
}
/// Parses `deno types`; the subcommand takes no options.
fn types_parse(flags: &mut Flags, _matches: &clap::ArgMatches) {
  flags.subcommand = DenoSubcommand::Types;
}
/// Parses `deno upgrade` matches into `DenoSubcommand::Upgrade`.
fn upgrade_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  ca_file_arg_parse(flags, matches);
  let dry_run = matches.is_present("dry-run");
  let force = matches.is_present("force");
  let canary = matches.is_present("canary");
  let version = matches.value_of("version").map(ToOwned::to_owned);
  // `--output` redirects the downloaded binary instead of replacing the
  // current executable. Idiom: `Option::map` replaces the is_present/
  // unwrap dance (and drops the misleading `install_root` temp name).
  let output = matches.value_of("output").map(PathBuf::from);
  let ca_file = matches.value_of("cert").map(ToOwned::to_owned);
  flags.subcommand = DenoSubcommand::Upgrade {
    dry_run,
    force,
    canary,
    version,
    output,
    ca_file,
  };
}
/// Applies the shared module-compilation flags (counterpart of
/// `compile_args`).
fn compile_args_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  import_map_arg_parse(flags, matches);
  no_remote_arg_parse(flags, matches);
  config_arg_parse(flags, matches);
  no_check_arg_parse(flags, matches);
  reload_arg_parse(flags, matches);
  lock_args_parse(flags, matches);
  ca_file_arg_parse(flags, matches);
}
/// Copies the `--allow-*` permission flags from clap matches onto `flags`.
///
/// For list-taking permissions, `Some(vec![])` means "allow everything"
/// while `None` means the permission was not granted at all.
fn permission_args_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  if let Some(read_wl) = matches.values_of("allow-read") {
    let read_allowlist: Vec<PathBuf> = read_wl.map(PathBuf::from).collect();
    flags.allow_read = Some(read_allowlist);
  }
  if let Some(write_wl) = matches.values_of("allow-write") {
    let write_allowlist: Vec<PathBuf> = write_wl.map(PathBuf::from).collect();
    flags.allow_write = Some(write_allowlist);
  }
  if let Some(net_wl) = matches.values_of("allow-net") {
    // Already validated by `flags_allow_net::validator`, so unwrap is safe.
    let net_allowlist: Vec<String> =
      crate::flags_allow_net::parse(net_wl.map(ToString::to_string).collect())
        .unwrap();
    flags.allow_net = Some(net_allowlist);
    debug!("net allowlist: {:#?}", &flags.allow_net);
  }
  if let Some(env_wl) = matches.values_of("allow-env") {
    // Environment variables are case-insensitive on Windows, so normalize
    // names to upper case there.
    let env_allowlist: Vec<String> = env_wl
      .map(|env: &str| {
        if cfg!(windows) {
          env.to_uppercase()
        } else {
          env.to_string()
        }
      })
      .collect();
    flags.allow_env = Some(env_allowlist);
    debug!("env allowlist: {:#?}", &flags.allow_env);
  }
  if let Some(run_wl) = matches.values_of("allow-run") {
    let run_allowlist: Vec<String> = run_wl.map(ToString::to_string).collect();
    flags.allow_run = Some(run_allowlist);
    debug!("run allowlist: {:#?}", &flags.allow_run);
  }
  if matches.is_present("allow-plugin") {
    flags.allow_plugin = true;
  }
  if matches.is_present("allow-hrtime") {
    flags.allow_hrtime = true;
  }
  // `-A/--allow-all` expands into every individual permission.
  if matches.is_present("allow-all") {
    flags.allow_read = Some(vec![]);
    flags.allow_env = Some(vec![]);
    flags.allow_net = Some(vec![]);
    flags.allow_run = Some(vec![]);
    flags.allow_write = Some(vec![]);
    flags.allow_plugin = true;
    flags.allow_hrtime = true;
  }
  if matches.is_present("prompt") {
    flags.prompt = true;
  }
}
/// Parses the flags common to commands that execute user code
/// (`run`, `test`, `eval`, `repl`, `install`, ...).
///
/// `include_perms` / `include_inspector` state whether the calling
/// subcommand defines the permission and inspector arguments, so we only
/// look them up when they exist.
fn runtime_args_parse(
  flags: &mut Flags,
  matches: &clap::ArgMatches,
  include_perms: bool,
  include_inspector: bool,
) {
  compile_args_parse(flags, matches);
  cached_only_arg_parse(flags, matches);
  if include_perms {
    permission_args_parse(flags, matches);
  }
  if include_inspector {
    inspect_arg_parse(flags, matches);
  }
  location_arg_parse(flags, matches);
  // v8 flags must be parsed before the seed: seed_arg_parse appends
  // `--random-seed=N` to `flags.v8_flags`.
  v8_flags_arg_parse(flags, matches);
  seed_arg_parse(flags, matches);
  // BUG FIX: a second, unconditional `inspect_arg_parse(flags, matches)`
  // used to follow here, defeating the `include_inspector` gate above and
  // redundantly re-parsing the inspector flags. It has been removed.
}
/// Reads the `--inspect` / `--inspect-brk` host:port values.
/// A bare flag with no value falls back to the default 127.0.0.1:9229.
fn inspect_arg_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  let default = || "127.0.0.1:9229".parse::<SocketAddr>().unwrap();
  // Shared logic for both flags: absent -> None; present with a value ->
  // parsed address (values are validated by clap via inspect_arg_validate,
  // so parse() should not fail — TODO confirm); bare flag -> default.
  let host_for = |arg: &str| -> Option<SocketAddr> {
    if !matches.is_present(arg) {
      return None;
    }
    match matches.value_of(arg) {
      Some(host) => Some(host.parse().unwrap()),
      None => Some(default()),
    }
  };
  flags.inspect = host_for("inspect");
  flags.inspect_brk = host_for("inspect-brk");
}
fn | (flags: &mut Flags, matches: &clap::ArgMatches) {
flags.import_map_path = matches.value_of("import-map").map(ToOwned::to_owned);
}
/// Reads `--reload`. With no value it forces a full module-cache reload;
/// with a comma-separated URL list it only blocklists those URLs from the
/// cache and leaves `flags.reload` off.
fn reload_arg_parse(flags: &mut Flags, matches: &ArgMatches) {
  let cache_bl = match matches.values_of("reload") {
    Some(values) => values,
    None => return, // flag not given — leave defaults untouched
  };
  let raw_cache_blocklist: Vec<String> =
    cache_bl.map(ToString::to_string).collect();
  if raw_cache_blocklist.is_empty() {
    flags.reload = true;
  } else {
    flags.cache_blocklist = resolve_urls(raw_cache_blocklist);
    debug!("cache blocklist: {:#?}", &flags.cache_blocklist);
    flags.reload = false;
  }
}
/// Reads `--cert` (a CA certificate file for TLS) into `flags.ca_file`.
fn ca_file_arg_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  flags.ca_file = matches.value_of("cert").map(ToOwned::to_owned);
}
/// Reads the `--cached-only` boolean flag.
fn cached_only_arg_parse(flags: &mut Flags, matches: &ArgMatches) {
  // `|=` only flips the field to true when the flag is present,
  // leaving the default `false` otherwise — same effect as the old `if`.
  flags.cached_only |= matches.is_present("cached-only");
}
/// Reads `--location` and parses it into a `Url` (used as the value of
/// `globalThis.location` at runtime).
/// NOTE(review): `Url::parse(..).unwrap()` panics on an invalid URL —
/// presumably clap attaches a validator to the "location" arg; TODO confirm.
fn location_arg_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  flags.location = matches
    .value_of("location")
    .map(|href| Url::parse(href).unwrap());
}
/// Collects the comma-separated `--v8-flags` values into `flags.v8_flags`.
/// Must run before `seed_arg_parse`, which appends `--random-seed=N`
/// to the same vector.
fn v8_flags_arg_parse(flags: &mut Flags, matches: &ArgMatches) {
  if let Some(v8_flags) = matches.values_of("v8-flags") {
    flags.v8_flags = v8_flags.map(String::from).collect();
  }
}
/// Reads `--seed`, storing it in `flags.seed` and forwarding it to V8
/// as an extra `--random-seed=N` v8 flag so V8's PRNG is deterministic.
/// NOTE(review): `parse::<u64>().unwrap()` panics on a non-numeric value —
/// presumably a clap validator rejects those earlier; TODO confirm.
fn seed_arg_parse(flags: &mut Flags, matches: &ArgMatches) {
  if matches.is_present("seed") {
    let seed_string = matches.value_of("seed").unwrap()
    let seed = seed_string.parse::<u64>().unwrap();
    flags.seed = Some(seed);
    flags.v8_flags.push(format!("--random-seed={}", seed));
  }
}
/// Reads `--no-check`, which disables TypeScript type checking.
fn no_check_arg_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  // `|=` leaves the field at its default `false` when the flag is absent.
  flags.no_check |= matches.is_present("no-check");
}
/// Reads the lockfile options: `--lock <file>` and `--lock-write`.
fn lock_args_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  // `--lock` always takes a value, so `value_of` is `Some` exactly when
  // the flag is present — no separate `is_present` + `unwrap` needed.
  if let Some(lockfile) = matches.value_of("lock") {
    flags.lock = Some(PathBuf::from(lockfile));
  }
  flags.lock_write |= matches.is_present("lock-write");
}
/// Reads `--config` (tsconfig path) into `flags.config_path`.
fn config_arg_parse(flags: &mut Flags, matches: &ArgMatches) {
  flags.config_path = matches.value_of("config").map(ToOwned::to_owned);
}
/// Reads `--no-remote`, which forbids resolving remote modules.
fn no_remote_arg_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
  if matches.is_present("no-remote") {
    flags.no_remote = true;
  }
}
/// clap validator for `--inspect`/`--inspect-brk` values: accepts any
/// string that parses as a socket address, otherwise returns the parse
/// error's message for clap to display.
fn inspect_arg_validate(val: String) -> Result<(), String> {
  val
    .parse::<SocketAddr>()
    .map(|_| ())
    .map_err(|e| e.to_string())
}
// TODO(ry) move this to utility module and add test.
/// Normalizes each URL for cache-blocklist comparison: strips the fragment
/// and trims a single trailing `/` (so `https://example.com/` matches
/// `https://example.com`). Panics on a URL that fails to parse.
pub fn resolve_urls(urls: Vec<String>) -> Vec<String> {
  urls
    .iter()
    .map(|urlstr| {
      let mut url = Url::from_str(urlstr)
        .unwrap_or_else(|_| panic!("Bad Url: {}", urlstr));
      url.set_fragment(None);
      let mut full_url = String::from(url.as_str());
      // Keep a bare "/" intact, but drop the trailing slash the Url
      // serialization carries for root paths.
      if full_url.len() > 1 && full_url.ends_with('/') {
        full_url.pop();
      }
      full_url
    })
    .collect()
}
#[cfg(test)]
mod tests {
use super::*;
/// Creates vector of strings, Vec<String>
macro_rules! svec {
($($x:expr),*) => (vec![$($x.to_string()),*]);
}
#[test]
fn global_flags() {
#[rustfmt::skip]
let r = flags_from_vec(svec!["deno", "--unstable", "--log-level", "debug", "--quiet", "run", "script.ts"]);
let flags = r.unwrap();
assert_eq!(
flags,
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
unstable: true,
log_level: Some(Level::Error),
..Flags::default()
}
);
#[rustfmt::skip]
let r2 = flags_from_vec(svec!["deno", "run", "--unstable", "--log-level", "debug", "--quiet", "script.ts"]);
let flags2 = r2.unwrap();
assert_eq!(flags2, flags);
}
#[test]
fn upgrade() {
let r = flags_from_vec(svec!["deno", "upgrade", "--dry-run", "--force"]);
let flags = r.unwrap();
assert_eq!(
flags,
Flags {
subcommand: DenoSubcommand::Upgrade {
force: true,
dry_run: true,
canary: false,
version: None,
output: None,
ca_file: None,
},
..Flags::default()
}
);
}
#[test]
fn version() {
let r = flags_from_vec(svec!["deno", "--version"]);
assert_eq!(r.unwrap_err().kind, clap::ErrorKind::VersionDisplayed);
let r = flags_from_vec(svec!["deno", "-V"]);
assert_eq!(r.unwrap_err().kind, clap::ErrorKind::VersionDisplayed);
}
#[test]
fn run_reload() {
let r = flags_from_vec(svec!["deno", "run", "-r", "script.ts"]);
let flags = r.unwrap();
assert_eq!(
flags,
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
reload: true,
..Flags::default()
}
);
}
#[test]
fn run_watch() {
let r = flags_from_vec(svec!["deno", "run", "--watch", "script.ts"]);
let flags = r.unwrap();
assert_eq!(
flags,
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
watch: true,
..Flags::default()
}
);
}
#[test]
fn run_reload_allow_write() {
let r =
flags_from_vec(svec!["deno", "run", "-r", "--allow-write", "script.ts"]);
assert_eq!(
r.unwrap(),
Flags {
reload: true,
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
allow_write: Some(vec![]),
..Flags::default()
}
);
}
#[test]
fn run_v8_flags() {
let r = flags_from_vec(svec!["deno", "run", "--v8-flags=--help"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "_".to_string(),
},
v8_flags: svec!["--help"],
..Flags::default()
}
);
let r = flags_from_vec(svec![
"deno",
"run",
"--v8-flags=--expose-gc,--gc-stats=1",
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
v8_flags: svec!["--expose-gc", "--gc-stats=1"],
..Flags::default()
}
);
}
#[test]
fn script_args() {
let r = flags_from_vec(svec![
"deno",
"run",
"--allow-net",
"gist.ts",
"--title",
"X"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "gist.ts".to_string(),
},
argv: svec!["--title", "X"],
allow_net: Some(vec![]),
..Flags::default()
}
);
}
#[test]
fn allow_all() {
let r = flags_from_vec(svec!["deno", "run", "--allow-all", "gist.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "gist.ts".to_string(),
},
allow_net: Some(vec![]),
allow_env: Some(vec![]),
allow_run: Some(vec![]),
allow_read: Some(vec![]),
allow_write: Some(vec![]),
allow_plugin: true,
allow_hrtime: true,
..Flags::default()
}
);
}
#[test]
fn allow_read() {
let r = flags_from_vec(svec!["deno", "run", "--allow-read", "gist.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "gist.ts".to_string(),
},
allow_read: Some(vec![]),
..Flags::default()
}
);
}
#[test]
fn allow_hrtime() {
let r = flags_from_vec(svec!["deno", "run", "--allow-hrtime", "gist.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "gist.ts".to_string(),
},
allow_hrtime: true,
..Flags::default()
}
);
}
#[test]
fn double_hyphen() {
// notice that flags passed after double dash will not
// be parsed to Flags but instead forwarded to
// script args as Deno.args
let r = flags_from_vec(svec![
"deno",
"run",
"--allow-write",
"script.ts",
"--",
"-D",
"--allow-net"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
argv: svec!["--", "-D", "--allow-net"],
allow_write: Some(vec![]),
..Flags::default()
}
);
}
#[test]
fn fmt() {
let r = flags_from_vec(svec!["deno", "fmt", "script_1.ts", "script_2.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Fmt {
ignore: vec![],
check: false,
files: vec![
PathBuf::from("script_1.ts"),
PathBuf::from("script_2.ts")
],
ext: "ts".to_string()
},
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "fmt", "--check"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Fmt {
ignore: vec![],
check: true,
files: vec![],
ext: "ts".to_string(),
},
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "fmt"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Fmt {
ignore: vec![],
check: false,
files: vec![],
ext: "ts".to_string(),
},
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "fmt", "--watch"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Fmt {
ignore: vec![],
check: false,
files: vec![],
ext: "ts".to_string(),
},
watch: true,
..Flags::default()
}
);
let r = flags_from_vec(svec![
"deno",
"fmt",
"--check",
"--watch",
"foo.ts",
"--ignore=bar.js"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Fmt {
ignore: vec![PathBuf::from("bar.js")],
check: true,
files: vec![PathBuf::from("foo.ts")],
ext: "ts".to_string(),
},
watch: true,
..Flags::default()
}
);
}
#[test]
fn lint() {
let r = flags_from_vec(svec!["deno", "lint", "script_1.ts", "script_2.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Lint {
files: vec![
PathBuf::from("script_1.ts"),
PathBuf::from("script_2.ts")
],
rules: false,
json: false,
ignore: vec![],
},
..Flags::default()
}
);
let r =
flags_from_vec(svec!["deno", "lint", "--ignore=script_1.ts,script_2.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Lint {
files: vec![],
rules: false,
json: false,
ignore: vec![
PathBuf::from("script_1.ts"),
PathBuf::from("script_2.ts")
],
},
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "lint", "--rules"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Lint {
files: vec![],
rules: true,
json: false,
ignore: vec![],
},
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "lint", "--json", "script_1.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Lint {
files: vec![PathBuf::from("script_1.ts")],
rules: false,
json: true,
ignore: vec![],
},
..Flags::default()
}
);
}
#[test]
fn types() {
let r = flags_from_vec(svec!["deno", "types"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Types,
..Flags::default()
}
);
}
#[test]
fn cache() {
let r = flags_from_vec(svec!["deno", "cache", "script.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Cache {
files: svec!["script.ts"],
},
..Flags::default()
}
);
}
#[test]
fn info() {
let r = flags_from_vec(svec!["deno", "info", "script.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Info {
json: false,
file: Some("script.ts".to_string()),
},
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "info", "--reload", "script.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Info {
json: false,
file: Some("script.ts".to_string()),
},
reload: true,
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "info", "--json", "script.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Info {
json: true,
file: Some("script.ts".to_string()),
},
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "info"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Info {
json: false,
file: None
},
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "info", "--json"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Info {
json: true,
file: None
},
..Flags::default()
}
);
}
#[test]
fn tsconfig() {
let r =
flags_from_vec(svec!["deno", "run", "-c", "tsconfig.json", "script.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
config_path: Some("tsconfig.json".to_owned()),
..Flags::default()
}
);
}
#[test]
fn eval() {
let r = flags_from_vec(svec!["deno", "eval", "'console.log(\"hello\")'"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Eval {
print: false,
code: "'console.log(\"hello\")'".to_string(),
ext: "js".to_string(),
},
allow_net: Some(vec![]),
allow_env: Some(vec![]),
allow_run: Some(vec![]),
allow_read: Some(vec![]),
allow_write: Some(vec![]),
allow_plugin: true,
allow_hrtime: true,
..Flags::default()
}
);
}
#[test]
fn eval_p() {
let r = flags_from_vec(svec!["deno", "eval", "-p", "1+2"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Eval {
print: true,
code: "1+2".to_string(),
ext: "js".to_string(),
},
allow_net: Some(vec![]),
allow_env: Some(vec![]),
allow_run: Some(vec![]),
allow_read: Some(vec![]),
allow_write: Some(vec![]),
allow_plugin: true,
allow_hrtime: true,
..Flags::default()
}
);
}
#[test]
fn eval_typescript() {
let r =
flags_from_vec(svec!["deno", "eval", "-T", "'console.log(\"hello\")'"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Eval {
print: false,
code: "'console.log(\"hello\")'".to_string(),
ext: "ts".to_string(),
},
allow_net: Some(vec![]),
allow_env: Some(vec![]),
allow_run: Some(vec![]),
allow_read: Some(vec![]),
allow_write: Some(vec![]),
allow_plugin: true,
allow_hrtime: true,
..Flags::default()
}
);
}
#[test]
fn eval_with_flags() {
#[rustfmt::skip]
let r = flags_from_vec(svec!["deno", "eval", "--import-map", "import_map.json", "--no-remote", "--config", "tsconfig.json", "--no-check", "--reload", "--lock", "lock.json", "--lock-write", "--cert", "example.crt", "--cached-only", "--location", "https:foo", "--v8-flags=--help", "--seed", "1", "--inspect=127.0.0.1:9229", "42"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Eval {
print: false,
code: "42".to_string(),
ext: "js".to_string(),
},
import_map_path: Some("import_map.json".to_string()),
no_remote: true,
config_path: Some("tsconfig.json".to_string()),
no_check: true,
reload: true,
lock: Some(PathBuf::from("lock.json")),
lock_write: true,
ca_file: Some("example.crt".to_string()),
cached_only: true,
location: Some(Url::parse("https://foo/").unwrap()),
v8_flags: svec!["--help", "--random-seed=1"],
seed: Some(1),
inspect: Some("127.0.0.1:9229".parse().unwrap()),
allow_net: Some(vec![]),
allow_env: Some(vec![]),
allow_run: Some(vec![]),
allow_read: Some(vec![]),
allow_write: Some(vec![]),
allow_plugin: true,
allow_hrtime: true,
..Flags::default()
}
);
}
#[test]
fn eval_args() {
let r = flags_from_vec(svec![
"deno",
"eval",
"console.log(Deno.args)",
"arg1",
"arg2"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Eval {
print: false,
code: "console.log(Deno.args)".to_string(),
ext: "js".to_string(),
},
argv: svec!["arg1", "arg2"],
allow_net: Some(vec![]),
allow_env: Some(vec![]),
allow_run: Some(vec![]),
allow_read: Some(vec![]),
allow_write: Some(vec![]),
allow_plugin: true,
allow_hrtime: true,
..Flags::default()
}
);
}
#[test]
fn repl() {
let r = flags_from_vec(svec!["deno"]);
assert_eq!(
r.unwrap(),
Flags {
repl: true,
subcommand: DenoSubcommand::Repl,
allow_net: Some(vec![]),
allow_env: Some(vec![]),
allow_run: Some(vec![]),
allow_read: Some(vec![]),
allow_write: Some(vec![]),
allow_plugin: true,
allow_hrtime: true,
..Flags::default()
}
);
}
#[test]
fn repl_with_flags() {
#[rustfmt::skip]
let r = flags_from_vec(svec!["deno", "repl", "--import-map", "import_map.json", "--no-remote", "--config", "tsconfig.json", "--no-check", "--reload", "--lock", "lock.json", "--lock-write", "--cert", "example.crt", "--cached-only", "--location", "https:foo", "--v8-flags=--help", "--seed", "1", "--inspect=127.0.0.1:9229"]);
assert_eq!(
r.unwrap(),
Flags {
repl: true,
subcommand: DenoSubcommand::Repl,
import_map_path: Some("import_map.json".to_string()),
no_remote: true,
config_path: Some("tsconfig.json".to_string()),
no_check: true,
reload: true,
lock: Some(PathBuf::from("lock.json")),
lock_write: true,
ca_file: Some("example.crt".to_string()),
cached_only: true,
location: Some(Url::parse("https://foo/").unwrap()),
v8_flags: svec!["--help", "--random-seed=1"],
seed: Some(1),
inspect: Some("127.0.0.1:9229".parse().unwrap()),
allow_net: Some(vec![]),
allow_env: Some(vec![]),
allow_run: Some(vec![]),
allow_read: Some(vec![]),
allow_write: Some(vec![]),
allow_plugin: true,
allow_hrtime: true,
..Flags::default()
}
);
}
#[test]
fn allow_read_allowlist() {
use tempfile::TempDir;
let temp_dir = TempDir::new().expect("tempdir fail").path().to_path_buf();
let r = flags_from_vec(svec![
"deno",
"run",
format!("--allow-read=.,{}", temp_dir.to_str().unwrap()),
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
allow_read: Some(vec![PathBuf::from("."), temp_dir]),
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
..Flags::default()
}
);
}
#[test]
fn allow_write_allowlist() {
use tempfile::TempDir;
let temp_dir = TempDir::new().expect("tempdir fail").path().to_path_buf();
let r = flags_from_vec(svec![
"deno",
"run",
format!("--allow-write=.,{}", temp_dir.to_str().unwrap()),
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
allow_write: Some(vec![PathBuf::from("."), temp_dir]),
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
..Flags::default()
}
);
}
#[test]
fn allow_net_allowlist() {
let r = flags_from_vec(svec![
"deno",
"run",
"--allow-net=127.0.0.1",
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
allow_net: Some(svec!["127.0.0.1"]),
..Flags::default()
}
);
}
#[test]
fn allow_env_allowlist() {
let r =
flags_from_vec(svec!["deno", "run", "--allow-env=HOME", "script.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
allow_env: Some(svec!["HOME"]),
..Flags::default()
}
);
}
#[test]
fn allow_env_allowlist_multiple() {
let r = flags_from_vec(svec![
"deno",
"run",
"--allow-env=HOME,PATH",
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
allow_env: Some(svec!["HOME", "PATH"]),
..Flags::default()
}
);
}
#[test]
fn allow_env_allowlist_validator() {
let r =
flags_from_vec(svec!["deno", "run", "--allow-env=HOME", "script.ts"]);
assert!(r.is_ok());
let r =
flags_from_vec(svec!["deno", "run", "--allow-env=H=ME", "script.ts"]);
assert!(r.is_err());
let r =
flags_from_vec(svec!["deno", "run", "--allow-env=H\0ME", "script.ts"]);
assert!(r.is_err());
}
#[test]
fn bundle() {
let r = flags_from_vec(svec!["deno", "bundle", "source.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Bundle {
source_file: "source.ts".to_string(),
out_file: None,
},
..Flags::default()
}
);
}
#[test]
fn bundle_with_config() {
let r = flags_from_vec(svec![
"deno",
"bundle",
"--no-remote",
"--config",
"tsconfig.json",
"source.ts",
"bundle.js"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Bundle {
source_file: "source.ts".to_string(),
out_file: Some(PathBuf::from("bundle.js")),
},
allow_write: Some(vec![]),
no_remote: true,
config_path: Some("tsconfig.json".to_owned()),
..Flags::default()
}
);
}
#[test]
fn bundle_with_output() {
let r = flags_from_vec(svec!["deno", "bundle", "source.ts", "bundle.js"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Bundle {
source_file: "source.ts".to_string(),
out_file: Some(PathBuf::from("bundle.js")),
},
allow_write: Some(vec![]),
..Flags::default()
}
);
}
#[test]
fn bundle_with_lock() {
let r = flags_from_vec(svec![
"deno",
"bundle",
"--lock-write",
"--lock=lock.json",
"source.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Bundle {
source_file: "source.ts".to_string(),
out_file: None,
},
lock_write: true,
lock: Some(PathBuf::from("lock.json")),
..Flags::default()
}
);
}
#[test]
fn bundle_with_reload() {
let r = flags_from_vec(svec!["deno", "bundle", "--reload", "source.ts"]);
assert_eq!(
r.unwrap(),
Flags {
reload: true,
subcommand: DenoSubcommand::Bundle {
source_file: "source.ts".to_string(),
out_file: None,
},
..Flags::default()
}
);
}
#[test]
fn bundle_nocheck() {
let r = flags_from_vec(svec!["deno", "bundle", "--no-check", "script.ts"])
.unwrap();
assert_eq!(
r,
Flags {
subcommand: DenoSubcommand::Bundle {
source_file: "script.ts".to_string(),
out_file: None,
},
no_check: true,
..Flags::default()
}
);
}
#[test]
fn bundle_watch() {
let r = flags_from_vec(svec!["deno", "bundle", "--watch", "source.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Bundle {
source_file: "source.ts".to_string(),
out_file: None,
},
watch: true,
..Flags::default()
}
)
}
#[test]
fn run_import_map() {
let r = flags_from_vec(svec![
"deno",
"run",
"--import-map=import_map.json",
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
import_map_path: Some("import_map.json".to_owned()),
..Flags::default()
}
);
}
#[test]
fn info_import_map() {
let r = flags_from_vec(svec![
"deno",
"info",
"--import-map=import_map.json",
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Info {
file: Some("script.ts".to_string()),
json: false,
},
import_map_path: Some("import_map.json".to_owned()),
..Flags::default()
}
);
}
#[test]
fn cache_import_map() {
let r = flags_from_vec(svec![
"deno",
"cache",
"--import-map=import_map.json",
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Cache {
files: svec!["script.ts"],
},
import_map_path: Some("import_map.json".to_owned()),
..Flags::default()
}
);
}
#[test]
fn doc_import_map() {
let r = flags_from_vec(svec![
"deno",
"doc",
"--import-map=import_map.json",
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Doc {
source_file: Some("script.ts".to_owned()),
private: false,
json: false,
filter: None,
},
import_map_path: Some("import_map.json".to_owned()),
..Flags::default()
}
);
}
#[test]
fn cache_multiple() {
let r =
flags_from_vec(svec!["deno", "cache", "script.ts", "script_two.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Cache {
files: svec!["script.ts", "script_two.ts"],
},
..Flags::default()
}
);
}
#[test]
fn run_seed() {
let r = flags_from_vec(svec!["deno", "run", "--seed", "250", "script.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
seed: Some(250_u64),
v8_flags: svec!["--random-seed=250"],
..Flags::default()
}
);
}
#[test]
fn run_seed_with_v8_flags() {
let r = flags_from_vec(svec![
"deno",
"run",
"--seed",
"250",
"--v8-flags=--expose-gc",
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
seed: Some(250_u64),
v8_flags: svec!["--expose-gc", "--random-seed=250"],
..Flags::default()
}
);
}
#[test]
fn install() {
let r = flags_from_vec(svec![
"deno",
"install",
"https://deno.land/std/examples/colors.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Install {
name: None,
module_url: "https://deno.land/std/examples/colors.ts".to_string(),
args: vec![],
root: None,
force: false,
},
..Flags::default()
}
);
}
#[test]
fn install_with_flags() {
#[rustfmt::skip]
let r = flags_from_vec(svec!["deno", "install", "--import-map", "import_map.json", "--no-remote", "--config", "tsconfig.json", "--no-check", "--reload", "--lock", "lock.json", "--lock-write", "--cert", "example.crt", "--cached-only", "--allow-read", "--allow-net", "--v8-flags=--help", "--seed", "1", "--inspect=127.0.0.1:9229", "--name", "file_server", "--root", "/foo", "--force", "https://deno.land/std/http/file_server.ts", "foo", "bar"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Install {
name: Some("file_server".to_string()),
module_url: "https://deno.land/std/http/file_server.ts".to_string(),
args: svec!["foo", "bar"],
root: Some(PathBuf::from("/foo")),
force: true,
},
import_map_path: Some("import_map.json".to_string()),
no_remote: true,
config_path: Some("tsconfig.json".to_string()),
no_check: true,
reload: true,
lock: Some(PathBuf::from("lock.json")),
lock_write: true,
ca_file: Some("example.crt".to_string()),
cached_only: true,
v8_flags: svec!["--help", "--random-seed=1"],
seed: Some(1),
inspect: Some("127.0.0.1:9229".parse().unwrap()),
allow_net: Some(vec![]),
allow_read: Some(vec![]),
..Flags::default()
}
);
}
#[test]
fn log_level() {
let r =
flags_from_vec(svec!["deno", "run", "--log-level=debug", "script.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
log_level: Some(Level::Debug),
..Flags::default()
}
);
}
#[test]
fn quiet() {
let r = flags_from_vec(svec!["deno", "run", "-q", "script.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
log_level: Some(Level::Error),
..Flags::default()
}
);
}
#[test]
fn completions() {
let r = flags_from_vec(svec!["deno", "completions", "zsh"]).unwrap();
match r.subcommand {
DenoSubcommand::Completions { buf } => assert!(!buf.is_empty()),
_ => unreachable!(),
}
}
#[test]
fn run_with_args() {
let r = flags_from_vec(svec![
"deno",
"run",
"script.ts",
"--allow-read",
"--allow-net"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
argv: svec!["--allow-read", "--allow-net"],
..Flags::default()
}
);
let r = flags_from_vec(svec![
"deno",
"run",
"--location",
"https:foo",
"--allow-read",
"script.ts",
"--allow-net",
"-r",
"--help",
"--foo",
"bar"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
location: Some(Url::parse("https://foo/").unwrap()),
allow_read: Some(vec![]),
argv: svec!["--allow-net", "-r", "--help", "--foo", "bar"],
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "run", "script.ts", "foo", "bar"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
argv: svec!["foo", "bar"],
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "run", "script.ts", "-"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
argv: svec!["-"],
..Flags::default()
}
);
let r =
flags_from_vec(svec!["deno", "run", "script.ts", "-", "foo", "bar"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
argv: svec!["-", "foo", "bar"],
..Flags::default()
}
);
}
#[test]
fn no_check() {
let r = flags_from_vec(svec!["deno", "run", "--no-check", "script.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
no_check: true,
..Flags::default()
}
);
}
#[test]
fn no_remote() {
let r = flags_from_vec(svec!["deno", "run", "--no-remote", "script.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
no_remote: true,
..Flags::default()
}
);
}
#[test]
fn cached_only() {
let r = flags_from_vec(svec!["deno", "run", "--cached-only", "script.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
cached_only: true,
..Flags::default()
}
);
}
#[test]
fn allow_net_allowlist_with_ports() {
let r = flags_from_vec(svec![
"deno",
"run",
"--allow-net=deno.land,:8000,:4545",
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
allow_net: Some(svec![
"deno.land",
"0.0.0.0:8000",
"127.0.0.1:8000",
"localhost:8000",
"0.0.0.0:4545",
"127.0.0.1:4545",
"localhost:4545"
]),
..Flags::default()
}
);
}
#[test]
fn allow_net_allowlist_with_ipv6_address() {
let r = flags_from_vec(svec![
"deno",
"run",
"--allow-net=deno.land,deno.land:80,::,127.0.0.1,[::1],1.2.3.4:5678,:5678,[::1]:8080",
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
allow_net: Some(svec![
"deno.land",
"deno.land:80",
"::",
"127.0.0.1",
"[::1]",
"1.2.3.4:5678",
"0.0.0.0:5678",
"127.0.0.1:5678",
"localhost:5678",
"[::1]:8080"
]),
..Flags::default()
}
);
}
#[test]
fn lock_write() {
let r = flags_from_vec(svec![
"deno",
"run",
"--lock-write",
"--lock=lock.json",
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
lock_write: true,
lock: Some(PathBuf::from("lock.json")),
..Flags::default()
}
);
}
#[test]
fn test_with_flags() {
#[rustfmt::skip]
let r = flags_from_vec(svec!["deno", "test", "--unstable", "--no-run", "--filter", "- foo", "--coverage=cov", "--location", "https:foo", "--allow-net", "--allow-none", "dir1/", "dir2/", "--", "arg1", "arg2"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Test {
no_run: true,
doc: false,
fail_fast: None,
filter: Some("- foo".to_string()),
allow_none: true,
quiet: false,
include: Some(svec!["dir1/", "dir2/"]),
shuffle: None,
concurrent_jobs: 1,
},
unstable: true,
coverage_dir: Some("cov".to_string()),
location: Some(Url::parse("https://foo/").unwrap()),
allow_net: Some(vec![]),
argv: svec!["arg1", "arg2"],
..Flags::default()
}
);
}
#[test]
fn run_with_cafile() {
let r = flags_from_vec(svec![
"deno",
"run",
"--cert",
"example.crt",
"script.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "script.ts".to_string(),
},
ca_file: Some("example.crt".to_owned()),
..Flags::default()
}
);
}
#[test]
fn test_with_fail_fast() {
let r = flags_from_vec(svec!["deno", "test", "--fail-fast=3"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Test {
no_run: false,
doc: false,
fail_fast: Some(3),
filter: None,
allow_none: false,
quiet: false,
shuffle: None,
include: None,
concurrent_jobs: 1,
},
..Flags::default()
}
);
}
#[test]
fn test_watch() {
let r = flags_from_vec(svec!["deno", "test", "--watch"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Test {
no_run: false,
doc: false,
fail_fast: None,
filter: None,
allow_none: false,
quiet: false,
shuffle: None,
include: None,
concurrent_jobs: 1,
},
watch: true,
..Flags::default()
}
);
}
#[test]
fn bundle_with_cafile() {
let r = flags_from_vec(svec![
"deno",
"bundle",
"--cert",
"example.crt",
"source.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Bundle {
source_file: "source.ts".to_string(),
out_file: None,
},
ca_file: Some("example.crt".to_owned()),
..Flags::default()
}
);
}
#[test]
fn upgrade_with_ca_file() {
let r = flags_from_vec(svec!["deno", "upgrade", "--cert", "example.crt"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Upgrade {
force: false,
dry_run: false,
canary: false,
version: None,
output: None,
ca_file: Some("example.crt".to_owned()),
},
ca_file: Some("example.crt".to_owned()),
..Flags::default()
}
);
}
#[test]
fn cache_with_cafile() {
let r = flags_from_vec(svec![
"deno",
"cache",
"--cert",
"example.crt",
"script.ts",
"script_two.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Cache {
files: svec!["script.ts", "script_two.ts"],
},
ca_file: Some("example.crt".to_owned()),
..Flags::default()
}
);
}
#[test]
fn info_with_cafile() {
let r = flags_from_vec(svec![
"deno",
"info",
"--cert",
"example.crt",
"https://example.com"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Info {
json: false,
file: Some("https://example.com".to_string()),
},
ca_file: Some("example.crt".to_owned()),
..Flags::default()
}
);
}
#[test]
fn doc() {
let r = flags_from_vec(svec!["deno", "doc", "--json", "path/to/module.ts"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Doc {
private: false,
json: true,
source_file: Some("path/to/module.ts".to_string()),
filter: None,
},
..Flags::default()
}
);
let r = flags_from_vec(svec![
"deno",
"doc",
"path/to/module.ts",
"SomeClass.someField"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Doc {
private: false,
json: false,
source_file: Some("path/to/module.ts".to_string()),
filter: Some("SomeClass.someField".to_string()),
},
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "doc"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Doc {
private: false,
json: false,
source_file: None,
filter: None,
},
..Flags::default()
}
);
let r = flags_from_vec(svec!["deno", "doc", "--builtin", "Deno.Listener"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Doc {
private: false,
json: false,
source_file: Some("--builtin".to_string()),
filter: Some("Deno.Listener".to_string()),
},
..Flags::default()
}
);
let r =
flags_from_vec(svec!["deno", "doc", "--private", "path/to/module.js"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Doc {
private: true,
json: false,
source_file: Some("path/to/module.js".to_string()),
filter: None,
},
..Flags::default()
}
);
}
#[test]
fn inspect_default_host() {
let r = flags_from_vec(svec!["deno", "run", "--inspect", "foo.js"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Run {
script: "foo.js".to_string(),
},
inspect: Some("127.0.0.1:9229".parse().unwrap()),
..Flags::default()
}
);
}
#[test]
fn compile() {
let r = flags_from_vec(svec![
"deno",
"compile",
"https://deno.land/std/examples/colors.ts"
]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Compile {
source_file: "https://deno.land/std/examples/colors.ts".to_string(),
output: None,
args: vec![],
target: None,
},
..Flags::default()
}
);
}
#[test]
fn compile_with_flags() {
#[rustfmt::skip]
let r = flags_from_vec(svec!["deno", "compile", "--import-map", "import_map.json", "--no-remote", "--config", "tsconfig.json", "--no-check", "--reload", "--lock", "lock.json", "--lock-write", "--cert", "example.crt", "--cached-only", "--location", "https:foo", "--allow-read", "--allow-net", "--v8-flags=--help", "--seed", "1", "--output", "colors", "https://deno.land/std/examples/colors.ts", "foo", "bar"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Compile {
source_file: "https://deno.land/std/examples/colors.ts".to_string(),
output: Some(PathBuf::from("colors")),
args: svec!["foo", "bar"],
target: None,
},
import_map_path: Some("import_map.json".to_string()),
no_remote: true,
config_path: Some("tsconfig.json".to_string()),
no_check: true,
reload: true,
lock: Some(PathBuf::from("lock.json")),
lock_write: true,
ca_file: Some("example.crt".to_string()),
cached_only: true,
location: Some(Url::parse("https://foo/").unwrap()),
allow_read: Some(vec![]),
allow_net: Some(vec![]),
v8_flags: svec!["--help", "--random-seed=1"],
seed: Some(1),
..Flags::default()
}
);
}
#[test]
fn coverage() {
let r = flags_from_vec(svec!["deno", "coverage", "foo.json"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Coverage {
files: vec![PathBuf::from("foo.json")],
ignore: vec![],
include: vec![r"^file:".to_string()],
exclude: vec![r"test\.(js|mjs|ts|jsx|tsx)$".to_string()],
lcov: false,
},
..Flags::default()
}
);
}
#[test]
fn location_with_bad_scheme() {
#[rustfmt::skip]
let r = flags_from_vec(svec!["deno", "run", "--location", "foo:", "mod.ts"]);
assert!(r.is_err());
assert!(r
.unwrap_err()
.to_string()
.contains("Expected protocol \"http\" or \"https\""));
}
}
| import_map_arg_parse |
ray_constants.py | """Ray constants used in the Python code."""
import logging
import math
import os
logger = logging.getLogger(__name__)
def env_integer(key, default):
if key in os.environ:
value = os.environ[key]
if value.isdigit():
return int(os.environ[key])
logger.debug(f"Found {key} in environment, but value must "
f"be an integer. Got: {value}. Returning "
f"provided default {default}.")
return default
return default
def env_bool(key, default):
|
ID_SIZE = 28
# The default maximum number of bytes to allocate to the object store unless
# overridden by the user.
DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES = 200 * 10**9
# The default proportion of available memory allocated to the object store
DEFAULT_OBJECT_STORE_MEMORY_PROPORTION = 0.3
# The smallest cap on the memory used by the object store that we allow.
# This must be greater than MEMORY_RESOURCE_UNIT_BYTES
OBJECT_STORE_MINIMUM_MEMORY_BYTES = 75 * 1024 * 1024
# The default maximum number of bytes that the non-primary Redis shards are
# allowed to use unless overridden by the user.
DEFAULT_REDIS_MAX_MEMORY_BYTES = 10**10
# The smallest cap on the memory used by Redis that we allow.
REDIS_MINIMUM_MEMORY_BYTES = 10**7
# Above this number of bytes, raise an error by default unless the user sets
# RAY_ALLOW_SLOW_STORAGE=1. This avoids swapping with large object stores.
REQUIRE_SHM_SIZE_THRESHOLD = 10**10
# If a user does not specify a port for the primary Ray service,
# we attempt to start the service running at this port.
DEFAULT_PORT = 6379
RAY_ADDRESS_ENVIRONMENT_VARIABLE = "RAY_ADDRESS"
RAY_NAMESPACE_ENVIRONMENT_VARIABLE = "RAY_NAMESPACE"
RAY_RUNTIME_ENV_ENVIRONMENT_VARIABLE = "RAY_RUNTIME_ENV"
DEFAULT_DASHBOARD_IP = "127.0.0.1"
DEFAULT_DASHBOARD_PORT = 8265
REDIS_KEY_DASHBOARD = "dashboard"
PROMETHEUS_SERVICE_DISCOVERY_FILE = "prom_metrics_service_discovery.json"
# Default resource requirements for actors when no resource requirements are
# specified.
DEFAULT_ACTOR_METHOD_CPU_SIMPLE = 1
DEFAULT_ACTOR_CREATION_CPU_SIMPLE = 0
# Default resource requirements for actors when some resource requirements are
# specified in .
DEFAULT_ACTOR_METHOD_CPU_SPECIFIED = 0
DEFAULT_ACTOR_CREATION_CPU_SPECIFIED = 1
# Default number of return values for each actor method.
DEFAULT_ACTOR_METHOD_NUM_RETURN_VALS = 1
# If a remote function or actor (or some other export) has serialized size
# greater than this quantity, print an warning.
PICKLE_OBJECT_WARNING_SIZE = 10**7
# If remote functions with the same source are imported this many times, then
# print a warning.
DUPLICATE_REMOTE_FUNCTION_THRESHOLD = 100
# The maximum resource quantity that is allowed. TODO(rkn): This could be
# relaxed, but the current implementation of the node manager will be slower
# for large resource quantities due to bookkeeping of specific resource IDs.
MAX_RESOURCE_QUANTITY = 100e12
# Each memory "resource" counts as this many bytes of memory.
MEMORY_RESOURCE_UNIT_BYTES = 1
# Number of units 1 resource can be subdivided into.
MIN_RESOURCE_GRANULARITY = 0.0001
def round_to_memory_units(memory_bytes, round_up):
"""Round bytes to the nearest memory unit."""
return from_memory_units(to_memory_units(memory_bytes, round_up))
def from_memory_units(memory_units):
"""Convert from memory units -> bytes."""
return memory_units * MEMORY_RESOURCE_UNIT_BYTES
def to_memory_units(memory_bytes, round_up):
"""Convert from bytes -> memory units."""
value = memory_bytes / MEMORY_RESOURCE_UNIT_BYTES
if value < 1:
raise ValueError(
"The minimum amount of memory that can be requested is {} bytes, "
"however {} bytes was asked.".format(MEMORY_RESOURCE_UNIT_BYTES,
memory_bytes))
if isinstance(value, float) and not value.is_integer():
# TODO(ekl) Ray currently does not support fractional resources when
# the quantity is greater than one. We should fix memory resources to
# be allocated in units of bytes and not 100MB.
if round_up:
value = int(math.ceil(value))
else:
value = int(math.floor(value))
return int(value)
# Different types of Ray errors that can be pushed to the driver.
# TODO(rkn): These should be defined in flatbuffers and must be synced with
# the existing C++ definitions.
WAIT_FOR_CLASS_PUSH_ERROR = "wait_for_class"
PICKLING_LARGE_OBJECT_PUSH_ERROR = "pickling_large_object"
WAIT_FOR_FUNCTION_PUSH_ERROR = "wait_for_function"
TASK_PUSH_ERROR = "task"
REGISTER_REMOTE_FUNCTION_PUSH_ERROR = "register_remote_function"
FUNCTION_TO_RUN_PUSH_ERROR = "function_to_run"
VERSION_MISMATCH_PUSH_ERROR = "version_mismatch"
CHECKPOINT_PUSH_ERROR = "checkpoint"
REGISTER_ACTOR_PUSH_ERROR = "register_actor"
WORKER_CRASH_PUSH_ERROR = "worker_crash"
WORKER_DIED_PUSH_ERROR = "worker_died"
WORKER_POOL_LARGE_ERROR = "worker_pool_large"
PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
INFEASIBLE_TASK_ERROR = "infeasible_task"
RESOURCE_DEADLOCK_ERROR = "resource_deadlock"
REMOVED_NODE_ERROR = "node_removed"
MONITOR_DIED_ERROR = "monitor_died"
LOG_MONITOR_DIED_ERROR = "log_monitor_died"
REPORTER_DIED_ERROR = "reporter_died"
DASHBOARD_AGENT_DIED_ERROR = "dashboard_agent_died"
DASHBOARD_DIED_ERROR = "dashboard_died"
RAYLET_CONNECTION_ERROR = "raylet_connection_error"
DETACHED_ACTOR_ANONYMOUS_NAMESPACE_ERROR = "detached_actor_anonymous_namespace"
# Used in gpu detection
RESOURCE_CONSTRAINT_PREFIX = "accelerator_type:"
RESOURCES_ENVIRONMENT_VARIABLE = "RAY_OVERRIDE_RESOURCES"
# The reporter will report its statistics this often (milliseconds).
REPORTER_UPDATE_INTERVAL_MS = env_integer("REPORTER_UPDATE_INTERVAL_MS", 2500)
# Number of attempts to ping the Redis server. See
# `services.py:wait_for_redis_to_start`.
START_REDIS_WAIT_RETRIES = env_integer("RAY_START_REDIS_WAIT_RETRIES", 16)
LOGGER_FORMAT = (
"%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s")
LOGGER_FORMAT_HELP = f"The logging format. default='{LOGGER_FORMAT}'"
LOGGER_LEVEL = "info"
LOGGER_LEVEL_CHOICES = ["debug", "info", "warning", "error", "critical"]
LOGGER_LEVEL_HELP = ("The logging level threshold, choices=['debug', 'info',"
" 'warning', 'error', 'critical'], default='info'")
LOGGING_ROTATE_BYTES = 512 * 1024 * 1024 # 512MB.
LOGGING_ROTATE_BACKUP_COUNT = 5 # 5 Backup files at max.
# Constants used to define the different process types.
PROCESS_TYPE_REAPER = "reaper"
PROCESS_TYPE_MONITOR = "monitor"
PROCESS_TYPE_RAY_CLIENT_SERVER = "ray_client_server"
PROCESS_TYPE_LOG_MONITOR = "log_monitor"
# TODO(sang): Delete it.
PROCESS_TYPE_REPORTER = "reporter"
PROCESS_TYPE_DASHBOARD = "dashboard"
PROCESS_TYPE_DASHBOARD_AGENT = "dashboard_agent"
PROCESS_TYPE_WORKER = "worker"
PROCESS_TYPE_RAYLET = "raylet"
PROCESS_TYPE_REDIS_SERVER = "redis_server"
PROCESS_TYPE_WEB_UI = "web_ui"
PROCESS_TYPE_GCS_SERVER = "gcs_server"
PROCESS_TYPE_PYTHON_CORE_WORKER_DRIVER = "python-core-driver"
PROCESS_TYPE_PYTHON_CORE_WORKER = "python-core-worker"
# Log file names
MONITOR_LOG_FILE_NAME = f"{PROCESS_TYPE_MONITOR}.log"
LOG_MONITOR_LOG_FILE_NAME = f"{PROCESS_TYPE_LOG_MONITOR}.log"
WORKER_PROCESS_TYPE_IDLE_WORKER = "ray::IDLE"
WORKER_PROCESS_TYPE_SPILL_WORKER_NAME = "SpillWorker"
WORKER_PROCESS_TYPE_RESTORE_WORKER_NAME = "RestoreWorker"
WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE = (
f"ray::IDLE_{WORKER_PROCESS_TYPE_SPILL_WORKER_NAME}")
WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE = (
f"ray::IDLE_{WORKER_PROCESS_TYPE_RESTORE_WORKER_NAME}")
WORKER_PROCESS_TYPE_SPILL_WORKER = (
f"ray::SPILL_{WORKER_PROCESS_TYPE_SPILL_WORKER_NAME}")
WORKER_PROCESS_TYPE_RESTORE_WORKER = (
f"ray::RESTORE_{WORKER_PROCESS_TYPE_RESTORE_WORKER_NAME}")
WORKER_PROCESS_TYPE_SPILL_WORKER_DELETE = (
f"ray::DELETE_{WORKER_PROCESS_TYPE_SPILL_WORKER_NAME}")
WORKER_PROCESS_TYPE_RESTORE_WORKER_DELETE = (
f"ray::DELETE_{WORKER_PROCESS_TYPE_RESTORE_WORKER_NAME}")
LOG_MONITOR_MAX_OPEN_FILES = 200
# The object metadata field uses the following format: It is a comma
# separated list of fields. The first field is mandatory and is the
# type of the object (see types below) or an integer, which is interpreted
# as an error value. The second part is optional and if present has the
# form DEBUG:<breakpoint_id>, it is used for implementing the debugger.
# A constant used as object metadata to indicate the object is cross language.
OBJECT_METADATA_TYPE_CROSS_LANGUAGE = b"XLANG"
# A constant used as object metadata to indicate the object is python specific.
OBJECT_METADATA_TYPE_PYTHON = b"PYTHON"
# A constant used as object metadata to indicate the object is raw bytes.
OBJECT_METADATA_TYPE_RAW = b"RAW"
# A constant used as object metadata to indicate the object is an actor handle.
# This value should be synchronized with the Java definition in
# ObjectSerializer.java
# TODO(fyrestone): Serialize the ActorHandle via the custom type feature
# of XLANG.
OBJECT_METADATA_TYPE_ACTOR_HANDLE = b"ACTOR_HANDLE"
# A constant indicating the debugging part of the metadata (see above).
OBJECT_METADATA_DEBUG_PREFIX = b"DEBUG:"
AUTOSCALER_RESOURCE_REQUEST_CHANNEL = b"autoscaler_resource_request"
# The default password to prevent redis port scanning attack.
# Hex for ray.
REDIS_DEFAULT_PASSWORD = "5241590000000000"
# The default module path to a Python function that sets up the worker env.
DEFAULT_WORKER_SETUP_HOOK = "ray.workers.setup_runtime_env.setup_worker"
# The default module path to a Python function that sets up runtime envs.
DEFAULT_RUNTIME_ENV_SETUP_HOOK = \
"ray.workers.setup_runtime_env.setup_runtime_env"
# The default ip address to bind to.
NODE_DEFAULT_IP = "127.0.0.1"
# The Mach kernel page size in bytes.
MACH_PAGE_SIZE_BYTES = 4096
# Max 64 bit integer value, which is needed to ensure against overflow
# in C++ when passing integer values cross-language.
MAX_INT64_VALUE = 9223372036854775807
# Object Spilling related constants
DEFAULT_OBJECT_PREFIX = "ray_spilled_objects"
GCS_PORT_ENVIRONMENT_VARIABLE = "RAY_GCS_SERVER_PORT"
HEALTHCHECK_EXPIRATION_S = os.environ.get("RAY_HEALTHCHECK_EXPIRATION_S", 10)
# Filename of "shim process" that sets up Python worker environment.
# Should be kept in sync with kSetupWorkerFilename in
# src/ray/common/constants.h.
SETUP_WORKER_FILENAME = "setup_worker.py"
| if key in os.environ:
return True if os.environ[key].lower() == "true" else False
return default |
shared_poi_all_lair_warren_large_fog_mustard.py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
| result = Tangible()
result.template = "object/tangible/lair/base/shared_poi_all_lair_warren_large_fog_mustard.iff"
result.attribute_template_id = -1
result.stfName("lair_n","warren")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
|
main.rs | use reqwest::Result;
use std::time::Duration;
use reqwest::ClientBuilder;
fn | () {
println!("Welcome to testing Rust from 0 course!");
testapi();
}
async fn testapi() -> Result<()> {
let emisor = "7884";
let request_url = format!("https://hkpy.irstrat.com/intradia/{}", emisor);
println!("{}", request_url);
let timeout = Duration::new(5, 0);
let client = ClientBuilder::new().timeout(timeout).build()?;
let response = client.head(&request_url).send().await?;
if response.status().is_success() {
println!("{} is a emiter!", emisor);
} else {
println!("{} is not a emiter!", emisor);
}
Ok(())
} | main |
delete_dataset_collection.py | import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, dict_output, _arg_split
@click.command('delete_dataset_collection')
@click.argument("history_id", type=str)
@click.argument("dataset_collection_id", type=str)
@pass_context
@custom_exception
@dict_output
def cli(ctx, history_id, dataset_collection_id):
| """Mark corresponding dataset collection as deleted.
Output:
"""
return ctx.gi.histories.delete_dataset_collection(history_id, dataset_collection_id) |
|
actions.js | import * as api from "../api/api";
// ---------------------------- Class Methods ------------------------------
export const createClass = async (Class, user, Classes, setClasses) => {
try {
const newClass = {
name: Class.name,
subcode: Class.subcode,
subject: Class.subject,
instructor: { name: user.username, email: user.email },
image: user.image,
users: [user.email],
};
const res = await api.createClass(newClass);
setClasses([...Classes, res.data]);
} catch (error) {
console.log(error.message);
}
};
export const getClass = async (code, setClass) => {
try {
if (code) {
const res = await api.getClass(code);
setClass(res.data);
}
} catch (error) {
console.log(error.message);
}
};
export const getClasses = async (user, setClasses) => {
try {
if (user) {
const res = await api.getClasses(user);
setClasses(res.data);
}
} catch (error) {
console.log(error.message);
}
};
export const joinClass = async (user, Classes, setClasses, classCode) => {
try {
const Class = await api.getClass(classCode);
if (
Class.data.users.find((element) => {
return user.email === element;
})
) {
alert("Already joined the class.");
} else { | user: user.email,
});
setClasses([...Classes, joinedClass.data]);
}
} catch (error) {
alert("Class does not exist.");
}
};
export const deleteClass = async (code, user, setClasses) => {
try {
if (window.confirm("Are you sure you want to delete the classroom ? ")) {
await api.deleteClass(code);
getClasses(user, setClasses);
}
} catch (error) {
console.log(error.message);
}
};
export const editClassDetails = async (Class, Config, user, setClasses) => {
try {
await api.editClass(Class.code, Config);
getClasses(user, setClasses);
} catch (error) {
console.log(error.message);
}
};
export const leaveClass = async (code, user, setClasses) => {
try {
if (window.confirm("Are you sure you want to leave the classroom ? ")) {
await api.leaveClass(code, { user: user });
getClasses(user, setClasses);
}
} catch (error) {
console.log(error.message);
}
};
export const removeStudents = async (
code,
user,
instructor,
deleteAll,
setClass
) => {
try {
if (deleteAll) {
if (
window.confirm(
"Are you sure you want to remove all students from classroom ? "
)
) {
await api.leaveClass(code, { user: user });
await api.joinClass(code, { user: instructor.email });
getClass(code, setClass);
}
} else {
if (
window.confirm(
"Are you sure you want to remove " + user + " form classroom ? "
)
) {
await api.leaveClass(code, { user: user });
getClass(code, setClass);
}
}
} catch (error) {
console.log(error.message);
}
};
//------------------------------- Test Methods -------------------------------
export const createTest = async (newTest) => {
try {
await api.createTest(newTest);
window.location.replace(`${window.location.href}true`);
} catch (error) {
console.log(error.message);
}
};
export const removeTest=async(id)=>{
try{
await api.removeTest(id);
}
catch (error) {
console.log(error.message);
}
}
export const getTests = async (code, setTests) => {
try {
const res = await api.getTests(code);
setTests(res.data);
} catch (error) {
console.log(error.message);
}
};
export const startTest = async (testId, setTest) => {
try {
if (testId) {
const res = await api.startTest(testId);
setTest(res.data);
}
} catch (error) {
console.log(error.message);
}
};
export const submitTest = async (testId, response, user) => {
try {
if (window.confirm("Are you sure you want to submit ? ")) {
const data = { user: user, response: response };
await api.submitTest(testId, data);
window.location.replace(`/`);
}
} catch (error) {
console.log(error.message);
}
}; | const joinedClass = await api.joinClass(classCode, { |
IntensityImage.py | #!/usr/bin/env python
""" """
# Script information for the file.
__author__ = "Hendrix Demers ([email protected])"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2009 Hendrix Demers"
__license__ = ""
# Standard library modules.
import logging
import os.path
# Third party modules.
from PIL import Image
# Local modules.
import casinotools.fileformat.casino3.File as File
import casinotools.fileformat.casino3.ScanPointResults as ScanPointResults
# Globals and constants variables.
INTENSITY_TRANSMITTED = "TransmittedIntensity"
INTENSITY_TRANSMITTED_DETECTED = "TransmittedDetectedIntensity"
class IntensityImage(object):
def __init__(self, filepath, imageName="IntensityImage", intensityType=INTENSITY_TRANSMITTED_DETECTED):
self._filepath = filepath
self._imageName = imageName
self._intensityType = intensityType
self._imageSize = (800, 600)
self._createGetIntensityMethod()
def _createGetIntensityMethod(self):
if self._intensityType == INTENSITY_TRANSMITTED:
self._getIntensity = ScanPointResults.ScanPointResults.getTransmittedCoefficient
elif self._intensityType == INTENSITY_TRANSMITTED_DETECTED:
self._getIntensity = ScanPointResults.ScanPointResults.getTransmittedDetectedCoefficient
def _createImage(self):
self._extractData()
self._analyzePositions()
self._createRawImage2()
def _extractData(self):
casinoFile = File.File(self._filepath)
casinoFile.open()
assert 1 == casinoFile.getNumberSimulations()
scanPointsResults = casinoFile.getScanPointResults()
self._numberScanPoints = len(scanPointsResults)
self._positions = []
self._intensities = {}
for scanPointResults in scanPointsResults:
position = scanPointResults.getPosition()
self._positions.append(position)
self._intensities[position] = self._getIntensity(scanPointResults)
def _analyzePositions(self):
self._xSet = set()
self._ySet = set()
self._zSet = set()
for position in self._positions:
x, y, z = position
self._xSet.add(x)
self._ySet.add(y)
self._zSet.add(z)
numberUniqueX = len(self._xSet)
numberUniqueY = len(self._ySet)
numberUniqueZ = len(self._zSet)
imageType = None
if numberUniqueX > 1:
if numberUniqueY > 1:
if numberUniqueZ > 1:
imageType = "3D"
else:
imageType = "XY"
elif numberUniqueZ > 1:
imageType = "XZ"
else:
imageType = "X"
elif numberUniqueY > 1:
if numberUniqueZ > 1:
imageType = "YZ"
else:
imageType = "Y"
elif numberUniqueZ > 1:
imageType = "Z"
else:
imageType = "P"
self._imageType = imageType
logging.info("Number unique X: %i", len(self._xSet))
logging.info("Number unique Y: %i", len(self._ySet))
logging.info("Number unique Z: %i", len(self._zSet))
logging.info("Image type: %s", imageType)
def _createRawImage(self):
if self._imageType == "XY":
size = len(self._xSet), len(self._ySet)
self._imageRaw = Image.new("F", size, color="black")
z = list(self._zSet)[0]
data = []
for y in sorted(self._xSet):
for x in sorted(self._ySet):
position = x, y, z
intensity = self._intensities[position]
data.append(intensity)
self._imageRaw.putdata(data)
def _createRawImage2(self):
if self._imageType == "XY":
size = len(self._xSet), len(self._ySet)
self._imageRaw = Image.new("F", size, color="black")
z = list(self._zSet)[0]
pix = self._imageRaw.load()
for indexH, x in enumerate(sorted(self._xSet)):
for indexV, y in enumerate(sorted(self._ySet)):
position = (x, y, z)
#index = positions.index(position)
value = self._intensities[position]
pix[indexH, indexV] = value
def save(self, path):
self._saveRawImage(path)
#self._saveImage(path)
def _saveRawImage(self, path):
imageFilepath = os.path.join(path, self._imageName + "_raw.tiff")
self._imageRaw.save(imageFilepath)
def _saveImage(self, path):
size = self._imageRaw.size
zoomFactor = self._computeZoomFactor(size)
newSize = size[0] * zoomFactor, size[1] * zoomFactor
filters = {"near": Image.NEAREST, "bilin": Image.BILINEAR,
"bicub": Image.BICUBIC, "anti": Image.ANTIALIAS}
for name, filter in filters.items():
imageFilepath = os.path.join(path, self._imageName + "_" + name + ".tiff")
image = self._imageRaw.resize(newSize, filter)
image.save(imageFilepath)
imageFilepath = os.path.join(path, self._imageName + ".tiff")
tmpImage = self._imageRaw.resize(newSize, Image.BICUBIC)
#tmpImage = tmpImage.convert('L')
image = Image.new(tmpImage.mode, self._imageSize)
topCorner = (self._imageSize[0] - tmpImage.size[0]) / 2, (self._imageSize[1] - tmpImage.size[1]) / 2
box = topCorner[0], topCorner[1], topCorner[0] + tmpImage.size[0], topCorner[1] + tmpImage.size[1]
image.paste(tmpImage, box)
image.save(imageFilepath)
#tmpImage.save(imageFilepath)
def _computeZoomFactor(self, size):
xZoom = int(self._imageSize[0] / size[0])
yZoom = int(self._imageSize[1] / size[1])
zoom = min(xZoom, yZoom)
return zoom
def | ():
from pkg_resources import resource_filename #@UnresolvedImport
resultsPath = resource_filename(__name__, "../../test_data/casino3.x/createImage")
casBinnedFilepath = os.path.join(resultsPath, "Au_C_thin_1nm_Inside_100ke_binned.cas")
imageBinned = IntensityImage(casBinnedFilepath)
imageBinned._createImage()
imageBinned.save(resultsPath)
if __name__ == '__main__':
run() | run |
symbol.rs | use std::fmt::{self, Display};
use syn::{Ident, Path};
#[derive(Copy, Clone)]
pub struct Symbol(&'static str);
pub const GREMLIN: Symbol = Symbol("gremlin");
pub const LABEL: Symbol = Symbol("label");
pub const SKIP: Symbol = Symbol("skip");
pub const OPTION: Symbol = Symbol("Option");
impl PartialEq<Symbol> for Ident {
fn | (&self, word: &Symbol) -> bool {
self == word.0
}
}
impl<'a> PartialEq<Symbol> for &'a Ident {
fn eq(&self, word: &Symbol) -> bool {
*self == word.0
}
}
impl PartialEq<Symbol> for Path {
fn eq(&self, word: &Symbol) -> bool {
self.is_ident(word.0)
}
}
impl<'a> PartialEq<Symbol> for &'a Path {
fn eq(&self, word: &Symbol) -> bool {
self.is_ident(word.0)
}
}
impl Display for Symbol {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str(self.0)
}
}
| eq |
main.go | package main
import (
"flag"
"os"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
applicationsamplev1alpha1 "github.com/ibm/operator-sample-go/operator-application/api/v1alpha1"
applicationsamplev1beta1 "github.com/ibm/operator-sample-go/operator-application/api/v1beta1"
applicationcontroller "github.com/ibm/operator-sample-go/operator-application/controllers/application"
databasesamplev1alpha1 "github.com/ibm/operator-sample-go/operator-database/api/v1alpha1"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
//+kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(databasesamplev1alpha1.AddToScheme(scheme))
utilruntime.Must(monitoringv1.AddToScheme(scheme))
utilruntime.Must(applicationsamplev1alpha1.AddToScheme(scheme))
utilruntime.Must(applicationsamplev1beta1.AddToScheme(scheme))
//+kubebuilder:scaffold:scheme
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
opts := zap.Options{
Development: true,
}
opts.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "320821e4.ibm.com",
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
if err = (&applicationcontroller.ApplicationReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Application")
os.Exit(1)
}
if os.Getenv("ENABLE_WEBHOOKS") != "false" {
if err = (&applicationsamplev1beta1.Application{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "Application")
os.Exit(1)
}
}
//+kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { | setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
} | setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { |
load_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpointable object SavedModel loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.training.checkpointable import tracking
class LoadTest(test.TestCase):
def cycle(self, obj):
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(obj, path, signatures={})
return load.load(path)
def test_structure_import(self):
root = tracking.Checkpointable()
root.dep_one = tracking.Checkpointable()
root.dep_two = tracking.Checkpointable()
root.dep_two.dep = tracking.Checkpointable()
root.dep_three = root.dep_two.dep
imported = self.cycle(root)
self.assertIs(imported.dep_three, imported.dep_two.dep)
self.assertIsNot(imported.dep_one, imported.dep_two)
def test_variables(self):
root = tracking.Checkpointable()
root.v1 = variables.Variable(1., trainable=True)
root.v2 = variables.Variable(2., trainable=False)
imported = self.cycle(root)
self.assertEquals(imported.v1.numpy(), 1.0)
self.assertTrue(imported.v1.trainable)
self.assertEquals(imported.v2.numpy(), 2.0)
self.assertFalse(imported.v2.trainable)
def test_capture_variables(self):
root = tracking.Checkpointable()
root.weights = variables.Variable(2.)
root.f = def_function.function(
lambda x: root.weights * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
imported = self.cycle(root)
self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())
imported.weights.assign(4.0)
self.assertEqual(8., imported.f(constant_op.constant(2.)).numpy())
def _make_asset(self, contents):
filename = tempfile.mktemp(prefix=self.get_temp_dir())
with open(filename, "w") as f:
f.write(contents)
return filename
def test_assets(self):
file1 = self._make_asset("contents 1")
file2 = self._make_asset("contents 2")
root = tracking.Checkpointable()
root.asset1 = tracking.TrackableAsset(file1)
root.asset2 = tracking.TrackableAsset(file2)
save_dir = os.path.join(self.get_temp_dir(), "save_dir")
save.save(root, save_dir, signatures={})
file_io.delete_file(file1)
file_io.delete_file(file2)
load_dir = os.path.join(self.get_temp_dir(), "load_dir")
file_io.rename(save_dir, load_dir)
imported = load.load(load_dir)
with open(imported.asset1.asset_path.numpy(), "r") as f:
self.assertEquals("contents 1", f.read())
with open(imported.asset2.asset_path.numpy(), "r") as f:
self.assertEquals("contents 2", f.read())
def test_capture_assets(self):
root = tracking.Checkpointable()
root.vocab = tracking.TrackableAsset(self._make_asset("contents"))
root.f = def_function.function(
lambda: root.vocab.asset_path,
input_signature=[])
imported = self.cycle(root)
origin_output = root.f().numpy()
imported_output = imported.f().numpy()
self.assertNotEqual(origin_output, imported_output)
with open(imported_output, "r") as f:
self.assertEquals("contents", f.read())
def test_dedup_assets(self):
vocab = self._make_asset("contents")
root = tracking.Checkpointable()
root.asset1 = tracking.TrackableAsset(vocab)
root.asset2 = tracking.TrackableAsset(vocab)
imported = self.cycle(root)
self.assertEqual(imported.asset1.asset_path.numpy(),
imported.asset2.asset_path.numpy())
def test_implicit_input_signature(self):
@def_function.function
def func(x):
return 2 * x
root = tracking.Checkpointable()
root.f = func
# Add two traces.
root.f(constant_op.constant(1.))
root.f(constant_op.constant(1))
imported = self.cycle(root)
self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())
self.assertEqual(14, imported.f(constant_op.constant(7)).numpy())
def test_explicit_input_signature(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def func(x):
return 2 * x
root = tracking.Checkpointable()
root.f = func
imported = self.cycle(root)
self.assertEqual(4., imported.f(constant_op.constant(2.0)).numpy())
def test_nested_functions(self):
f = def_function.function(
lambda x: x*2.0,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
g = def_function.function(
lambda x: f(x) + 1.0,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root = tracking.Checkpointable()
root.g = g
imported = self.cycle(root)
imported.g(constant_op.constant([1.0]))
def test_function_with_default_bool_input(self):
def func(x, training=False):
if training:
return 2 * x
else:
return 7
root = tracking.Checkpointable()
root.f = def_function.function(func)
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
imported = self.cycle(root)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
def | (self):
def func(x, training=False, abc=7.1, defg=7.7):
del abc
if training:
return 2 * x
if defg == 7:
return 6
else:
return 7
root = tracking.Checkpointable()
root.f = def_function.function(func)
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
self.assertEqual(6, root.f(constant_op.constant(1), defg=7.0).numpy())
imported = self.cycle(root)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
self.assertEqual(6, imported.f(constant_op.constant(1), defg=7.0).numpy())
def test_member_function(self):
class CheckpointableWithMember(tracking.Checkpointable):
def __init__(self):
super(CheckpointableWithMember, self).__init__()
self._some_value = 20
@def_function.function
def f(self, x, training=False):
if training:
return 2 * x
else:
return 7 + self._some_value
root = CheckpointableWithMember()
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(27, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
imported = self.cycle(root)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(27, imported.f(constant_op.constant(2)).numpy())
def test_side_effect_listing(self):
class M(tracking.Checkpointable):
def __init__(self):
super(M, self).__init__()
self.var = None
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def f(self, x):
if self.var is None:
self.var = variables.Variable(2.)
return x * self.var
m = M()
self.cycle(m)
self.assertEquals(4.0, m.f(constant_op.constant(2.0)).numpy())
def test_basic_backprop(self):
weight = variables.Variable(1., trainable=True)
bias = variables.Variable(0., trainable=True)
g = def_function.function(
lambda x: x*weight + bias,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root = tracking.Checkpointable()
root.weight = weight
root.bias = bias
root.g = g
imported = self.cycle(root)
with backprop.GradientTape(watch_accessed_variables=True) as t:
x = constant_op.constant([3.5])
loss = imported.g(x)
grad = t.gradient(loss, [imported.weight, imported.bias])
self.assertAllClose(grad, [3.5, 1.0])
def test_callable(self):
class M1(tracking.Checkpointable):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def __call__(self, x):
return x
root = tracking.Checkpointable()
root.m1 = M1()
root.m2 = tracking.Checkpointable()
root.m2.__call__ = def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(
lambda x: x*3.0)
imported = self.cycle(root)
x = constant_op.constant(1.0)
self.assertTrue(callable(imported.m1))
self.assertAllEqual(root.m1(x), imported.m1(x))
# Note: `root.m2` was not callable since `__call__` attribute was set
# into the instance and not on the class. But after a serialization cycle
# that starts to work.
self.assertTrue(callable(imported.m2))
self.assertAllEqual(root.m2.__call__(x), imported.m2(x))
# Verify that user objects without `__call__` attribute are not callable.
self.assertFalse(callable(imported))
def test_chain_callable(self):
func = def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(
lambda x: x*3.0)
root = tracking.Checkpointable()
root.__call__ = tracking.Checkpointable()
root.__call__.__call__ = tracking.Checkpointable()
root.__call__.__call__.__call__ = func
imported = self.cycle(root)
self.assertTrue(callable(imported))
x = constant_op.constant(1.0)
self.assertAllEqual(imported(x).numpy(), 3.0)
if __name__ == "__main__":
test.main()
| test_positional_arguments |
test_multiple_outputs.py | import logging
import prodtest
class MultipleOutputsTest(prodtest.ProduceTestCase):
"""
Tests the handling of recipes with multiple outputs.
"""
def test_without(self):
"""
Without the outputs attribute, the recipe is run twice, once for each
target, thus two INFO messages are generated:
"""
self.assertDirectoryContents(['produce.ini', 'Makefile'])
with self.assertLogs(logger='produce', level='INFO') as l:
self.produce('a.txt', 'b.txt', **{'-j': '3'})
self.assertEqual(len(l.output), 4)
self.assertDirectoryContents(['produce.ini', 'Makefile', 'a.txt', 'b.txt'])
def test_with(self): | """
self.assertDirectoryContents(['produce.ini', 'Makefile'])
with self.assertLogs(logger='produce', level='INFO') as l:
self.produce('c.txt', 'd.txt', **{'-j': '3'})
self.assertEqual(len(l.output), 2)
self.assertDirectoryContents(['produce.ini', 'Makefile', 'c.txt', 'd.txt'])
def test_with_2(self):
"""
Same, but using the out. prefix instead of the outputs attribute.
"""
self.assertDirectoryContents(['produce.ini', 'Makefile'])
with self.assertLogs(logger='produce', level='INFO') as l:
self.produce('e.txt', 'f.txt', **{'-j': '3'})
self.assertEqual(len(l.output), 2)
self.assertDirectoryContents(['produce.ini', 'Makefile', 'e.txt', 'f.txt'])
def test_with_3(self):
"""
Same, mixing out. and outputs.
"""
self.assertDirectoryContents(['produce.ini', 'Makefile'])
with self.assertLogs(logger='produce', level='INFO') as l:
self.produce('g.txt', 'h.txt', 'i.txt', **{'-j': '3'})
self.assertEqual(len(l.output), 2)
self.assertDirectoryContents(['produce.ini', 'Makefile', 'g.txt', 'h.txt', 'i.txt']) | """
With the outputs attribute, the recipe is run only once: |
f218.go | package internal
func | (ctx *Context, l0 int32) int32 {
var l1 int32
_ = l1
var s0i32 int32
_ = s0i32
var s1i32 int32
_ = s1i32
s0i32 = l0
s0i32 = int32(int8(ctx.Mem[int(s0i32+52)]))
l1 = s0i32
s1i32 = -1
if s0i32 <= s1i32 {
s0i32 = 1
} else {
s0i32 = 0
}
if s0i32 != 0 {
s0i32 = l0
s0i32 = f266(ctx, s0i32)
return s0i32
}
s0i32 = l1
if s0i32 == 0 {
s0i32 = 1
} else {
s0i32 = 0
}
if s0i32 != 0 {
s0i32 = 0
return s0i32
}
s0i32 = l0
s0i32 = f265(ctx, s0i32)
return s0i32
}
| f218 |
_managed_clusters_operations.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ManagedClustersOperations(object):
"""ManagedClustersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ManagedClusterListResult"]
"""Gets a list of managed clusters in the specified subscription.
Gets a list of managed clusters in the specified subscription. The operation returns properties
of each managed cluster.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ManagedClusterListResult"]
|
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
def get_upgrade_profile(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ManagedClusterUpgradeProfile"
"""Gets upgrade profile for a managed cluster.
Gets the details of the upgrade profile for a managed cluster with a specified resource group
and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterUpgradeProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterUpgradeProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterUpgradeProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get_upgrade_profile.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterUpgradeProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default'} # type: ignore
def get_access_profile(
self,
resource_group_name, # type: str
resource_name, # type: str
role_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ManagedClusterAccessProfile"
"""Gets an access profile of a managed cluster.
Gets the accessProfile for the specified role name of the managed cluster with a specified
resource group and name. **WARNING**\ : This API will be deprecated. Instead use
`ListClusterUserCredentials <https://docs.microsoft.com/en-
us/rest/api/aks/managedclusters/listclusterusercredentials>`_ or `ListClusterAdminCredentials
<https://docs.microsoft.com/en-us/rest/api/aks/managedclusters/listclusteradmincredentials>`_ .
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param role_name: The name of the role for managed cluster accessProfile resource.
:type role_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedClusterAccessProfile, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAccessProfile
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterAccessProfile"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get_access_profile.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'roleName': self._serialize.url("role_name", role_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedClusterAccessProfile', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_access_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential'} # type: ignore
def list_cluster_admin_credentials(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.CredentialResults"
"""Gets cluster admin credential of a managed cluster.
Gets cluster admin credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_09_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.list_cluster_admin_credentials.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_admin_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential'} # type: ignore
def list_cluster_user_credentials(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.CredentialResults"
"""Gets cluster user credential of a managed cluster.
Gets cluster user credential of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_09_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.list_cluster_user_credentials.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential'} # type: ignore
def list_cluster_monitoring_user_credentials(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.CredentialResults"
"""Gets cluster monitoring user credential of a managed cluster.
Gets cluster monitoring user credential of the managed cluster with a specified resource group
and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CredentialResults, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_09_01.models.CredentialResults
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.list_cluster_monitoring_user_credentials.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CredentialResults', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_cluster_monitoring_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterMonitoringUserCredential'} # type: ignore
def get(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ManagedCluster"
"""Gets a managed cluster.
Gets the details of the managed cluster with a specified resource group and name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedCluster, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedCluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
parameters, # type: "_models.ManagedCluster"
**kwargs # type: Any
):
# type: (...) -> "_models.ManagedCluster"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ManagedCluster')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ManagedCluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.ManagedCluster"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ManagedCluster"]
        """Creates or updates a managed cluster.

        Creates or updates a managed cluster with the specified configuration for agents and Kubernetes
        version.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Create or Update a Managed Cluster operation.
        :type parameters: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedCluster
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ManagedCluster or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2020_09_01.models.ManagedCluster]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedCluster"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: issue the initial PUT request. `cls` is
            # overridden with an identity-like lambda so the raw pipeline
            # response is returned for the poller to drive the LRO from.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial call; drop them so they are
        # not forwarded again to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into a ManagedCluster, or
            # hand both raw response and model to the caller-supplied `cls`.
            deserialized = self._deserialize('ManagedCluster', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }

        # Select the polling strategy: True -> default ARM polling, False -> no
        # polling, anything else is treated as a user-provided PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    def _update_tags_initial(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ManagedCluster"
        """Initial (non-polling) PATCH request backing ``begin_update_tags``.

        Serializes ``parameters`` as a ``TagsObject`` body, sends the PATCH and
        deserializes a 200 response into a ``ManagedCluster``.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedCluster"]
        # Default ARM error mapping; callers may extend it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._update_tags_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ManagedCluster', pipeline_response)

        if cls:
            # Hand the raw response and the deserialized model to the callback.
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    def begin_update_tags(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.ManagedCluster"]
        """Updates tags on a managed cluster.

        Updates a managed cluster with the specified tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Update Managed Cluster Tags operation.
        :type parameters: ~azure.mgmt.containerservice.v2020_09_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either ManagedCluster or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.containerservice.v2020_09_01.models.ManagedCluster]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedCluster"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: issue the initial PATCH. `cls` is overridden
            # so the raw pipeline response comes back for the poller to use.
            raw_result = self._update_tags_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Consumed by the initial call; keep them out of the polling kwargs.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into a ManagedCluster.
            deserialized = self._deserialize('ManagedCluster', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }

        # Select the polling strategy (default ARM polling / none / user-supplied).
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a managed cluster.

        Deletes the managed cluster with a specified resource group and name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: issue the initial DELETE. `cls` is overridden
            # so the raw pipeline response comes back for the poller to use.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Consumed by the initial call; keep them out of the polling kwargs.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body to deserialize; only invoke the callback if given.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }

        # Select the polling strategy (default ARM polling / none / user-supplied).
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    def _reset_service_principal_profile_initial(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.ManagedClusterServicePrincipalProfile"
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Initial (non-polling) POST request backing
        ``begin_reset_service_principal_profile``.

        Serializes ``parameters`` as a ``ManagedClusterServicePrincipalProfile``
        body and posts it to the resetServicePrincipalProfile action endpoint.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Default ARM error mapping; callers may extend it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._reset_service_principal_profile_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ManagedClusterServicePrincipalProfile')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _reset_service_principal_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'}  # type: ignore
    def begin_reset_service_principal_profile(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.ManagedClusterServicePrincipalProfile"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Reset Service Principal Profile of a managed cluster.

        Update the service principal Profile for a managed cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Reset Service Principal Profile operation for a
         Managed Cluster.
        :type parameters: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterServicePrincipalProfile
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: issue the initial POST. `cls` is overridden
            # so the raw pipeline response comes back for the poller to use.
            raw_result = self._reset_service_principal_profile_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Consumed by the initial call; keep them out of the polling kwargs.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # This action returns no body; only invoke the callback if given.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }

        # Select the polling strategy (default ARM polling / none / user-supplied).
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset_service_principal_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'}  # type: ignore
    def _reset_aad_profile_initial(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.ManagedClusterAADProfile"
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Initial (non-polling) POST request backing ``begin_reset_aad_profile``.

        Serializes ``parameters`` as a ``ManagedClusterAADProfile`` body and
        posts it to the resetAADProfile action endpoint.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Default ARM error mapping; callers may extend it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._reset_aad_profile_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ManagedClusterAADProfile')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    _reset_aad_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'}  # type: ignore
    def begin_reset_aad_profile(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        parameters,  # type: "_models.ManagedClusterAADProfile"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Reset AAD Profile of a managed cluster.

        Update the AAD Profile for a managed cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Reset AAD Profile operation for a Managed
         Cluster.
        :type parameters: ~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterAADProfile
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: issue the initial POST. `cls` is overridden
            # so the raw pipeline response comes back for the poller to use.
            raw_result = self._reset_aad_profile_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Consumed by the initial call; keep them out of the polling kwargs.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # This action returns no body; only invoke the callback if given.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }

        # Select the polling strategy (default ARM polling / none / user-supplied).
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset_aad_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'}  # type: ignore
def _rotate_cluster_certificates_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._rotate_cluster_certificates_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_rotate_cluster_certificates_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'} # type: ignore
    def begin_rotate_cluster_certificates(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Rotate certificates of a managed cluster.

        Rotate certificates of a managed cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved poller state: issue the initial POST. `cls` is overridden
            # so the raw pipeline response comes back for the poller to use.
            raw_result = self._rotate_cluster_certificates_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Consumed by the initial call; keep them out of the polling kwargs.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # This action returns no body; only invoke the callback if given.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }

        # Select the polling strategy (default ARM polling / none / user-supplied).
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_rotate_cluster_certificates.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'}  # type: ignore
def _stop_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/stop'} # type: ignore
def begin_stop(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Stop Managed Cluster.
Stops a Running Managed Cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/stop'} # type: ignore
def _start_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/start'} # type: ignore
def begin_start(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Start Managed Cluster.
Starts a Stopped Managed Cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/start'} # type: ignore
| """Lists managed clusters in the specified subscription and resource group.
Lists managed clusters in the specified subscription and resource group. The operation returns
properties of each managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedClusterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2020_09_01.models.ManagedClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
) |
ContactPage.tsx |
export default function | () {
return (
<div>
<p>
ContactPage
Lorem ipsum dolor sit amet consectetur adipisicing elit. Aliquam tempore, minima repellendus vel amet sed iste rem reprehenderit quod magnam incidunt qui ex corrupti eos nobis optio suscipit excepturi similique!
Lorem ipsum dolor sit amet consectetur adipisicing elit. Maiores, odio possimus tenetur nisi sequi doloribus eveniet est provident architecto recusandae quidem rerum quos qui totam debitis illum repudiandae vero inventore!
</p>
</div>
);
}
| ContactPage |
gcs_violations_test.py | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the GCS Violations upload notifier."""
import mock
import unittest
from datetime import datetime
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.notifier.notifiers import base_notification
from google.cloud.forseti.notifier.notifiers import gcs_violations
from tests.notifier.notifiers.test_data import fake_violations
from tests.unittest_utils import ForsetiTestCase
class GcsViolationsnotifierTest(ForsetiTestCase):
"""Tests for gcs_violations_notifier."""
def setUp(self):
"""Setup."""
self.fake_utcnow = datetime(
year=1900, month=1, day=1, hour=0, minute=0, second=0,
microsecond=0)
self.fake_global_conf = {
'db_host': 'x',
'db_name': 'y',
'db_user': 'z',
}
self.fake_notifier_conf = {
'gcs_path': 'gs://blah'
}
@mock.patch(
'google.cloud.forseti.notifier.notifiers.base_notification.date_time',
autospec=True)
def test_get_output_filename(self, mock_date_time):
"""Test_get_output_filename()."""
mock_date_time.get_utc_now_datetime = mock.MagicMock()
mock_date_time.get_utc_now_datetime.return_value = self.fake_utcnow
expected_timestamp = self.fake_utcnow.strftime(
string_formats.TIMESTAMP_TIMEZONE_FILES)
gvp = gcs_violations.GcsViolations(
'abcd',
1514764800123456,
[],
self.fake_global_conf,
{},
self.fake_notifier_conf)
actual_filename = gvp._get_output_filename(
string_formats.VIOLATION_CSV_FMT)
self.assertEquals(
string_formats.VIOLATION_CSV_FMT.format(
gvp.resource, gvp.inventory_index_id, expected_timestamp),
actual_filename)
@mock.patch(
'google.cloud.forseti.notifier.notifiers.base_notification.date_time',
autospec=True)
def test_get_output_filename_with_json(self, mock_date_time):
"""Test _get_output_filename()."""
mock_date_time.get_utc_now_datetime = mock.MagicMock()
mock_date_time.get_utc_now_datetime.return_value = self.fake_utcnow
expected_timestamp = self.fake_utcnow.strftime(
string_formats.TIMESTAMP_TIMEZONE_FILES)
gvp = gcs_violations.GcsViolations(
'abcd',
1514764800123456,
[],
self.fake_global_conf,
{},
self.fake_notifier_conf)
actual_filename = gvp._get_output_filename(
string_formats.VIOLATION_JSON_FMT)
self.assertEquals(
string_formats.VIOLATION_JSON_FMT.format(
gvp.resource, gvp.inventory_index_id, expected_timestamp),
actual_filename)
@mock.patch(
'google.cloud.forseti.common.util.file_uploader.StorageClient',
autospec=True)
@mock.patch('tempfile.NamedTemporaryFile')
@mock.patch('google.cloud.forseti.common.data_access.csv_writer.os')
def test_run(self, mock_os, mock_tempfile, mock_storage):
"""Test run()."""
fake_tmpname = 'tmp_name'
fake_output_name = 'abc'
gvp = gcs_violations.GcsViolations(
'abcd',
1514764800123456,
[],
self.fake_global_conf,
{},
self.fake_notifier_conf)
gvp._get_output_filename = mock.MagicMock(return_value=fake_output_name)
gcs_path = '{}/{}'.format(
gvp.notification_config['gcs_path'], fake_output_name)
mock_tmp_csv = mock.MagicMock()
mock_tempfile.return_value = mock_tmp_csv
mock_tmp_csv.name = fake_tmpname
mock_tmp_csv.write = mock.MagicMock()
gvp.run()
mock_tmp_csv.write.assert_called()
mock_storage.return_value.put_text_file.assert_called_once_with(
fake_tmpname, gcs_path)
@mock.patch(
'google.cloud.forseti.common.util.file_uploader.StorageClient',
autospec=True)
@mock.patch('google.cloud.forseti.common.util.parser.json_stringify')
@mock.patch('google.cloud.forseti.common.data_access.csv_writer.write_csv')
def test_run_with_json(self, mock_write_csv, mock_json_stringify,
mock_storage):
"""Test run() with json file format."""
notifier_config = fake_violations.NOTIFIER_CONFIGS_GCS_JSON
notification_config = notifier_config['resources'][0]['notifiers'][0]['configuration']
resource = 'policy_violations'
cycle_timestamp = '2018-03-24T00:49:02.891287'
mock_json_stringify.return_value = 'test123'
gvp = gcs_violations.GcsViolations(
resource,
cycle_timestamp,
fake_violations.VIOLATIONS,
fake_violations.GLOBAL_CONFIGS,
notifier_config,
notification_config)
gvp._get_output_filename = mock.MagicMock()
gvp.run()
self.assertTrue(gvp._get_output_filename.called)
self.assertEquals(
string_formats.VIOLATION_JSON_FMT,
gvp._get_output_filename.call_args[0][0])
self.assertFalse(mock_write_csv.called)
self.assertTrue(mock_json_stringify.called)
@mock.patch(
'google.cloud.forseti.common.util.file_uploader.StorageClient',
autospec=True)
@mock.patch('google.cloud.forseti.common.util.parser.json_stringify')
@mock.patch('google.cloud.forseti.common.data_access.csv_writer.write_csv')
def | (self, mock_csv_writer, mock_parser, mock_storage):
"""Test run() with default file format (CSV)."""
notifier_config = fake_violations.NOTIFIER_CONFIGS_GCS_DEFAULT
notification_config = notifier_config['resources'][0]['notifiers'][0]['configuration']
resource = 'policy_violations'
cycle_timestamp = '2018-03-24T00:49:02.891287'
gvp = gcs_violations.GcsViolations(
resource,
cycle_timestamp,
fake_violations.VIOLATIONS,
fake_violations.GLOBAL_CONFIGS,
notifier_config,
notification_config)
gvp._get_output_filename = mock.MagicMock()
gvp.run()
self.assertTrue(gvp._get_output_filename.called)
self.assertEquals(
string_formats.VIOLATION_CSV_FMT,
gvp._get_output_filename.call_args[0][0])
self.assertTrue(mock_csv_writer.called)
self.assertFalse(mock_parser.called)
@mock.patch(
'google.cloud.forseti.common.util.file_uploader.StorageClient',
autospec=True)
@mock.patch('google.cloud.forseti.common.util.parser.json_stringify')
@mock.patch('google.cloud.forseti.common.data_access.csv_writer.write_csv')
def test_run_with_invalid_data_format(self, mock_write_csv,
mock_json_stringify, mock_storage):
"""Test run() with json file format."""
notifier_config = (
fake_violations.NOTIFIER_CONFIGS_GCS_INVALID_DATA_FORMAT)
notification_config = notifier_config['resources'][0]['notifiers'][0]['configuration']
resource = 'policy_violations'
cycle_timestamp = '2018-03-24T00:49:02.891287'
mock_json_stringify.return_value = 'test123'
gvp = gcs_violations.GcsViolations(
resource,
cycle_timestamp,
fake_violations.VIOLATIONS,
fake_violations.GLOBAL_CONFIGS,
notifier_config,
notification_config)
gvp._get_output_filename = mock.MagicMock()
with self.assertRaises(base_notification.InvalidDataFormatError):
gvp.run()
self.assertFalse(gvp._get_output_filename.called)
self.assertFalse(mock_write_csv.called)
self.assertFalse(mock_json_stringify.called)
if __name__ == '__main__':
unittest.main()
| test_run_with_csv |
pillify.js | /*
Copyright 2019, 2020 The Matrix.org Foundation C.I.C.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import React from "react";
import ReactDOM from 'react-dom';
import {MatrixClientPeg} from '../MatrixClientPeg';
import SettingsStore from "../settings/SettingsStore";
import {PushProcessor} from 'matrix-js-sdk/src/pushprocessor';
import * as sdk from '../index';
/**
* Recurses depth-first through a DOM tree, converting matrix.to links
* into pills based on the context of a given room. Returns a list of
* the resulting React nodes so they can be unmounted rather than leaking.
*
* @param {Node[]} nodes - a list of sibling DOM nodes to traverse to try
* to turn into pills.
* @param {MatrixEvent} mxEvent - the matrix event which the DOM nodes are
* part of representing.
* @param {Node[]} pills: an accumulator of the DOM nodes which contain
* React components which have been mounted as part of this.
* The initial caller should pass in an empty array to seed the accumulator.
*/
export function | (nodes, mxEvent, pills) {
const room = MatrixClientPeg.get().getRoom(mxEvent.getRoomId());
const shouldShowPillAvatar = SettingsStore.getValue("Pill.shouldShowPillAvatar");
let node = nodes[0];
while (node) {
let pillified = false;
if (node.tagName === "A" && node.getAttribute("href")) {
const href = node.getAttribute("href");
// If the link is a (localised) matrix.to link, replace it with a pill
const Pill = sdk.getComponent('elements.Pill');
if (Pill.isMessagePillUrl(href)) {
const pillContainer = document.createElement('span');
const pill = <Pill
url={href}
inMessage={true}
room={room}
shouldShowPillAvatar={shouldShowPillAvatar}
/>;
ReactDOM.render(pill, pillContainer);
node.parentNode.replaceChild(pillContainer, node);
pills.push(pillContainer);
// Pills within pills aren't going to go well, so move on
pillified = true;
// update the current node with one that's now taken its place
node = pillContainer;
}
} else if (
node.nodeType === Node.TEXT_NODE &&
// as applying pills happens outside of react, make sure we're not doubly
// applying @room pills here, as a rerender with the same content won't touch the DOM
// to clear the pills from the last run of pillifyLinks
!node.parentElement.classList.contains("mx_AtRoomPill")
) {
const Pill = sdk.getComponent('elements.Pill');
let currentTextNode = node;
const roomNotifTextNodes = [];
// Take a textNode and break it up to make all the instances of @room their
// own textNode, adding those nodes to roomNotifTextNodes
while (currentTextNode !== null) {
const roomNotifPos = Pill.roomNotifPos(currentTextNode.textContent);
let nextTextNode = null;
if (roomNotifPos > -1) {
let roomTextNode = currentTextNode;
if (roomNotifPos > 0) roomTextNode = roomTextNode.splitText(roomNotifPos);
if (roomTextNode.textContent.length > Pill.roomNotifLen()) {
nextTextNode = roomTextNode.splitText(Pill.roomNotifLen());
}
roomNotifTextNodes.push(roomTextNode);
}
currentTextNode = nextTextNode;
}
if (roomNotifTextNodes.length > 0) {
const pushProcessor = new PushProcessor(MatrixClientPeg.get());
const atRoomRule = pushProcessor.getPushRuleById(".m.rule.roomnotif");
if (atRoomRule && pushProcessor.ruleMatchesEvent(atRoomRule, mxEvent)) {
// Now replace all those nodes with Pills
for (const roomNotifTextNode of roomNotifTextNodes) {
// Set the next node to be processed to the one after the node
// we're adding now, since we've just inserted nodes into the structure
// we're iterating over.
// Note we've checked roomNotifTextNodes.length > 0 so we'll do this at least once
node = roomNotifTextNode.nextSibling;
const pillContainer = document.createElement('span');
const pill = <Pill
type={Pill.TYPE_AT_ROOM_MENTION}
inMessage={true}
room={room}
shouldShowPillAvatar={true}
/>;
ReactDOM.render(pill, pillContainer);
roomNotifTextNode.parentNode.replaceChild(pillContainer, roomNotifTextNode);
pills.push(pillContainer);
}
// Nothing else to do for a text node (and we don't need to advance
// the loop pointer because we did it above)
continue;
}
}
}
if (node.childNodes && node.childNodes.length && !pillified) {
pillifyLinks(node.childNodes, mxEvent, pills);
}
node = node.nextSibling;
}
}
/**
* Unmount all the pill containers from React created by pillifyLinks.
*
* It's critical to call this after pillifyLinks, otherwise
* Pills will leak, leaking entire DOM trees via the event
* emitter on BaseAvatar as per
* https://github.com/vector-im/riot-web/issues/12417
*
* @param {Node[]} pills - array of pill containers whose React
* components should be unmounted.
*/
export function unmountPills(pills) {
for (const pillContainer of pills) {
ReactDOM.unmountComponentAtNode(pillContainer);
}
}
| pillifyLinks |
create_account_o_k_body.go | // Code generated by go-swagger; DO NOT EDIT.
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// CreateAccountOKBody create account o k body
// swagger:model createAccountOKBody
type CreateAccountOKBody struct {
// data
Data *Accounts `json:"data,omitempty"`
}
| if err := m.validateData(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *CreateAccountOKBody) validateData(formats strfmt.Registry) error {
if swag.IsZero(m.Data) { // not required
return nil
}
if m.Data != nil {
if err := m.Data.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("data")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *CreateAccountOKBody) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *CreateAccountOKBody) UnmarshalBinary(b []byte) error {
var res CreateAccountOKBody
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
} | // Validate validates this create account o k body
func (m *CreateAccountOKBody) Validate(formats strfmt.Registry) error {
var res []error
|
Vizcontent.js | import FeedingViz from './FeedingViz'
import HousingViz from './HousingViz'
import HealthViz from './HealthViz'
import BehaviourViz from './BehaviourViz'
// import React, { useRef, useLayoutEffect } from 'react';
import React from 'react';
const Vizcontent=({btnIndex})=>{
if(btnIndex==="1"){
return(
<FeedingViz/>
)
}
if(btnIndex==="2"){
return(
<HousingViz/>
)
}
if(btnIndex==="3"){
return(
<HealthViz/>
| if(btnIndex==="4"){
return(
<BehaviourViz/>
)
}
}
export default Vizcontent | )
}
|
reviews.js | const mongoose = require("mongoose")
const Review = require("../models/review.models")
const User = require("../models/user.model")
const axios = require("axios")
const express = require("express")
const router = express.Router()
router.get("/", (req, res) => {
res.send("reviews route") |
// Get all reviews sorted in latest order
router.get("/feed", async (req, res) => {
try{
const reviews = await Review.find({}).sort({"date":-1})
res.status(200).json(reviews)
}catch(error){
console.log("error in getting all the reviews in the reviews.js file ")
console.log(error.message)
res.status(500).json(error.message)
}
})
// get all the reviews for a given movie by its id
router.get("/movie/:movieID", async (req, res) => {
const movieID = req.params.movieID
try {
console.log("movieID =>> ", movieID)
const reviews = await Review.find({tmdbMovieId: movieID}).sort({"date":-1})
res.status(200).json(reviews)
} catch (error) {
console.log("error = ", error.message)
res.status(500).json("error in finding recommendations in database")
}
})
// get review by its id
router.get("/review/:id", async (req, res) => {
const reviewID = req.params.id
console.log("the /review/:id route was called with id = ", reviewID )
try {
let review = await Review.findById(reviewID)
res.status(200).json(review)
} catch (error) {
console.log("error in the catch block of the reviews .js page getreviewById function")
console.log("ERROR =>> ", error.message)
res.status(500).json(error.message)
}
})
// Add a review
router.post("/addReview", async (req, res) => {
data = req.body
// console.log("data = ", data)
const NewReview = new Review({
username: data.username,
tmdbMovieId: data.movie_id,
userID: data.user_id,
review: data.review,
reting: data.rating,
movie_data: data.movie_data
})
const user = await User.findById(data.user_id)
console.log("Found user = ", user)
try {
await NewReview.save()
user.movies_reviewed.push([data.movie_id, NewReview.id])
await user.save()
console.log("New review saved in the database and reviewed_movies array updted")
res.status(200).json(user)
} catch (error) {
console.log("error in the catch block in the router.pst in revies.js route file")
console.log(error.message)
res.status(500).json(error.message)
}
})
// Get all the reviews for a perticular user
router.get("/user/:userID", async(req, res)=>{
const {userID} = req.params;
console.log("User ID comming to the get method = ", userID)
try {
const reviews = await Review.find({userID: userID}).sort({"date":-1})
// console.log("Reviews found = ", reviews)
res.status(200).json(reviews)
} catch (error) {
console.error("error in the catch bkock of get ", error)
res.status(500).json(error.message)
}
})
module.exports = router | })
|
insert_edge.go | /* Copyright (c) 2021 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
package edge
import (
"fmt"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/kikimo/nebula-stresser/pkg/client"
nebula "github.com/vesoft-inc/nebula-go/v2"
)
const (
address = "192.168.15.11"
// The default port of Nebula Graph 2.x is 9669.
// 3699 is only for testing.
port = 9669
username = "root"
password = "nebula"
)
// Initialize logger
var log = nebula.DefaultLogger{}
// type Client struct {
// ID int
// session *nebula.Session
// }
func convertAddrs(addrs []string) ([]nebula.HostAddress, error) {
hostAddrs := []nebula.HostAddress{}
for _, a := range addrs {
parts := strings.Split(a, ":")
if len(parts) != 2 |
port, err := strconv.Atoi(parts[1])
if err != nil {
return nil, fmt.Errorf("illegal graph address: %s", a)
}
addr := nebula.HostAddress{
Host: parts[0],
Port: port,
}
hostAddrs = append(hostAddrs, addr)
}
return hostAddrs, nil
}
func RunInsertEdge(spaceName string, edgeName string, clientNum int, vertexNum int, addrs []string, shuffleWindow int) {
doneSet := make([]int32, vertexNum*vertexNum)
hostList, err := convertAddrs(addrs)
if err != nil {
panic(err)
}
// Create configs for connection pool using default values
testPoolConfig := nebula.GetDefaultConf()
testPoolConfig.TimeOut = 500 * time.Millisecond
testPoolConfig.MaxConnPoolSize = clientNum * 10
// Initialize connection pool
pool, err := nebula.NewConnectionPool(hostList, testPoolConfig, log)
if err != nil {
log.Fatal(fmt.Sprintf("Fail to initialize the connection pool, host: %s, port: %d, %s", address, port, err.Error()))
}
// Close all connections in the pool
defer pool.Close()
clients := make([]*client.SessionX, clientNum)
for i := range clients {
ok := false
for !ok {
session, err := pool.GetSession(username, password)
if err != nil {
fmt.Printf("failed creating nebula session %+v\n", err)
continue
}
clients[i] = client.New(i+1, session)
if _, err := clients[i].Execute(fmt.Sprintf("use %s;", spaceName)); err != nil {
fmt.Printf("error switching space: %+v\n", err)
continue
}
ok = true
}
}
// TODO batch insert edge
edges := vertexNum * vertexNum
fmt.Printf("building insert edge statments for clients...\n")
stmts := make([][]string, clientNum)
var swg sync.WaitGroup
swg.Add(clientNum)
for i := 0; i < clientNum; i++ {
go func(iClient int) {
iStmts := make([]string, edges)
ith := 0
for x := 0; x < vertexNum; x++ {
for y := 0; y < vertexNum; y++ {
stmt := fmt.Sprintf(`insert edge %s(idx) values %d->%d:("%d-%d")`, edgeName, x+1, y+1, iClient+1, ith+1)
iStmts[ith] = stmt
ith++
}
}
stmts[iClient] = iStmts
swg.Done()
}(i)
}
swg.Wait()
// for i := range stmts {
// stmts[i] = make([]string, edges)
// // 500 edges
// for j := 0; j < edges; j++ {
// src, dst := j+1, j+edges+1
// stmts[i][j] = fmt.Sprintf(`insert edge known2(idx) values %d->%d:("%d-%d")`, src, dst, i+1, j+1)
// // fmt.Printf("%s\n", stmts[i][j])
// }
// }
if shuffleWindow > 1 {
// for i := 0; i < edges; i += shuffleWindow {
// // shuffle edge through [i, i + shuffleWindow)
// start, end := i, i+shuffleWindow
// if end > edges {
// end = edges
// }
// // TODO perform shuffle
// sz := end - start
// for j := 0; j < clientNum; j++ {
// rand.Shuffle(sz, func(x, y int) {
// stmts[j][x+i], stmts[j][y+i] = stmts[j][y+i], stmts[j][x+i]
// })
// }
// }
fmt.Printf("skip shuffle\n")
}
// for i := 0; i < edges; i++ {
// fmt.Printf("%d: %s\n", i+1, stmts[0][i])
// }
fmt.Printf("done building insert edge statments, inserting edges....\n")
for i := 0; i < edges; i++ {
var wg sync.WaitGroup
wg.Add(clientNum)
for j := range clients {
go func(c *client.SessionX, stmt string) {
// for k := 0; k < 100; k++ {
for {
// ret, err := c.Execute(stmt)
// ret.GetErrorMsg()
// ret.geterr
_, err := c.Execute(stmt)
if err == nil {
fmt.Printf("done edge %d\n", i)
atomic.StoreInt32(&doneSet[i], 1)
break
} else {
// log.Error(fmt.Sprintf("client %d failed executing %s, %+v, retry...", c.GetID(), stmt, err))
break
}
}
// c.session.Execute(stmt)
// fmt.Println(stmt)
// if err != nil {
// continue
// }
// if err := checkResultSet("", rs); err != nil {
// continue
// }
// }
wg.Done()
// break
}(clients[j], stmts[j][i])
}
wg.Wait()
}
failed := 0
for i := range doneSet {
if doneSet[i] == 0 {
in := i / vertexNum
out := i % vertexNum
fmt.Printf("failed inserting %d->%d\n", in, out)
failed++
}
}
fmt.Printf("failed: %d\n", failed)
// TODO don't delete me
// for i := range clients {
// go func(c *Client, stmt []string) {
// // fmt.Printf("session: %+v\n", c.session)
// for _, stm := range stmt {
// for k := 0; k < 100; k++ {
// rs, err := c.session.Execute(stm)
// if err != nil {
// panic(fmt.Sprintf("failed inserting edge: %s", stm))
// }
// checkResultSet("", rs)
// }
// }
// wg.Done()
// }(&clients[i], stmts[i])
// }
// wg.Wait()
fmt.Printf("insert done!\n")
for _, c := range clients {
c.Release()
}
}
| {
return nil, fmt.Errorf("illegal graph address: %s", a)
} |
re.rs | use once_cell::sync::Lazy;
use regex::Regex;
/// Identifies which pattern — or which named capture within a pattern —
/// failed to match when parsing CLI output (apparently the NordVPN CLI,
/// given the NordLynx/CyberSec field names below — confirm at call sites).
///
/// NOTE(review): no variant is constructed in this file; presumably the
/// parsing layer maps a missing capture group to the matching variant.
/// NOTE(review): `status` has an UPTIME pattern below but no `StatusUptime`
/// variant here — confirm whether that omission is intentional.
#[derive(Debug)]
pub enum RegexError {
    // `account` output as a whole, then its individual captures.
    Account,
    AccountEmail,
    AccountActive,
    AccountExpires,
    // List-style outputs parsed via WORD_LIST.
    Cities,
    Connect,
    Countries,
    Groups,
    Login,
    // `settings` output as a whole, then its individual captures.
    Settings,
    SettingsTechnology,
    SettingsProtocol,
    SettingsFirewall,
    SettingsKillswitch,
    SettingsCybersec,
    SettingsObfuscate,
    SettingsNotify,
    SettingsAutoconnect,
    SettingsIpv6,
    SettingsDns,
    // `status` output as a whole, then its individual captures.
    Status,
    StatusHostname,
    StatusCountry,
    StatusCity,
    StatusIp,
    StatusTechnology,
    StatusProtocol,
    StatusTransfer,
    Version,
}
// Compiled-once regexes, one per CLI output format. `Lazy` defers the
// (relatively expensive) compilation to first use and caches the result.
// The `unwrap()`s are acceptable only because every pattern is a fixed
// compile-time constant from `strings` below; a malformed pattern would
// panic on first use rather than at build time.
pub static WORD_LIST: Lazy<Regex> = Lazy::new(|| Regex::new(strings::WORD_LIST).unwrap());
pub static ACCOUNT: Lazy<Regex> = Lazy::new(|| Regex::new(strings::ACCOUNT).unwrap());
pub static CONNECT: Lazy<Regex> =
    Lazy::new(|| Regex::new(strings::connect::COUNTRY_SERVER_HOSTNAME).unwrap());
pub static LOGIN: Lazy<Regex> = Lazy::new(|| Regex::new(strings::login::URL).unwrap());
// Matches the "Command '…' doesn't exist." error for an unknown setting name.
pub static INVALID_SETTING: Lazy<Regex> =
    Lazy::new(|| Regex::new(strings::settings::INVALID_NAME).unwrap());
pub static SETTINGS: Lazy<Regex> = Lazy::new(|| Regex::new(strings::SETTINGS).unwrap())
;
pub static STATUS: Lazy<Regex> = Lazy::new(|| Regex::new(strings::STATUS).unwrap());
pub static VERSION: Lazy<Regex> = Lazy::new(|| Regex::new(strings::version::VERSION).unwrap());
/// Raw pattern strings, assembled at compile time with `const_format` so the
/// `Regex` statics above never pay a runtime concatenation cost.
///
/// Fragments in `shared` carry a literal `GROUP_NAME` placeholder as their
/// capture-group name; each use site rewrites it with `str_replace!` so one
/// fragment can serve several differently named captures.
pub mod strings {
    use const_format::*;

    // One word followed by a comma separator or end of input
    // (comma-separated list outputs such as countries/cities/groups).
    pub const WORD_LIST: &str = r#"(\w+)(?:,\s*|\s*$)"#;

    // Union of every recognised field line in `account` output.
    pub const ACCOUNT: &str = formatcp!(
        r#"(?:{}|{}|{})+"#,
        account::EMAIL,
        account::ACTIVE,
        account::EXPIRES
    );

    // Union of every recognised field line in `settings` output.
    pub const SETTINGS: &str = formatcp!(
        r#"(?:{}|{}|{}|{}|{}|{}|{}|{}|{}|{})+"#,
        settings::TECHNOLOGY,
        settings::PROTOCOL,
        settings::FIREWALL,
        settings::KILLSWITCH,
        settings::CYBERSEC,
        settings::OBFUSCATE,
        settings::NOTIFY,
        settings::AUTOCONNECT,
        settings::IPV6,
        settings::DNS,
    );

    // Union of every recognised field line in `status` output.
    pub const STATUS: &str = formatcp!(
        r#"(?:{}|{}|{}|{}|{}|{}|{}|{})+"#,
        status::HOSTNAME,
        status::COUNTRY,
        status::CITY,
        status::IP,
        status::TECHNOLOGY,
        status::PROTOCOL,
        status::TRANSFER,
        status::UPTIME
    );

    /// Fragments reused by several patterns. `GROUP_NAME` is a placeholder
    /// that callers must rewrite via `str_replace!` before compiling.
    pub mod shared {
        pub const LINE_END_OR_NEWLINE: &str = r#"\s*(?:\n|$)"#;
        // Loose IPv6 (1–7 colon-separated hex groups) or dotted-quad IPv4.
        pub const IPV4_OR_IPV6: &str =
            r#"(?P<GROUP_NAME>(?i)(?:[\da-f]{0,4}:){1,7}[\da-f]{0,4}|(?:\d{1,3}\.){3}\d{1,3})"#;
        pub const OPENVPN_OR_NORDLYNX: &str = r#"(?P<GROUP_NAME>(?i)OPENVPN|NORDLYNX)"#;
        pub const TCP_OR_UDP: &str = r#"(?P<GROUP_NAME>(?i)TCP|UDP)"#;
        pub const ENABLED_OR_DISABLED: &str = r#"(?P<GROUP_NAME>(?i)enabled|disabled)"#;
    }

    pub mod account {
        use super::shared::*;
        use const_format::*;

        pub const EMAIL: &str = concatcp!(r#"Email Address:\s+(?P<email>.+)"#, LINE_END_OR_NEWLINE);
        pub const ACTIVE: &str = r#"VPN Service:\s+(?P<active>(?i)[a-z]+)\s*"#;
        // e.g. "(Expires on Aug 21st, 2022)" — month abbreviation, ordinal day, year.
        pub const EXPIRES: &str = r#"\(Expires on\s+(?P<expires_month>(?i)[a-z]{3})\s+(?P<expires_day>\d+)(?i:st|nd|rd|th),\s+(?P<expires_year>\d{4})\)"#;
    }

    pub mod connect {
        // e.g. "You are connected to Germany #123 (de123.nordvpn.com)!"
        // — assumed shape; confirm against real CLI output.
        pub const COUNTRY_SERVER_HOSTNAME: &str = r#"You are connected to\s+(?P<country>(?i)[a-z_ ]+)\s+#(?P<server>\d+)\s+\((?P<hostname>[\w\d\-\.]+)\)!"#;
    }

    pub mod login {
        use super::shared::*;
        use const_format::*;

        pub const URL: &str = concatcp!(
            r#"Continue in the browser:\s+(?P<url>.+)"#,
            LINE_END_OR_NEWLINE
        );
    }

    pub mod settings {
        use super::shared::*;
        use const_format::*;

        // Error line printed for an unrecognised setting/command name.
        pub const INVALID_NAME: &str = r#"Command '(?P<name>.+)' doesn't exist."#;
        pub const TECHNOLOGY: &str = concatcp!(
            r#"Technology:\s+"#,
            str_replace!(OPENVPN_OR_NORDLYNX, "GROUP_NAME", "technology"),
            LINE_END_OR_NEWLINE,
        );
        pub const PROTOCOL: &str = concatcp!(
            r#"Protocol:\s+"#,
            str_replace!(TCP_OR_UDP, "GROUP_NAME", "protocol"),
            LINE_END_OR_NEWLINE,
        );
        pub const FIREWALL: &str = concatcp!(
            r#"Firewall:\s+"#,
            str_replace!(ENABLED_OR_DISABLED, "GROUP_NAME", "firewall"),
            LINE_END_OR_NEWLINE
        );
        pub const KILLSWITCH: &str = concatcp!(
            r#"Kill Switch:\s+"#,
            str_replace!(ENABLED_OR_DISABLED, "GROUP_NAME", "killswitch"),
            LINE_END_OR_NEWLINE
        );
        pub const CYBERSEC: &str = concatcp!(
            r#"CyberSec:\s+"#,
            str_replace!(ENABLED_OR_DISABLED, "GROUP_NAME", "cybersec"),
            LINE_END_OR_NEWLINE
        );
        pub const OBFUSCATE: &str = concatcp!(
            r#"Obfuscate:\s+"#,
            str_replace!(ENABLED_OR_DISABLED, "GROUP_NAME", "obfuscate"),
            LINE_END_OR_NEWLINE
        );
        pub const NOTIFY: &str = concatcp!(
            r#"Notify:\s+"#,
            str_replace!(ENABLED_OR_DISABLED, "GROUP_NAME", "notify"),
            LINE_END_OR_NEWLINE
        );
        pub const AUTOCONNECT: &str = concatcp!(
            r#"Auto-connect:\s+"#,
            str_replace!(ENABLED_OR_DISABLED, "GROUP_NAME", "autoconnect"),
            LINE_END_OR_NEWLINE
        );
        pub const IPV6: &str = concatcp!(
            r#"IPv6:\s+"#,
            str_replace!(ENABLED_OR_DISABLED, "GROUP_NAME", "ipv6"),
            LINE_END_OR_NEWLINE
        );
        // DNS is either the literal "disabled"/"enabled" or up to three
        // comma-separated server addresses (primary/secondary/tertiary).
        pub const DNS: &str = formatcp!(
            r#"DNS:\s+(?:{}|(?:{}(?:,\s+)?)?(?:{}(?:,\s+)?)?{}?){}"#,
            str_replace!(ENABLED_OR_DISABLED, "GROUP_NAME", "dns_disabled"),
            str_replace!(IPV4_OR_IPV6, "GROUP_NAME", "dns_primary"),
            str_replace!(IPV4_OR_IPV6, "GROUP_NAME", "dns_secondary"),
            str_replace!(IPV4_OR_IPV6, "GROUP_NAME", "dns_tertiary"),
            LINE_END_OR_NEWLINE
        );
    }

    pub mod status {
        use super::shared::*;
        use const_format::*;

        pub const HOSTNAME: &str = concatcp!(
            r#"Current server:\s+(?P<hostname>[\w\d\-\.]+)"#,
            LINE_END_OR_NEWLINE
        );
        pub const COUNTRY: &str = concatcp!(
            r#"Country:\s+(?P<country>(?i)[a-z_ ]+[a-z_ ])"#,
            LINE_END_OR_NEWLINE
        );
        pub const CITY: &str = concatcp!(
            r#"City:\s+(?P<city>(?i)[a-z_ ]+[a-z_ ])"#,
            LINE_END_OR_NEWLINE
        );
        pub const IP: &str = concatcp!(
            r#"Server IP:\s+"#,
            str_replace!(IPV4_OR_IPV6, "GROUP_NAME", "ip"),
            LINE_END_OR_NEWLINE
        );
        pub const TECHNOLOGY: &str = concatcp!(
            r#"Current technology:\s+"#,
            str_replace!(OPENVPN_OR_NORDLYNX, "GROUP_NAME", "technology"),
            LINE_END_OR_NEWLINE
        );
        pub const PROTOCOL: &str = concatcp!(
            r#"Current protocol:\s+"#,
            str_replace!(TCP_OR_UDP, "GROUP_NAME", "protocol"),
            LINE_END_OR_NEWLINE
        );
        // e.g. "Transfer: 1.23 MiB received, 456 KiB sent".
        pub const TRANSFER: &str = concatcp!(
            r#"Transfer:\s+(?i:(?P<transfer_received>(?:\d+\.)?\d+\s+[a-z]+)\s+received,\s+(?P<transfer_sent>(?:\d+\.)?\d+\s+[a-z]+)\s+sent)"#,
            LINE_END_OR_NEWLINE
        );
        // Every unit component is optional; all-empty matches are tolerated
        // because UPTIME only appears inside the STATUS alternation.
        pub const UPTIME: &str = concatcp!(
            r#"Uptime:\s+(?i:(?:(?P<uptime_years>\d+)\s+years?\s*)?(?:(?P<uptime_months>\d+)\s+months?\s*)?(?:(?P<uptime_days>\d+)\s+days?\s*)?(?:(?P<uptime_hours>\d+)\s+hours?\s*)?(?:(?P<uptime_minutes>\d+)\s+minutes?\s*)?(?:(?P<uptime_seconds>\d+)\s+seconds?\s*)?)"#,
            LINE_END_OR_NEWLINE
        );
    }

    pub mod version {
        use super::shared::*;
        use const_format::*;

        // Fix: the second separator dot was unescaped (`\d+\.\d+.\d+`), so it
        // matched any character between minor and patch; all three separators
        // are now literal dots, consistent with the first (escaped) one.
        pub const VERSION: &str = concatcp!(r#"(?P<version>\d+\.\d+\.\d+)"#, LINE_END_OR_NEWLINE);
    }
}
/// Extracts every comma-separated word from `text` using [`WORD_LIST`].
///
/// Returns `None` when the pattern matches nothing at all, otherwise
/// `Some` with one owned `String` per captured word.
pub fn parse_list(text: &str) -> Option<Vec<String>> {
    let items: Vec<String> = WORD_LIST
        .captures_iter(text)
        .map(|capture| capture.get(1).unwrap().as_str().to_owned())
        .collect();
    if items.is_empty() { None } else { Some(items) }
}
// Debug helpers: `cargo test -- --nocapture` prints the fully assembled
// composite patterns, which is the easiest way to inspect what
// `const_format` produced at compile time.
#[cfg(test)]
mod tests {
    #[test]
    fn print_account_pattern() {
        println!("Account Pattern: {}", super::strings::ACCOUNT);
    }

    // Fix: this test's function name had been lost (the source read
    // `fn | () {`); restored as `print_settings_pattern`, matching the
    // naming of its account/status siblings and the stray
    // `print_settings_pattern` token left at the end of the file.
    #[test]
    fn print_settings_pattern() {
        println!("Settings Pattern: {}", super::strings::SETTINGS);
    }

    #[test]
    fn print_status_pattern() {
        println!("Status Pattern: {}", super::strings::STATUS);
    }
}
ip_a_m_model.go | // Code generated by go-swagger; DO NOT EDIT.
package cli
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"github.com/go-swagger/dockerctl/models"
"github.com/spf13/cobra"
)
// Schema cli for IPAM
// register flags to command
// registerModelIPAMFlags registers a cobra flag for every IPAM field on cmd.
// depth guards against unbounded recursion in self-referencing schemas;
// cmdPrefix namespaces the flag names (e.g. "parent.Driver").
// NOTE: this file is go-swagger generated ("DO NOT EDIT") — prefer
// regenerating over hand edits.
func registerModelIPAMFlags(depth int, cmdPrefix string, cmd *cobra.Command) error {
	if err := registerIPAMConfig(depth, cmdPrefix, cmd); err != nil {
		return err
	}
	if err := registerIPAMDriver(depth, cmdPrefix, cmd); err != nil {
		return err
	}
	if err := registerIPAMOptions(depth, cmdPrefix, cmd); err != nil {
		return err
	}
	return nil
}
// registerIPAMConfig would register the Config flag, but []map[string]string
// is not representable as a cobra flag (see warning below), so this is
// intentionally a no-op apart from the recursion-depth guard.
func registerIPAMConfig(depth int, cmdPrefix string, cmd *cobra.Command) error {
	if depth > maxDepth {
		return nil
	}
	// warning: Config []map[string]string array type is not supported by go-swagger cli yet
	return nil
}
// registerIPAMDriver registers the Driver string flag (default "default"),
// namespaced under cmdPrefix when one is given ("Driver" vs "<prefix>.Driver").
func registerIPAMDriver(depth int, cmdPrefix string, cmd *cobra.Command) error {
	if depth > maxDepth {
		return nil
	}
	driverDescription := `Name of the IPAM driver to use.`
	var driverFlagName string
	if cmdPrefix == "" {
		driverFlagName = "Driver"
	} else {
		driverFlagName = fmt.Sprintf("%v.Driver", cmdPrefix)
	}
	var driverFlagDefault string = "default"
	// Return value (the *string) is discarded; retrieval goes through
	// cmd.Flags().GetString in retrieveIPAMDriverFlags instead.
	_ = cmd.PersistentFlags().String(driverFlagName, driverFlagDefault, driverDescription)
	return nil
}
func | (depth int, cmdPrefix string, cmd *cobra.Command) error {
if depth > maxDepth {
return nil
}
// warning: Options map[string]string map type is not supported by go-swagger cli yet
return nil
}
// retrieve flags from commands, and set value in model. Return true if any flag is passed by user to fill model field.
// retrieveModelIPAMFlags reads any user-supplied flags back into m.
// The bool reports whether at least one field was actually set.
// NOTE(review): the (error, bool) return order is unidiomatic Go but is
// fixed by the go-swagger generator — do not "fix" by hand.
func retrieveModelIPAMFlags(depth int, m *models.IPAM, cmdPrefix string, cmd *cobra.Command) (error, bool) {
	retAdded := false
	err, configAdded := retrieveIPAMConfigFlags(depth, m, cmdPrefix, cmd)
	if err != nil {
		return err, false
	}
	retAdded = retAdded || configAdded
	err, driverAdded := retrieveIPAMDriverFlags(depth, m, cmdPrefix, cmd)
	if err != nil {
		return err, false
	}
	retAdded = retAdded || driverAdded
	err, optionsAdded := retrieveIPAMOptionsFlags(depth, m, cmdPrefix, cmd)
	if err != nil {
		return err, false
	}
	retAdded = retAdded || optionsAdded
	return nil, retAdded
}
// retrieveIPAMConfigFlags is a no-op placeholder: []map[string]string cannot
// be expressed as a cobra flag, so nothing is ever read into m.Config.
// NOTE(review): unlike the Driver path, the flag name is built
// unconditionally as "<prefix>.Config" (".Config" for an empty prefix);
// harmless while the body is empty, but confirm if this is ever filled in.
func retrieveIPAMConfigFlags(depth int, m *models.IPAM, cmdPrefix string, cmd *cobra.Command) (error, bool) {
	if depth > maxDepth {
		return nil, false
	}
	retAdded := false
	configFlagName := fmt.Sprintf("%v.Config", cmdPrefix)
	if cmd.Flags().Changed(configFlagName) {
		// warning: Config array type []map[string]string is not supported by go-swagger cli yet
	}
	return nil, retAdded
}
// retrieveIPAMDriverFlags copies the Driver flag value into m.Driver, but
// only when the user explicitly set it (Changed), so the "default" default
// never overwrites an absent field.
func retrieveIPAMDriverFlags(depth int, m *models.IPAM, cmdPrefix string, cmd *cobra.Command) (error, bool) {
	if depth > maxDepth {
		return nil, false
	}
	retAdded := false
	driverFlagName := fmt.Sprintf("%v.Driver", cmdPrefix)
	if cmd.Flags().Changed(driverFlagName) {
		// Generator artifact: the name is recomputed here (shadowing the one
		// above) with empty-prefix handling the outer Changed() check lacks.
		// NOTE(review): with an empty cmdPrefix the outer check looks up
		// ".Driver" while registration used "Driver" — confirm this function
		// is only reached with a non-empty prefix.
		var driverFlagName string
		if cmdPrefix == "" {
			driverFlagName = "Driver"
		} else {
			driverFlagName = fmt.Sprintf("%v.Driver", cmdPrefix)
		}
		driverFlagValue, err := cmd.Flags().GetString(driverFlagName)
		if err != nil {
			return err, false
		}
		m.Driver = &driverFlagValue
		retAdded = true
	}
	return nil, retAdded
}
// retrieveIPAMOptionsFlags is a no-op placeholder: map[string]string cannot
// be expressed as a cobra flag, so nothing is ever read into m.Options.
func retrieveIPAMOptionsFlags(depth int, m *models.IPAM, cmdPrefix string, cmd *cobra.Command) (error, bool) {
	if depth > maxDepth {
		return nil, false
	}
	retAdded := false
	optionsFlagName := fmt.Sprintf("%v.Options", cmdPrefix)
	if cmd.Flags().Changed(optionsFlagName) {
		// warning: Options map type map[string]string is not supported by go-swagger cli yet
	}
	return nil, retAdded
}
| registerIPAMOptions |
urls.py | """dailypythontip home app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home | """
from django.urls import path, include
from home import views
app_name = 'home'
apis = [
path('tips/', views.api_tip_list, name='api-tip-list'),
path('tips/<int:pk>/', views.api_tip_detail, name='api-tip-detail'),
]
urlpatterns = [
path('', views.index, name='index'),
path('retweet/<int:tweet_id>/', views.retweet, name='retweet'),
path('search/', views.search_tips, name='search-tips'),
path('filter/<str:tag>/', views.filter_tag, name='filter-tag'),
path('sort/<str:criteria>/', views.sort_tips, name='sort-tips'),
path('today/', views.todays_tip, name='retrieve-today'),
path('accounts/register/', views.create_account, name='register'),
path('link_twitter/', views.link_twitter, name='link-twitter'),
path('accounts/login/', views.log_in, name='login'),
path('logout/', views.log_out, name='logout'),
path('api/', include(apis)),
] | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) |
options.go | package kinesis
import (
"github.com/b2wdigital/goignite/v2/core/config"
)
type Options struct {
RandomPartitionKey bool `config:"randompartitionkey"`
}
func DefaultOptions() (*Options, error) | {
o := &Options{}
err := config.UnmarshalWithPath(root, o)
if err != nil {
return nil, err
}
return o, nil
} |
|
base.go | // Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-2020 Datadog, Inc.
// This module should be updated at every release
package version
// AgentVersion contains the version of the Agent
var AgentVersion string
// Commit is populated with the short commit hash from which the Agent was built
var Commit string
var agentVersionDefault = "6.0.0"
func init() {
if AgentVersion == "" |
}
| {
AgentVersion = agentVersionDefault
} |
mod.rs | //! Native threads.
//!
//! ## The threading model
//!
//! An executing Rust program consists of a collection of native OS threads,
//! each with their own stack and local state. Threads can be named, and
//! provide some built-in support for low-level synchronization.
//!
//! Communication between threads can be done through
//! [channels], Rust's message-passing types, along with [other forms of thread
//! synchronization](../../std/sync/index.html) and shared-memory data
//! structures. In particular, types that are guaranteed to be
//! threadsafe are easily shared between threads using the
//! atomically-reference-counted container, [`Arc`].
//!
//! Fatal logic errors in Rust cause *thread panic*, during which
//! a thread will unwind the stack, running destructors and freeing
//! owned resources. While not meant as a 'try/catch' mechanism, panics
//! in Rust can nonetheless be caught (unless compiling with `panic=abort`) with
//! [`catch_unwind`](../../std/panic/fn.catch_unwind.html) and recovered
//! from, or alternatively be resumed with
//! [`resume_unwind`](../../std/panic/fn.resume_unwind.html). If the panic
//! is not caught the thread will exit, but the panic may optionally be
//! detected from a different thread with [`join`]. If the main thread panics
//! without the panic being caught, the application will exit with a
//! non-zero exit code.
//!
//! When the main thread of a Rust program terminates, the entire program shuts
//! down, even if other threads are still running. However, this module provides
//! convenient facilities for automatically waiting for the termination of a
//! thread (i.e., join).
//!
//! ## Spawning a thread
//!
//! A new thread can be spawned using the [`thread::spawn`][`spawn`] function:
//!
//! ```rust
//! use std::thread;
//!
//! thread::spawn(move || {
//! // some work here
//! });
//! ```
//!
//! In this example, the spawned thread is "detached," which means that there is
//! no way for the program to learn when the spawned thread completes or otherwise
//! terminates.
//!
//! To learn when a thread completes, it is necessary to capture the [`JoinHandle`]
//! object that is returned by the call to [`spawn`], which provides
//! a `join` method that allows the caller to wait for the completion of the
//! spawned thread:
//!
//! ```rust
//! use std::thread;
//!
//! let thread_join_handle = thread::spawn(move || {
//! // some work here
//! });
//! // some work here
//! let res = thread_join_handle.join();
//! ```
//!
//! The [`join`] method returns a [`thread::Result`] containing [`Ok`] of the final
//! value produced by the spawned thread, or [`Err`] of the value given to
//! a call to [`panic!`] if the thread panicked.
//!
//! Note that there is no parent/child relationship between a thread that spawns a
//! new thread and the thread being spawned. In particular, the spawned thread may or
//! may not outlive the spawning thread, unless the spawning thread is the main thread.
//!
//! ## Configuring threads
//!
//! A new thread can be configured before it is spawned via the [`Builder`] type,
//! which currently allows you to set the name and stack size for the thread:
//!
//! ```rust
//! # #![allow(unused_must_use)]
//! use std::thread;
//!
//! thread::Builder::new().name("thread1".to_string()).spawn(move || {
//! println!("Hello, world!");
//! });
//! ```
//!
//! ## The `Thread` type
//!
//! Threads are represented via the [`Thread`] type, which you can get in one of
//! two ways:
//!
//! * By spawning a new thread, e.g., using the [`thread::spawn`][`spawn`]
//! function, and calling [`thread`][`JoinHandle::thread`] on the [`JoinHandle`].
//! * By requesting the current thread, using the [`thread::current`] function.
//!
//! The [`thread::current`] function is available even for threads not spawned
//! by the APIs of this module.
//!
//! ## Thread-local storage
//!
//! This module also provides an implementation of thread-local storage for Rust
//! programs. Thread-local storage is a method of storing data into a global
//! variable that each thread in the program will have its own copy of.
//! Threads do not share this data, so accesses do not need to be synchronized.
//!
//! A thread-local key owns the value it contains and will destroy the value when the
//! thread exits. It is created with the [`thread_local!`] macro and can contain any
//! value that is `'static` (no borrowed pointers). It provides an accessor function,
//! [`with`], that yields a shared reference to the value to the specified
//! closure. Thread-local keys allow only shared access to values, as there would be no
//! way to guarantee uniqueness if mutable borrows were allowed. Most values
//! will want to make use of some form of **interior mutability** through the
//! [`Cell`] or [`RefCell`] types.
//!
//! ## Naming threads
//!
//! Threads are able to have associated names for identification purposes. By default, spawned
//! threads are unnamed. To specify a name for a thread, build the thread with [`Builder`] and pass
//! the desired thread name to [`Builder::name`]. To retrieve the thread name from within the
//! thread, use [`Thread::name`]. A couple examples of where the name of a thread gets used:
//!
//! * If a panic occurs in a named thread, the thread name will be printed in the panic message.
//! * The thread name is provided to the OS where applicable (e.g., `pthread_setname_np` in
//! unix-like platforms).
//!
//! ## Stack size
//!
//! The default stack size for spawned threads is 2 MiB, though this particular stack size is
//! subject to change in the future. There are two ways to manually specify the stack size for
//! spawned threads:
//!
//! * Build the thread with [`Builder`] and pass the desired stack size to [`Builder::stack_size`].
//! * Set the `RUST_MIN_STACK` environment variable to an integer representing the desired stack
//! size (in bytes). Note that setting [`Builder::stack_size`] will override this.
//!
//! Note that the stack size of the main thread is *not* determined by Rust.
//!
//! [channels]: crate::sync::mpsc
//! [`join`]: JoinHandle::join
//! [`Result`]: crate::result::Result
//! [`Ok`]: crate::result::Result::Ok
//! [`Err`]: crate::result::Result::Err
//! [`thread::current`]: current
//! [`thread::Result`]: Result
//! [`unpark`]: Thread::unpark
//! [`thread::park_timeout`]: park_timeout
//! [`Cell`]: crate::cell::Cell
//! [`RefCell`]: crate::cell::RefCell
//! [`with`]: LocalKey::with
//! [`thread_local!`]: crate::thread_local
#![stable(feature = "rust1", since = "1.0.0")]
#![deny(unsafe_op_in_unsafe_fn)]
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;
use crate::any::Any;
use crate::cell::UnsafeCell;
use crate::ffi::{CStr, CString};
use crate::fmt;
use crate::io;
use crate::mem;
use crate::num::NonZeroU64;
use crate::num::NonZeroUsize;
use crate::panic;
use crate::panicking;
use crate::pin::Pin;
use crate::ptr::addr_of_mut;
use crate::str;
use crate::sync::Arc;
use crate::sys::thread as imp;
use crate::sys_common::mutex;
use crate::sys_common::thread;
use crate::sys_common::thread_info;
use crate::sys_common::thread_parker::Parker;
use crate::sys_common::{AsInner, IntoInner};
use crate::time::Duration;
////////////////////////////////////////////////////////////////////////////////
// Thread-local storage
////////////////////////////////////////////////////////////////////////////////
#[macro_use]
mod local;
#[stable(feature = "scoped_threads", since = "1.63.0")]
mod scoped;
#[stable(feature = "scoped_threads", since = "1.63.0")]
pub use scoped::{scope, Scope, ScopedJoinHandle};
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::local::{AccessError, LocalKey};
// The types used by the thread_local! macro to access TLS keys. Note that there
// are two types, the "OS" type and the "fast" type. The OS thread local key
// type is accessed via platform-specific API calls and is slow, while the fast
// key type is accessed via code generated via LLVM, where TLS keys are set up
// by the elf linker. Note that the OS TLS type is always available: on macOS
// the standard library is compiled with support for older platform versions
// where fast TLS was not available; end-user code is compiled with fast TLS
// where available, but both are needed.
#[unstable(feature = "libstd_thread_internals", issue = "none")]
#[cfg(target_thread_local)]
#[doc(hidden)]
pub use self::local::fast::Key as __FastLocalKeyInner;
#[unstable(feature = "libstd_thread_internals", issue = "none")]
#[doc(hidden)]
pub use self::local::os::Key as __OsLocalKeyInner;
#[unstable(feature = "libstd_thread_internals", issue = "none")]
#[cfg(all(target_family = "wasm", not(target_feature = "atomics")))]
#[doc(hidden)]
pub use self::local::statik::Key as __StaticLocalKeyInner;
////////////////////////////////////////////////////////////////////////////////
// Builder
////////////////////////////////////////////////////////////////////////////////
/// Thread factory, which can be used in order to configure the properties of
/// a new thread.
///
/// Methods can be chained on it in order to configure it.
///
/// The two configurations available are:
///
/// - [`name`]: specifies an [associated name for the thread][naming-threads]
/// - [`stack_size`]: specifies the [desired stack size for the thread][stack-size]
///
/// The [`spawn`] method will take ownership of the builder and create an
/// [`io::Result`] to the thread handle with the given configuration.
///
/// The [`thread::spawn`] free function uses a `Builder` with default
/// configuration and [`unwrap`]s its return value.
///
/// You may want to use [`spawn`] instead of [`thread::spawn`], when you want
/// to recover from a failure to launch a thread, indeed the free function will
/// panic where the `Builder` method will return a [`io::Result`].
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new();
///
/// let handler = builder.spawn(|| {
/// // thread code
/// }).unwrap();
///
/// handler.join().unwrap();
/// ```
///
/// [`stack_size`]: Builder::stack_size
/// [`name`]: Builder::name
/// [`spawn`]: Builder::spawn
/// [`thread::spawn`]: spawn
/// [`io::Result`]: crate::io::Result
/// [`unwrap`]: crate::result::Result::unwrap
/// [naming-threads]: ./index.html#naming-threads
/// [stack-size]: ./index.html#stack-size
#[must_use = "must eventually spawn the thread"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct Builder {
// A name for the thread-to-be, for identification in panic messages
name: Option<String>,
// The size of the stack for the spawned thread in bytes
stack_size: Option<usize>,
}
impl Builder {
/// Generates the base configuration for spawning a thread, from which
/// configuration methods can be chained.
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new()
/// .name("foo".into())
/// .stack_size(32 * 1024);
///
/// let handler = builder.spawn(|| {
/// // thread code
/// }).unwrap();
///
/// handler.join().unwrap();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new() -> Builder {
Builder { name: None, stack_size: None }
}
/// Names the thread-to-be. Currently the name is used for identification
/// only in panic messages.
///
/// The name must not contain null bytes (`\0`).
///
/// For more information about named threads, see
/// [this module-level documentation][naming-threads].
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new()
/// .name("foo".into());
///
/// let handler = builder.spawn(|| {
/// assert_eq!(thread::current().name(), Some("foo"))
/// }).unwrap();
///
/// handler.join().unwrap();
/// ```
///
/// [naming-threads]: ./index.html#naming-threads
#[stable(feature = "rust1", since = "1.0.0")]
pub fn name(mut self, name: String) -> Builder {
self.name = Some(name);
self
}
/// Sets the size of the stack (in bytes) for the new thread.
///
/// The actual stack size may be greater than this value if
/// the platform specifies a minimal stack size.
///
/// For more information about the stack size for threads, see
/// [this module-level documentation][stack-size].
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new().stack_size(32 * 1024);
/// ```
///
/// [stack-size]: ./index.html#stack-size
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stack_size(mut self, size: usize) -> Builder {
self.stack_size = Some(size);
self
}
/// Spawns a new thread by taking ownership of the `Builder`, and returns an
/// [`io::Result`] to its [`JoinHandle`].
///
/// The spawned thread may outlive the caller (unless the caller thread
/// is the main thread; the whole process is terminated when the main
/// thread finishes). The join handle can be used to block on
/// termination of the spawned thread, including recovering its panics.
///
/// For a more complete documentation see [`thread::spawn`][`spawn`].
///
/// # Errors
///
/// Unlike the [`spawn`] free function, this method yields an
/// [`io::Result`] to capture any failure to create the thread at
/// the OS level.
///
/// [`io::Result`]: crate::io::Result
///
/// # Panics
///
/// Panics if a thread name was set and it contained null bytes.
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new();
///
/// let handler = builder.spawn(|| {
/// // thread code
/// }).unwrap();
///
/// handler.join().unwrap();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn spawn<F, T>(self, f: F) -> io::Result<JoinHandle<T>>
where
F: FnOnce() -> T,
F: Send + 'static,
T: Send + 'static,
{
unsafe { self.spawn_unchecked(f) }
}
/// Spawns a new thread without any lifetime restrictions by taking ownership
/// of the `Builder`, and returns an [`io::Result`] to its [`JoinHandle`].
///
/// The spawned thread may outlive the caller (unless the caller thread
/// is the main thread; the whole process is terminated when the main
/// thread finishes). The join handle can be used to block on
/// termination of the spawned thread, including recovering its panics.
///
/// This method is identical to [`thread::Builder::spawn`][`Builder::spawn`],
/// except for the relaxed lifetime bounds, which render it unsafe.
/// For a more complete documentation see [`thread::spawn`][`spawn`].
///
/// # Errors
///
/// Unlike the [`spawn`] free function, this method yields an
/// [`io::Result`] to capture any failure to create the thread at
/// the OS level.
///
/// # Panics
///
/// Panics if a thread name was set and it contained null bytes.
///
/// # Safety
///
/// The caller has to ensure that the spawned thread does not outlive any
/// references in the supplied thread closure and its return type.
/// This can be guaranteed in two ways:
///
/// - ensure that [`join`][`JoinHandle::join`] is called before any referenced
/// data is dropped
/// - use only types with `'static` lifetime bounds, i.e., those with no or only
/// `'static` references (both [`thread::Builder::spawn`][`Builder::spawn`]
/// and [`thread::spawn`][`spawn`] enforce this property statically)
///
/// # Examples
///
/// ```
/// #![feature(thread_spawn_unchecked)]
/// use std::thread;
///
/// let builder = thread::Builder::new();
///
/// let x = 1;
/// let thread_x = &x;
///
/// let handler = unsafe {
/// builder.spawn_unchecked(move || {
/// println!("x = {}", *thread_x);
/// }).unwrap()
/// };
///
/// // caller has to ensure `join()` is called, otherwise
/// // it is possible to access freed memory if `x` gets
/// // dropped before the thread closure is executed!
/// handler.join().unwrap();
/// ```
///
/// [`io::Result`]: crate::io::Result
#[unstable(feature = "thread_spawn_unchecked", issue = "55132")]
pub unsafe fn spawn_unchecked<'a, F, T>(self, f: F) -> io::Result<JoinHandle<T>>
where
F: FnOnce() -> T,
F: Send + 'a,
T: Send + 'a,
{
Ok(JoinHandle(unsafe { self.spawn_unchecked_(f, None) }?))
}
unsafe fn spawn_unchecked_<'a, 'scope, F, T>(
self,
f: F,
scope_data: Option<&'scope scoped::ScopeData>,
) -> io::Result<JoinInner<'scope, T>>
where
F: FnOnce() -> T,
F: Send + 'a,
T: Send + 'a,
'scope: 'a,
{
let Builder { name, stack_size } = self;
let stack_size = stack_size.unwrap_or_else(thread::min_stack);
let my_thread = Thread::new(name.map(|name| {
CString::new(name).expect("thread name may not contain interior null bytes")
}));
let their_thread = my_thread.clone();
let my_packet: Arc<Packet<'scope, T>> =
Arc::new(Packet { scope: scope_data, result: UnsafeCell::new(None) });
let their_packet = my_packet.clone();
let output_capture = crate::io::set_output_capture(None);
crate::io::set_output_capture(output_capture.clone());
let main = move || {
if let Some(name) = their_thread.cname() {
imp::Thread::set_name(name);
}
crate::io::set_output_capture(output_capture);
// SAFETY: the stack guard passed is the one for the current thread.
// This means the current thread's stack and the new thread's stack
// are properly set and protected from each other.
thread_info::set(unsafe { imp::guard::current() }, their_thread);
let try_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
crate::sys_common::backtrace::__rust_begin_short_backtrace(f)
}));
// SAFETY: `their_packet` as been built just above and moved by the
// closure (it is an Arc<...>) and `my_packet` will be stored in the
// same `JoinInner` as this closure meaning the mutation will be
// safe (not modify it and affect a value far away).
unsafe { *their_packet.result.get() = Some(try_result) };
};
if let Some(scope_data) = scope_data {
scope_data.increment_num_running_threads();
}
Ok(JoinInner {
// SAFETY:
//
// `imp::Thread::new` takes a closure with a `'static` lifetime, since it's passed
// through FFI or otherwise used with low-level threading primitives that have no
// notion of or way to enforce lifetimes.
//
// As mentioned in the `Safety` section of this function's documentation, the caller of
// this function needs to guarantee that the passed-in lifetime is sufficiently long
// for the lifetime of the thread.
//
// Similarly, the `sys` implementation must guarantee that no references to the closure
// exist after the thread has terminated, which is signaled by `Thread::join`
// returning.
native: unsafe {
imp::Thread::new(
stack_size,
mem::transmute::<Box<dyn FnOnce() + 'a>, Box<dyn FnOnce() + 'static>>(
Box::new(main),
),
)?
},
thread: my_thread,
packet: my_packet,
})
}
}
////////////////////////////////////////////////////////////////////////////////
// Free functions
////////////////////////////////////////////////////////////////////////////////
/// Spawns a new thread, returning a [`JoinHandle`] for it.
///
/// The join handle provides a [`join`] method that can be used to join the spawned
/// thread. If the spawned thread panics, [`join`] will return an [`Err`] containing
/// the argument given to [`panic!`].
///
/// If the join handle is dropped, the spawned thread will implicitly be *detached*.
/// In this case, the spawned thread may no longer be joined.
/// (It is the responsibility of the program to either eventually join threads it
/// creates or detach them; otherwise, a resource leak will result.)
///
/// This call will create a thread using default parameters of [`Builder`], if you
/// want to specify the stack size or the name of the thread, use this API
/// instead.
///
/// As you can see in the signature of `spawn` there are two constraints on
/// both the closure given to `spawn` and its return value, let's explain them:
///
/// - The `'static` constraint means that the closure and its return value
/// must have a lifetime of the whole program execution. The reason for this
/// is that threads can outlive the lifetime they have been created in.
///
/// Indeed if the thread, and by extension its return value, can outlive their
/// caller, we need to make sure that they will be valid afterwards, and since
/// we *can't* know when it will return we need to have them valid as long as
/// possible, that is until the end of the program, hence the `'static`
/// lifetime.
/// - The [`Send`] constraint is because the closure will need to be passed
/// *by value* from the thread where it is spawned to the new thread. Its
/// return value will need to be passed from the new thread to the thread
/// where it is `join`ed.
/// As a reminder, the [`Send`] marker trait expresses that it is safe to be
/// passed from thread to thread. [`Sync`] expresses that it is safe to have a
/// reference be passed from thread to thread.
///
/// # Panics
///
/// Panics if the OS fails to create a thread; use [`Builder::spawn`]
/// to recover from such errors.
///
/// # Examples
///
/// Creating a thread.
///
/// ```
/// use std::thread;
///
/// let handler = thread::spawn(|| {
/// // thread code
/// });
///
/// handler.join().unwrap();
/// ```
///
/// As mentioned in the module documentation, threads are usually made to
/// communicate using [`channels`], here is how it usually looks.
///
/// This example also shows how to use `move`, in order to give ownership
/// of values to a thread.
///
/// ```
/// use std::thread;
/// use std::sync::mpsc::channel;
///
/// let (tx, rx) = channel();
///
/// let sender = thread::spawn(move || {
/// tx.send("Hello, thread".to_owned())
/// .expect("Unable to send on channel");
/// });
///
/// let receiver = thread::spawn(move || {
/// let value = rx.recv().expect("Unable to receive from channel");
/// println!("{value}");
/// });
///
/// sender.join().expect("The sender thread has panicked");
/// receiver.join().expect("The receiver thread has panicked");
/// ```
///
/// A thread can also return a value through its [`JoinHandle`], you can use
/// this to make asynchronous computations (futures might be more appropriate
/// though).
///
/// ```
/// use std::thread;
///
/// let computation = thread::spawn(|| {
///     // Some expensive computation.
/// 42
/// });
///
/// let result = computation.join().unwrap();
/// println!("{result}");
/// ```
///
/// [`channels`]: crate::sync::mpsc
/// [`join`]: JoinHandle::join
/// [`Err`]: crate::result::Result::Err
#[stable(feature = "rust1", since = "1.0.0")]
pub fn spawn<F, T>(f: F) -> JoinHandle<T>
where
    F: FnOnce() -> T,
    F: Send + 'static,
    T: Send + 'static,
{
    // Spawn through a default-configured `Builder`. An OS-level failure to
    // create the thread is treated as fatal here; callers that want to
    // recover should use `Builder::spawn` directly.
    let builder = Builder::new();
    builder.spawn(f).expect("failed to spawn thread")
}
/// Gets a handle to the thread that invokes it.
///
/// # Examples
///
/// Getting a handle to the current thread with `thread::current()`:
///
/// ```
/// use std::thread;
///
/// let handler = thread::Builder::new()
/// .name("named thread".into())
/// .spawn(|| {
/// let handle = thread::current();
/// assert_eq!(handle.name(), Some("named thread"));
/// })
/// .unwrap();
///
/// handler.join().unwrap();
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn current() -> Thread {
    // The handle is stored in thread-local data; once that TLS slot has been
    // torn down during thread exit, no handle can be produced any more.
    let handle = thread_info::current_thread();
    handle.expect(
        "use of std::thread::current() is not possible \
         after the thread's local data has been destroyed",
    )
}
/// Cooperatively gives up a timeslice to the OS scheduler.
///
/// This calls the underlying OS scheduler's yield primitive, signaling
/// that the calling thread is willing to give up its remaining timeslice
/// so that the OS may schedule other threads on the CPU.
///
/// A drawback of yielding in a loop is that if the OS does not have any
/// other ready threads to run on the current CPU, the thread will effectively
/// busy-wait, which wastes CPU time and energy.
///
/// Therefore, when waiting for events of interest, a programmer's first
/// choice should be to use synchronization devices such as [`channel`]s,
/// [`Condvar`]s, [`Mutex`]es or [`join`] since these primitives are
/// implemented in a blocking manner, giving up the CPU until the event
/// of interest has occurred which avoids repeated yielding.
///
/// `yield_now` should thus be used only rarely, mostly in situations where
/// repeated polling is required because there is no other suitable way to
/// learn when an event of interest has occurred.
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// thread::yield_now();
/// ```
///
/// [`channel`]: crate::sync::mpsc
/// [`join`]: JoinHandle::join
/// [`Condvar`]: crate::sync::Condvar
/// [`Mutex`]: crate::sync::Mutex
#[stable(feature = "rust1", since = "1.0.0")]
pub fn yield_now() {
    // Thin wrapper over the platform-specific yield primitive.
    imp::Thread::yield_now()
}
/// Determines whether the current thread is unwinding because of panic.
///
/// A common use of this feature is to poison shared resources when writing
/// unsafe code, by checking `panicking` when the `drop` is called.
///
/// This is usually not needed when writing safe code, as [`Mutex`es][Mutex]
/// already poison themselves when a thread panics while holding the lock.
///
/// This can also be used in multithreaded applications, in order to send a
/// message to other threads warning that a thread has panicked (e.g., for
/// monitoring purposes).
///
/// # Examples
///
/// ```should_panic
/// use std::thread;
///
/// struct SomeStruct;
///
/// impl Drop for SomeStruct {
/// fn drop(&mut self) {
/// if thread::panicking() {
/// println!("dropped while unwinding");
/// } else {
/// println!("dropped while not unwinding");
/// }
/// }
/// }
///
/// {
/// print!("a: ");
/// let a = SomeStruct;
/// }
///
/// {
/// print!("b: ");
/// let b = SomeStruct;
/// panic!()
/// }
/// ```
///
/// [Mutex]: crate::sync::Mutex
#[inline]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn panicking() -> bool {
    // Delegate to the panic runtime's query for the current thread.
    panicking::panicking()
}
/// Puts the current thread to sleep for at least the specified amount of time.
///
/// The thread may sleep longer than the duration specified due to scheduling
/// specifics or platform-dependent functionality. It will never sleep less.
///
/// This function is blocking, and should not be used in `async` functions.
///
/// # Platform-specific behavior
///
/// On Unix platforms, the underlying syscall may be interrupted by a
/// spurious wakeup or signal handler. To ensure the sleep occurs for at least
/// the specified duration, this function may invoke that system call multiple
/// times.
///
/// # Examples
///
/// ```no_run
/// use std::thread;
///
/// // Let's sleep for 2 seconds:
/// thread::sleep_ms(2000);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(since = "1.6.0", note = "replaced by `std::thread::sleep`")]
pub fn sleep_ms(ms: u32) {
    // Widen the millisecond count losslessly and defer to the
    // `Duration`-based API.
    let dur = Duration::from_millis(u64::from(ms));
    sleep(dur)
}
/// Puts the current thread to sleep for at least the specified amount of time.
///
/// The thread may sleep longer than the duration specified due to scheduling
/// specifics or platform-dependent functionality. It will never sleep less.
///
/// This function is blocking, and should not be used in `async` functions.
///
/// # Platform-specific behavior
///
/// On Unix platforms, the underlying syscall may be interrupted by a
/// spurious wakeup or signal handler. To ensure the sleep occurs for at least
/// the specified duration, this function may invoke that system call multiple
/// times.
/// Platforms which do not support nanosecond precision for sleeping will
/// have `dur` rounded up to the nearest granularity of time they can sleep for.
///
/// Currently, specifying a zero duration on Unix platforms returns immediately
/// without invoking the underlying [`nanosleep`] syscall, whereas on Windows
/// platforms the underlying [`Sleep`] syscall is always invoked.
/// If the intention is to yield the current time-slice you may want to use
/// [`yield_now`] instead.
///
/// [`nanosleep`]: https://linux.die.net/man/2/nanosleep
/// [`Sleep`]: https://docs.microsoft.com/en-us/windows/win32/api/synchapi/nf-synchapi-sleep
///
/// # Examples
///
/// ```no_run
/// use std::{thread, time};
///
/// let ten_millis = time::Duration::from_millis(10);
/// let now = time::Instant::now();
///
/// thread::sleep(ten_millis);
///
/// assert!(now.elapsed() >= ten_millis);
/// ```
#[stable(feature = "thread_sleep", since = "1.4.0")]
pub fn sleep(dur: Duration) {
    // Platform-specific sleep; per the docs above, the syscall may be
    // re-issued if interrupted, so the sleep is never shorter than `dur`.
    imp::Thread::sleep(dur)
}
/// Blocks unless or until the current thread's token is made available.
///
/// A call to `park` does not guarantee that the thread will remain parked
/// forever, and callers should be prepared for this possibility.
///
/// # park and unpark
///
/// Every thread is equipped with some basic low-level blocking support, via the
/// [`thread::park`][`park`] function and [`thread::Thread::unpark`][`unpark`]
/// method. [`park`] blocks the current thread, which can then be resumed from
/// another thread by calling the [`unpark`] method on the blocked thread's
/// handle.
///
/// Conceptually, each [`Thread`] handle has an associated token, which is
/// initially not present:
///
/// * The [`thread::park`][`park`] function blocks the current thread unless or
/// until the token is available for its thread handle, at which point it
/// atomically consumes the token. It may also return *spuriously*, without
/// consuming the token. [`thread::park_timeout`] does the same, but allows
/// specifying a maximum time to block the thread for.
///
/// * The [`unpark`] method on a [`Thread`] atomically makes the token available
/// if it wasn't already. Because the token is initially absent, [`unpark`]
/// followed by [`park`] will result in the second call returning immediately.
///
/// In other words, each [`Thread`] acts a bit like a spinlock that can be
/// locked and unlocked using `park` and `unpark`.
///
/// Notice that being unblocked does not imply any synchronization with someone
/// that unparked this thread, it could also be spurious.
/// For example, it would be a valid, but inefficient, implementation to make both [`park`] and
/// [`unpark`] return immediately without doing anything.
///
/// The API is typically used by acquiring a handle to the current thread,
/// placing that handle in a shared data structure so that other threads can
/// find it, and then `park`ing in a loop. When some desired condition is met, another
/// thread calls [`unpark`] on the handle.
///
/// The motivation for this design is twofold:
///
/// * It avoids the need to allocate mutexes and condvars when building new
/// synchronization primitives; the threads already provide basic
/// blocking/signaling.
///
/// * It can be implemented very efficiently on many platforms.
///
/// # Examples
///
/// ```
/// use std::thread;
/// use std::sync::{Arc, atomic::{Ordering, AtomicBool}};
/// use std::time::Duration;
///
/// let flag = Arc::new(AtomicBool::new(false));
/// let flag2 = Arc::clone(&flag);
///
/// let parked_thread = thread::spawn(move || {
/// // We want to wait until the flag is set. We *could* just spin, but using
/// // park/unpark is more efficient.
/// while !flag2.load(Ordering::Acquire) {
/// println!("Parking thread");
/// thread::park();
/// // We *could* get here spuriously, i.e., way before the 10ms below are over!
/// // But that is no problem, we are in a loop until the flag is set anyway.
/// println!("Thread unparked");
/// }
/// println!("Flag received");
/// });
///
/// // Let some time pass for the thread to be spawned.
/// thread::sleep(Duration::from_millis(10));
///
/// // Set the flag, and let the thread wake up.
/// // There is no race condition here, if `unpark`
/// // happens first, `park` will return immediately.
/// // Hence there is no risk of a deadlock.
/// flag.store(true, Ordering::Release);
/// println!("Unpark the thread");
/// parked_thread.thread().unpark();
///
/// parked_thread.join().unwrap();
/// ```
///
/// [`unpark`]: Thread::unpark
/// [`thread::park_timeout`]: park_timeout
#[stable(feature = "rust1", since = "1.0.0")]
pub fn park() {
    // SAFETY: `park` is called on the parker owned by the current thread,
    // obtained via `current()`.
    unsafe {
        current().inner.as_ref().parker().park();
    }
}
/// Use [`park_timeout`].
///
/// Blocks unless or until the current thread's token is made available or
/// the specified duration has been reached (may wake spuriously).
///
/// The semantics of this function are equivalent to [`park`] except
/// that the thread will be blocked for roughly no longer than `dur`. This
/// method should not be used for precise timing due to anomalies such as
/// preemption or platform differences that might not cause the maximum
/// amount of time waited to be precisely `ms` long.
///
/// See the [park documentation][`park`] for more detail.
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(since = "1.6.0", note = "replaced by `std::thread::park_timeout`")]
pub fn park_timeout_ms(ms: u32) {
    // Lossless widening conversion, then defer to the `Duration`-based API.
    let timeout = Duration::from_millis(u64::from(ms));
    park_timeout(timeout)
}
/// Blocks unless or until the current thread's token is made available or
/// the specified duration has been reached (may wake spuriously).
///
/// The semantics of this function are equivalent to [`park`][park] except
/// that the thread will be blocked for roughly no longer than `dur`. This
/// method should not be used for precise timing due to anomalies such as
/// preemption or platform differences that might not cause the maximum
/// amount of time waited to be precisely `dur` long.
///
/// See the [park documentation][park] for more details.
///
/// # Platform-specific behavior
///
/// Platforms which do not support nanosecond precision for sleeping will have
/// `dur` rounded up to the nearest granularity of time they can sleep for.
///
/// # Examples
///
/// Waiting for the complete expiration of the timeout:
///
/// ```rust,no_run
/// use std::thread::park_timeout;
/// use std::time::{Instant, Duration};
///
/// let timeout = Duration::from_secs(2);
/// let beginning_park = Instant::now();
///
/// let mut timeout_remaining = timeout;
/// loop {
/// park_timeout(timeout_remaining);
/// let elapsed = beginning_park.elapsed();
/// if elapsed >= timeout {
/// break;
/// }
/// println!("restarting park_timeout after {elapsed:?}");
/// timeout_remaining = timeout - elapsed;
/// }
/// ```
#[stable(feature = "park_timeout", since = "1.4.0")]
pub fn park_timeout(dur: Duration) {
    // SAFETY: `park_timeout` is called on the parker owned by the current
    // thread, obtained via `current()`.
    unsafe {
        current().inner.as_ref().parker().park_timeout(dur);
    }
}
////////////////////////////////////////////////////////////////////////////////
// ThreadId
////////////////////////////////////////////////////////////////////////////////
/// A unique identifier for a running thread.
///
/// A `ThreadId` is an opaque object that uniquely identifies each thread
/// created during the lifetime of a process. `ThreadId`s are guaranteed not to
/// be reused, even when a thread terminates. `ThreadId`s are under the control
/// of Rust's standard library and there may not be any relationship between
/// `ThreadId` and the underlying platform's notion of a thread identifier --
/// the two concepts cannot, therefore, be used interchangeably. A `ThreadId`
/// can be retrieved from the [`id`] method on a [`Thread`].
///
/// # Examples
///
/// ```
/// use std::thread;
///
/// let other_thread = thread::spawn(|| {
/// thread::current().id()
/// });
///
/// let other_thread_id = other_thread.join().unwrap();
/// assert!(thread::current().id() != other_thread_id);
/// ```
///
/// [`id`]: Thread::id
#[stable(feature = "thread_id", since = "1.19.0")]
#[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)]
pub struct ThreadId(NonZeroU64); // non-zero: IDs start at 1 (see `ThreadId::new`)
impl ThreadId {
    // Generate a new unique thread ID.
    fn new() -> ThreadId {
        // It is UB to attempt to acquire this mutex reentrantly!
        static GUARD: mutex::StaticMutex = mutex::StaticMutex::new();
        // Monotonically increasing counter, protected by GUARD. Starts at 1
        // so the issued value always fits in `NonZeroU64`.
        static mut COUNTER: u64 = 1;
        unsafe {
            let guard = GUARD.lock();
            // If we somehow use up all our bits, panic so that we're not
            // covering up subtle bugs of IDs being reused.
            if COUNTER == u64::MAX {
                drop(guard); // in case the panic handler ends up calling `ThreadId::new()`, avoid reentrant lock acquire.
                panic!("failed to generate unique thread ID: bitspace exhausted");
            }
            let id = COUNTER;
            COUNTER += 1;
            ThreadId(NonZeroU64::new(id).unwrap())
        }
    }
    /// This returns a numeric identifier for the thread identified by this
    /// `ThreadId`.
    ///
    /// As noted in the documentation for the type itself, it is essentially an
    /// opaque ID, but is guaranteed to be unique for each thread. The returned
    /// value is entirely opaque -- only equality testing is stable. Note that
    /// it is not guaranteed which values new threads will return, and this may
    /// change across Rust versions.
    #[must_use]
    #[unstable(feature = "thread_id_value", issue = "67939")]
    pub fn as_u64(&self) -> NonZeroU64 {
        self.0
    }
}
////////////////////////////////////////////////////////////////////////////////
// Thread
////////////////////////////////////////////////////////////////////////////////
/// The internal representation of a `Thread` handle
/// The internal representation of a `Thread` handle.
struct Inner {
    name: Option<CString>, // Guaranteed to be UTF-8
    id: ThreadId,
    parker: Parker, // park/unpark state; only accessed pinned (see `Inner::parker`)
}
impl Inner {
    /// Projects a pinned `Inner` to its pinned `parker` field.
    fn parker(self: Pin<&Self>) -> Pin<&Parker> {
        // SAFETY: structural pinning — the parker is never moved out of
        // `Inner`, and `Inner` is only handed out behind a `Pin`
        // (constructed in `Thread::new`).
        unsafe { Pin::map_unchecked(self, |inner| &inner.parker) }
    }
}
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
/// A handle to a thread.
///
/// Threads are represented via the `Thread` type, which you can get in one of
/// two ways:
///
/// * By spawning a new thread, e.g., using the [`thread::spawn`][`spawn`]
///   function, and calling [`thread`][`JoinHandle::thread`] on the
///   [`JoinHandle`].
/// * By requesting the current thread, using the [`thread::current`] function.
///
/// The [`thread::current`] function is available even for threads not spawned
/// by the APIs of this module.
///
/// There is usually no need to create a `Thread` struct yourself, one
/// should instead use a function like `spawn` to create new threads, see the
/// docs of [`Builder`] and [`spawn`] for more details.
///
/// [`thread::current`]: current
pub struct Thread {
    // Pinned because the contained `Parker` must not move (see `Inner::parker`).
    inner: Pin<Arc<Inner>>,
}
impl Thread {
    // Used only internally to construct a thread object without spawning.
    // Panics if the name contains nuls.
    pub(crate) fn new(name: Option<CString>) -> Thread {
        // We have to use `unsafe` here to construct the `Parker` in-place,
        // which is required for the UNIX implementation.
        //
        // SAFETY: We pin the Arc immediately after creation, so its address never
        // changes.
        let inner = unsafe {
            let mut arc = Arc::<Inner>::new_uninit();
            let ptr = Arc::get_mut_unchecked(&mut arc).as_mut_ptr();
            // Initialize each field of the uninitialized allocation in place.
            addr_of_mut!((*ptr).name).write(name);
            addr_of_mut!((*ptr).id).write(ThreadId::new());
            Parker::new(addr_of_mut!((*ptr).parker));
            Pin::new_unchecked(arc.assume_init())
        };
        Thread { inner }
    }
    /// Atomically makes the handle's token available if it is not already.
    ///
    /// Every thread is equipped with some basic low-level blocking support, via
    /// the [`park`][park] function and the `unpark()` method. These can be
    /// used as a more CPU-efficient implementation of a spinlock.
    ///
    /// See the [park documentation][park] for more details.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let parked_thread = thread::Builder::new()
    ///     .spawn(|| {
    ///         println!("Parking thread");
    ///         thread::park();
    ///         println!("Thread unparked");
    ///     })
    ///     .unwrap();
    ///
    /// // Let some time pass for the thread to be spawned.
    /// thread::sleep(Duration::from_millis(10));
    ///
    /// println!("Unpark the thread");
    /// parked_thread.thread().unpark();
    ///
    /// parked_thread.join().unwrap();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn unpark(&self) {
        self.inner.as_ref().parker().unpark();
    }
    /// Gets the thread's unique identifier.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::thread;
    ///
    /// let other_thread = thread::spawn(|| {
    ///     thread::current().id()
    /// });
    ///
    /// let other_thread_id = other_thread.join().unwrap();
    /// assert!(thread::current().id() != other_thread_id);
    /// ```
    #[stable(feature = "thread_id", since = "1.19.0")]
    #[must_use]
    pub fn id(&self) -> ThreadId {
        self.inner.id
    }
    /// Gets the thread's name.
    ///
    /// For more information about named threads, see
    /// [this module-level documentation][naming-threads].
    ///
    /// # Examples
    ///
    /// Threads by default have no name specified:
    ///
    /// ```
    /// use std::thread;
    ///
    /// let builder = thread::Builder::new();
    ///
    /// let handler = builder.spawn(|| {
    ///     assert!(thread::current().name().is_none());
    /// }).unwrap();
    ///
    /// handler.join().unwrap();
    /// ```
    ///
    /// Thread with a specified name:
    ///
    /// ```
    /// use std::thread;
    ///
    /// let builder = thread::Builder::new()
    ///     .name("foo".into());
    ///
    /// let handler = builder.spawn(|| {
    ///     assert_eq!(thread::current().name(), Some("foo"))
    /// }).unwrap();
    ///
    /// handler.join().unwrap();
    /// ```
    ///
    /// [naming-threads]: ./index.html#naming-threads
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use]
    pub fn name(&self) -> Option<&str> {
        // SAFETY: `Inner::name` is guaranteed to be UTF-8 (see the field's
        // invariant comment on the struct).
        self.cname().map(|s| unsafe { str::from_utf8_unchecked(s.to_bytes()) })
    }
    /// Returns the raw, nul-terminated name, if any.
    fn cname(&self) -> Option<&CStr> {
        self.inner.name.as_deref()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for Thread {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Renders as `Thread { id: .., name: .., .. }`; remaining fields are
        // deliberately elided via the non-exhaustive finisher.
        let mut builder = f.debug_struct("Thread");
        builder.field("id", &self.id());
        builder.field("name", &self.name());
        builder.finish_non_exhaustive()
    }
}
////////////////////////////////////////////////////////////////////////////////
// JoinHandle
////////////////////////////////////////////////////////////////////////////////
/// A specialized [`Result`] type for threads.
///
/// Indicates the manner in which a thread exited.
///
/// The value contained in the `Result::Err` variant
/// is the value the thread panicked with;
/// that is, the argument the `panic!` macro was called with.
/// Unlike with normal errors, this value doesn't implement
/// the [`Error`](crate::error::Error) trait.
///
/// Thus, a sensible way to handle a thread panic is to either:
///
/// 1. propagate the panic with [`std::panic::resume_unwind`]
/// 2. or in case the thread is intended to be a subsystem boundary
/// that is supposed to isolate system-level failures,
/// match on the `Err` variant and handle the panic in an appropriate way
///
/// A thread that completes without panicking is considered to exit successfully.
///
/// # Examples
///
/// Matching on the result of a joined thread:
///
/// ```no_run
/// use std::{fs, thread, panic};
///
/// fn copy_in_thread() -> thread::Result<()> {
/// thread::spawn(|| {
/// fs::copy("foo.txt", "bar.txt").unwrap();
/// }).join()
/// }
///
/// fn main() {
/// match copy_in_thread() {
/// Ok(_) => println!("copy succeeded"),
/// Err(e) => panic::resume_unwind(e),
/// }
/// }
/// ```
///
/// [`Result`]: crate::result::Result
/// [`std::panic::resume_unwind`]: crate::panic::resume_unwind
#[stable(feature = "rust1", since = "1.0.0")]
pub type Result<T> = crate::result::Result<T, Box<dyn Any + Send + 'static>>; // Err carries the boxed panic payload
// This packet is used to communicate the return value between the spawned
// thread and the rest of the program. It is shared through an `Arc` and
// there's no need for a mutex here because synchronization happens with `join()`
// (the caller will never read this packet until the thread has exited).
//
// An Arc to the packet is stored into a `JoinInner` which in turns is placed
// in `JoinHandle`.
struct Packet<'scope, T> {
    // Set only for scoped threads; used for the scope's running-thread
    // bookkeeping (see `Drop for Packet`).
    scope: Option<&'scope scoped::ScopeData>,
    // Written once by the spawned thread, read by `join()` after it exits.
    result: UnsafeCell<Option<Result<T>>>,
}
// Due to the usage of `UnsafeCell` we need to manually implement Sync.
// The type `T` should already always be Send (otherwise the thread could not
// have been created) and the Packet is Sync because all access to the
// `UnsafeCell` is synchronized (by the `join()` boundary), and `ScopeData` is Sync.
unsafe impl<'scope, T: Sync> Sync for Packet<'scope, T> {}
impl<'scope, T> Drop for Packet<'scope, T> {
    fn drop(&mut self) {
        // If this packet was for a thread that ran in a scope, the thread
        // panicked, and nobody consumed the panic payload, we make sure
        // the scope function will panic.
        // (Must be read before `result` is cleared just below.)
        let unhandled_panic = matches!(self.result.get_mut(), Some(Err(_)));
        // Drop the result without causing unwinding.
        // This is only relevant for threads that aren't join()ed, as
        // join() will take the `result` and set it to None, such that
        // there is nothing left to drop here.
        // If this panics, we should handle that, because we're outside the
        // outermost `catch_unwind` of our thread.
        // We just abort in that case, since there's nothing else we can do.
        // (And even if we tried to handle it somehow, we'd also need to handle
        // the case where the panic payload we get out of it also panics on
        // drop, and so on. See issue #86027.)
        if let Err(_) = panic::catch_unwind(panic::AssertUnwindSafe(|| {
            *self.result.get_mut() = None;
        })) {
            rtabort!("thread result panicked on drop");
        }
        // Book-keeping so the scope knows when it's done.
        if let Some(scope) = self.scope {
            // Now that there will be no more user code running on this thread
            // that can use 'scope, mark the thread as 'finished'.
            // It's important we only do this after the `result` has been dropped,
            // since dropping it might still use things it borrowed from 'scope.
            scope.decrement_num_running_threads(unhandled_panic);
        }
    }
}
/// Inner representation for JoinHandle
struct JoinInner<'scope, T> {
    native: imp::Thread,            // OS-level thread handle, joined on
    thread: Thread,                 // user-facing handle for this thread
    packet: Arc<Packet<'scope, T>>, // shared slot holding the thread's result
}
impl<'scope, T> JoinInner<'scope, T> {
    /// Blocks until the spawned thread exits, then takes its result.
    fn join(mut self) -> Result<T> {
        // Wait for the OS thread to terminate.
        self.native.join();
        // The spawned thread has exited and dropped its clone of the packet,
        // so this handle now holds the sole reference and may take the
        // result out of the shared slot.
        let packet = Arc::get_mut(&mut self.packet).unwrap();
        packet.result.get_mut().take().unwrap()
    }
}
/// An owned permission to join on a thread (block on its termination).
///
/// A `JoinHandle` *detaches* the associated thread when it is dropped, which
/// means that there is no longer any handle to the thread and no way to `join`
/// on it.
///
/// Due to platform restrictions, it is not possible to [`Clone`] this
/// handle: the ability to join a thread is a uniquely-owned permission.
///
/// This `struct` is created by the [`thread::spawn`] function and the
/// [`thread::Builder::spawn`] method.
///
/// # Examples
///
/// Creation from [`thread::spawn`]:
///
/// ```
/// use std::thread;
///
/// let join_handle: thread::JoinHandle<_> = thread::spawn(|| {
/// // some work here
/// });
/// ```
///
/// Creation from [`thread::Builder::spawn`]:
///
/// ```
/// use std::thread;
///
/// let builder = thread::Builder::new();
///
/// let join_handle: thread::JoinHandle<_> = builder.spawn(|| {
/// // some work here
/// }).unwrap();
/// ```
///
/// A thread being detached and outliving the thread that spawned it:
///
/// ```no_run
/// use std::thread;
/// use std::time::Duration;
///
/// let original_thread = thread::spawn(|| {
/// let _detached_thread = thread::spawn(|| {
/// // Here we sleep to make sure that the first thread returns before.
/// thread::sleep(Duration::from_millis(10));
/// // This will be called, even though the JoinHandle is dropped.
/// println!("♫ Still alive ♫");
/// });
/// });
///
/// original_thread.join().expect("The thread being joined has panicked");
/// println!("Original thread is joined.");
///
/// // We make sure that the new thread has time to run, before the main
/// // thread returns.
///
/// thread::sleep(Duration::from_millis(1000));
/// ```
///
/// [`thread::Builder::spawn`]: Builder::spawn
/// [`thread::spawn`]: spawn
#[stable(feature = "rust1", since = "1.0.0")]
pub struct JoinHandle<T>(JoinInner<'static, T>); // newtype over the non-scoped inner handle
#[stable(feature = "joinhandle_impl_send_sync", since = "1.29.0")]
// SAFETY: the handle only yields `T` by value via `join()`, and spawning
// already requires `T: Send` (see the bounds on `spawn`).
unsafe impl<T> Send for JoinHandle<T> {}
#[stable(feature = "joinhandle_impl_send_sync", since = "1.29.0")]
// SAFETY: a shared reference to a `JoinHandle` gives no access to `T`.
unsafe impl<T> Sync for JoinHandle<T> {}
impl<T> JoinHandle<T> {
    /// Extracts a handle to the underlying thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::thread;
    ///
    /// let builder = thread::Builder::new();
    ///
    /// let join_handle: thread::JoinHandle<_> = builder.spawn(|| {
    ///     // some work here
    /// }).unwrap();
    ///
    /// let thread = join_handle.thread();
    /// println!("thread id: {:?}", thread.id());
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use]
    pub fn thread(&self) -> &Thread {
        &self.0.thread
    }
    /// Waits for the associated thread to finish.
    ///
    /// This function will return immediately if the associated thread has already finished.
    ///
    /// In terms of [atomic memory orderings], the completion of the associated
    /// thread synchronizes with this function returning. In other words, all
    /// operations performed by that thread [happen
    /// before](https://doc.rust-lang.org/nomicon/atomics.html#data-accesses) all
    /// operations that happen after `join` returns.
    ///
    /// If the associated thread panics, [`Err`] is returned with the parameter given
    /// to [`panic!`].
    ///
    /// [`Err`]: crate::result::Result::Err
    /// [atomic memory orderings]: crate::sync::atomic
    ///
    /// # Panics
    ///
    /// This function may panic on some platforms if a thread attempts to join
    /// itself or otherwise may create a deadlock with joining threads.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::thread;
    ///
    /// let builder = thread::Builder::new();
    ///
    /// let join_handle: thread::JoinHandle<_> = builder.spawn(|| {
    ///     // some work here
    /// }).unwrap();
    /// join_handle.join().expect("Couldn't join on the associated thread");
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn join(self) -> Result<T> {
        // Delegates to `JoinInner::join`, which blocks and takes the result.
        self.0.join()
    }
    /// Checks if the associated thread has finished running its main function.
    ///
    /// This might return `true` for a brief moment after the thread's main
    /// function has returned, but before the thread itself has stopped running.
    /// However, once this returns `true`, [`join`][Self::join] can be expected
    /// to return quickly, without blocking for any significant amount of time.
    ///
    /// This function does not block. To block while waiting on the thread to finish,
    /// use [`join`][Self::join].
    #[stable(feature = "thread_is_running", since = "1.61.0")]
    pub fn is_finished(&self) -> bool {
        // The packet is shared by exactly this handle and the spawned thread,
        // which drops its clone once its main function has returned; a strong
        // count of 1 therefore means the thread is done with the packet.
        Arc::strong_count(&self.0.packet) == 1
    }
}
impl<T> AsInner<imp::Thread> for JoinHandle<T> {
    // std-internal accessor: borrow the underlying OS thread handle.
    fn as_inner(&self) -> &imp::Thread {
        &self.0.native
    }
}
impl<T> IntoInner<imp::Thread> for JoinHandle<T> {
    // std-internal accessor: consume the handle, yielding the OS thread handle.
    fn into_inner(self) -> imp::Thread {
        self.0.native
    }
}
#[stable(feature = "std_debug", since = "1.16.0")]
impl<T> fmt::Debug for JoinHandle<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // No fields are shown: `T` is not required to implement `Debug`.
        let mut builder = f.debug_struct("JoinHandle");
        builder.finish_non_exhaustive()
    }
}
/// Compile-time check that the public handle types are `Send + Sync`.
fn _assert_sync_and_send() {
    fn _check<T: Send + Sync>() {}
    _check::<JoinHandle<()>>();
    _check::<Thread>();
}
/// Returns an estimate of the default amount of parallelism a program should use.
///
/// Parallelism is a resource. A given machine provides a certain capacity for
/// parallelism, i.e., a bound on the number of computations it can perform
/// simultaneously. This number often corresponds to the amount of CPUs a
/// computer has, but it may diverge in various cases.
///
/// Host environments such as VMs or container orchestrators may want to
/// restrict the amount of parallelism made available to programs in them. This
/// is often done to limit the potential impact of (unintentionally)
/// resource-intensive programs on other programs running on the same machine.
///
/// # Limitations
///
/// The purpose of this API is to provide an easy and portable way to query
/// the default amount of parallelism the program should use. Among other things it
/// does not expose information on NUMA regions, does not account for
/// differences in (co)processor capabilities or current system load,
/// and will not modify the program's global state in order to more accurately
/// query the amount of available parallelism.
///
/// Where both fixed steady-state and burst limits are available the steady-state
/// capacity will be used to ensure more predictable latencies.
///
/// Resource limits can be changed during the runtime of a program, therefore the value is
/// not cached and instead recomputed every time this function is called. It should not be
/// called from hot code.
///
/// The value returned by this function should be considered a simplified
/// approximation of the actual amount of parallelism available at any given
/// time. To get a more detailed or precise overview of the amount of
/// parallelism available to the program, you may wish to use
/// platform-specific APIs as well. The following platform limitations currently
/// apply to `available_parallelism`:
///
/// On Windows:
/// - It may undercount the amount of parallelism available on systems with more
/// than 64 logical CPUs. However, programs typically need specific support to
/// take advantage of more than 64 logical CPUs, and in the absence of such
/// support, the number returned by this function accurately reflects the
/// number of logical CPUs the program can use by default.
/// - It may overcount the amount of parallelism available on systems limited by
/// process-wide affinity masks, or job object limitations.
///
/// On Linux:
/// - It may overcount the amount of parallelism available when limited by a
/// process-wide affinity mask or cgroup quotas and cgroup2 fs or `sched_getaffinity()` can't be
/// queried, e.g. due to sandboxing.
/// - It may undercount the amount of parallelism if the current thread's affinity mask
/// does not reflect the process' cpuset, e.g. due to pinned threads.
///
/// On all targets:
/// - It may overcount the amount of parallelism available when running in a VM
/// with CPU usage limits (e.g. an overcommitted host).
///
/// # Errors
///
/// This function will, but is not limited to, return errors in the following
/// cases:
///
/// - If the amount of parallelism is not known for the target platform.
/// - If the program lacks permission to query the amount of parallelism made
/// available to it.
///
/// # Examples
///
/// ```
/// # #![allow(dead_code)]
/// use std::{io, thread};
///
/// fn main() -> io::Result<()> {
/// let count = thread::available_parallelism()?.get();
/// assert!(count >= 1_usize);
/// Ok(())
/// }
/// ```
#[doc(alias = "available_concurrency")] // Alias for a previous name we gave this API on unstable.
#[doc(alias = "hardware_concurrency")] // Alias for C++ `std::thread::hardware_concurrency`.
#[doc(alias = "num_cpus")] // Alias for a popular ecosystem crate which provides similar functionality.
#[stable(feature = "available_parallelism", since = "1.59.0")]
pub fn available_parallelism() -> io::Result<NonZeroUsize> {
imp::available_parallelism()
} | /// ```
/// use std::thread;
///
/// let computation = thread::spawn(|| { |
dict.list.controller.js | (function () {
'use strict';
angular.module('KingAdmin.pages.dict.dict')
.controller('DictListCtrl', DictListCtrl);
/** @ngInject */
function DictListCtrl($scope,$filter, toastr, DictService,DictClassService) {
var kt = this;
kt.dictlist = [];
kt.dictClassList = [];
DictClassService.getList({},function (data) {
kt.dictClassList = data.result;
});
kt.showClassCode = function(dictClassId) {
var selected = [];
if(dictClassId) {
selected = $filter('filter')(kt.dictClassList, {id: dictClassId});
}
return selected.length ? selected[0].code : '请选择分类';
};
kt.addRow = function () {
kt.inserted = {
id: null,
dictClassId:'',
code: null,
text: null,
remark: null,
};
kt.dictlist.push(kt.inserted);
}
kt.save = function (dict) {
if (dict.code == null || dict.code == '') {
toastr.warning('编号不能为空', "提示:", {"progressBar": true,});
return;
}
DictService.save(dict,function (data) {
kt.LoadPage();
});
}
kt.LoadPage = function (tableState) {
tableState = tableState || kt.tableState;
tableState.pagination.number = tableState.pagination.number || 5;
DictService.getSmartData(tableState,
function (data) {
tableState.pagination.numberOfPages = data.result.pages;
tableState.pagination.totalItemCount = data.result.total;
kt.tableState = tableState;
kt.dictlist = data.result.records;
});
};
//删除
kt.del = function (id) {
if(id==null){
kt.LoadPage();
return;
}
DictService.del({id: id},
function (data) {
kt.LoadPage();
})
};
kt.checkboxes = {
checked: false, | $scope.$watch('kt.checkboxes.checked', function (value) {
angular.forEach(kt.dictlist, function (item) {
kt.checkboxes.items[item.id] = value;
});
});
}
})(); | items: {}
}; |
prefer-heading.js | /**
* @fileoverview Prefer Heading: Prevent heading tags (h1 ... h6), use Gestalt Heading, instead
*/
// @flow strict
import {
hasAttributes,
hasImport,
hasUnsupportedAttributes,
hasSpreadAttributes,
isTag,
getHtmlTag,
} from './helpers/eslintASTHelpers.js';
import {
renameTagFixer,
renameTagWithPropsFixer,
updateGestaltImportFixer,
} from './helpers/eslintASTFixers.js';
import { type ESLintRule } from './helpers/eslintFlowTypes.js';
export const MESSAGES = {
fixMessageHeading: `Use Heading from Gestalt with accessibilityLevel (default autofix):\n
<Heading accessibilityLevel={| 1 | 2 | 3 | 4 | 5 | 6 |}>Text</Heading>\n
OR Use a presentational Heading from Gestalt:\n
<Heading accessibilityLevel="none">Text</Heading>\n
'none' hides Heading from assistive technology: see suggested options below to autofix`,
suggestionMessageA11yLevelNone: `Use a presentational Heading from Gestalt instead (accessibilityLevel="none")`,
};
const rule: ESLintRule = {
meta: {
type: 'suggestion',
docs: {
description: 'Prefer Heading: Prevent heading tags (h1 ... h6), use Gestalt Heading, instead',
category: 'Gestalt alternatives',
recommended: true,
url: 'https://gestalt.pinterest.systems/eslint%20plugin#gestaltprefer-link',
},
fixable: 'code',
schema: [],
messages: {
fixMessageHeading: MESSAGES.fixMessageHeading,
suggestionMessageA11yLevelNone: MESSAGES.suggestionMessageA11yLevelNone,
},
hasSuggestions: true,
},
create(context) {
let programNode;
let gestaltImportNode;
let importFixerRun = false;
const headingTags = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6'];
const importDeclarationFnc = (node) => {
if (!node) return;
const isGestaltImportNode = hasImport({ importNode: node, path: 'gestalt' });
if (!isGestaltImportNode) return;
gestaltImportNode = node;
};
const jSXElementFnc = (node) => {
const headingDisallowedAttributes = ['className'];
// First, exit if anchor tag should stay unmodified
if (
!isTag({ elementNode: node.openingElement, tagName: headingTags }) ||
hasSpreadAttributes({ elementNode: node.openingElement }) ||
hasAttributes({
elementNode: node.openingElement,
tagName: headingTags,
attributes: headingDisallowedAttributes,
}) ||
hasUnsupportedAttributes({
elementNode: node.openingElement,
tagName: headingTags,
supportedAttributes: [],
})
) {
return null;
}
const headingTag = getHtmlTag({ elementNode: node });
const a11yLevel = headingTag.replace('h', '');
const a11yLevelProp = `accessibilityLevel={${a11yLevel}}`;
const a11yLevelNoneProp = `accessibilityLevel="none"`;
// For any other anchor tag modification
return context.report({
node,
messageId: 'fixMessageHeading',
fix: (fixer) => {
const tagFixers =
a11yLevel === '1'
? renameTagFixer({
context,
elementNode: node,
fixer, | : renameTagWithPropsFixer({
context,
elementNode: node,
fixer,
gestaltImportNode,
newComponentName: 'Heading',
modifiedPropsString: a11yLevelProp,
tagName: headingTag,
});
const importFixers = updateGestaltImportFixer({
gestaltImportNode,
fixer,
newComponentName: 'Heading',
programNode,
});
const fixers = !importFixerRun ? [...tagFixers, importFixers] : tagFixers;
importFixerRun = true;
return fixers;
},
suggest: [
{
messageId: 'suggestionMessageA11yLevelNone',
fix: (fixer) => {
const tagFixers = renameTagWithPropsFixer({
context,
elementNode: node,
fixer,
gestaltImportNode,
newComponentName: 'Heading',
modifiedPropsString: a11yLevelNoneProp,
tagName: headingTag,
});
const importFixers = updateGestaltImportFixer({
gestaltImportNode,
fixer,
newComponentName: 'Heading',
programNode,
});
const fixers = [...tagFixers, importFixers];
return fixers;
},
},
],
});
};
return {
Program: (node) => {
programNode = node;
},
ImportDeclaration: importDeclarationFnc,
JSXElement: jSXElementFnc,
};
},
};
export default rule; | gestaltImportNode,
newComponentName: 'Heading',
tagName: headingTag,
}) |
router_connect.py | import zmq
import time
context = zmq.Context()
socket = context.socket(zmq.ROUTER)
socket.connect("tcp://127.0.0.1:5556")
time.sleep(1)
socket.send_multipart([b'router',b'Hello'])
address, msg = socket.recv_multipart() | print(msg) |
|
document.rs | use svg::node::{
element::{Circle, Definitions, Group, Path, Style, Title, Use, SVG},
Node, Text,
};
#[derive(Debug)]
pub struct Document {
view_box: (f64, f64, f64, f64),
title: Title,
stylesheets: Vec<String>,
routes_def: Definitions,
stops_def: Definitions,
routes_use: Group,
stops_use: Group,
}
impl Document {
pub fn new() -> Document {
Document::default()
}
pub fn set_title(&mut self, title: &str) |
pub fn add_route(&mut self, id: &str, style: &str, path: Path) {
self.routes_def.append(path);
self.routes_use.append(
Use::new()
.set("href", format!("#route-{}", id))
.set("class", format!("route bg {} {}", id, style)),
);
self.routes_use.append(
Use::new()
.set("href", format!("#route-{}", id))
.set("class", format!("route mg {} {}", id, style)),
);
self.routes_use.append(
Use::new()
.set("href", format!("#route-{}", id))
.set("class", format!("route fg {} {}", id, style)),
);
}
pub fn add_stop(&mut self, stop: Use) {
self.stops_use.append(stop);
}
pub fn set_view_box(&mut self, top: f64, left: f64, bottom: f64, right: f64) {
self.view_box = (left, top, right - left, bottom - top)
}
pub fn add_stylesheets(&mut self, stylesheets: &[String]) {
self.stylesheets.extend(stylesheets.iter().cloned())
}
/// Compiles the document to an SVG element
pub fn compile(self) -> SVG {
let style_content = self
.stylesheets
.into_iter()
.map(|s| format!("@import url({})\n;", s))
.collect::<String>();
SVG::new()
.set("viewBox", self.view_box)
.set("height", self.view_box.3)
.set("width", self.view_box.2)
.add(self.title)
.add(Style::new(style_content))
.add(self.routes_def)
.add(self.stops_def)
.add(self.routes_use)
.add(self.stops_use)
}
}
impl Default for Document {
fn default() -> Document {
Document {
view_box: Default::default(),
stylesheets: Default::default(),
title: Title::new(),
routes_def: Definitions::new(),
stops_def: Definitions::new().add(Circle::new().set("id", "stop")),
routes_use: Group::new().set("id", "routes"),
stops_use: Group::new().set("id", "stops"),
}
}
}
| {
self.title.append(Text::new(title));
} |
dataExtractor.py | #Review Seperator
def reviewToList(strDataLocation): #reviewToList(str_DataLocation)
file = open(strDataLocation)
listFile=(file.readlines())
firstReviewItem=0
lastReviewItem=0
listReviews = [] | if('<review_text>\n'==listFile[item]):
firstReviewItem = item+1
if('</review_text>\n'==listFile[item]):
ReviewItemRange = item - firstReviewItem
for i in range(ReviewItemRange):
reviewText = reviewText + (listFile[firstReviewItem])
firstReviewItem = firstReviewItem + 1
reviewText = reviewText.rstrip('\n')
listReviews.append(reviewText)
reviewText =""
return listReviews | reviewText =""
for item in range(len(listFile)): |
evm.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: ethermint/evm/v1alpha1/evm.proto
package types
import (
fmt "fmt"
github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Params defines the EVM module parameters
type Params struct {
// evm_denom represents the token denomination used to run the EVM state
// transitions.
EvmDenom string `protobuf:"bytes,1,opt,name=evm_denom,json=evmDenom,proto3" json:"evm_denom,omitempty" yaml:"evm_denom"`
// enable_create toggles state transitions that use the vm.Create function
EnableCreate bool `protobuf:"varint,2,opt,name=enable_create,json=enableCreate,proto3" json:"enable_create,omitempty" yaml:"enable_create"`
// enable_call toggles state transitions that use the vm.Call function
EnableCall bool `protobuf:"varint,3,opt,name=enable_call,json=enableCall,proto3" json:"enable_call,omitempty" yaml:"enable_call"`
// extra_eips defines the additional EIPs for the vm.Config
ExtraEIPs []int64 `protobuf:"varint,4,rep,packed,name=extra_eips,json=extraEips,proto3" json:"extra_eips,omitempty" yaml:"extra_eips"`
}
func (m *Params) Reset() { *m = Params{} }
func (*Params) ProtoMessage() {}
func (*Params) Descriptor() ([]byte, []int) {
return fileDescriptor_98f00fcca8b6b943, []int{0}
}
func (m *Params) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Params.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Params) XXX_Merge(src proto.Message) {
xxx_messageInfo_Params.Merge(m, src)
}
func (m *Params) XXX_Size() int {
return m.Size()
}
func (m *Params) XXX_DiscardUnknown() {
xxx_messageInfo_Params.DiscardUnknown(m)
}
var xxx_messageInfo_Params proto.InternalMessageInfo
func (m *Params) GetEvmDenom() string {
if m != nil {
return m.EvmDenom
}
return ""
}
func (m *Params) GetEnableCreate() bool {
if m != nil {
return m.EnableCreate
}
return false
}
func (m *Params) GetEnableCall() bool {
if m != nil {
return m.EnableCall
}
return false
}
func (m *Params) GetExtraEIPs() []int64 {
if m != nil {
return m.ExtraEIPs
}
return nil
}
// ChainConfig defines the Ethereum ChainConfig parameters using sdk.Int values
// instead of big.Int.
//
// NOTE 1: Since empty/uninitialized Ints (i.e with a nil big.Int value) are
// parsed to zero, we need to manually specify that negative Int values will be
// considered as nil. See getBlockValue for reference.
//
// NOTE 2: This type is not a configurable Param since the SDK does not allow
// for validation against a previous stored parameter values or the current
// block height (retrieved from context). If you want to update the config
// values, use an software upgrade procedure.
type ChainConfig struct {
// Homestead switch block (< 0 no fork, 0 = already homestead)
HomesteadBlock github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,1,opt,name=homestead_block,json=homesteadBlock,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"homestead_block" yaml:"homestead_block"`
// TheDAO hard-fork switch block (< 0 no fork)
DAOForkBlock github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,2,opt,name=dao_fork_block,json=daoForkBlock,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"dao_fork_block" yaml:"dao_fork_block"`
// Whether the nodes supports or opposes the DAO hard-fork
DAOForkSupport bool `protobuf:"varint,3,opt,name=dao_fork_support,json=daoForkSupport,proto3" json:"dao_fork_support,omitempty" yaml:"dao_fork_support"`
// EIP150 implements the Gas price changes
// (https://github.com/ethereum/EIPs/issues/150) EIP150 HF block (< 0 no fork)
EIP150Block github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,4,opt,name=eip150_block,json=eip150Block,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"eip150_block" yaml:"eip150_block"`
// EIP150 HF hash (needed for header only clients as only gas pricing changed)
EIP150Hash string `protobuf:"bytes,5,opt,name=eip150_hash,json=eip150Hash,proto3" json:"eip150_hash,omitempty" yaml:"byzantium_block"`
// EIP155Block HF block
EIP155Block github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,6,opt,name=eip155_block,json=eip155Block,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"eip155_block" yaml:"eip155_block"`
// EIP158 HF block
EIP158Block github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,7,opt,name=eip158_block,json=eip158Block,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"eip158_block" yaml:"eip158_block"`
// Byzantium switch block (< 0 no fork, 0 = already on byzantium)
ByzantiumBlock github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,8,opt,name=byzantium_block,json=byzantiumBlock,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"byzantium_block" yaml:"byzantium_block"`
// Constantinople switch block (< 0 no fork, 0 = already activated)
ConstantinopleBlock github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,9,opt,name=constantinople_block,json=constantinopleBlock,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"constantinople_block" yaml:"constantinople_block"`
// Petersburg switch block (< 0 same as Constantinople)
PetersburgBlock github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,10,opt,name=petersburg_block,json=petersburgBlock,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"petersburg_block" yaml:"petersburg_block"`
// Istanbul switch block (< 0 no fork, 0 = already on istanbul)
IstanbulBlock github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,11,opt,name=istanbul_block,json=istanbulBlock,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"istanbul_block" yaml:"istanbul_block"`
// Eip-2384 (bomb delay) switch block (< 0 no fork, 0 = already activated)
MuirGlacierBlock github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,12,opt,name=muir_glacier_block,json=muirGlacierBlock,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"muir_glacier_block" yaml:"muir_glacier_block"`
// YOLO v2: https://github.com/ethereum/EIPs/pull/2657 (Ephemeral testnet)
YoloV2Block github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,13,opt,name=yolo_v2_block,json=yoloV2Block,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"yolo_v2_block" yaml:"yolo_v2_block"`
// EWASM switch block (< 0 no fork, 0 = already activated)
EWASMBlock github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,14,opt,name=ewasm_block,json=ewasmBlock,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"ewasm_block" yaml:"ewasm_block"`
}
func (m *ChainConfig) Reset() { *m = ChainConfig{} }
func (m *ChainConfig) String() string { return proto.CompactTextString(m) }
func (*ChainConfig) ProtoMessage() {}
func (*ChainConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_98f00fcca8b6b943, []int{1}
}
func (m *ChainConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ChainConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ChainConfig.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ChainConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_ChainConfig.Merge(m, src)
}
func (m *ChainConfig) XXX_Size() int {
return m.Size()
}
func (m *ChainConfig) XXX_DiscardUnknown() {
xxx_messageInfo_ChainConfig.DiscardUnknown(m)
}
var xxx_messageInfo_ChainConfig proto.InternalMessageInfo
func (m *ChainConfig) GetDAOForkSupport() bool {
if m != nil {
return m.DAOForkSupport
}
return false
}
func (m *ChainConfig) GetEIP150Hash() string {
if m != nil {
return m.EIP150Hash
}
return ""
}
// State represents a single Storage key value pair item.
type State struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *State) Reset() { *m = State{} }
func (m *State) String() string { return proto.CompactTextString(m) }
func (*State) ProtoMessage() {}
func (*State) Descriptor() ([]byte, []int) {
return fileDescriptor_98f00fcca8b6b943, []int{2}
}
func (m *State) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *State) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_State.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *State) XXX_Merge(src proto.Message) {
xxx_messageInfo_State.Merge(m, src)
}
func (m *State) XXX_Size() int {
return m.Size()
}
func (m *State) XXX_DiscardUnknown() {
xxx_messageInfo_State.DiscardUnknown(m)
}
var xxx_messageInfo_State proto.InternalMessageInfo
func (m *State) GetKey() string {
if m != nil {
return m.Key
}
return ""
}
func (m *State) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
// TransactionLogs define the logs generated from a transaction execution
// with a given hash. It it used for import/export data as transactions are not
// persisted on blockchain state after an upgrade.
type TransactionLogs struct {
Hash string `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
Logs []*Log `protobuf:"bytes,2,rep,name=logs,proto3" json:"logs,omitempty"`
}
func (m *TransactionLogs) Reset() { *m = TransactionLogs{} }
func (m *TransactionLogs) String() string { return proto.CompactTextString(m) }
func (*TransactionLogs) ProtoMessage() {}
func (*TransactionLogs) Descriptor() ([]byte, []int) {
return fileDescriptor_98f00fcca8b6b943, []int{3}
}
func (m *TransactionLogs) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TransactionLogs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_TransactionLogs.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *TransactionLogs) XXX_Merge(src proto.Message) {
xxx_messageInfo_TransactionLogs.Merge(m, src)
}
func (m *TransactionLogs) XXX_Size() int {
return m.Size()
}
func (m *TransactionLogs) XXX_DiscardUnknown() {
xxx_messageInfo_TransactionLogs.DiscardUnknown(m)
}
var xxx_messageInfo_TransactionLogs proto.InternalMessageInfo
func (m *TransactionLogs) GetHash() string {
if m != nil {
return m.Hash
}
return ""
}
func (m *TransactionLogs) GetLogs() []*Log {
if m != nil {
return m.Logs
}
return nil
}
// Log represents an protobuf compatible Ethereum Log that defines a contract
// log event. These events are generated by the LOG opcode and stored/indexed by
// the node.
type Log struct {
// address of the contract that generated the event
Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
// list of topics provided by the contract.
Topics []string `protobuf:"bytes,2,rep,name=topics,proto3" json:"topics,omitempty"`
// supplied by the contract, usually ABI-encoded
Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
// block in which the transaction was included
BlockNumber uint64 `protobuf:"varint,4,opt,name=block_number,json=blockNumber,proto3" json:"blockNumber"`
// hash of the transaction
TxHash string `protobuf:"bytes,5,opt,name=tx_hash,json=txHash,proto3" json:"transactionHash"`
// index of the transaction in the block
TxIndex uint64 `protobuf:"varint,6,opt,name=tx_index,json=txIndex,proto3" json:"transactionIndex"`
// hash of the block in which the transaction was included
BlockHash string `protobuf:"bytes,7,opt,name=block_hash,json=blockHash,proto3" json:"blockHash"`
// index of the log in the block
Index uint64 `protobuf:"varint,8,opt,name=index,proto3" json:"logIndex"`
// The Removed field is true if this log was reverted due to a chain
// reorganisation. You must pay attention to this field if you receive logs
// through a filter query.
Removed bool `protobuf:"varint,9,opt,name=removed,proto3" json:"removed,omitempty"`
}
func (m *Log) Reset() { *m = Log{} }
func (m *Log) String() string { return proto.CompactTextString(m) }
func (*Log) ProtoMessage() {}
func (*Log) Descriptor() ([]byte, []int) {
return fileDescriptor_98f00fcca8b6b943, []int{4}
}
func (m *Log) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Log) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Log.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Log) XXX_Merge(src proto.Message) {
xxx_messageInfo_Log.Merge(m, src)
}
func (m *Log) XXX_Size() int {
return m.Size()
}
func (m *Log) XXX_DiscardUnknown() {
xxx_messageInfo_Log.DiscardUnknown(m)
}
var xxx_messageInfo_Log proto.InternalMessageInfo
func (m *Log) GetAddress() string {
if m != nil {
return m.Address
}
return ""
}
func (m *Log) GetTopics() []string {
if m != nil {
return m.Topics
}
return nil
}
func (m *Log) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
func (m *Log) GetBlockNumber() uint64 {
if m != nil {
return m.BlockNumber
}
return 0
}
func (m *Log) GetTxHash() string {
if m != nil {
return m.TxHash
}
return ""
}
func (m *Log) GetTxIndex() uint64 {
if m != nil {
return m.TxIndex
}
return 0
}
func (m *Log) GetBlockHash() string {
if m != nil {
return m.BlockHash
}
return ""
}
func (m *Log) GetIndex() uint64 {
if m != nil {
return m.Index
}
return 0
}
func (m *Log) GetRemoved() bool {
if m != nil {
return m.Removed
}
return false
}
func init() {
proto.RegisterType((*Params)(nil), "ethermint.evm.v1alpha1.Params")
proto.RegisterType((*ChainConfig)(nil), "ethermint.evm.v1alpha1.ChainConfig")
proto.RegisterType((*State)(nil), "ethermint.evm.v1alpha1.State")
proto.RegisterType((*TransactionLogs)(nil), "ethermint.evm.v1alpha1.TransactionLogs")
proto.RegisterType((*Log)(nil), "ethermint.evm.v1alpha1.Log")
}
func init() { proto.RegisterFile("ethermint/evm/v1alpha1/evm.proto", fileDescriptor_98f00fcca8b6b943) }
var fileDescriptor_98f00fcca8b6b943 = []byte{
// 1023 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x96, 0xcf, 0x6f, 0xdb, 0x36,
0x14, 0xc7, 0xe3, 0xd8, 0x49, 0x6c, 0xda, 0x71, 0x3c, 0xc6, 0xcb, 0xbc, 0x15, 0xb0, 0x02, 0x1e,
0xb6, 0x1c, 0x5a, 0xbb, 0xc9, 0x10, 0x2c, 0x28, 0xb0, 0x43, 0x9c, 0xa6, 0x8d, 0xb1, 0x74, 0x0b,
0x98, 0xa2, 0x01, 0x76, 0x11, 0x68, 0x8b, 0x95, 0xb5, 0x48, 0xa2, 0x26, 0xd2, 0xae, 0x3d, 0x60,
0xc0, 0xfe, 0x84, 0x1d, 0x77, 0xdc, 0x5f, 0x33, 0x14, 0x3b, 0xf5, 0x38, 0xec, 0x20, 0x0c, 0xce,
0x2d, 0x47, 0x9f, 0x77, 0x28, 0xf8, 0x43, 0x76, 0x9c, 0xe4, 0x62, 0xe4, 0x24, 0xbe, 0xc7, 0xf7,
0xbe, 0x1f, 0x3e, 0x4a, 0x7c, 0x14, 0xd8, 0xa6, 0xa2, 0x47, 0xe3, 0xc0, 0x0b, 0x45, 0x93, 0x0e,
0x82, 0xe6, 0x60, 0x97, 0xf8, 0x51, 0x8f, 0xec, 0x4a, 0xa3, 0x11, 0xc5, 0x4c, 0x30, 0xb8, 0x35,
0x8d, 0x68, 0x48, 0x67, 0x1a, 0xf1, 0x45, 0xd5, 0x65, 0x2e, 0x53, 0x21, 0x4d, 0x39, 0xd2, 0xd1,
0xe8, 0xff, 0x0c, 0x58, 0x3d, 0x23, 0x31, 0x09, 0x38, 0xdc, 0x05, 0x05, 0x3a, 0x08, 0x6c, 0x87,
0x86, 0x2c, 0xa8, 0x65, 0xb6, 0x33, 0x3b, 0x85, 0x56, 0x75, 0x92, 0x58, 0x95, 0x11, 0x09, 0xfc,
0x67, 0x68, 0x3a, 0x85, 0x70, 0x9e, 0x0e, 0x82, 0xe7, 0x72, 0x08, 0xbf, 0x05, 0xeb, 0x34, 0x24,
0x1d, 0x9f, 0xda, 0xdd, 0x98, 0x12, 0x41, 0x6b, 0xcb, 0xdb, 0x99, 0x9d, 0x7c, 0xab, 0x36, 0x49,
0xac, 0xaa, 0x49, 0xbb, 0x39, 0x8d, 0x70, 0x49, 0xdb, 0x47, 0xca, 0x84, 0xdf, 0x80, 0x62, 0x3a,
0x4f, 0x7c, 0xbf, 0x96, 0x55, 0xc9, 0x5b, 0x93, 0xc4, 0x82, 0xf3, 0xc9, 0xc4, 0xf7, 0x11, 0x06,
0x26, 0x95, 0xf8, 0x3e, 0x3c, 0x04, 0x80, 0x0e, 0x45, 0x4c, 0x6c, 0xea, 0x45, 0xbc, 0x96, 0xdb,
0xce, 0xee, 0x64, 0x5b, 0x68, 0x9c, 0x58, 0x85, 0x63, 0xe9, 0x3d, 0x6e, 0x9f, 0xf1, 0x49, 0x62,
0x7d, 0x62, 0x44, 0xa6, 0x81, 0x08, 0x17, 0x94, 0x71, 0xec, 0x45, 0xfc, 0x59, 0xee, 0x8f, 0x3f,
0xad, 0x25, 0xf4, 0x57, 0x09, 0x14, 0x8f, 0x7a, 0xc4, 0x0b, 0x8f, 0x58, 0xf8, 0xd6, 0x73, 0xe1,
0xcf, 0x60, 0xa3, 0xc7, 0x02, 0xca, 0x05, 0x25, 0x8e, 0xdd, 0xf1, 0x59, 0xf7, 0xd2, 0xec, 0xc4,
0xc9, 0xfb, 0xc4, 0x5a, 0xfa, 0x37, 0xb1, 0xbe, 0x74, 0x3d, 0xd1, 0xeb, 0x77, 0x1a, 0x5d, 0x16,
0x34, 0xbb, 0x8c, 0x07, 0x8c, 0x9b, 0xc7, 0x13, 0xee, 0x5c, 0x36, 0xc5, 0x28, 0xa2, 0xbc, 0xd1,
0x0e, 0xc5, 0x24, 0xb1, 0xb6, 0x34, 0xfe, 0x96, 0x1c, 0xc2, 0xe5, 0xa9, 0xa7, 0x25, 0x1d, 0xf0,
0x57, 0x50, 0x76, 0x08, 0xb3, 0xdf, 0xb2, 0xf8, 0xd2, 0x10, 0x97, 0x15, 0xf1, 0x62, 0x31, 0xe2,
0x38, 0xb1, 0x4a, 0xcf, 0x0f, 0x7f, 0x78, 0xc1, 0xe2, 0x4b, 0xa5, 0x3b, 0x49, 0xac, 0x4f, 0xf5,
0x0a, 0xe6, 0xd5, 0x11, 0x2e, 0x39, 0x84, 0x4d, 0xc3, 0xe0, 0x05, 0xa8, 0x4c, 0x03, 0x78, 0x3f,
0x8a, 0x58, 0x2c, 0xcc, 0x8b, 0x78, 0x32, 0x4e, 0xac, 0xb2, 0x91, 0x3c, 0xd7, 0x33, 0x93, 0xc4,
0xfa, 0xec, 0x96, 0xa8, 0xc9, 0x41, 0xb8, 0x6c, 0x64, 0x4d, 0x28, 0x7c, 0x07, 0x4a, 0xd4, 0x8b,
0x76, 0xf7, 0x9f, 0x9a, 0xaa, 0x72, 0xaa, 0xaa, 0xd7, 0x0b, 0x57, 0x55, 0x3c, 0x6e, 0x9f, 0xed,
0xee, 0x3f, 0x4d, 0x8b, 0xda, 0x34, 0x6f, 0xf5, 0x86, 0x34, 0xc2, 0x45, 0x6d, 0xea, 0x8a, 0xda,
0xc0, 0x98, 0x76, 0x8f, 0xf0, 0x5e, 0x6d, 0x45, 0x71, 0x77, 0xc6, 0x89, 0x05, 0xb4, 0xd2, 0x09,
0xe1, 0xbd, 0xd9, 0xfb, 0xe9, 0x8c, 0x7e, 0x21, 0xa1, 0xf0, 0xfa, 0x41, 0xaa, 0x05, 0x74, 0xb2,
0x8c, 0x9a, 0xd6, 0xb0, 0x6f, 0x6a, 0x58, 0x7d, 0x50, 0x0d, 0xfb, 0xf7, 0xd5, 0xb0, 0x3f, 0x5f,
0x83, 0x8e, 0x99, 0x82, 0x0f, 0x0c, 0x78, 0xed, 0x41, 0xe0, 0x83, 0xfb, 0xc0, 0x07, 0xf3, 0x60,
0x1d, 0x23, 0x0f, 0xc0, 0xad, 0x1d, 0xa9, 0xe5, 0x1f, 0x76, 0x00, 0xee, 0x6c, 0x70, 0x79, 0xea,
0xd1, 0xc8, 0xdf, 0x32, 0xa0, 0xda, 0x65, 0x21, 0x17, 0xd2, 0x19, 0xb2, 0xc8, 0xa7, 0x06, 0x5c,
0x50, 0xe0, 0x57, 0x0b, 0x83, 0x1f, 0x69, 0xf0, 0x7d, 0x9a, 0x08, 0x6f, 0xce, 0xbb, 0xf5, 0x12,
0x04, 0xa8, 0x44, 0x54, 0xd0, 0x98, 0x77, 0xfa, 0xb1, 0x6b, 0xe8, 0x40, 0xd1, 0xdb, 0x0b, 0xd3,
0xcd, 0x01, 0xb9, 0xad, 0x87, 0xf0, 0xc6, 0xcc, 0xa5, 0xa9, 0x21, 0x28, 0x7b, 0x72, 0x29, 0x9d,
0xbe, 0x6f, 0x98, 0x45, 0xc5, 0x7c, 0xb9, 0x30, 0xd3, 0x9c, 0xf4, 0x79, 0x35, 0x84, 0xd7, 0x53,
0x87, 0xe6, 0x8d, 0x00, 0x0c, 0xfa, 0x5e, 0x6c, 0xbb, 0x3e, 0xe9, 0x7a, 0x34, 0x36, 0xcc, 0x92,
0x62, 0x7e, 0xb7, 0x30, 0xf3, 0x73, 0xcd, 0xbc, 0xab, 0x88, 0x70, 0x45, 0x3a, 0x5f, 0x6a, 0x9f,
0x46, 0xff, 0x04, 0xd6, 0x47, 0xcc, 0x67, 0xf6, 0x60, 0xcf, 0x50, 0xd7, 0x15, 0xf5, 0xc5, 0xc2,
0x54, 0x73, 0xad, 0xcc, 0x89, 0x21, 0x5c, 0x94, 0xf6, 0x9b, 0x3d, 0xcd, 0xe2, 0xa0, 0x48, 0xdf,
0x11, 0x9e, 0x7e, 0xbe, 0x65, 0x45, 0xc2, 0x0b, 0x1f, 0x1d, 0x70, 0x7c, 0x71, 0x78, 0xfe, 0x2a,
0x3d, 0x39, 0xe9, 0x8d, 0x34, 0x13, 0x96, 0x9d, 0x42, 0x5a, 0x2a, 0x02, 0x35, 0xc1, 0xca, 0xb9,
0x90, 0x77, 0x5a, 0x05, 0x64, 0x2f, 0xe9, 0x48, 0xdf, 0x1a, 0x58, 0x0e, 0x61, 0x15, 0xac, 0x0c,
0x88, 0xdf, 0xd7, 0x97, 0x63, 0x01, 0x6b, 0x03, 0xbd, 0x01, 0x1b, 0xaf, 0x63, 0x12, 0x72, 0xd2,
0x15, 0x1e, 0x0b, 0x4f, 0x99, 0xcb, 0x21, 0x04, 0x39, 0xd5, 0xb1, 0x74, 0xae, 0x1a, 0xc3, 0x26,
0xc8, 0xf9, 0xcc, 0xe5, 0xb5, 0xe5, 0xed, 0xec, 0x4e, 0x71, 0xef, 0x51, 0xe3, 0xfe, 0xcb, 0xbd,
0x71, 0xca, 0x5c, 0xac, 0x02, 0xd1, 0xdf, 0xcb, 0x20, 0x7b, 0xca, 0x5c, 0x58, 0x03, 0x6b, 0xc4,
0x71, 0x62, 0xca, 0xb9, 0xd1, 0x4b, 0x4d, 0xb8, 0x05, 0x56, 0x05, 0x8b, 0xbc, 0xae, 0x16, 0x2d,
0x60, 0x63, 0x49, 0xbc, 0x43, 0x04, 0x51, 0xdd, 0xbf, 0x84, 0xd5, 0x18, 0xee, 0x81, 0x92, 0x2a,
0xd6, 0x0e, 0xfb, 0x41, 0x87, 0xc6, 0xaa, 0x89, 0xe7, 0x5a, 0x1b, 0xd7, 0x89, 0x55, 0x54, 0xfe,
0xef, 0x95, 0x1b, 0xdf, 0x34, 0xe0, 0x63, 0xb0, 0x26, 0x86, 0x37, 0x7b, 0xef, 0xe6, 0x75, 0x62,
0x6d, 0x88, 0x59, 0xb1, 0xb2, 0xb5, 0xe2, 0x55, 0x31, 0x3c, 0xd1, 0x05, 0xe6, 0xc5, 0xd0, 0xf6,
0x42, 0x87, 0x0e, 0x55, 0x7b, 0xcd, 0xb5, 0xaa, 0xd7, 0x89, 0x55, 0xb9, 0x11, 0xde, 0x96, 0x73,
0x78, 0x4d, 0x0c, 0xd5, 0x00, 0x3e, 0x06, 0x40, 0x2f, 0x49, 0x11, 0x74, 0x63, 0x5c, 0xbf, 0x4e,
0xac, 0x82, 0xf2, 0x2a, 0xed, 0xd9, 0x10, 0x22, 0xb0, 0xa2, 0xb5, 0xf3, 0x4a, 0xbb, 0x74, 0x9d,
0x58, 0x79, 0x9f, 0xb9, 0x5a, 0x53, 0x4f, 0xc9, 0xad, 0x8a, 0x69, 0xc0, 0x06, 0xd4, 0x51, 0x2d,
0x27, 0x8f, 0x53, 0xb3, 0x75, 0xf8, 0x7e, 0x5c, 0xcf, 0x7c, 0x18, 0xd7, 0x33, 0xff, 0x8d, 0xeb,
0x99, 0xdf, 0xaf, 0xea, 0x4b, 0x1f, 0xae, 0xea, 0x4b, 0xff, 0x5c, 0xd5, 0x97, 0x7e, 0xfc, 0xea,
0xee, 0x77, 0x34, 0xfb, 0x33, 0x1b, 0xaa, 0x7f, 0x33, 0xf5, 0x31, 0x75, 0x56, 0xd5, 0x7f, 0xd6,
0xd7, 0x1f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x5c, 0x98, 0x33, 0x7d, 0xb9, 0x09, 0x00, 0x00,
}
// Marshal serializes m into a freshly allocated protobuf wire-format buffer.
func (m *Params) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo serializes m into dAtA, which must be at least m.Size() bytes,
// and returns the number of bytes written.
func (m *Params) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer serializes m backwards into the tail of dAtA —
// fields are emitted highest field number first, each preceded by its
// wire tag — and returns the number of bytes written.
func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.ExtraEIPs) > 0 {
		// Packed repeated int64: varint-encode each element into a scratch
		// buffer (at most 10 bytes per value), then prefix the packed run
		// with its byte length and the field tag.
		dAtA2 := make([]byte, len(m.ExtraEIPs)*10)
		var j1 int
		for _, num1 := range m.ExtraEIPs {
			num := uint64(num1)
			for num >= 1<<7 {
				dAtA2[j1] = uint8(uint64(num)&0x7f | 0x80)
				num >>= 7
				j1++
			}
			dAtA2[j1] = uint8(num)
			j1++
		}
		i -= j1
		copy(dAtA[i:], dAtA2[:j1])
		i = encodeVarintEvm(dAtA, i, uint64(j1))
		i--
		dAtA[i] = 0x22 // field 4, wire type 2 (ExtraEIPs)
	}
	if m.EnableCall {
		i--
		if m.EnableCall {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x18 // field 3, wire type 0 (EnableCall)
	}
	if m.EnableCreate {
		i--
		if m.EnableCreate {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x10 // field 2, wire type 0 (EnableCreate)
	}
	if len(m.EvmDenom) > 0 {
		i -= len(m.EvmDenom)
		copy(dAtA[i:], m.EvmDenom)
		i = encodeVarintEvm(dAtA, i, uint64(len(m.EvmDenom)))
		i--
		dAtA[i] = 0xa // field 1, wire type 2 (EvmDenom)
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated protobuf wire-format buffer.
func (m *ChainConfig) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo serializes m into dAtA, which must be at least m.Size() bytes,
// and returns the number of bytes written.
func (m *ChainConfig) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ChainConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size := m.EWASMBlock.Size()
i -= size
if _, err := m.EWASMBlock.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintEvm(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x72
{
size := m.YoloV2Block.Size()
i -= size
if _, err := m.YoloV2Block.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintEvm(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x6a
{
size := m.MuirGlacierBlock.Size()
i -= size
if _, err := m.MuirGlacierBlock.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintEvm(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x62
{
size := m.IstanbulBlock.Size()
i -= size
if _, err := m.IstanbulBlock.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintEvm(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x5a
{
size := m.PetersburgBlock.Size()
i -= size
if _, err := m.PetersburgBlock.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintEvm(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x52
{
size := m.ConstantinopleBlock.Size()
i -= size
if _, err := m.ConstantinopleBlock.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintEvm(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x4a
{
size := m.ByzantiumBlock.Size()
i -= size
if _, err := m.ByzantiumBlock.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintEvm(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x42
{
size := m.EIP158Block.Size()
i -= size
if _, err := m.EIP158Block.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintEvm(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x3a
{
size := m.EIP155Block.Size()
i -= size
if _, err := m.EIP155Block.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintEvm(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x32
if len(m.EIP150Hash) > 0 {
i -= len(m.EIP150Hash)
copy(dAtA[i:], m.EIP150Hash)
i = encodeVarintEvm(dAtA, i, uint64(len(m.EIP150Hash)))
i--
dAtA[i] = 0x2a
}
{
size := m.EIP150Block.Size()
i -= size
if _, err := m.EIP150Block.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintEvm(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x22
if m.DAOForkSupport {
i--
if m.DAOForkSupport {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
dAtA[i] = 0x18
}
{
size := m.DAOForkBlock.Size()
i -= size
if _, err := m.DAOForkBlock.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintEvm(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
{
size := m.HomesteadBlock.Size()
i -= size
if _, err := m.HomesteadBlock.MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintEvm(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated protobuf wire-format buffer.
func (m *State) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo serializes m into dAtA, which must be at least m.Size() bytes,
// and returns the number of bytes written.
func (m *State) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer serializes m backwards into the tail of dAtA
// (highest field number first) and returns the number of bytes written.
func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.Value) > 0 {
		i -= len(m.Value)
		copy(dAtA[i:], m.Value)
		i = encodeVarintEvm(dAtA, i, uint64(len(m.Value)))
		i--
		dAtA[i] = 0x12 // field 2, wire type 2 (Value)
	}
	if len(m.Key) > 0 {
		i -= len(m.Key)
		copy(dAtA[i:], m.Key)
		i = encodeVarintEvm(dAtA, i, uint64(len(m.Key)))
		i--
		dAtA[i] = 0xa // field 1, wire type 2 (Key)
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated protobuf wire-format buffer.
func (m *TransactionLogs) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo serializes m into dAtA, which must be at least m.Size() bytes,
// and returns the number of bytes written.
func (m *TransactionLogs) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer serializes m backwards into the tail of dAtA.
// Repeated Logs entries are iterated in reverse so they appear in
// original order in the (backwards-written) output.
func (m *TransactionLogs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.Logs) > 0 {
		for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Logs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintEvm(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0x12 // field 2, wire type 2 (Logs)
		}
	}
	if len(m.Hash) > 0 {
		i -= len(m.Hash)
		copy(dAtA[i:], m.Hash)
		i = encodeVarintEvm(dAtA, i, uint64(len(m.Hash)))
		i--
		dAtA[i] = 0xa // field 1, wire type 2 (Hash)
	}
	return len(dAtA) - i, nil
}
// Marshal serializes m into a freshly allocated protobuf wire-format buffer.
func (m *Log) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}
// MarshalTo serializes m into dAtA, which must be at least m.Size() bytes,
// and returns the number of bytes written.
func (m *Log) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}
// MarshalToSizedBuffer serializes m backwards into the tail of dAtA
// (highest field number first) and returns the number of bytes written.
// Zero-valued scalar fields and empty strings/bytes are omitted.
func (m *Log) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Removed {
		i--
		if m.Removed {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x48 // field 9, wire type 0 (Removed)
	}
	if m.Index != 0 {
		i = encodeVarintEvm(dAtA, i, uint64(m.Index))
		i--
		dAtA[i] = 0x40 // field 8, wire type 0 (Index)
	}
	if len(m.BlockHash) > 0 {
		i -= len(m.BlockHash)
		copy(dAtA[i:], m.BlockHash)
		i = encodeVarintEvm(dAtA, i, uint64(len(m.BlockHash)))
		i--
		dAtA[i] = 0x3a // field 7, wire type 2 (BlockHash)
	}
	if m.TxIndex != 0 {
		i = encodeVarintEvm(dAtA, i, uint64(m.TxIndex))
		i--
		dAtA[i] = 0x30 // field 6, wire type 0 (TxIndex)
	}
	if len(m.TxHash) > 0 {
		i -= len(m.TxHash)
		copy(dAtA[i:], m.TxHash)
		i = encodeVarintEvm(dAtA, i, uint64(len(m.TxHash)))
		i--
		dAtA[i] = 0x2a // field 5, wire type 2 (TxHash)
	}
	if m.BlockNumber != 0 {
		i = encodeVarintEvm(dAtA, i, uint64(m.BlockNumber))
		i--
		dAtA[i] = 0x20 // field 4, wire type 0 (BlockNumber)
	}
	if len(m.Data) > 0 {
		i -= len(m.Data)
		copy(dAtA[i:], m.Data)
		i = encodeVarintEvm(dAtA, i, uint64(len(m.Data)))
		i--
		dAtA[i] = 0x1a // field 3, wire type 2 (Data)
	}
	if len(m.Topics) > 0 {
		// Repeated string: iterate in reverse so entries come out in order.
		for iNdEx := len(m.Topics) - 1; iNdEx >= 0; iNdEx-- {
			i -= len(m.Topics[iNdEx])
			copy(dAtA[i:], m.Topics[iNdEx])
			i = encodeVarintEvm(dAtA, i, uint64(len(m.Topics[iNdEx])))
			i--
			dAtA[i] = 0x12 // field 2, wire type 2 (Topics)
		}
	}
	if len(m.Address) > 0 {
		i -= len(m.Address)
		copy(dAtA[i:], m.Address)
		i = encodeVarintEvm(dAtA, i, uint64(len(m.Address)))
		i--
		dAtA[i] = 0xa // field 1, wire type 2 (Address)
	}
	return len(dAtA) - i, nil
}
// encodeVarintEvm writes v as a protobuf varint ending just before offset
// in dAtA and returns the new (lower) offset. The caller must have left
// sovEvm(v) free bytes below offset.
func encodeVarintEvm(dAtA []byte, offset int, v uint64) int {
	offset -= sovEvm(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
// Size returns the encoded wire-format size of m in bytes.
// Each present field costs 1 tag byte plus its payload (and, for
// length-delimited fields, a length varint).
func (m *Params) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.EvmDenom)
	if l > 0 {
		n += 1 + l + sovEvm(uint64(l))
	}
	if m.EnableCreate {
		n += 2
	}
	if m.EnableCall {
		n += 2
	}
	if len(m.ExtraEIPs) > 0 {
		l = 0
		for _, e := range m.ExtraEIPs {
			l += sovEvm(uint64(e))
		}
		n += 1 + sovEvm(uint64(l)) + l
	}
	return n
}
// Size returns the encoded wire-format size of m in bytes. The block-height
// fields are embedded messages and are always counted (tag + length varint
// + payload); only DAOForkSupport and EIP150Hash are conditional.
func (m *ChainConfig) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = m.HomesteadBlock.Size()
	n += 1 + l + sovEvm(uint64(l))
	l = m.DAOForkBlock.Size()
	n += 1 + l + sovEvm(uint64(l))
	if m.DAOForkSupport {
		n += 2
	}
	l = m.EIP150Block.Size()
	n += 1 + l + sovEvm(uint64(l))
	l = len(m.EIP150Hash)
	if l > 0 {
		n += 1 + l + sovEvm(uint64(l))
	}
	l = m.EIP155Block.Size()
	n += 1 + l + sovEvm(uint64(l))
	l = m.EIP158Block.Size()
	n += 1 + l + sovEvm(uint64(l))
	l = m.ByzantiumBlock.Size()
	n += 1 + l + sovEvm(uint64(l))
	l = m.ConstantinopleBlock.Size()
	n += 1 + l + sovEvm(uint64(l))
	l = m.PetersburgBlock.Size()
	n += 1 + l + sovEvm(uint64(l))
	l = m.IstanbulBlock.Size()
	n += 1 + l + sovEvm(uint64(l))
	l = m.MuirGlacierBlock.Size()
	n += 1 + l + sovEvm(uint64(l))
	l = m.YoloV2Block.Size()
	n += 1 + l + sovEvm(uint64(l))
	l = m.EWASMBlock.Size()
	n += 1 + l + sovEvm(uint64(l))
	return n
}
// Size returns the encoded wire-format size of m in bytes.
func (m *State) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Key)
	if l > 0 {
		n += 1 + l + sovEvm(uint64(l))
	}
	l = len(m.Value)
	if l > 0 {
		n += 1 + l + sovEvm(uint64(l))
	}
	return n
}
// Size returns the encoded wire-format size of m in bytes, including one
// tag + length prefix per entry in Logs.
func (m *TransactionLogs) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Hash)
	if l > 0 {
		n += 1 + l + sovEvm(uint64(l))
	}
	if len(m.Logs) > 0 {
		for _, e := range m.Logs {
			l = e.Size()
			n += 1 + l + sovEvm(uint64(l))
		}
	}
	return n
}
// Size returns the encoded wire-format size of m in bytes. Zero scalars and
// empty strings/bytes contribute nothing, mirroring MarshalToSizedBuffer.
func (m *Log) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Address)
	if l > 0 {
		n += 1 + l + sovEvm(uint64(l))
	}
	if len(m.Topics) > 0 {
		for _, s := range m.Topics {
			l = len(s)
			n += 1 + l + sovEvm(uint64(l))
		}
	}
	l = len(m.Data)
	if l > 0 {
		n += 1 + l + sovEvm(uint64(l))
	}
	if m.BlockNumber != 0 {
		n += 1 + sovEvm(uint64(m.BlockNumber))
	}
	l = len(m.TxHash)
	if l > 0 {
		n += 1 + l + sovEvm(uint64(l))
	}
	if m.TxIndex != 0 {
		n += 1 + sovEvm(uint64(m.TxIndex))
	}
	l = len(m.BlockHash)
	if l > 0 {
		n += 1 + l + sovEvm(uint64(l))
	}
	if m.Index != 0 {
		n += 1 + sovEvm(uint64(m.Index))
	}
	if m.Removed {
		n += 2
	}
	return n
}
// sovEvm returns the number of bytes needed to encode x as a protobuf
// varint: 7 payload bits per byte, with x|1 guarding the x == 0 case so
// the result is never zero.
func sovEvm(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}

// sozEvm returns the varint-encoded size of x after zigzag encoding,
// which maps small-magnitude signed values to small unsigned ones
// (used for sint* fields).
// NOTE(review): the original declaration line was corrupted to
// `func | (x uint64) (n int)`; the gogo/protobuf generator names this
// helper sozEvm, restored here so the file compiles.
func sozEvm(x uint64) (n int) {
	return sovEvm(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Params) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Params: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EvmDenom", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.EvmDenom = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field EnableCreate", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.EnableCreate = bool(v != 0)
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field EnableCall", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.EnableCall = bool(v != 0)
case 4:
if wireType == 0 {
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.ExtraEIPs = append(m.ExtraEIPs, v)
} else if wireType == 2 {
var packedLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
packedLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if packedLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + packedLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
var elementCount int
var count int
for _, integer := range dAtA[iNdEx:postIndex] {
if integer < 128 {
count++
}
}
elementCount = count
if elementCount != 0 && len(m.ExtraEIPs) == 0 {
m.ExtraEIPs = make([]int64, 0, elementCount)
}
for iNdEx < postIndex {
var v int64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.ExtraEIPs = append(m.ExtraEIPs, v)
}
} else {
return fmt.Errorf("proto: wrong wireType = %d for field ExtraEIPs", wireType)
}
default:
iNdEx = preIndex
skippy, err := skipEvm(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthEvm
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ChainConfig) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ChainConfig: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ChainConfig: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field HomesteadBlock", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.HomesteadBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DAOForkBlock", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.DAOForkBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field DAOForkSupport", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.DAOForkSupport = bool(v != 0)
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EIP150Block", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.EIP150Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EIP150Hash", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.EIP150Hash = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EIP155Block", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.EIP155Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EIP158Block", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.EIP158Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 8:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ByzantiumBlock", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ByzantiumBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 9:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ConstantinopleBlock", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.ConstantinopleBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 10:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PetersburgBlock", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.PetersburgBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 11:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IstanbulBlock", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.IstanbulBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 12:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field MuirGlacierBlock", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.MuirGlacierBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 13:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field YoloV2Block", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.YoloV2Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 14:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field EWASMBlock", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.EWASMBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipEvm(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthEvm
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *State) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: State: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: State: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Value = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipEvm(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthEvm
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *TransactionLogs) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: TransactionLogs: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: TransactionLogs: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Hash = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Logs = append(m.Logs, &Log{})
if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipEvm(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthEvm
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Log) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Log: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Log: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Address = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Topics", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Topics = append(m.Topics, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
if m.Data == nil {
m.Data = []byte{}
}
iNdEx = postIndex
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field BlockNumber", wireType)
}
m.BlockNumber = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.BlockNumber |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TxHash", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TxHash = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field TxIndex", wireType)
}
m.TxIndex = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.TxIndex |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field BlockHash", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthEvm
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthEvm
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.BlockHash = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 8:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
}
m.Index = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Index |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 9:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Removed", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowEvm
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.Removed = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipEvm(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthEvm
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
// skipEvm returns the number of bytes occupied by the first complete
// protobuf field (tag + payload, including any nested groups) at the start
// of dAtA, or an error if the buffer is truncated or malformed.
//
// NOTE(review): this looks like gogo/protobuf-generated code — presumably
// regenerated from the .proto definition; avoid editing by hand.
func skipEvm(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0 // nesting level of open start-group (wire type 3) markers
	for iNdEx < l {
		// Decode the field tag as a base-128 varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowEvm
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		// Low 3 bits of the tag select the wire type of the payload.
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// Varint payload: consume bytes until the continuation bit clears.
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowEvm
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// Fixed 64-bit payload.
			iNdEx += 8
		case 2:
			// Length-delimited payload: varint length, then that many bytes.
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowEvm
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthEvm
			}
			iNdEx += length
		case 3:
			// Start of a group: skip until the matching end-group marker.
			depth++
		case 4:
			// End of a group must match a previously seen start.
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupEvm
			}
			depth--
		case 5:
			// Fixed 32-bit payload.
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		// A negative index indicates integer overflow while skipping.
		if iNdEx < 0 {
			return 0, ErrInvalidLengthEvm
		}
		// Top-level field fully consumed: report its total size.
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}
// Sentinel errors shared by the generated (un)marshaling helpers in this file.
var (
	ErrInvalidLengthEvm        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowEvm          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupEvm = fmt.Errorf("proto: unexpected end of group")
)
| sozEvm |
images.go | /*
Package images describes the publishing scheme for Sourcegraph images.
It is published as a standalone module to enable tooling in other repositories to more
easily use these definitions.
*/
package images
import (
"fmt"
)
// Docker registries that Sourcegraph images are pushed to.
const (
	// SourcegraphDockerDevRegistry is a private registry for dev images, and requires authentication to pull from.
	SourcegraphDockerDevRegistry = "us.gcr.io/sourcegraph-dev"
	// SourcegraphDockerPublishRegistry is a public registry for final images, and does not require authentication to pull from.
	SourcegraphDockerPublishRegistry = "index.docker.io/sourcegraph"
)
// DevRegistryImage returns the name of the image for the given app and tag on the
// private dev registry.
func DevRegistryImage(app, tag string) string {
root := fmt.Sprintf("%s/%s", SourcegraphDockerDevRegistry, app)
return maybeTaggedImage(root, tag)
}
// PublishedRegistryImage returns the name of the image for the given app and tag on the
// publish registry.
func PublishedRegistryImage(app, tag string) string {
root := fmt.Sprintf("%s/%s", SourcegraphDockerPublishRegistry, app)
return maybeTaggedImage(root, tag)
}
// maybeTaggedImage appends ":tag" to rootImage when a tag is provided, and
// returns rootImage unchanged otherwise.
func maybeTaggedImage(rootImage, tag string) string {
	if tag == "" {
		return rootImage
	}
	return rootImage + ":" + tag
}
// SourcegraphDockerImages denotes all Docker images that are published by Sourcegraph.
//
// In general:
//
// - dev images (candidates - see `candidateImageTag`) are published to `SourcegraphDockerDevRegistry`
// - final images (releases, `insiders`) are published to `SourcegraphDockerPublishRegistry`
// - app must be a legal Docker image name (e.g. no `/`)
//
// The `addDockerImages` pipeline step determines what images are built and published.
var SourcegraphDockerImages = append(DeploySourcegraphDockerImages,
	// The standalone single-container distribution, on top of the deploy list.
	"server")
// DeploySourcegraphDockerImages denotes all Docker images that are included in a typical
// deploy-sourcegraph installation.
//
// Used to cross check images in the deploy-sourcegraph repo. If you are adding or removing an image to https://github.com/sourcegraph/deploy-sourcegraph
// it must also be added to this list.
var DeploySourcegraphDockerImages = []string{
	"alpine-3.14",
	"cadvisor",
	"codeinsights-db",
	"codeintel-db",
	"frontend",
	"github-proxy",
	"gitserver",
	"grafana",
	"indexed-searcher",
	"jaeger-agent",
	"jaeger-all-in-one",
	"minio",
	"postgres-12.6-alpine",
	"postgres_exporter",
	"precise-code-intel-worker",
	"prometheus",
	"redis-cache",
	"redis-store",
	"redis_exporter",
	"repo-updater",
	"search-indexer",
	"searcher",
	"symbols",
	"syntax-highlighter",
	"worker",
	"migrator",
}
// CandidateImageTag provides the tag for a candidate image built for this Buildkite run.
//
// Note that the availability of this image depends on whether a candidate gets built,
// as determined in `addDockerImages()`.
func CandidateImageTag(commit, buildNumber string) string {
	return fmt.Sprintf("%s_%s_candidate", commit, buildNumber)
}
dimacs_parser.rs | use crate::unicorn::bitblasting::{Gate, GateModel, GateRef};
use crate::unicorn::{Node, NodeType};
use anyhow::{Context, Result};
use regex::Regex;
use std::cell::RefCell;
use std::collections::HashMap;
use std::fs::File;
use std::io::BufRead;
use std::io::BufReader;
use std::path::Path;
use std::rc::Rc;
use std::str::FromStr;
//
// Public Interface
//
/// Parses the DIMACS CNF file at `path` and lowers it into a [`GateModel`]
/// holding a single bad-state gate (each clause OR-reduced, all clauses
/// AND-reduced — see `DimacsParser`).
///
/// # Errors
/// Propagates file-open and number-parsing errors from the parser; the
/// parser itself panics on lines it cannot recognize.
pub fn load_dimacs_as_gatemodel(path: &Path) -> Result<GateModel> {
    let mut parser = DimacsParser::new();
    parser.parse_dimacs_text_file(path)?;
    Ok(parser.into_model())
}
//
// Private Implementation
//
/// Builds the input gate for CNF variable index `i`, named `b{i}`.
fn input_gate(i: usize) -> GateRef {
    GateRef::from(Gate::InputBit {
        name: format!("b{}", i),
    })
}
/// Wraps `a` in a NOT gate.
fn not_gate(a: GateRef) -> GateRef {
    let gate = Gate::Not { value: a };
    GateRef::from(gate)
}
/// Combines `a` and `b` with an OR gate.
fn or_gate(a: GateRef, b: GateRef) -> GateRef {
    let gate = Gate::Or { left: a, right: b };
    GateRef::from(gate)
}
/// Combines `a` and `b` with an AND gate.
fn and_gate(a: GateRef, b: GateRef) -> GateRef {
    let gate = Gate::And { left: a, right: b };
    GateRef::from(gate)
}
struct DimacsParser {
    // Variable and clause counts taken from the DIMACS "p cnf V C" header.
    num_variables: usize,
    num_clauses: usize,
    // One input gate per CNF variable, indexed by (variable - 1).
    gate_variables: Vec<GateRef>,
    // Pre-built negation of each variable, same indexing.
    gate_negations: Vec<GateRef>,
    // OR-reduction gate of each clause parsed so far.
    gate_clauses: Vec<GateRef>,
}
impl DimacsParser {
fn new() -> Self {
Self {
num_variables: 0,
num_clauses: 0,
gate_variables: Vec::new(),
gate_negations: Vec::new(),
gate_clauses: Vec::new(),
}
}
fn initialize_variables(&mut self) {
assert!(self.gate_variables.is_empty());
assert!(self.gate_negations.is_empty());
for i in 0..self.num_variables {
let gate_var = input_gate(i);
let gate_neg = not_gate(gate_var.clone());
self.gate_variables.push(gate_var);
self.gate_negations.push(gate_neg);
}
}
fn literal_to_gate(&self, literal: i32) -> GateRef {
if literal < 0 {
self.gate_negations[i32::abs(literal) as usize - 1].clone()
} else {
self.gate_variables[literal as usize - 1].clone()
}
}
fn add_clause(&mut self, literals: Vec<i32>) {
let gate_literals = literals.iter().map(|l| self.literal_to_gate(*l));
let gate = gate_literals.reduce(or_gate).unwrap();
self.gate_clauses.push(gate);
}
fn into_model(self) -> GateModel {
let gate = self.gate_clauses.into_iter().reduce(and_gate).unwrap();
// TODO: The fact that we are requiring a node here just to communicate
// random Nid values to Qubot is a bit of a hack. Fix this!
let node = Rc::new(RefCell::new(Node::Input {
nid: 99999999,
sort: NodeType::Bit,
name: "from-dimacs-cnf".to_string(),
}));
GateModel {
bad_state_gates: vec![gate],
bad_state_nodes: vec![node],
constraints: HashMap::new(),
input_gates: Vec::new(),
mapping: HashMap::new(),
mapping_adders: HashMap::new(),
constraint_based_dependencies: HashMap::new(),
}
}
fn | (&mut self, path: &Path) -> Result<()> {
let re_magic: Regex = Regex::new(r"^p cnf ([0-9]+) ([0-9]+)$").unwrap();
let re_clause: Regex = Regex::new(r"^((-?[1-9][0-9]* )+)0$").unwrap();
let mut has_seen_magic_line = false;
let file = File::open(path)?;
let reader = BufReader::new(file);
for line in reader.lines() {
let line = line.unwrap();
// Skip all comment lines.
if line.starts_with("c ") {
continue;
}
// Recognize CNF magic line.
if let Some(caps) = re_magic.captures(&line) {
assert!(!has_seen_magic_line);
let num_variables = caps.get(1).context("missing #variables")?;
let num_clauses = caps.get(2).context("missing #clauses")?;
self.num_variables = usize::from_str(num_variables.as_str())?;
self.num_clauses = usize::from_str(num_clauses.as_str())?;
self.initialize_variables();
has_seen_magic_line = true;
continue;
}
// Recognize a clause line.
if let Some(caps) = re_clause.captures(&line) {
assert!(has_seen_magic_line);
let clause = caps.get(1).context("missing literals")?.as_str().trim();
self.add_clause(
clause
.split(' ')
.map(|l| i32::from_str(l).expect("range"))
.collect(),
);
continue;
}
panic!("Unrecognized line: {}", line);
}
assert!(self.gate_variables.len() == self.num_variables);
assert!(self.gate_negations.len() == self.num_variables);
assert!(self.gate_clauses.len() == self.num_clauses);
Ok(())
}
}
| parse_dimacs_text_file |
root.tsx | import {
Links,
LiveReload,
Meta,
Outlet,
Scripts,
ScrollRestoration
} from "remix";
import globalStylesUrl from "~/styles/global.css";
// Remix route export: registers the global stylesheet for the <Links /> tag.
export function links() {
  const globalStylesheet = { rel: "stylesheet", href: globalStylesUrl };
  return [globalStylesheet];
}
// Root Remix component: renders the HTML document shell that wraps every route.
export default function App() {
  return (
    <html lang="en">
      <head>
        <meta charSet="utf-8" />
        <meta name="viewport" content="width=device-width,initial-scale=1" />
        <Meta />
        <Links />
      </head>
      <body>
        <Outlet />
        <ScrollRestoration />
        <Scripts />
        {process.env.NODE_ENV === "development" && <LiveReload />}
      </body>
    </html>
  );
}
| App |
cmpvupd4.rs | #[doc = "Writer for register CMPVUPD4"]
pub type W = crate::W<u32, super::CMPVUPD4>;
#[doc = "Write proxy for field `CVUPD`"]
// NOTE(review): svd2rust-style generated register code — presumably
// regenerated from the SVD; avoid hand edits. The proxy borrows the
// register writer so field writes can be chained.
pub struct CVUPD_W<'a> {
    w: &'a mut W,
}
impl<'a> CVUPD_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    // Unsafe because arbitrary bit patterns may be invalid for the hardware
    // field; masks the value into bits 0..=23 of the register image.
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x00ff_ffff) | ((value as u32) & 0x00ff_ffff);
        self.w
    }
}
#[doc = "Write proxy for field `CVMUPD`"]
pub struct CVMUPD_W<'a> { | }
impl<'a> CVMUPD_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    // Read-modify-write of bit 24 of the register image; safe because a
    // single bit cannot hold an invalid value.
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 24)) | (((value as u32) & 0x01) << 24);
        self.w
    }
}
// Field accessors returning write proxies for the CMPVUPD4 register.
impl W {
    #[doc = "Bits 0:23 - Comparison x Value Update"]
    #[inline(always)]
    pub fn cvupd(&mut self) -> CVUPD_W {
        CVUPD_W { w: self }
    }
    #[doc = "Bit 24 - Comparison x Value Mode Update"]
    #[inline(always)]
    pub fn cvmupd(&mut self) -> CVMUPD_W {
        CVMUPD_W { w: self }
    }
}
huya.py | import base64
import logging
import re
from html import unescape as html_unescape
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream import HTTPStream
from streamlink.utils.parse import parse_json
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
    r'https?://(?:www\.)?huya\.com/(?P<channel>[^/]+)'
))
class Huya(Plugin):
    """Streamlink plugin for huya.com live streams."""

    # Matches the base64-encoded "stream" blob embedded in the channel page.
    _re_stream = re.compile(r'"stream"\s?:\s?"([^"]+)"')
    # Validates the decoded blob and drills down to the per-CDN stream list.
    _schema_data = validate.Schema(
        {
            # 'status': int,
            # 'msg': validate.any(None, str),
            'data': [{
                'gameStreamInfoList': [{
                    'sCdnType': str,
                    'sStreamName': str,
                    'sFlvUrl': str,
                    'sFlvUrlSuffix': str,
                    'sFlvAntiCode': validate.all(str, validate.transform(lambda v: html_unescape(v))),
                    # 'sHlsUrl': str,
                    # 'sHlsUrlSuffix': str,
                    # 'sHlsAntiCode': validate.all(str, validate.transform(lambda v: html_unescape(v))),
                    validate.optional('iIsMultiStream'): int,
                    'iPCPriorityRate': int,
                }]
            }],
            # 'vMultiStreamInfo': [{
            #     'sDisplayName': str,
            #     'iBitRate': int,
            # }],
        },
        validate.get('data'),
        validate.get(0),
        validate.get('gameStreamInfoList'),
    )

    # Populated in _get_streams with per-CDN priority rates.
    QUALITY_WEIGHTS = {}

    @classmethod
    def stream_weight(cls, key):
        """Rank 'source_<cdn>' qualities by CDN priority; defer to the base
        Plugin implementation for anything not collected in QUALITY_WEIGHTS."""
        weight = cls.QUALITY_WEIGHTS.get(key)
        if weight:
            return weight, 'huya'
        return Plugin.stream_weight(key)

    def _get_streams(self):
        res = self.session.http.get(self.url)
        data = self._re_stream.search(res.text)
        if not data:
            return
        data = parse_json(base64.b64decode(data.group(1)), schema=self._schema_data)
        for info in data:
            log.trace(f'{info!r}')
            flv_url = f'{info["sFlvUrl"]}/{info["sStreamName"]}.{info["sFlvUrlSuffix"]}?{info["sFlvAntiCode"]}'
            name = f'source_{info["sCdnType"].lower()}'
            self.QUALITY_WEIGHTS[name] = info['iPCPriorityRate']
            yield name, HTTPStream(self.session, flv_url)
        log.debug(f'QUALITY_WEIGHTS: {self.QUALITY_WEIGHTS!r}')
# Streamlink resolves a plugin module through this well-known attribute.
__plugin__ = Huya
| stream_weight |
config.go | // Package config provides helper functions for reading a
// JSON formatted configuration file into an arbitrary struct
package config
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"reflect"
"strings"
"github.com/mtibben/gocase"
)
// Load reads the JSON-encoded file and marshalls
// the contents into the value pointed at by v.
// Panics if unsuccessful
func Load(filename string, v interface{}) {
Parse(read(filename), v)
}
// read returns the entire contents of filename, panicking if the file
// cannot be read.
func read(filename string) []byte {
	contents, err := ioutil.ReadFile(filename)
	if err == nil {
		return contents
	}
	panic(err.Error())
}
// Parse unmarshals the JSON-encoded data into the value pointed at by v.
// Panics if the data is not valid JSON for the target type.
func Parse(jsondata []byte, v interface{}) {
	if err := json.Unmarshal(jsondata, v); err != nil {
		panic(err.Error())
	}
}
// MarshalledEnvironmentVar records the outcome of setting one struct field
// from an environment variable in LoadFromEnv.
type MarshalledEnvironmentVar struct {
	EnvKey          string // environment variable name that was looked up
	EnvVal          string // raw value found in the environment
	StructFieldName string // struct field the value was unmarshaled into
	Error           error  // non-nil if the JSON unmarshal of the value failed
}
// LoadFromEnv iterates through a struct's fields and tries to find matching
// environment variables, using prefix + UPPER_SNAKE(field name) as the key.
// String values are JSON-encoded before unmarshaling so values containing
// quotes or backslashes round-trip correctly; non-string values are expected
// to already be valid JSON (numbers, booleans, objects, ...).
// Returns one MarshalledEnvironmentVar per environment key that was found.
func LoadFromEnv(v interface{}, prefix string) (result []MarshalledEnvironmentVar) {
	pointerValue := reflect.ValueOf(v)
	structValue := pointerValue.Elem()
	structType := structValue.Type()

	for i := 0; i < structValue.NumField(); i++ {
		structField := structType.Field(i)
		fieldValue := structValue.Field(i)

		if !fieldValue.CanSet() {
			continue
		}
		envKey := strings.ToUpper(prefix) + gocase.ToUpperSnake(structField.Name)
		envVal := os.Getenv(envKey)
		if envVal == "" {
			continue
		}

		// Build a one-field JSON document and let encoding/json perform the
		// type-appropriate decoding into the target struct.
		var jsonStr string
		if fieldValue.Kind() == reflect.String {
			// json.Marshal of a string cannot fail and yields a correctly
			// escaped JSON string literal. The previous `"%s"` formatting
			// produced invalid JSON whenever the value contained a quote
			// or backslash.
			encodedVal, _ := json.Marshal(envVal)
			jsonStr = fmt.Sprintf(`{"%s": %s}`, structField.Name, encodedVal)
		} else {
			jsonStr = fmt.Sprintf(`{"%s": %s}`, structField.Name, envVal)
		}
		err := json.Unmarshal([]byte(jsonStr), v)
		result = append(result, MarshalledEnvironmentVar{envKey, envVal, structField.Name, err})
	}
	return
}
| Parse |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.