autogate_s2_trainer_callback.py
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""AutoGate top-k version Stage2 TrainerCallback."""
import logging
import pandas as pd
from vega.common import ClassFactory, ClassType
from vega.common import FileOps
from vega.algorithms.nas.fis.ctr_trainer_callback import CtrTrainerCallback
from vega.core.pipeline.conf import ModelConfig
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.CALLBACK)
class AutoGateS2TrainerCallback(CtrTrainerCallback):
"""AutoGateS2TrainerCallback module."""
def __init__(self):
"""Construct AutoGateS2TrainerCallback class."""
super(CtrTrainerCallback, self).__init__()
self.sieve_board = pd.DataFrame(
columns=['selected_feature_pairs', 'score'])
self.selected_pairs = list()
logging.info("init autogate s2 trainer callback")

    def before_train(self, logs=None):
        """Be called before the training process."""
        super().before_train(logs)
        hpo_result = FileOps.load_pickle(FileOps.join_path(
            self.trainer.local_output_path, 'best_config.pickle'))
        logging.info("loading stage1_hpo_result \n{}".format(hpo_result))
        feature_interaction_score = hpo_result['feature_interaction_score']
        print('feature_interaction_score:', feature_interaction_score)
        sorted_pairs = sorted(feature_interaction_score.items(),
                              key=lambda x: abs(x[1]), reverse=True)
        if ModelConfig.model_desc:
            fis_ratio = ModelConfig.model_desc["custom"]["fis_ratio"]
        else:
            fis_ratio = 1.0
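        # Keep the top-k pairs ranked by absolute interaction score; e.g. with 10
        # scored pairs and fis_ratio=0.3, top_k = int(10 * 0.3) = 3.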
        top_k = int(len(feature_interaction_score) * min(1.0, fis_ratio))
        self.selected_pairs = list(map(lambda x: x[0], sorted_pairs[:top_k]))
        # add selected_pairs
        setattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs)

    def after_train(self, logs=None):
        """Call after_train of the managed callbacks."""
        curr_auc = float(self.trainer.valid_metrics.results['auc'])
        self.sieve_board = self.sieve_board.append(
            {
                'selected_feature_pairs': self.selected_pairs,
                'score': curr_auc
            }, ignore_index=True)
        result_file = FileOps.join_path(
            self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__))
        self.sieve_board.to_csv(result_file, sep='\t')

setflag.go
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parserprovider
import (
"bytes"
"fmt"
"strings"
"github.com/spf13/viper"
"go.opentelemetry.io/collector/config/configparser"
)
const setFlagFileType = "properties"
type setFlagProvider struct {
base ParserProvider
}
// NewSetFlag returns a config.ParserProvider that wraps a "base" config.ParserProvider, then
// overrides properties from set flag(s) in the loaded Parser.
//
// The implementation reads set flag(s) from the cmd and concatenates them as a "properties" file.
// Then the properties file is read and the properties are set on the loaded Parser.
func NewSetFlag(base ParserProvider) ParserProvider {
	return &setFlagProvider{
		base: base,
	}
}
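// Usage sketch (hypothetical wiring; assumes some other ParserProvider in this
// package, e.g. one backed by a config file, serves as the base):
//
//	provider := NewSetFlag(baseProvider) // baseProvider: any ParserProvider
//	parser, err := provider.Get()        // values from --set flags override base values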
func (sfl *setFlagProvider) Get() (*configparser.Parser, error) {
flagProperties := getSetFlag()
if len(flagProperties) == 0 {
return sfl.base.Get()
}
b := &bytes.Buffer{}
for _, property := range flagProperties {
property = strings.TrimSpace(property)
if _, err := fmt.Fprintf(b, "%s\n", property); err != nil {
return nil, err
}
}
viperFlags := viper.NewWithOptions(viper.KeyDelimiter(configparser.KeyDelimiter))
viperFlags.SetConfigType(setFlagFileType)
if err := viperFlags.ReadConfig(b); err != nil {
return nil, fmt.Errorf("failed to read set flag config: %v", err)
}
cp, err := sfl.base.Get()
if err != nil {
return nil, err
}
// Viper implementation of v.MergeConfig(io.Reader) or v.MergeConfigMap(map[string]interface)
// does not work properly. This is b/c if it attempts to merge into a nil object it will fail here
// https://github.com/spf13/viper/blob/3826be313591f83193f048520482a7b3cf17d506/viper.go#L1709
// The workaround is to call v.Set(string, interface) on all root properties from the config file
// this will correctly preserve the original config and set them up for viper to overlay them with
// the --set params. It should also be noted that setting the root keys is important. This is
// b/c the viper .AllKeys() method does not return empty objects.
// For instance with the following yaml structure:
// a:
// b:
// c: {}
//
// viper.AllKeys() would only return a.b, but not a.c. However, otel expects {} to behave
// the same as a nil object in its config file. Therefore we extract and set only the root keys
// to catch both a.b and a.c.
rootKeys := map[string]struct{}{}
for _, k := range viperFlags.AllKeys() {
keys := strings.Split(k, configparser.KeyDelimiter)
if len(keys) > 0 {
rootKeys[keys[0]] = struct{}{}
}
}
for k := range rootKeys {
cp.Set(k, cp.Get(k))
}
// now that we've copied the config into the viper "overrides" copy the --set flags
// as well
for _, k := range viperFlags.AllKeys() {
cp.Set(k, viperFlags.Get(k))
}
return cp, nil
}
cachestate.go
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package datareq
import (
"fmt"
"time"
"github.com/apache/trafficcontrol/lib/go-log"
"github.com/apache/trafficcontrol/lib/go-tc"
"github.com/apache/trafficcontrol/traffic_monitor/cache"
"github.com/apache/trafficcontrol/traffic_monitor/ds"
"github.com/apache/trafficcontrol/traffic_monitor/dsdata"
"github.com/apache/trafficcontrol/traffic_monitor/peer"
"github.com/apache/trafficcontrol/traffic_monitor/threadsafe"
"github.com/apache/trafficcontrol/traffic_monitor/todata"
"github.com/json-iterator/go"
)
// CacheStatus contains summary stat data about the given cache.
// TODO make fields nullable, so error fields can be omitted, letting API callers still get updates for unerrored fields
type CacheStatus struct {
Type *string `json:"type,omitempty"`
Status *string `json:"status,omitempty"`
StatusPoller *string `json:"status_poller,omitempty"`
LoadAverage *float64 `json:"load_average,omitempty"`
// QueryTimeMilliseconds is the time it took this app to perform a complete query and process the data, end-to-end, for the latest health query.
QueryTimeMilliseconds *int64 `json:"query_time_ms,omitempty"`
// HealthTimeMilliseconds is the time it took to make the HTTP request and get back the full response, for the latest health query.
HealthTimeMilliseconds *int64 `json:"health_time_ms,omitempty"`
// StatTimeMilliseconds is the time it took to make the HTTP request and get back the full response, for the latest stat query.
StatTimeMilliseconds *int64 `json:"stat_time_ms,omitempty"`
// StatSpanMilliseconds is the length of time between completing the most recent two stat queries. This can be used as a rough gauge of the end-to-end query processing time.
StatSpanMilliseconds *int64 `json:"stat_span_ms,omitempty"`
// HealthSpanMilliseconds is the length of time between completing the most recent two health queries. This can be used as a rough gauge of the end-to-end query processing time.
HealthSpanMilliseconds *int64 `json:"health_span_ms,omitempty"`
BandwidthKbps *float64 `json:"bandwidth_kbps,omitempty"`
BandwidthCapacityKbps *float64 `json:"bandwidth_capacity_kbps,omitempty"`
ConnectionCount *int64 `json:"connection_count,omitempty"`
}
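// An illustrative (hypothetical values) JSON serialization of CacheStatus:
//
//	{"type":"EDGE","status":"REPORTED - available","status_poller":"health",
//	 "load_average":0.42,"query_time_ms":12,"bandwidth_kbps":10240.5,"connection_count":1200}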
func srvAPICacheStates(
toData todata.TODataThreadsafe,
statInfoHistory threadsafe.ResultInfoHistory,
statResultHistory threadsafe.ResultStatHistory,
healthHistory threadsafe.ResultHistory,
lastHealthDurations threadsafe.DurationMap,
localStates peer.CRStatesThreadsafe,
lastStats threadsafe.LastStats,
localCacheStatus threadsafe.CacheAvailableStatus,
statMaxKbpses threadsafe.CacheKbpses,
monitorConfig threadsafe.TrafficMonitorConfigMap,
) ([]byte, error) {
json := jsoniter.ConfigFastest
return json.Marshal(createCacheStatuses(toData.Get().ServerTypes, statInfoHistory.Get(), statResultHistory, healthHistory.Get(), lastHealthDurations.Get(), localStates.Get().Caches, lastStats.Get(), localCacheStatus, statMaxKbpses, monitorConfig.Get().TrafficServer))
}
func createCacheStatuses(
cacheTypes map[tc.CacheName]tc.CacheType,
statInfoHistory cache.ResultInfoHistory,
statResultHistory threadsafe.ResultStatHistory,
healthHistory map[tc.CacheName][]cache.Result,
lastHealthDurations map[tc.CacheName]time.Duration,
cacheStates map[tc.CacheName]tc.IsAvailable,
lastStats dsdata.LastStats,
localCacheStatusThreadsafe threadsafe.CacheAvailableStatus,
statMaxKbpses threadsafe.CacheKbpses,
servers map[string]tc.TrafficServer,
) map[tc.CacheName]CacheStatus {
conns := createCacheConnections(statResultHistory)
statii := map[tc.CacheName]CacheStatus{}
localCacheStatus := localCacheStatusThreadsafe.Get().Copy() // TODO test whether copy is necessary
maxKbpses := statMaxKbpses.Get()
for cacheNameStr, serverInfo := range servers {
cacheName := tc.CacheName(cacheNameStr)
status, statusPoller := cacheStatusAndPoller(cacheName, serverInfo, localCacheStatus)
cacheTypeStr := ""
if cacheType, ok := cacheTypes[cacheName]; !ok {
log.Infof("Error getting cache type for %v: not in types\n", cacheName)
} else {
cacheTypeStr = string(cacheType)
}
loadAverage := 0.0
if infoHistory, ok := statInfoHistory[cacheName]; !ok {
log.Infof("createCacheStatuses stat info history missing cache %s\n", cacheName)
} else if len(infoHistory) < 1 {
log.Infof("createCacheStatuses stat info history empty for cache %s\n", cacheName)
} else {
loadAverage = infoHistory[0].Vitals.LoadAvg
}
healthQueryTime, err := latestQueryTimeMS(cacheName, lastHealthDurations)
if err != nil {
log.Infof("Error getting cache %v health query time: %v\n", cacheName, err)
}
statTime, err := latestResultInfoTimeMS(cacheName, statInfoHistory)
if err != nil {
log.Infof("Error getting cache %v stat result time: %v\n", cacheName, err)
}
healthTime, err := latestResultTimeMS(cacheName, healthHistory)
if err != nil {
log.Infof("Error getting cache %v health result time: %v\n", cacheName, err)
}
statSpan, err := infoResultSpanMS(cacheName, statInfoHistory)
if err != nil {
log.Infof("Error getting cache %v stat span: %v\n", cacheName, err)
}
healthSpan, err := resultSpanMS(cacheName, healthHistory)
if err != nil {
log.Infof("Error getting cache %v health span: %v\n", cacheName, err)
}
var kbps *float64
if lastStat, ok := lastStats.Caches[cacheName]; !ok {
log.Infof("cache not in last kbps cache %s\n", cacheName)
} else {
kbpsVal := lastStat.Bytes.PerSec / float64(ds.BytesPerKilobit)
kbps = &kbpsVal
}
var maxKbps *float64
if v, ok := maxKbpses[cacheName]; !ok {
log.Infof("cache not in max kbps cache %s\n", cacheName)
} else {
fv := float64(v)
maxKbps = &fv
}
var connections *int64
connectionsVal, ok := conns[cacheName]
if !ok {
log.Infof("cache not in connections %s\n", cacheName)
} else {
connections = &connectionsVal
}
statii[cacheName] = CacheStatus{
Type: &cacheTypeStr,
LoadAverage: &loadAverage,
QueryTimeMilliseconds: &healthQueryTime,
StatTimeMilliseconds: &statTime,
HealthTimeMilliseconds: &healthTime,
StatSpanMilliseconds: &statSpan,
HealthSpanMilliseconds: &healthSpan,
BandwidthKbps: kbps,
BandwidthCapacityKbps: maxKbps,
ConnectionCount: connections,
Status: &status,
StatusPoller: &statusPoller,
}
}
return statii
}
func cacheStatusAndPoller(server tc.CacheName, serverInfo tc.TrafficServer, localCacheStatus cache.AvailableStatuses) (string, string) {
switch status := tc.CacheStatusFromString(serverInfo.ServerStatus); status {
case tc.CacheStatusAdminDown:
fallthrough
case tc.CacheStatusOnline:
fallthrough
case tc.CacheStatusOffline:
return status.String(), ""
}
statusVal, ok := localCacheStatus[server]
if !ok {
log.Infof("cache not in statuses %s\n", server)
return "ERROR - not in statuses", ""
}
if statusVal.Why != "" {
return statusVal.Why, statusVal.Poller
}
if statusVal.Available {
return fmt.Sprintf("%s - available", statusVal.Status), statusVal.Poller
}
return fmt.Sprintf("%s - unavailable", statusVal.Status), statusVal.Poller
}
func createCacheConnections(statResultHistory threadsafe.ResultStatHistory) map[tc.CacheName]int64 {
conns := map[tc.CacheName]int64{}
statResultHistory.Range(func(server tc.CacheName, history threadsafe.ResultStatValHistory) bool {
// for server, history := range statResultHistory {
vals := history.Load("proxy.process.http.current_client_connections")
if len(vals) == 0 {
return true
}
v, ok := vals[0].Val.(float64)
if !ok {
return true // TODO log warning? error?
}
conns[server] = int64(v)
return true
})
return conns
}
// infoResultSpanMS returns the length of time between the most recent two results. That is, how long could the cache have been down before we would have noticed it? Note this returns the time between the most recent two results, irrespective of whether they errored.
// Note this is unrelated to the Stat Span field.
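// For example, if the two most recent results completed at 10:00:05.000 and 10:00:03.500,
// the reported span is 1500 milliseconds.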
func infoResultSpanMS(cacheName tc.CacheName, history cache.ResultInfoHistory) (int64, error) {
results, ok := history[cacheName]
if !ok {
return 0, fmt.Errorf("cache %v has no history", cacheName)
}
if len(results) == 0 {
return 0, fmt.Errorf("cache %v history empty", cacheName)
}
if len(results) < 2 {
return 0, fmt.Errorf("cache %v history only has one result, can't compute span between results", cacheName)
}
latestResult := results[0]
penultimateResult := results[1]
span := latestResult.Time.Sub(penultimateResult.Time)
return int64(span / time.Millisecond), nil
}
// resultSpanMS returns the length of time between the most recent two results. That is, how long could the cache have been down before we would have noticed it? Note this returns the time between the most recent two results, irrespective of whether they errored.
// Note this is unrelated to the Stat Span field.
func resultSpanMS(cacheName tc.CacheName, history map[tc.CacheName][]cache.Result) (int64, error) {
results, ok := history[cacheName]
if !ok {
return 0, fmt.Errorf("cache %v has no history", cacheName)
}
if len(results) == 0 {
return 0, fmt.Errorf("cache %v history empty", cacheName)
}
if len(results) < 2 {
return 0, fmt.Errorf("cache %v history only has one result, can't compute span between results", cacheName)
}
latestResult := results[0]
penultimateResult := results[1]
span := latestResult.Time.Sub(penultimateResult.Time)
return int64(span / time.Millisecond), nil
}
func latestQueryTimeMS(cacheName tc.CacheName, lastDurations map[tc.CacheName]time.Duration) (int64, error) {
queryTime, ok := lastDurations[cacheName]
if !ok {
return 0, fmt.Errorf("cache %v not in last durations\n", cacheName)
}
return int64(queryTime / time.Millisecond), nil
}
// latestResultTimeMS returns the length of time in milliseconds that it took to request the most recent non-errored result.
func latestResultTimeMS(cacheName tc.CacheName, history map[tc.CacheName][]cache.Result) (int64, error) {
results, ok := history[cacheName]
if !ok {
return 0, fmt.Errorf("cache %v has no history", cacheName)
}
if len(results) == 0 {
return 0, fmt.Errorf("cache %v history empty", cacheName)
}
result := cache.Result{}
foundResult := false
for _, r := range results {
if r.Error == nil {
result = r
foundResult = true
break
}
}
if !foundResult {
return 0, fmt.Errorf("cache %v No unerrored result", cacheName)
}
return int64(result.RequestTime / time.Millisecond), nil
}
// latestResultInfoTimeMS returns the length of time in milliseconds that it took to request the most recent non-errored result info.
func latestResultInfoTimeMS(cacheName tc.CacheName, history cache.ResultInfoHistory) (int64, error) {
results, ok := history[cacheName]
if !ok {
return 0, fmt.Errorf("cache %v has no history", cacheName)
}
if len(results) == 0 {
return 0, fmt.Errorf("cache %v history empty", cacheName)
}
result := cache.ResultInfo{}
foundResult := false
for _, r := range results {
if r.Error == nil {
result = r
foundResult = true
break
}
}
if !foundResult {
return 0, fmt.Errorf("cache %v No unerrored result", cacheName)
}
return int64(result.RequestTime / time.Millisecond), nil
}
lib.rs
pub mod i18n;
pub mod student;
pub mod banner;
pub mod gacha;

Point.ts
export class Point
{
    constructor( x:number, y:number )
    {
        this.x = x;
        this.y = y;
    }
    public getX():number
    {
        return this.x;
    }
    public getY():number
    {
        return this.y;
    }
    public setX( x:number )
    {
        this.x = x;
    }
    public setY( y:number )
    {
        this.y = y;
    }
    // X Coordinate
    private x:number;
    // Y Coordinate
    private y:number;
}

pool.go
// MIT License
// Copyright (c) 2018 Andy Pan
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package ants
import (
"sync"
"sync/atomic"
"time"
"github.com/panjf2000/ants/v2/internal"
)
// Pool accepts tasks from clients; it limits the total number of goroutines to a given number by recycling them.
type Pool struct {
// capacity of the pool, a negative value means that the capacity of pool is limitless, an infinite pool is used to
// avoid potential issue of endless blocking caused by nested usage of a pool: submitting a task to pool
// which submits a new task to the same pool.
capacity int32
// running is the number of the currently running goroutines.
running int32
// lock for protecting the worker queue.
lock sync.Locker
// workers is a slice that store the available workers.
workers workerArray
// state is used to notify the pool to close itself.
state int32
// cond for waiting to get an idle worker.
cond *sync.Cond
// workerCache speeds up the obtainment of a usable worker in retrieveWorker.
workerCache sync.Pool
// blockingNum is the number of goroutines already blocked on pool.Submit, protected by pool.lock.
blockingNum int
options *Options
}
// purgePeriodically clears expired workers periodically; it runs in an individual goroutine, as a scavenger.
func (p *Pool) purgePeriodically() {
heartbeat := time.NewTicker(p.options.ExpiryDuration)
defer heartbeat.Stop()
for range heartbeat.C {
if p.IsClosed() {
break
}
p.lock.Lock()
expiredWorkers := p.workers.retrieveExpiry(p.options.ExpiryDuration)
p.lock.Unlock()
// Notify obsolete workers to stop.
// This notification must be outside the p.lock, since w.task
// may be blocking and may consume a lot of time if many workers
// are located on non-local CPUs.
for i := range expiredWorkers {
expiredWorkers[i].task <- nil
expiredWorkers[i] = nil
}
// There might be a situation where all workers have been cleaned up (no worker is running)
// while some invokers still get stuck in "p.cond.Wait()",
// then it ought to wake all those invokers.
if p.Running() == 0 {
p.cond.Broadcast()
}
}
}
// NewPool generates an instance of ants pool.
func NewPool(size int, options ...Option) (*Pool, error) {
	opts := loadOptions(options...)
	if size <= 0 {
		size = -1
	}
	if expiry := opts.ExpiryDuration; expiry < 0 {
		return nil, ErrInvalidPoolExpiry
	} else if expiry == 0 {
		opts.ExpiryDuration = DefaultCleanIntervalTime
	}
	if opts.Logger == nil {
		opts.Logger = defaultLogger
	}
	p := &Pool{
		capacity: int32(size),
		lock:     internal.NewSpinLock(),
		options:  opts,
	}
	p.workerCache.New = func() interface{} {
		return &goWorker{
			pool: p,
			task: make(chan func(), workerChanCap),
		}
	}
	if p.options.PreAlloc {
		if size == -1 {
			return nil, ErrInvalidPreAllocSize
		}
		p.workers = newWorkerArray(loopQueueType, size)
	} else {
		p.workers = newWorkerArray(stackType, 0)
	}
	p.cond = sync.NewCond(p.lock)
	// Start a goroutine to clean up expired workers periodically.
	go p.purgePeriodically()
	return p, nil
}
// ---------------------------------------------------------------------------
// Submit submits a task to this pool.
func (p *Pool) Submit(task func()) error {
if p.IsClosed() {
return ErrPoolClosed
}
var w *goWorker
if w = p.retrieveWorker(); w == nil {
return ErrPoolOverload
}
w.task <- task
return nil
}
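// Usage sketch (assuming the exported API defined in this file):
//
//	p, err := NewPool(10)
//	if err != nil {
//		panic(err)
//	}
//	defer p.Release()
//	_ = p.Submit(func() { /* do some work */ })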
// Running returns the number of the currently running goroutines.
func (p *Pool) Running() int {
return int(atomic.LoadInt32(&p.running))
}
// Free returns the number of available goroutines to work; -1 indicates this pool is unlimited.
func (p *Pool) Free() int {
c := p.Cap()
if c < 0 {
return -1
}
return c - p.Running()
}
// Cap returns the capacity of this pool.
func (p *Pool) Cap() int {
return int(atomic.LoadInt32(&p.capacity))
}
// Tune changes the capacity of this pool; note that it has no effect on an infinite or pre-allocated pool.
func (p *Pool) Tune(size int) {
if capacity := p.Cap(); capacity == -1 || size <= 0 || size == capacity || p.options.PreAlloc {
return
}
atomic.StoreInt32(&p.capacity, int32(size))
}
// IsClosed indicates whether the pool is closed.
func (p *Pool) IsClosed() bool {
return atomic.LoadInt32(&p.state) == CLOSED
}
// Release closes this pool and releases the worker queue.
func (p *Pool) Release() {
atomic.StoreInt32(&p.state, CLOSED)
p.lock.Lock()
p.workers.reset()
p.lock.Unlock()
// There might be some callers waiting in retrieveWorker(), so we need to wake them up to prevent
// those callers blocking infinitely.
p.cond.Broadcast()
}
// Reboot reboots a closed pool.
func (p *Pool) Reboot() {
if atomic.CompareAndSwapInt32(&p.state, CLOSED, OPENED) {
go p.purgePeriodically()
}
}
// ---------------------------------------------------------------------------
// incRunning increases the number of the currently running goroutines.
func (p *Pool) incRunning() {
atomic.AddInt32(&p.running, 1)
}
// decRunning decreases the number of the currently running goroutines.
func (p *Pool) decRunning() {
atomic.AddInt32(&p.running, -1)
}
// retrieveWorker returns an available worker to run the tasks.
func (p *Pool) retrieveWorker() (w *goWorker) {
spawnWorker := func() {
w = p.workerCache.Get().(*goWorker)
w.run()
}
p.lock.Lock()
w = p.workers.detach()
if w != nil { // first try to fetch the worker from the queue
p.lock.Unlock()
} else if capacity := p.Cap(); capacity == -1 || capacity > p.Running() {
// if the worker queue is empty and we don't run out of the pool capacity,
// then just spawn a new worker goroutine.
p.lock.Unlock()
spawnWorker()
} else { // otherwise, we'll have to keep them blocked and wait for at least one worker to be put back into pool.
if p.options.Nonblocking {
p.lock.Unlock()
return
}
retry:
if p.options.MaxBlockingTasks != 0 && p.blockingNum >= p.options.MaxBlockingTasks {
p.lock.Unlock()
return
}
p.blockingNum++
p.cond.Wait() // block and wait for an available worker
p.blockingNum--
var nw int
if nw = p.Running(); nw == 0 { // awakened by the scavenger
p.lock.Unlock()
if !p.IsClosed() {
spawnWorker()
}
return
}
if w = p.workers.detach(); w == nil {
if nw < capacity {
p.lock.Unlock()
spawnWorker()
return
}
goto retry
}
p.lock.Unlock()
}
return
}
// revertWorker puts a worker back into free pool, recycling the goroutines.
func (p *Pool) revertWorker(worker *goWorker) bool {
if capacity := p.Cap(); (capacity > 0 && p.Running() > capacity) || p.IsClosed() {
return false
}
worker.recycleTime = time.Now()
p.lock.Lock()
// To avoid memory leaks, add a double check in the lock scope.
// Issue: https://github.com/panjf2000/ants/issues/113
if p.IsClosed() {
p.lock.Unlock()
return false
}
err := p.workers.insert(worker)
if err != nil {
p.lock.Unlock()
return false
}
// Notify the invoker stuck in 'retrieveWorker()' that there is an available worker in the worker queue.
p.cond.Signal()
p.lock.Unlock()
return true
}
error.py
# coding: utf-8

from __future__ import absolute_import

import warnings
import textwrap

from ruamel.yaml.compat import utf8

if False:  # MYPY
    from typing import Any, Dict, Optional, List, Text  # NOQA

__all__ = [
    'FileMark', 'StringMark', 'CommentMark', 'YAMLError', 'MarkedYAMLError',
    'ReusedAnchorWarning', 'UnsafeLoaderWarning', 'MarkedYAMLWarning',
    'MarkedYAMLFutureWarning',
]


class StreamMark(object):
    __slots__ = 'name', 'index', 'line', 'column',

    def __init__(self, name, index, line, column):
        # type: (Any, int, int, int) -> None
        self.name = name
        self.index = index
        self.line = line
        self.column = column

    def __str__(self):
        # type: () -> Any
        where = " in \"%s\", line %d, column %d" \
            % (self.name, self.line + 1, self.column + 1)
        return where


class FileMark(StreamMark):
    __slots__ = ()


class StringMark(StreamMark):
    __slots__ = 'name', 'index', 'line', 'column', 'buffer', 'pointer',

    def __init__(self, name, index, line, column, buffer, pointer):
        # type: (Any, int, int, int, Any, Any) -> None
        StreamMark.__init__(self, name, index, line, column)
        self.buffer = buffer
        self.pointer = pointer

    def get_snippet(self, indent=4, max_length=75):
        # type: (int, int) -> Any
        if self.buffer is None:  # always False
            return None
        head = ''
        start = self.pointer
        while (start > 0 and
               self.buffer[start - 1] not in u'\0\r\n\x85\u2028\u2029'):
            start -= 1
            if self.pointer - start > max_length / 2 - 1:
                head = ' ... '
                start += 5
                break
        tail = ''
        end = self.pointer
        while (end < len(self.buffer) and
               self.buffer[end] not in u'\0\r\n\x85\u2028\u2029'):
            end += 1
            if end - self.pointer > max_length / 2 - 1:
                tail = ' ... '
                end -= 5
                break
        snippet = utf8(self.buffer[start:end])
        caret = '^ (line: {})'.format(self.line + 1)
        return ' ' * indent + head + snippet + tail + '\n' \
            + ' ' * (indent + self.pointer - start + len(head)) + caret

    def __str__(self):
        # type: () -> Any
        snippet = self.get_snippet()
        where = " in \"%s\", line %d, column %d" \
            % (self.name, self.line + 1, self.column + 1)
        if snippet is not None:
            where += ":\n" + snippet
        return where


class CommentMark(object):
    __slots__ = 'column',

    def __init__(self, column):
        # type: (Any) -> None
        self.column = column


class YAMLError(Exception):
    pass


class MarkedYAMLError(YAMLError):
    def __init__(self, context=None, context_mark=None,
                 problem=None, problem_mark=None, note=None, warn=None):
        # type: (Any, Any, Any, Any, Any, Any) -> None
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note
        # warn is ignored

    def __str__(self):
        # type: () -> Any
        lines = []  # type: List[str]
        if self.context is not None:
            lines.append(self.context)
        if self.context_mark is not None \
                and (self.problem is None or self.problem_mark is None or
                     self.context_mark.name != self.problem_mark.name or
                     self.context_mark.line != self.problem_mark.line or
                     self.context_mark.column != self.problem_mark.column):
            lines.append(str(self.context_mark))
        if self.problem is not None:
            lines.append(self.problem)
        if self.problem_mark is not None:
            lines.append(str(self.problem_mark))
        if self.note is not None and self.note:
            note = textwrap.dedent(self.note)  # type: ignore
            lines.append(note)
        return '\n'.join(lines)


class YAMLStreamError(Exception):
    pass


class YAMLWarning(Warning):
    pass


class MarkedYAMLWarning(YAMLWarning):
    def __init__(self, context=None, context_mark=None,
                 problem=None, problem_mark=None, note=None, warn=None):
        # type: (Any, Any, Any, Any, Any, Any) -> None
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note
        self.warn = warn

    def __str__(self):
        # type: () -> Any
        lines = []  # type: List[str]
        if self.context is not None:
            lines.append(self.context)
        if self.context_mark is not None \
                and (self.problem is None or self.problem_mark is None or
                     self.context_mark.name != self.problem_mark.name or
                     self.context_mark.line != self.problem_mark.line or
                     self.context_mark.column != self.problem_mark.column):
            lines.append(str(self.context_mark))
        if self.problem is not None:
            lines.append(self.problem)
        if self.problem_mark is not None:
            lines.append(str(self.problem_mark))
        if self.note is not None and self.note:
            note = textwrap.dedent(self.note)  # type: ignore
            lines.append(note)
        if self.warn is not None and self.warn:
            warn = textwrap.dedent(self.warn)  # type: ignore
            lines.append(warn)
        return '\n'.join(lines)


class ReusedAnchorWarning(YAMLWarning):
    pass


class UnsafeLoaderWarning(YAMLWarning):
    text = """
The default 'Loader' for 'load(stream)' without further arguments can be unsafe.
Use 'load(stream, Loader=ruamel.yaml.Loader)' explicitly if that is OK.
Alternatively include the following in your code:
import warnings
warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning)
In most other cases you should consider using 'safe_load(stream)'"""


warnings.simplefilter('once', UnsafeLoaderWarning)


class MantissaNoDotYAML1_1Warning(YAMLWarning):
    def __init__(self, node, flt_str):
        # type: (Any, Any) -> None
        self.node = node
        self.flt = flt_str

    def __str__(self):
        # type: () -> Any
        line = self.node.start_mark.line
        col = self.node.start_mark.column
        return """
In YAML 1.1 floating point values should have a dot ('.') in their mantissa.
See the Floating-Point Language-Independent Type for YAML™ Version 1.1 specification
( http://yaml.org/type/float.html ). This dot is not required for JSON nor for YAML 1.2
Correct your float: "{}" on line: {}, column: {}
or alternatively include the following in your code:
import warnings
warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
""".format(self.flt, line, col)


warnings.simplefilter('once', MantissaNoDotYAML1_1Warning)


class YAMLFutureWarning(Warning):
    pass


class MarkedYAMLFutureWarning(YAMLFutureWarning):
    def __init__(self, context=None, context_mark=None,
                 problem=None, problem_mark=None, note=None, warn=None):
        # type: (Any, Any, Any, Any, Any, Any) -> None
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note
        self.warn = warn

    def __str__(self):
        # type: () -> Any
        lines = []  # type: List[str]
        if self.context is not None:
            lines.append(self.context)
        if self.context_mark is not None \
                and (self.problem is None or self.problem_mark is None or
                     self.context_mark.name != self.problem_mark.name or
                     self.context_mark.line != self.problem_mark.line or
                     self.context_mark.column != self.problem_mark.column):
            lines.append(str(self.context_mark))
        if self.problem is not None:
            lines.append(self.problem)
        if self.problem_mark is not None:
            lines.append(str(self.problem_mark))
        if self.note is not None and self.note:
            note = textwrap.dedent(self.note)  # type: ignore
            lines.append(note)
        if self.warn is not None and self.warn:
            warn = textwrap.dedent(self.warn)  # type: ignore
            lines.append(warn)
        return '\n'.join(lines)
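
# Usage sketch (hypothetical call site): a YAML 1.1 loader could emit the warning
# above for a float scalar written without a dot, e.g.:
#   warnings.warn(MantissaNoDotYAML1_1Warning(node, '1e6'))
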
controller.go
/*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
sharedclientset "github.com/knative/pkg/client/clientset/versioned"
sharedinformers "github.com/knative/pkg/client/informers/externalversions"
clientset "github.com/knative/eventing/pkg/client/clientset/versioned"
informers "github.com/knative/eventing/pkg/client/informers/externalversions"
)
type Interface interface {
Run(threadiness int, stopCh <-chan struct{}) error
}
type Constructor func(
kubernetes.Interface,
clientset.Interface,
sharedclientset.Interface,
*rest.Config,
kubeinformers.SharedInformerFactory,
informers.SharedInformerFactory,
sharedinformers.SharedInformerFactory,
) Interface
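// A (hypothetical) constructor matching the Constructor signature above:
//
//	func NewExampleController(kubeClient kubernetes.Interface, client clientset.Interface,
//		sharedClient sharedclientset.Interface, cfg *rest.Config,
//		kubeFactory kubeinformers.SharedInformerFactory, factory informers.SharedInformerFactory,
//		sharedFactory sharedinformers.SharedInformerFactory) Interface {
//		return &exampleController{ /* wire clients and informer factories here */ }
//	}
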
node_selector_matcher_spec.ts
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {AttributeMarker, TAttributes, TNode, TNodeType} from '../../src/render3/interfaces/node';
import {CssSelector, CssSelectorList, NG_PROJECT_AS_ATTR_NAME, SelectorFlags,} from '../../src/render3/interfaces/projection';
import {getProjectAsAttrValue, isNodeMatchingSelectorList, isNodeMatchingSelector} from '../../src/render3/node_selector_matcher';
import {createTNode} from '@angular/core/src/render3/instructions';
function testLStaticData(tagName: string, attrs: TAttributes | null): TNode {
return createTNode(TNodeType.Element, 0, tagName, attrs, null, null);
}
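// As the inline comments below suggest, a CssSelector is encoded as a flat array mixing
// element/attribute names with SelectorFlags markers; e.g.
// ['div', 'title', 'title', SelectorFlags.CLASS, 'foo'] corresponds to 'div[title=title].foo'.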
describe('css selector matching', () => {
function isMatching(tagName: string, attrs: TAttributes | null, selector: CssSelector): boolean {
return isNodeMatchingSelector(
createTNode(TNodeType.Element, 0, tagName, attrs, null, null), selector);
}
describe('isNodeMatchingSimpleSelector', () => {
describe('element matching', () => {
it('should match element name only if names are the same', () => {
expect(isMatching('span', null, ['span']))
.toBeTruthy(`Selector 'span' should match <span>`);
expect(isMatching('span', null, ['div']))
.toBeFalsy(`Selector 'div' should NOT match <span>`);
});
/**
* We assume that compiler will lower-case tag names both in LNode
* and in a selector.
*/
it('should match element name case-sensitively', () => {
expect(isMatching('span', null, ['SPAN']))
.toBeFalsy(`Selector 'SPAN' should NOT match <span>`);
expect(isMatching('SPAN', null, ['span']))
.toBeFalsy(`Selector 'span' should NOT match <SPAN>'`);
});
});
describe('attributes matching', () => {
// TODO: do we need to differentiate no value and empty value? that is: title vs. title="" ?
it('should match single attribute without value', () => {
expect(isMatching('span', ['title', ''], [
'', 'title', ''
])).toBeTruthy(`Selector '[title]' should match <span title>`);
expect(isMatching('span', ['title', 'my title'], [
'', 'title', ''
])).toBeTruthy(`Selector '[title]' should match <span title="my title">`);
expect(isMatching('span', ['name', 'name'], [
'', 'title', ''
])).toBeFalsy(`Selector '[title]' should NOT match <span name="name">`);
expect(isMatching('span', null, [
'', 'title', ''
])).toBeFalsy(`Selector '[title]' should NOT match <span>`);
expect(isMatching('span', ['title', ''], [
'', 'other', ''
])).toBeFalsy(`Selector '[other]' should NOT match <span title="">'`);
});
it('should match namespaced attributes', () => {
expect(isMatching(
'span', [AttributeMarker.NamespaceURI, 'http://some/uri', 'title', 'name'],
['', 'title', '']));
});
it('should match selector with one attribute without value when element has several attributes',
() => {
expect(isMatching('span', ['id', 'my_id', 'title', 'test_title'], [
'', 'title', ''
])).toBeTruthy(`Selector '[title]' should match <span id="my_id" title="test_title">`);
});
it('should match single attribute with value', () => {
expect(isMatching('span', ['title', 'My Title'], [
'', 'title', 'My Title'
])).toBeTruthy(`Selector '[title="My Title"]' should match <span title="My Title">'`);
expect(isMatching('span', ['title', 'My Title'], [
'', 'title', 'Other Title'
])).toBeFalsy(`Selector '[title="Other Title"]' should NOT match <span title="My Title">`);
});
it('should not match attribute when element name does not match', () => {
expect(isMatching('span', ['title', 'My Title'], [
'div', 'title', ''
])).toBeFalsy(`Selector 'div[title]' should NOT match <span title="My Title">`);
expect(isMatching('span', ['title', 'My Title'], [
'div', 'title', 'My Title'
])).toBeFalsy(`Selector 'div[title="My Title"]' should NOT match <span title="My Title">`);
});
it('should match multiple attributes', () => {
// selector: '[title=title][name=name]'
const selector = ['', 'title', 'title', 'name', 'name'];
// <span title="title" name="name">
expect(isMatching('span', ['title', 'title', 'name', 'name'], selector))
.toBeTruthy(
`Selector '[title=title][name=name]' should match <span title="title" name="name">`);
// <span title="title">
expect(isMatching('span', ['title', 'title'], selector))
.toBeFalsy(`Selector '[title=title][name=name]' should NOT match <span title="title">`);
// <span name="name">
expect(isMatching('span', ['name', 'name'], selector))
.toBeFalsy(`Selector '[title=title][name=name]' should NOT match <span name="name">`);
});
it('should handle attribute values that match attribute names', () => {
// selector: [name=name]
const selector = ['', 'name', 'name'];
// <span title="name">
expect(isMatching('span', ['title', 'name'], selector))
.toBeFalsy(`Selector '[name=name]' should NOT match <span title="name">`);
// <span title="name" name="name">
expect(isMatching('span', ['title', 'name', 'name', 'name'], selector))
.toBeTruthy(`Selector '[name=name]' should match <span title="name" name="name">`);
});
/**
* We assume that compiler will lower-case all attribute names when generating code
*/
it('should match attribute name case-sensitively', () => {
expect(isMatching('span', ['foo', ''], [
'', 'foo', ''
])).toBeTruthy(`Selector '[foo]' should match <span foo>`);
expect(isMatching('span', ['foo', ''], [
'', 'Foo', ''
])).toBeFalsy(`Selector '[Foo]' should NOT match <span foo>`);
});
it('should match attribute values case-sensitively', () => {
expect(isMatching('span', ['foo', 'Bar'], [
'', 'foo', 'Bar'
])).toBeTruthy(`Selector '[foo="Bar"]' should match <span foo="Bar">`);
expect(isMatching('span', ['foo', 'Bar'], [
'', 'Foo', 'bar'
])).toBeFalsy(`Selector '[Foo="bar"]' should match <span foo="Bar">`);
});
it('should match class as an attribute', () => {
expect(isMatching('span', ['class', 'foo'], [
'', 'class', ''
])).toBeTruthy(`Selector '[class]' should match <span class="foo">`);
expect(isMatching('span', ['class', 'foo'], [
'', 'class', 'foo'
])).toBeTruthy(`Selector '[class="foo"]' should match <span class="foo">`);
});
it('should take optional binding attribute names into account', () => {
expect(isMatching('span', [AttributeMarker.SelectOnly, 'directive'], [
'', 'directive', ''
])).toBeTruthy(`Selector '[directive]' should match <span [directive]="exp">`);
});
it('should not match optional binding attribute names if attribute selector has value',
() => {
expect(isMatching('span', [AttributeMarker.SelectOnly, 'directive'], [
'', 'directive', 'value'
])).toBeFalsy(`Selector '[directive=value]' should not match <span [directive]="exp">`);
});
it('should not match optional binding attribute names if attribute selector has value and next name equals to value',
() => {
expect(isMatching(
'span', [AttributeMarker.SelectOnly, 'directive', 'value'],
['', 'directive', 'value']))
.toBeFalsy(
`Selector '[directive=value]' should not match <span [directive]="exp" [value]="otherExp">`);
});
});
describe('class matching', () => {
it('should match with a class selector when an element has multiple classes', () => {
expect(isMatching('span', ['class', 'foo bar'], [
'', SelectorFlags.CLASS, 'foo'
])).toBeTruthy(`Selector '.foo' should match <span class="foo bar">`);
expect(isMatching('span', ['class', 'foo bar'], [
'', SelectorFlags.CLASS, 'bar'
])).toBeTruthy(`Selector '.bar' should match <span class="foo bar">`);
expect(isMatching('span', ['class', 'foo bar'], [
'', SelectorFlags.CLASS, 'baz'
])).toBeFalsy(`Selector '.baz' should NOT match <span class="foo bar">`);
});
it('should not match on partial class name', () => {
expect(isMatching('span', ['class', 'foobar'], [
'', SelectorFlags.CLASS, 'foo'
])).toBeFalsy(`Selector '.foo' should NOT match <span class="foobar">`);
expect(isMatching('span', ['class', 'foobar'], [
'', SelectorFlags.CLASS, 'bar'
])).toBeFalsy(`Selector '.bar' should NOT match <span class="foobar">`);
expect(isMatching('span', ['class', 'foobar'], [
'', SelectorFlags.CLASS, 'ob'
])).toBeFalsy(`Selector '.ob' should NOT match <span class="foobar">`);
expect(isMatching('span', ['class', 'foobar'], [
'', SelectorFlags.CLASS, 'foobar'
])).toBeTruthy(`Selector '.foobar' should match <span class="foobar">`);
});
it('should support selectors with multiple classes', () => {
expect(isMatching('span', ['class', 'foo bar'], [
'', SelectorFlags.CLASS, 'foo', 'bar'
])).toBeTruthy(`Selector '.foo.bar' should match <span class="foo bar">`);
expect(isMatching('span', ['class', 'foo'], [
'', SelectorFlags.CLASS, 'foo', 'bar'
])).toBeFalsy(`Selector '.foo.bar' should NOT match <span class="foo">`);
expect(isMatching('span', ['class', 'bar'], [
'', SelectorFlags.CLASS, 'foo', 'bar'
])).toBeFalsy(`Selector '.foo.bar' should NOT match <span class="bar">`);
});
it('should support selectors with multiple classes regardless of class name order', () => {
expect(isMatching('span', ['class', 'foo bar'], [
'', SelectorFlags.CLASS, 'bar', 'foo'
])).toBeTruthy(`Selector '.bar.foo' should match <span class="foo bar">`);
expect(isMatching('span', ['class', 'bar foo'], [
'', SelectorFlags.CLASS, 'foo', 'bar'
])).toBeTruthy(`Selector '.foo.bar' should match <span class="bar foo">`);
expect(isMatching('span', ['class', 'bar foo'], [
'', SelectorFlags.CLASS, 'bar', 'foo'
])).toBeTruthy(`Selector '.bar.foo' should match <span class="bar foo">`);
});
it('should match class name case-sensitively', () => {
expect(isMatching('span', ['class', 'Foo'], [
'', SelectorFlags.CLASS, 'Foo'
])).toBeTruthy(`Selector '.Foo' should match <span class="Foo">`);
expect(isMatching('span', ['class', 'Foo'], [
'', SelectorFlags.CLASS, 'foo'
])).toBeFalsy(`Selector '.foo' should NOT match <span class="Foo">`);
});
it('should work without a class attribute', () => {
// selector: '.foo'
const selector = ['', SelectorFlags.CLASS, 'foo'];
// <div title="title">
expect(isMatching('div', ['title', 'title'], selector))
.toBeFalsy(`Selector '.foo' should NOT match <div title="title">`);
// <div>
expect(isMatching('div', null, selector))
.toBeFalsy(`Selector '.foo' should NOT match <div>`);
});
it('should work with elements, attributes, and classes', () => {
// selector: 'div.foo[title=title]'
const selector = ['div', 'title', 'title', SelectorFlags.CLASS, 'foo'];
// <div class="foo" title="title">
expect(isMatching('div', ['class', 'foo', 'title', 'title'], selector)).toBeTruthy();
// <div title="title">
expect(isMatching('div', ['title', 'title'], selector)).toBeFalsy();
// <div class="foo">
expect(isMatching('div', ['class', 'foo'], selector)).toBeFalsy();
});
});
});
describe('negations', () => {
it('should match when negation part is null', () => {
expect(isMatching('span', null, ['span'])).toBeTruthy(`Selector 'span' should match <span>`);
});
it('should not match when negation part does not match', () => {
expect(isMatching('span', ['foo', ''], [
'', SelectorFlags.NOT | SelectorFlags.ELEMENT, 'span'
])).toBeFalsy(`Selector ':not(span)' should NOT match <span foo="">`);
expect(isMatching('span', ['foo', ''], [
'span', SelectorFlags.NOT | SelectorFlags.ATTRIBUTE, 'foo', ''
])).toBeFalsy(`Selector 'span:not([foo])' should NOT match <span foo="">`);
});
it('should not match negative selector with tag name and attributes', () => {
// selector: ':not(span[foo])'
const selector = ['', SelectorFlags.NOT | SelectorFlags.ELEMENT, 'span', 'foo', ''];
// <span foo="">
expect(isMatching('span', ['foo', ''], selector)).toBeFalsy();
// <span bar="">
expect(isMatching('span', ['bar', ''], selector)).toBeTruthy();
});
it('should not match negative classes', () => {
// selector: ':not(.foo.bar)'
const selector = ['', SelectorFlags.NOT | SelectorFlags.CLASS, 'foo', 'bar'];
// <span class="foo bar">
expect(isMatching('span', ['class', 'foo bar'], selector)).toBeFalsy();
// <span class="foo">
expect(isMatching('span', ['class', 'foo'], selector)).toBeTruthy();
// <span class="bar">
expect(isMatching('span', ['class', 'bar'], selector)).toBeTruthy();
});
it('should not match negative selector with classes and attributes', () => {
// selector: ':not(.baz[title])
const selector = [
'', SelectorFlags.NOT | SelectorFlags.ATTRIBUTE, 'title', '', SelectorFlags.CLASS, 'baz'
];
// <div class="baz">
expect(isMatching('div', ['class', 'baz'], selector)).toBeTruthy();
// <div title="title">
expect(isMatching('div', ['title', 'title'], selector)).toBeTruthy();
// <div class="baz" title="title">
expect(isMatching('div', ['class', 'baz', 'title', 'title'], selector)).toBeFalsy();
});
it('should not match negative selector with attribute selector after', () => {
// selector: ':not(.baz[title]):not([foo])'
const selector = [
'', SelectorFlags.NOT | SelectorFlags.ATTRIBUTE, 'title', '', SelectorFlags.CLASS, 'baz',
SelectorFlags.NOT | SelectorFlags.ATTRIBUTE, 'foo', ''
];
// <div class="baz">
expect(isMatching('div', ['class', 'baz'], selector)).toBeTruthy();
// <div class="baz" title="">
expect(isMatching('div', ['class', 'baz', 'title', ''], selector)).toBeFalsy();
// <div class="baz" foo="">
expect(isMatching('div', ['class', 'baz', 'foo', ''], selector)).toBeFalsy();
});
it('should not match with multiple negative selectors', () => {
// selector: ':not(span):not([foo])'
const selector = [
'', SelectorFlags.NOT | SelectorFlags.ELEMENT, 'span',
SelectorFlags.NOT | SelectorFlags.ATTRIBUTE, 'foo', ''
];
// <div foo="">
expect(isMatching('div', ['foo', ''], selector)).toBeFalsy();
// <span bar="">
expect(isMatching('span', ['bar', ''], selector)).toBeFalsy();
// <div bar="">
expect(isMatching('div', ['bar', ''], selector)).toBeTruthy();
});
it('should evaluate complex selector with negative selectors', () => {
// selector: 'div.foo.bar[name=name]:not(.baz):not([title])'
const selector = [
'div', 'name', 'name', SelectorFlags.CLASS, 'foo', 'bar',
SelectorFlags.NOT | SelectorFlags.ATTRIBUTE, 'title', '',
SelectorFlags.NOT | SelectorFlags.CLASS, 'baz'
];
// <div name="name" class="foo bar">
expect(isMatching('div', ['name', 'name', 'class', 'foo bar'], selector)).toBeTruthy();
// <div name="name" class="foo bar baz">
expect(isMatching('div', ['name', 'name', 'class', 'foo bar baz'], selector)).toBeFalsy();
// <div name="name" title class="foo bar">
expect(isMatching('div', ['name', 'name', 'title', '', 'class', 'foo bar'], selector))
.toBeFalsy();
});
});
describe('isNodeMatchingSelectorList', () => {
function isAnyMatching(
tagName: string, attrs: string[] | null, selector: CssSelectorList): boolean {
return isNodeMatchingSelectorList(testLStaticData(tagName, attrs), selector);
}
it('should match when there is only one simple selector without negations', () => {
expect(isAnyMatching('span', null, [['span']]))
.toBeTruthy(`Selector 'span' should match <span>`);
expect(isAnyMatching('span', null, [['div']]))
.toBeFalsy(`Selector 'div' should NOT match <span>`);
});
it('should match when there are multiple parts and only one is matching', () => {
expect(isAnyMatching('span', ['foo', 'bar'], [
['div'], ['', 'foo', 'bar']
])).toBeTruthy(`Selector 'div, [foo=bar]' should match <span foo="bar">`);
});
it('should not match when there are multiple parts and none is matching', () => {
expect(isAnyMatching('span', ['foo', 'bar'], [
['div'], ['', 'foo', 'baz']
])).toBeFalsy(`Selector 'div, [foo=baz]' should NOT match <span foo="bar">`);
});
});
describe('reading the ngProjectAs attribute value', function() {
function testTNode(attrs: string[] | null) { return testLStaticData('tag', attrs); }
it('should get ngProjectAs value if present', function() {
expect(getProjectAsAttrValue(testTNode([NG_PROJECT_AS_ATTR_NAME, 'tag[foo=bar]'])))
.toBe('tag[foo=bar]');
});
it('should return null if there are no attributes',
function() { expect(getProjectAsAttrValue(testTNode(null))).toBe(null); });
it('should return if ngProjectAs is not present', function() {
expect(getProjectAsAttrValue(testTNode(['foo', 'bar']))).toBe(null);
});
it('should not accidentally identify ngProjectAs in attribute values', function() {
expect(getProjectAsAttrValue(testTNode(['foo', NG_PROJECT_AS_ATTR_NAME]))).toBe(null);
});
});
});

networkpolicy_controller_test.go
// Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package networkpolicy
import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/watch"
k8stesting "k8s.io/client-go/testing"
"github.com/vmware-tanzu/antrea/pkg/apis/controlplane/v1beta1"
"github.com/vmware-tanzu/antrea/pkg/client/clientset/versioned"
"github.com/vmware-tanzu/antrea/pkg/client/clientset/versioned/fake"
)
const testNamespace = "ns1"
type antreaClientGetter struct {
clientset versioned.Interface
}
func (g *antreaClientGetter) GetAntreaClient() (versioned.Interface, error) {
return g.clientset, nil
}
func newTestController() (*Controller, *fake.Clientset, *mockReconciler) {
clientset := &fake.Clientset{}
ch := make(chan v1beta1.PodReference, 100)
controller := NewNetworkPolicyController(&antreaClientGetter{clientset}, nil, nil, "node1", ch)
reconciler := newMockReconciler()
controller.reconciler = reconciler
return controller, clientset, reconciler
}
// mockReconciler implements Reconciler. It simply records the latest states of rules
// it has been asked to reconcile, and provides two channels to receive its notifications
// for testing.
type mockReconciler struct {
sync.Mutex
lastRealized map[string]*CompletedRule
updated chan string
deleted chan string
}
func newMockReconciler() *mockReconciler {
return &mockReconciler{
lastRealized: map[string]*CompletedRule{},
updated: make(chan string, 10),
deleted: make(chan string, 10),
}
}
func (r *mockReconciler) Reconcile(rule *CompletedRule) error {
r.Lock()
defer r.Unlock()
r.lastRealized[rule.ID] = rule
r.updated <- rule.ID
return nil
}
func (r *mockReconciler) BatchReconcile(rules []*CompletedRule) error {
r.Lock()
defer r.Unlock()
for _, rule := range rules {
r.lastRealized[rule.ID] = rule
r.updated <- rule.ID
}
return nil
}
func (r *mockReconciler) Forget(ruleID string) error {
r.Lock()
defer r.Unlock()
delete(r.lastRealized, ruleID)
r.deleted <- ruleID
return nil
}
func (r *mockReconciler) getLastRealized(ruleID string) (*CompletedRule, bool) {
r.Lock()
defer r.Unlock()
lastRealized, exists := r.lastRealized[ruleID]
return lastRealized, exists
}
var _ Reconciler = &mockReconciler{}
func newAddressGroup(name string, addresses []v1beta1.GroupMemberPod) *v1beta1.AddressGroup {
return &v1beta1.AddressGroup{
ObjectMeta: v1.ObjectMeta{Name: name},
Pods: addresses,
}
}
func newAppliedToGroup(name string, pods []v1beta1.GroupMemberPod) *v1beta1.AppliedToGroup {
return &v1beta1.AppliedToGroup{
ObjectMeta: v1.ObjectMeta{Name: name},
Pods: pods,
}
}
func newNetworkPolicy(uid string, from, to, appliedTo []string, services []v1beta1.Service) *v1beta1.NetworkPolicy {
networkPolicyRule1 := v1beta1.NetworkPolicyRule{
Direction: v1beta1.DirectionIn,
From: v1beta1.NetworkPolicyPeer{AddressGroups: from},
To: v1beta1.NetworkPolicyPeer{AddressGroups: to},
Services: services,
}
return &v1beta1.NetworkPolicy{
ObjectMeta: v1.ObjectMeta{UID: types.UID(uid), Name: uid, Namespace: testNamespace},
Rules: []v1beta1.NetworkPolicyRule{networkPolicyRule1},
AppliedToGroups: appliedTo,
}
}
func getNetworkPolicyWithMultipleRules(uid string, from, to, appliedTo []string, services []v1beta1.Service) *v1beta1.NetworkPolicy {
networkPolicyRule1 := v1beta1.NetworkPolicyRule{
Direction: v1beta1.DirectionIn,
From: v1beta1.NetworkPolicyPeer{AddressGroups: from},
To: v1beta1.NetworkPolicyPeer{},
Services: services,
}
networkPolicyRule2 := v1beta1.NetworkPolicyRule{
Direction: v1beta1.DirectionOut,
From: v1beta1.NetworkPolicyPeer{},
To: v1beta1.NetworkPolicyPeer{AddressGroups: to},
Services: services,
}
return &v1beta1.NetworkPolicy{
ObjectMeta: v1.ObjectMeta{UID: types.UID(uid), Name: uid, Namespace: testNamespace},
Rules: []v1beta1.NetworkPolicyRule{networkPolicyRule1, networkPolicyRule2},
AppliedToGroups: appliedTo,
}
}
func TestAddSingleGroupRule(t *testing.T) {
controller, clientset, reconciler := newTestController()
addressGroupWatcher := watch.NewFake()
appliedToGroupWatcher := watch.NewFake()
networkPolicyWatcher := watch.NewFake()
clientset.AddWatchReactor("addressgroups", k8stesting.DefaultWatchReactor(addressGroupWatcher, nil))
clientset.AddWatchReactor("appliedtogroups", k8stesting.DefaultWatchReactor(appliedToGroupWatcher, nil))
clientset.AddWatchReactor("networkpolicies", k8stesting.DefaultWatchReactor(networkPolicyWatcher, nil))
protocolTCP := v1beta1.ProtocolTCP
port := intstr.FromInt(80)
services := []v1beta1.Service{{Protocol: &protocolTCP, Port: &port}}
desiredRule := &CompletedRule{
rule: &rule{Direction: v1beta1.DirectionIn, Services: services},
FromAddresses: v1beta1.NewGroupMemberSet(newAddressGroupMember("1.1.1.1"), newAddressGroupMember("2.2.2.2")),
ToAddresses: v1beta1.NewGroupMemberSet(),
Pods: v1beta1.NewGroupMemberPodSet(newAppliedToGroupMember("pod1", "ns1")),
}
stopCh := make(chan struct{})
defer close(stopCh)
go controller.Run(stopCh)
// policy1 comes first, no rule will be synced due to missing addressGroup1 and appliedToGroup1.
policy1 := newNetworkPolicy("policy1", []string{"addressGroup1"}, []string{}, []string{"appliedToGroup1"}, services)
networkPolicyWatcher.Add(policy1)
networkPolicyWatcher.Action(watch.Bookmark, nil)
select {
case ruleID := <-reconciler.updated:
t.Fatalf("Expected no update, got %v", ruleID)
case <-time.After(time.Millisecond * 100):
}
assert.Equal(t, policy1, controller.GetNetworkPolicy(policy1.Name, policy1.Namespace))
assert.Equal(t, 1, controller.GetNetworkPolicyNum())
assert.Equal(t, 0, controller.GetAddressGroupNum())
assert.Equal(t, 0, controller.GetAppliedToGroupNum())
// addressGroup1 comes, no rule will be synced due to missing appliedToGroup1 data.
addressGroupWatcher.Add(newAddressGroup("addressGroup1", []v1beta1.GroupMemberPod{*newAddressGroupMemberPod("1.1.1.1"), *newAddressGroupMemberPod("2.2.2.2")}))
addressGroupWatcher.Action(watch.Bookmark, nil)
select {
case ruleID := <-reconciler.updated:
t.Fatalf("Expected no update, got %v", ruleID)
case <-time.After(time.Millisecond * 100):
}
assert.Equal(t, 1, controller.GetNetworkPolicyNum())
assert.Equal(t, 1, controller.GetAddressGroupNum())
assert.Equal(t, 0, controller.GetAppliedToGroupNum())
// appliedToGroup1 comes, policy1 will be synced.
appliedToGroupWatcher.Add(newAppliedToGroup("appliedToGroup1", []v1beta1.GroupMemberPod{*newAppliedToGroupMember("pod1", "ns1")}))
appliedToGroupWatcher.Action(watch.Bookmark, nil)
select {
case ruleID := <-reconciler.updated:
actualRule, _ := reconciler.getLastRealized(ruleID)
		if actualRule.Direction != desiredRule.Direction {
			t.Errorf("Expected Direction %v, got %v", desiredRule.Direction, actualRule.Direction)
		}
		if !assert.ElementsMatch(t, actualRule.Services, desiredRule.Services) {
			t.Errorf("Expected Services %v, got %v", desiredRule.Services, actualRule.Services)
		}
		if !actualRule.FromAddresses.Equal(desiredRule.FromAddresses) {
			t.Errorf("Expected FromAddresses %v, got %v", desiredRule.FromAddresses, actualRule.FromAddresses)
		}
		if !actualRule.ToAddresses.Equal(desiredRule.ToAddresses) {
			t.Errorf("Expected ToAddresses %v, got %v", desiredRule.ToAddresses, actualRule.ToAddresses)
		}
		if !actualRule.Pods.Equal(desiredRule.Pods) {
			t.Errorf("Expected Pods %v, got %v", desiredRule.Pods, actualRule.Pods)
		}
case <-time.After(time.Millisecond * 100):
t.Fatal("Expected one update, got none")
}
assert.Equal(t, 1, controller.GetNetworkPolicyNum())
assert.Equal(t, 1, controller.GetAddressGroupNum())
assert.Equal(t, 1, controller.GetAppliedToGroupNum())
}
func TestAddMultipleGroupsRule(t *testing.T) {
controller, clientset, reconciler := newTestController()
addressGroupWatcher := watch.NewFake()
appliedToGroupWatcher := watch.NewFake()
networkPolicyWatcher := watch.NewFake()
clientset.AddWatchReactor("addressgroups", k8stesting.DefaultWatchReactor(addressGroupWatcher, nil))
clientset.AddWatchReactor("appliedtogroups", k8stesting.DefaultWatchReactor(appliedToGroupWatcher, nil))
clientset.AddWatchReactor("networkpolicies", k8stesting.DefaultWatchReactor(networkPolicyWatcher, nil))
protocolTCP := v1beta1.ProtocolTCP
port := intstr.FromInt(80)
services := []v1beta1.Service{{Protocol: &protocolTCP, Port: &port}}
desiredRule := &CompletedRule{
rule: &rule{Direction: v1beta1.DirectionIn, Services: services},
FromAddresses: v1beta1.NewGroupMemberSet(newAddressGroupMember("1.1.1.1"), newAddressGroupMember("2.2.2.2"), newAddressGroupMember("3.3.3.3")),
ToAddresses: v1beta1.NewGroupMemberSet(),
Pods: v1beta1.NewGroupMemberPodSet(newAppliedToGroupMember("pod1", "ns1"), newAppliedToGroupMember("pod2", "ns2")),
}
stopCh := make(chan struct{})
defer close(stopCh)
go controller.Run(stopCh)
// addressGroup1 comes, no rule will be synced.
addressGroupWatcher.Add(newAddressGroup("addressGroup1", []v1beta1.GroupMemberPod{*newAddressGroupMemberPod("1.1.1.1"), *newAddressGroupMemberPod("2.2.2.2")}))
addressGroupWatcher.Action(watch.Bookmark, nil)
// appliedToGroup1 comes, no rule will be synced.
appliedToGroupWatcher.Add(newAppliedToGroup("appliedToGroup1", []v1beta1.GroupMemberPod{*newAppliedToGroupMember("pod1", "ns1")}))
appliedToGroupWatcher.Action(watch.Bookmark, nil)
// policy1 comes first, no rule will be synced due to missing addressGroup2 and appliedToGroup2.
policy1 := newNetworkPolicy("policy1", []string{"addressGroup1", "addressGroup2"}, []string{}, []string{"appliedToGroup1", "appliedToGroup2"}, services)
networkPolicyWatcher.Add(policy1)
networkPolicyWatcher.Action(watch.Bookmark, nil)
select {
case ruleID := <-reconciler.updated:
t.Fatalf("Expected no update, got %v", ruleID)
case <-time.After(time.Millisecond * 100):
}
assert.Equal(t, policy1, controller.GetNetworkPolicy(policy1.Name, policy1.Namespace))
assert.Equal(t, 1, controller.GetNetworkPolicyNum())
assert.Equal(t, 1, controller.GetAddressGroupNum())
assert.Equal(t, 1, controller.GetAppliedToGroupNum())
// addressGroup2 comes, no rule will be synced due to missing appliedToGroup2 data.
addressGroupWatcher.Add(newAddressGroup("addressGroup2", []v1beta1.GroupMemberPod{*newAddressGroupMemberPod("1.1.1.1"), *newAddressGroupMemberPod("3.3.3.3")}))
select {
case ruleID := <-reconciler.updated:
t.Fatalf("Expected no update, got %v", ruleID)
case <-time.After(time.Millisecond * 100):
}
assert.Equal(t, 1, controller.GetNetworkPolicyNum())
assert.Equal(t, 2, controller.GetAddressGroupNum())
assert.Equal(t, 1, controller.GetAppliedToGroupNum())
// appliedToGroup2 comes, policy1 will be synced.
appliedToGroupWatcher.Add(newAppliedToGroup("appliedToGroup2", []v1beta1.GroupMemberPod{*newAppliedToGroupMember("pod2", "ns2")}))
select {
case ruleID := <-reconciler.updated:
actualRule, _ := reconciler.getLastRealized(ruleID)
		if actualRule.Direction != desiredRule.Direction {
			t.Errorf("Expected Direction %v, got %v", desiredRule.Direction, actualRule.Direction)
		}
		if !assert.ElementsMatch(t, actualRule.Services, desiredRule.Services) {
			t.Errorf("Expected Services %v, got %v", desiredRule.Services, actualRule.Services)
		}
		if !actualRule.FromAddresses.Equal(desiredRule.FromAddresses) {
			t.Errorf("Expected FromAddresses %v, got %v", desiredRule.FromAddresses, actualRule.FromAddresses)
		}
		if !actualRule.ToAddresses.Equal(desiredRule.ToAddresses) {
			t.Errorf("Expected ToAddresses %v, got %v", desiredRule.ToAddresses, actualRule.ToAddresses)
		}
		if !actualRule.Pods.Equal(desiredRule.Pods) {
			t.Errorf("Expected Pods %v, got %v", desiredRule.Pods, actualRule.Pods)
		}
case <-time.After(time.Millisecond * 100):
t.Fatal("Expected one update, got none")
}
assert.Equal(t, 1, controller.GetNetworkPolicyNum())
assert.Equal(t, 2, controller.GetAddressGroupNum())
assert.Equal(t, 2, controller.GetAppliedToGroupNum())
}
func TestDeleteRule(t *testing.T) |
func TestAddNetworkPolicyWithMultipleRules(t *testing.T) {
controller, clientset, reconciler := newTestController()
addressGroupWatcher := watch.NewFake()
appliedToGroupWatcher := watch.NewFake()
networkPolicyWatcher := watch.NewFake()
clientset.AddWatchReactor("addressgroups", k8stesting.DefaultWatchReactor(addressGroupWatcher, nil))
clientset.AddWatchReactor("appliedtogroups", k8stesting.DefaultWatchReactor(appliedToGroupWatcher, nil))
clientset.AddWatchReactor("networkpolicies", k8stesting.DefaultWatchReactor(networkPolicyWatcher, nil))
protocolTCP := v1beta1.ProtocolTCP
port := intstr.FromInt(80)
services := []v1beta1.Service{{Protocol: &protocolTCP, Port: &port}}
desiredRule1 := &CompletedRule{
rule: &rule{Direction: v1beta1.DirectionIn, Services: services},
FromAddresses: v1beta1.NewGroupMemberSet(newAddressGroupMember("1.1.1.1"), newAddressGroupMember("2.2.2.2")),
ToAddresses: v1beta1.NewGroupMemberSet(),
Pods: v1beta1.NewGroupMemberPodSet(newAppliedToGroupMember("pod1", "ns1")),
}
desiredRule2 := &CompletedRule{
rule: &rule{Direction: v1beta1.DirectionOut, Services: services},
FromAddresses: v1beta1.NewGroupMemberSet(),
ToAddresses: v1beta1.NewGroupMemberSet(newAddressGroupMember("3.3.3.3"), newAddressGroupMember("4.4.4.4")),
Pods: v1beta1.NewGroupMemberPodSet(newAppliedToGroupMember("pod1", "ns1")),
}
stopCh := make(chan struct{})
defer close(stopCh)
go controller.Run(stopCh)
// Test NetworkPolicyInfoQuerier functions when the NetworkPolicy has multiple rules.
policy1 := getNetworkPolicyWithMultipleRules("policy1", []string{"addressGroup1"}, []string{"addressGroup2"}, []string{"appliedToGroup1"}, services)
networkPolicyWatcher.Add(policy1)
networkPolicyWatcher.Action(watch.Bookmark, nil)
addressGroupWatcher.Add(newAddressGroup("addressGroup1", []v1beta1.GroupMemberPod{*newAddressGroupMemberPod("1.1.1.1"), *newAddressGroupMemberPod("2.2.2.2")}))
addressGroupWatcher.Add(newAddressGroup("addressGroup2", []v1beta1.GroupMemberPod{*newAddressGroupMemberPod("3.3.3.3"), *newAddressGroupMemberPod("4.4.4.4")}))
addressGroupWatcher.Action(watch.Bookmark, nil)
appliedToGroupWatcher.Add(newAppliedToGroup("appliedToGroup1", []v1beta1.GroupMemberPod{*newAppliedToGroupMember("pod1", "ns1")}))
appliedToGroupWatcher.Action(watch.Bookmark, nil)
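	// policy1 produces two rules, so expect exactly two reconciler updates,
	// one per direction.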
for i := 0; i < 2; i++ {
select {
case ruleID := <-reconciler.updated:
actualRule, _ := reconciler.getLastRealized(ruleID)
			if actualRule.Direction == v1beta1.DirectionIn {
				if !assert.ElementsMatch(t, actualRule.Services, desiredRule1.Services) {
					t.Errorf("Expected Services %v, got %v", desiredRule1.Services, actualRule.Services)
				}
				if !actualRule.FromAddresses.Equal(desiredRule1.FromAddresses) {
					t.Errorf("Expected FromAddresses %v, got %v", desiredRule1.FromAddresses, actualRule.FromAddresses)
				}
				if !actualRule.ToAddresses.Equal(desiredRule1.ToAddresses) {
					t.Errorf("Expected ToAddresses %v, got %v", desiredRule1.ToAddresses, actualRule.ToAddresses)
				}
				if !actualRule.Pods.Equal(desiredRule1.Pods) {
					t.Errorf("Expected Pods %v, got %v", desiredRule1.Pods, actualRule.Pods)
				}
			}
			if actualRule.Direction == v1beta1.DirectionOut {
				if !assert.ElementsMatch(t, actualRule.Services, desiredRule2.Services) {
					t.Errorf("Expected Services %v, got %v", desiredRule2.Services, actualRule.Services)
				}
				if !actualRule.FromAddresses.Equal(desiredRule2.FromAddresses) {
					t.Errorf("Expected FromAddresses %v, got %v", desiredRule2.FromAddresses, actualRule.FromAddresses)
				}
				if !actualRule.ToAddresses.Equal(desiredRule2.ToAddresses) {
					t.Errorf("Expected ToAddresses %v, got %v", desiredRule2.ToAddresses, actualRule.ToAddresses)
				}
				if !actualRule.Pods.Equal(desiredRule2.Pods) {
					t.Errorf("Expected Pods %v, got %v", desiredRule2.Pods, actualRule.Pods)
				}
			}
case <-time.After(time.Millisecond * 100):
t.Fatal("Expected two rule updates, got timeout")
}
}
assert.ElementsMatch(t, policy1.Rules, controller.GetNetworkPolicy(policy1.Name, policy1.Namespace).Rules)
assert.ElementsMatch(t, policy1.AppliedToGroups, controller.GetNetworkPolicy(policy1.Name, policy1.Namespace).AppliedToGroups)
assert.Equal(t, 1, controller.GetNetworkPolicyNum())
assert.Equal(t, 2, controller.GetAddressGroupNum())
assert.Equal(t, 1, controller.GetAppliedToGroupNum())
}
| {
controller, clientset, reconciler := newTestController()
addressGroupWatcher := watch.NewFake()
appliedToGroupWatcher := watch.NewFake()
networkPolicyWatcher := watch.NewFake()
clientset.AddWatchReactor("addressgroups", k8stesting.DefaultWatchReactor(addressGroupWatcher, nil))
clientset.AddWatchReactor("appliedtogroups", k8stesting.DefaultWatchReactor(appliedToGroupWatcher, nil))
clientset.AddWatchReactor("networkpolicies", k8stesting.DefaultWatchReactor(networkPolicyWatcher, nil))
protocolTCP := v1beta1.ProtocolTCP
port := intstr.FromInt(80)
services := []v1beta1.Service{{Protocol: &protocolTCP, Port: &port}}
stopCh := make(chan struct{})
defer close(stopCh)
go controller.Run(stopCh)
addressGroupWatcher.Add(newAddressGroup("addressGroup1", []v1beta1.GroupMemberPod{*newAddressGroupMemberPod("1.1.1.1"), *newAddressGroupMemberPod("2.2.2.2")}))
addressGroupWatcher.Action(watch.Bookmark, nil)
appliedToGroupWatcher.Add(newAppliedToGroup("appliedToGroup1", []v1beta1.GroupMemberPod{*newAppliedToGroupMember("pod1", "ns1")}))
appliedToGroupWatcher.Action(watch.Bookmark, nil)
networkPolicyWatcher.Add(newNetworkPolicy("policy1", []string{"addressGroup1"}, []string{}, []string{"appliedToGroup1"}, services))
networkPolicyWatcher.Action(watch.Bookmark, nil)
select {
case ruleID := <-reconciler.updated:
_, exists := reconciler.getLastRealized(ruleID)
if !exists {
t.Fatalf("Expected rule %s, got none", ruleID)
}
case <-time.After(time.Millisecond * 100):
t.Fatal("Expected one update, got none")
}
assert.Equal(t, 1, controller.GetNetworkPolicyNum())
assert.Equal(t, 1, controller.GetAddressGroupNum())
assert.Equal(t, 1, controller.GetAppliedToGroupNum())
networkPolicyWatcher.Delete(newNetworkPolicy("policy1", []string{}, []string{}, []string{}, nil))
select {
case ruleID := <-reconciler.deleted:
actualRule, exists := reconciler.getLastRealized(ruleID)
if exists {
t.Errorf("Expected no rule, got %v", actualRule)
}
case <-time.After(time.Millisecond * 100):
t.Fatal("Expected one update, got none")
}
} |
vm.rs | /*
* Copyright 2019 Jeehoon Kang
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use core::mem::{self, MaybeUninit};
use core::ptr;
use core::str;
use core::sync::atomic::AtomicBool;
use arrayvec::ArrayVec;
use scopeguard::guard;
use crate::addr::*;
use crate::arch::*;
use crate::cpu::*;
use crate::list::*;
use crate::mm::*;
use crate::mpool::*;
use crate::page::*;
use crate::spci::*;
use crate::spinlock::*;
use crate::std::*;
use crate::types::*;
const LOG_BUFFER_SIZE: usize = 256;
#[repr(C)]
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum MailboxState {
/// There is no message in the mailbox.
Empty,
/// There is a message in the mailbox that is waiting for a reader.
Received,
/// There is a message in the mailbox that has been read.
Read,
}
#[repr(C)]
pub struct WaitEntry {
/// The VM that is waiting for a mailbox to become writable.
pub waiting_vm: *const Vm,
/// Links used to add entry to a VM's waiter_list. This is protected by the notifying VM's lock.
wait_links: ListEntry,
/// Links used to add entry to a VM's ready_list. This is protected by the waiting VM's lock.
ready_links: ListEntry,
}
impl WaitEntry {
#[inline]
pub fn is_in_ready_list(&self) -> bool {
unsafe { !list_empty(&self.ready_links) }
}
}
#[repr(C)]
pub struct Mailbox {
state: MailboxState,
    // Addresses of the pages used for receiving and sending messages.
    // These pages are not protected by the lock -- the sender and receiver
    // must follow a proper protocol so that Hafnium copies synchronized data.
recv: *mut SpciMessage,
send: *const SpciMessage,
/// List of wait_entry structs representing VMs that want to be notified
/// when the mailbox becomes writable. Once the mailbox does become
/// writable, the entry is removed from this list and added to the waiting
/// VM's ready_list.
waiter_list: ListEntry,
/// List of wait_entry structs representing VMs whose mailboxes became
    /// writable since the owner of the mailbox registered for notification.
ready_list: ListEntry,
}
impl Mailbox {
/// Initializes the mailbox.
/// TODO(HfO2): Refactor `vm_init` and make `Mailbox::new()` instead of this.
pub unsafe fn init(&mut self) {
self.state = MailboxState::Empty;
self.recv = ptr::null_mut();
self.send = ptr::null();
unsafe { list_init(&mut self.waiter_list) };
unsafe { list_init(&mut self.ready_list) };
}
/// Retrieves the next waiter and removes it from the wait list if the VM's
/// mailbox is in a writable state.
pub fn fetch_waiter(&mut self) -> *mut WaitEntry |
    /// Checks if any waiters exist.
pub fn is_waiter_list_empty(&self) -> bool {
unsafe { list_empty(&self.waiter_list) }
}
/// Checks whether there exists a pending message. If one exists, marks the
/// mailbox read.
pub fn try_read(&mut self) -> Result<(), ()> {
if self.state == MailboxState::Received {
self.state = MailboxState::Read;
Ok(())
} else {
Err(())
}
}
    /// Marks the arrived message as read.
pub fn set_read(&mut self) {
self.state = MailboxState::Read;
}
    /// Marks that a message has arrived.
pub fn set_received(&mut self) {
self.state = MailboxState::Received;
}
/// Configures the hypervisor's stage-1 view of the send and receive pages.
/// The stage-1 page tables must be locked so memory cannot be taken by
/// another core which could result in this transaction being unable to
/// roll back in the case of an error.
pub fn configure_stage1(
&mut self,
pa_send_begin: paddr_t,
pa_send_end: paddr_t,
pa_recv_begin: paddr_t,
pa_recv_end: paddr_t,
hypervisor_ptable: &SpinLock<PageTable<Stage1>>,
local_page_pool: &MPool,
) -> Result<(), ()> {
let mut hypervisor_ptable = hypervisor_ptable.lock();
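        // Wrap the locked table in a no-op guard first; the later guards stack
        // rollback actions on top, and `mem::forget` below disarms them all
        // once every mapping step has succeeded.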
let mut ptable = guard(&mut hypervisor_ptable, |_| ());
// Map the send page as read-only in the hypervisor address space.
if ptable
.identity_map(pa_send_begin, pa_send_end, Mode::R, local_page_pool)
.is_err()
{
// TODO: partial defrag of failed range.
// Recover any memory consumed in failed mapping.
ptable.defrag(local_page_pool);
return Err(());
}
let mut ptable = guard(ptable, |mut ptable| {
ptable
.unmap(pa_send_begin, pa_send_end, local_page_pool)
.unwrap();
});
// Map the receive page as writable in the hypervisor address space. On
// failure, unmap the send page before returning.
if ptable
.identity_map(pa_recv_begin, pa_recv_end, Mode::W, local_page_pool)
.is_err()
{
            // TODO: partial defrag of failed range.
// Recover any memory consumed in failed mapping.
ptable.defrag(local_page_pool);
return Err(());
}
// Forgetting `ptable` only skips dropping the nested `ScopeGuard`s.
// `hypervisor_ptable` will be safely dropped and the lock will be
// released.
mem::forget(ptable);
self.send = pa_addr(pa_send_begin) as usize as *const SpciMessage;
self.recv = pa_addr(pa_recv_begin) as usize as *mut SpciMessage;
Ok(())
}
pub fn get_send_ptr(&self) -> *const SpciMessage {
self.send
}
pub fn get_recv_ptr(&self) -> *mut SpciMessage {
self.recv
}
}
pub struct VmInner {
log_buffer: ArrayVec<c_char, LOG_BUFFER_SIZE>,
pub ptable: PageTable<Stage2>,
mailbox: Mailbox,
/// Wait entries to be used when waiting on other VM mailboxes.
wait_entries: [WaitEntry; MAX_VMS],
arch: ArchVm,
}
impl VmInner {
/// Initializes VmInner.
pub unsafe fn init(&mut self, vm: *mut Vm, ppool: &MPool) -> Result<(), ()> {
unsafe { self.mailbox.init() };
if unsafe { !mm_vm_init(&mut self.ptable, ppool) } {
return Err(());
}
// Initialise waiter entries.
for i in 0..MAX_VMS {
self.wait_entries[i].waiting_vm = vm;
unsafe { list_init(&mut self.wait_entries[i].wait_links) };
unsafe { list_init(&mut self.wait_entries[i].ready_links) };
}
Ok(())
}
/// Retrieves the next waiter and removes it from the wait list if the VM's
/// mailbox is in a writable state.
pub fn fetch_waiter(&mut self) -> *mut WaitEntry {
self.mailbox.fetch_waiter()
}
/// Checks if any waiters exists.
pub fn is_waiter_list_empty(&self) -> bool {
self.mailbox.is_waiter_list_empty()
}
/// Checks whether there exists a pending message. If one exists, marks the
/// mailbox read.
pub fn try_read(&mut self) -> Result<(), ()> {
self.mailbox.try_read()
}
    /// Marks the arrived message as read.
pub fn set_read(&mut self) {
self.mailbox.set_read()
}
    /// Marks that a message has arrived.
pub fn set_received(&mut self) {
self.mailbox.set_received()
}
/// Configures the send and receive pages in the VM stage-2 and hypervisor
/// stage-1 page tables. Locking of the page tables combined with a local
/// memory pool ensures there will always be enough memory to recover from
/// any errors that arise.
#[inline]
fn configure_pages(
&mut self,
pa_send_begin: paddr_t,
pa_send_end: paddr_t,
orig_send_mode: Mode,
pa_recv_begin: paddr_t,
pa_recv_end: paddr_t,
orig_recv_mode: Mode,
hypervisor_ptable: &SpinLock<PageTable<Stage1>>,
fallback_mpool: &MPool,
) -> Result<(), ()> {
// Create a local pool so any freed memory can't be used by another
// thread. This is to ensure the original mapping can be restored if
// any stage of the process fails.
let local_page_pool: MPool = MPool::new_with_fallback(fallback_mpool);
let mut ptable = guard(&mut self.ptable, |_| ());
// Take memory ownership away from the VM and mark as shared.
ptable.identity_map(
pa_send_begin,
pa_send_end,
Mode::UNOWNED | Mode::SHARED | Mode::R | Mode::W,
&local_page_pool,
)?;
let mut ptable = guard(ptable, |mut ptable| {
ptable
.identity_map(pa_send_begin, pa_send_end, orig_send_mode, &local_page_pool)
.unwrap();
});
ptable
.identity_map(
pa_recv_begin,
pa_recv_end,
Mode::UNOWNED | Mode::SHARED | Mode::R,
&local_page_pool,
)
.map_err(|_| {
// TODO: partial defrag of failed range.
// Recover any memory consumed in failed mapping.
ptable.defrag(&local_page_pool);
})?;
let ptable = guard(ptable, |mut ptable| {
ptable
.identity_map(pa_recv_begin, pa_recv_end, orig_recv_mode, &local_page_pool)
.unwrap();
});
self.mailbox.configure_stage1(
pa_send_begin,
pa_send_end,
pa_recv_begin,
pa_recv_end,
hypervisor_ptable,
&local_page_pool,
)?;
mem::forget(ptable);
Ok(())
}
/// Configures the VM to send/receive data through the specified pages. The
/// pages must not be shared.
///
    /// Returns:
    /// - Err(()) on failure.
    /// - Ok(()) on success.
pub fn configure(
&mut self,
send: ipaddr_t,
recv: ipaddr_t,
hypervisor_ptable: &SpinLock<PageTable<Stage1>>,
fallback_mpool: &MPool,
) -> Result<(), ()> {
// Fail if addresses are not page-aligned.
if !is_aligned(ipa_addr(send), PAGE_SIZE) || !is_aligned(ipa_addr(recv), PAGE_SIZE) {
return Err(());
}
// Convert to physical addresses.
let pa_send_begin = pa_from_ipa(send);
let pa_send_end = pa_add(pa_send_begin, PAGE_SIZE);
let pa_recv_begin = pa_from_ipa(recv);
let pa_recv_end = pa_add(pa_recv_begin, PAGE_SIZE);
// Fail if the same page is used for the send and receive pages.
if pa_addr(pa_send_begin) == pa_addr(pa_recv_begin) {
return Err(());
}
        // We only allow these to be set up once.
if self.is_configured() {
return Err(());
}
// Ensure the pages are valid, owned and exclusive to the VM and that
// the VM has the required access to the memory.
let orig_send_mode = self.ptable.get_mode(send, ipa_add(send, PAGE_SIZE))?;
if !(orig_send_mode.valid_owned_exclusive() && orig_send_mode.contains(Mode::R | Mode::W)) {
return Err(());
}
let orig_recv_mode = self.ptable.get_mode(recv, ipa_add(recv, PAGE_SIZE))?;
if !(orig_recv_mode.valid_owned_exclusive() && orig_recv_mode.contains(Mode::R)) {
return Err(());
}
self.configure_pages(
pa_send_begin,
pa_send_end,
orig_send_mode,
pa_recv_begin,
pa_recv_end,
orig_recv_mode,
hypervisor_ptable,
fallback_mpool,
)
}
    /// Checks whether `configure` has been called before.
pub fn is_configured(&self) -> bool {
!self.mailbox.send.is_null() && !self.mailbox.recv.is_null()
}
/// Checks whether mailbox is empty.
pub fn is_empty(&self) -> bool {
self.mailbox.state == MailboxState::Empty
}
pub fn dequeue_ready_list(&mut self) -> Option<spci_vm_id_t> {
unsafe {
if list_empty(&self.mailbox.ready_list) {
return None;
}
let list_entry = list_pop_front(&self.mailbox.ready_list);
let entry: *mut WaitEntry = container_of!(list_entry, WaitEntry, ready_links);
let ret = entry.offset_from(self.wait_entries.as_ptr()) as spci_vm_id_t;
Some(ret)
}
}
pub fn enqueue_ready_list(&mut self, entry: &mut WaitEntry) {
debug_assert!(unsafe { list_empty(&entry.ready_links) });
unsafe {
list_append(&mut self.mailbox.ready_list, &mut entry.ready_links);
}
}
pub fn get_state(&self) -> MailboxState {
self.mailbox.state
}
pub fn set_empty(&mut self) {
debug_assert_eq!(self.mailbox.state, MailboxState::Read);
self.mailbox.state = MailboxState::Empty;
}
    /// Adds `self` into the waiter list of `target`, if `self` is not already
    /// waiting for another VM. Returns `Err(())` if `self` is already waiting.
pub fn wait_for(&mut self, target: &mut Self, target_id: spci_vm_id_t) -> Result<(), ()> {
let entry = &mut self.wait_entries[target_id as usize];
// Append waiter only if it's not there yet.
if unsafe { !list_empty(&(*entry).wait_links) } {
return Err(());
}
unsafe {
list_append(&mut target.mailbox.waiter_list, &mut (*entry).wait_links);
}
Ok(())
}
pub fn get_send_ptr(&self) -> *const SpciMessage {
self.mailbox.get_send_ptr()
}
pub fn get_recv_ptr(&self) -> *mut SpciMessage {
self.mailbox.get_recv_ptr()
}
pub fn debug_log(&mut self, id: spci_vm_id_t, c: c_char) {
let flush = if c == b'\n' || c == b'\0' {
true
} else {
self.log_buffer.push(c);
self.log_buffer.is_full()
};
if flush {
let log = str::from_utf8(&self.log_buffer).unwrap_or("non-UTF8 bytes");
dlog!("VM {}: {}\n", id, log);
self.log_buffer.clear();
}
}
}
pub struct Vm {
pub id: spci_vm_id_t,
/// VCpus of this vm.
    /// Note: This field is regarded as a kind of mutable state of Vm, but is
    /// not contained in VmInner, because
    /// 1. Mutable inner fields are contained in VCpuState.
    /// 2. VCpuState has a higher lock order than that of Vm. It makes no
    ///    sense to lock VmInner in order to acquire VCpuState.
pub vcpus: ArrayVec<VCpu, MAX_CPUS>,
/// See api.c for the partial ordering on locks.
pub inner: SpinLock<VmInner>,
pub aborting: AtomicBool,
}
impl Vm {
pub fn init(
&mut self,
id: spci_vm_id_t,
vcpu_count: spci_vcpu_count_t,
ppool: &MPool,
) -> Result<(), ()> {
self.id = id;
// self.vcpus = ArrayVec::new();
        // TODO(HfO2): Using the former would allocate a large temporary stack
        // to store an ArrayVec<[VCpu; MAX_CPUS]>. Maybe MIR-only RLIBs or
        // marking ArrayVec::new as inline would solve this problem.
// (See https://github.com/rust-lang/rust/issues/38913)
unsafe {
self.vcpus = MaybeUninit::uninit().assume_init();
self.vcpus.set_len(0);
}
self.aborting = AtomicBool::new(false);
unsafe {
let self_ptr = self as *mut _;
self.inner.get_mut().init(self_ptr, ppool)?;
for _ in 0..vcpu_count {
// self.vcpus.push(VCpu::new(self_ptr));
                // TODO(HfO2): Using the former would allocate a large
                // temporary stack to store a VCpu. Maybe MIR-only RLIBs or
                // marking ArrayVec::push as inline would solve this problem.
self.vcpus.push_unchecked(VCpu::new(self_ptr));
}
}
Ok(())
}
/// Returns the root address of the page table of this VM. It is safe not to
/// lock `self.inner` because the value of `ptable.as_raw()` doesn't change
    /// after `ptable` is initialized. Of course, the actual page table may
    /// change while the VM is running. That's why this function returns `paddr_t` rather than
/// `&[RawPageTable]`.
pub fn get_ptable_raw(&self) -> paddr_t {
unsafe { self.inner.get_unchecked().ptable.as_raw() }
}
pub fn debug_log(&self, c: c_char) {
self.inner.lock().debug_log(self.id, c)
}
}
pub struct VmManager {
vms: ArrayVec<Vm, MAX_VMS>,
}
impl VmManager {
pub fn new() -> Self {
Self {
vms: ArrayVec::new(),
}
}
pub fn new_vm(&mut self, vcpu_count: spci_vcpu_count_t, ppool: &MPool) -> Option<&mut Vm> {
if self.vms.is_full() {
return None;
}
let idx = self.vms.len();
        // Generate IDs based on an offset, as low IDs (e.g. 0) are reserved.
let id = idx + HF_VM_ID_OFFSET as usize;
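        // The slot at `idx` is still uninitialized; initialize it in place and
        // only publish it via `set_len` after `init` has succeeded.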
let vm = unsafe { self.vms.get_unchecked_mut(idx) };
vm.init(id as u16, vcpu_count, ppool).ok()?;
unsafe {
self.vms.set_len(idx + 1);
}
Some(&mut self.vms[idx])
}
fn get_vm_index(vm_id: spci_vm_id_t) -> usize {
assert!(vm_id >= HF_VM_ID_OFFSET);
(vm_id - HF_VM_ID_OFFSET) as _
}
pub fn get(&self, id: spci_vm_id_t) -> Option<&Vm> {
self.vms.get(Self::get_vm_index(id))
}
pub fn get_mut(&mut self, id: spci_vm_id_t) -> Option<&mut Vm> {
self.vms.get_mut(Self::get_vm_index(id))
}
pub fn get_primary(&self) -> &Vm {
// # Safety
//
// Primary VM always exists.
unsafe { self.vms.get_unchecked(Self::get_vm_index(HF_PRIMARY_VM_ID)) }
}
pub fn len(&self) -> spci_vm_count_t {
self.vms.len() as _
}
}
/// Get the vCPU with the given index from the given VM.
/// This assumes the index is valid, i.e. less than the VM's vCPU count.
#[no_mangle]
pub unsafe extern "C" fn vm_get_vcpu(vm: *const Vm, vcpu_index: spci_vcpu_index_t) -> *const VCpu {
let vm = unsafe { &*vm };
assert!((vcpu_index as usize) < vm.vcpus.len());
&vm.vcpus[vcpu_index as usize]
}
#[no_mangle]
pub unsafe extern "C" fn vm_get_id(vm: *const Vm) -> spci_vm_id_t {
unsafe { (*vm).id }
}
#[no_mangle]
pub unsafe extern "C" fn vm_get_arch(vm: *const Vm) -> *mut ArchVm {
unsafe { &mut (*vm).inner.get_mut_unchecked().arch }
}
#[no_mangle]
pub unsafe extern "C" fn vm_get_vcpu_count(vm: *const Vm) -> spci_vcpu_count_t {
unsafe { (*vm).vcpus.len() as _ }
}
| {
if self.state != MailboxState::Empty
|| self.recv.is_null()
|| unsafe { list_empty(&self.waiter_list) }
{
// The mailbox is not writable or there are no waiters.
return ptr::null_mut();
}
// Remove waiter from the wait list.
container_of!(
unsafe { list_pop_front(&self.waiter_list) },
WaitEntry,
wait_links
)
} |
apps.py | from django.apps import AppConfig
class ContactConfig(AppConfig):
| name = 'contacts'
def ready(self):
import contacts.signals |
|
typed_radix_test.go | // Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gps
import "testing"
// basically a regression test
func TestPathPrefixOrEqual(t *testing.T) {
if !isPathPrefixOrEqual("foo", "foo") {
t.Error("Same path should return true")
}
| if isPathPrefixOrEqual("foo", "fooer") {
t.Error("foo is not a path-type prefix of fooer")
}
if !isPathPrefixOrEqual("foo", "foo/bar") {
t.Error("foo is a path prefix of foo/bar")
}
if isPathPrefixOrEqual("foo", "foo/") {
t.Error("special case - foo is not a path prefix of foo/")
}
} | |
vpn_connection_route.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = ['VpnConnectionRoute']
class VpnConnectionRoute(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_cidr_block: Optional[pulumi.Input[str]] = None,
vpn_connection_id: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides a static route between a VPN connection and a customer gateway.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
vpc = aws.ec2.Vpc("vpc", cidr_block="10.0.0.0/16")
vpn_gateway = aws.ec2.VpnGateway("vpnGateway", vpc_id=vpc.id)
customer_gateway = aws.ec2.CustomerGateway("customerGateway",
bgp_asn="65000",
ip_address="172.0.0.1",
type="ipsec.1")
main = aws.ec2.VpnConnection("main",
vpn_gateway_id=vpn_gateway.id,
customer_gateway_id=customer_gateway.id,
type="ipsec.1",
static_routes_only=True)
office = aws.ec2.VpnConnectionRoute("office",
destination_cidr_block="192.168.10.0/24",
vpn_connection_id=main.id)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] destination_cidr_block: The CIDR block associated with the local subnet of the customer network.
:param pulumi.Input[str] vpn_connection_id: The ID of the VPN connection.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
|
__props__ = dict()
if destination_cidr_block is None:
raise TypeError("Missing required property 'destination_cidr_block'")
__props__['destination_cidr_block'] = destination_cidr_block
if vpn_connection_id is None:
raise TypeError("Missing required property 'vpn_connection_id'")
__props__['vpn_connection_id'] = vpn_connection_id
super(VpnConnectionRoute, __self__).__init__(
'aws:ec2/vpnConnectionRoute:VpnConnectionRoute',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
destination_cidr_block: Optional[pulumi.Input[str]] = None,
vpn_connection_id: Optional[pulumi.Input[str]] = None) -> 'VpnConnectionRoute':
"""
Get an existing VpnConnectionRoute resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] destination_cidr_block: The CIDR block associated with the local subnet of the customer network.
:param pulumi.Input[str] vpn_connection_id: The ID of the VPN connection.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["destination_cidr_block"] = destination_cidr_block
__props__["vpn_connection_id"] = vpn_connection_id
return VpnConnectionRoute(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="destinationCidrBlock")
def destination_cidr_block(self) -> pulumi.Output[str]:
"""
The CIDR block associated with the local subnet of the customer network.
"""
return pulumi.get(self, "destination_cidr_block")
@property
@pulumi.getter(name="vpnConnectionId")
def vpn_connection_id(self) -> pulumi.Output[str]:
"""
The ID of the VPN connection.
"""
return pulumi.get(self, "vpn_connection_id")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') |
duration_test.go | package opts
import (
"testing"
"time" | "github.com/stretchr/testify/assert"
)
func TestDurationOptString(t *testing.T) {
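	// 10e8 is 1e9, so 300 * 10e8 nanoseconds is 300 seconds, i.e. "5m0s".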
dur := time.Duration(300 * 10e8)
duration := DurationOpt{value: &dur}
assert.Equal(t, "5m0s", duration.String())
}
func TestDurationOptSetAndValue(t *testing.T) {
var duration DurationOpt
assert.NoError(t, duration.Set("300s"))
assert.Equal(t, time.Duration(300*10e8), *duration.Value())
assert.NoError(t, duration.Set("-300s"))
assert.Equal(t, time.Duration(-300*10e8), *duration.Value())
}
func TestPositiveDurationOptSetAndValue(t *testing.T) {
var duration PositiveDurationOpt
assert.NoError(t, duration.Set("300s"))
assert.Equal(t, time.Duration(300*10e8), *duration.Value())
assert.EqualError(t, duration.Set("-300s"), "duration cannot be negative")
} | |
vue.config_20201126195440.js | module.exports = {
// Aliases can be configured here
configureWebpack:{
resolve:{
alias:{
// For example
'assets':'@/assets', | 'common':'@/common',
}
}
}
} | 'common':'@/common',
'components':'@/components',
status.go | // Copyright Contributors to the Open Cluster Management project
package status
import (
"context"
"errors"
"os"
"time"
"github.com/go-kit/kit/log"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/stolostron/metrics-collector/pkg/logger"
"github.com/open-cluster-management/multicluster-monitoring-operator/pkg/apis"
oav1beta1 "github.com/open-cluster-management/multicluster-monitoring-operator/pkg/apis/observability/v1beta1"
)
const (
name = "observability-addon"
namespace = "open-cluster-management-addon-observability"
)
type StatusReport struct {
statusClient client.Client
logger log.Logger
}
func New(logger log.Logger) (*StatusReport, error) |
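// UpdateStatus sets the condition of type t with reason r and message m on
// the observability addon, ensuring it ends up as the only condition whose
// status is True.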
func (s *StatusReport) UpdateStatus(t string, r string, m string) error {
if s.statusClient == nil {
return nil
}
addon := &oav1beta1.ObservabilityAddon{}
err := s.statusClient.Get(context.TODO(), types.NamespacedName{
Name: name,
Namespace: namespace,
}, addon)
if err != nil {
logger.Log(s.logger, logger.Error, "err", err)
return err
}
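	// Walk the existing conditions: demote any other condition that is still
	// True, update the condition of type t in place, and append it last so
	// the most recent transition ends the list.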
update := false
found := false
conditions := []oav1beta1.StatusCondition{}
	latestC := oav1beta1.StatusCondition{}
for _, c := range addon.Status.Conditions {
if c.Status == metav1.ConditionTrue {
if c.Type != t {
c.Status = metav1.ConditionFalse
} else {
found = true
if c.Reason != r || c.Message != m {
c.Reason = r
c.Message = m
c.LastTransitionTime = metav1.NewTime(time.Now())
update = true
				latestC = c
continue
}
}
} else {
if c.Type == t {
found = true
c.Status = metav1.ConditionTrue
c.Reason = r
c.Message = m
c.LastTransitionTime = metav1.NewTime(time.Now())
update = true
				latestC = c
continue
}
}
conditions = append(conditions, c)
}
if update {
		conditions = append(conditions, latestC)
}
if !found {
conditions = append(conditions, oav1beta1.StatusCondition{
Type: t,
Status: metav1.ConditionTrue,
Reason: r,
Message: m,
LastTransitionTime: metav1.NewTime(time.Now()),
})
update = true
}
if update {
addon.Status.Conditions = conditions
err = s.statusClient.Status().Update(context.TODO(), addon)
if err != nil {
logger.Log(s.logger, logger.Error, "err", err)
}
return err
}
return nil
}
| {
testMode := os.Getenv("UNIT_TEST") != ""
standaloneMode := os.Getenv("STANDALONE") == "true"
var kubeClient client.Client
if testMode {
kubeClient = fake.NewFakeClient()
} else if standaloneMode {
kubeClient = nil
} else {
config, err := clientcmd.BuildConfigFromFlags("", "")
if err != nil {
return nil, errors.New("Failed to create the kube config")
}
s := scheme.Scheme
if err := apis.AddToScheme(s); err != nil {
return nil, errors.New("Failed to add observabilityaddon into scheme")
}
kubeClient, err = client.New(config, client.Options{Scheme: s})
if err != nil {
return nil, errors.New("Failed to create the kube client")
}
}
return &StatusReport{
statusClient: kubeClient,
logger: log.With(logger, "component", "statusclient"),
}, nil
} |
coding.go | package merkledag
import (
"fmt"
"sort"
"strings"
"gx/ipfs/Qmej7nf81hi2x2tvjRBF3mcp74sQyuDH4VMYDGd1YtXjb2/go-block-format"
pb "github.com/ipfs/go-ipfs/merkledag/pb"
cid "gx/ipfs/QmcZfnkapfECQGcLZaf9B79NRg7cRa9EnZh4LSbkCzwNvY/go-cid"
ipld "gx/ipfs/Qme5bWv7wtjUNGsK2BNGVUFPKiuxWrsqrtvYwCLRw8YFES/go-ipld-format"
)
// For now we convert through an intermediate PBNode,
// because native Go objects are nicer to work with.
// unmarshal decodes raw data into a *ProtoNode instance.
// The conversion uses an intermediate PBNode.
func (n *ProtoNode) unmarshal(encoded []byte) error {
var pbn pb.PBNode
if err := pbn.Unmarshal(encoded); err != nil {
return fmt.Errorf("unmarshal failed. %v", err)
}
pbnl := pbn.GetLinks()
n.links = make([]*ipld.Link, len(pbnl))
for i, l := range pbnl {
n.links[i] = &ipld.Link{Name: l.GetName(), Size: l.GetTsize()}
c, err := cid.Cast(l.GetHash())
if err != nil {
return fmt.Errorf("link hash #%d is not valid multihash. %v", i, err)
}
n.links[i].Cid = c
}
sort.Stable(LinkSlice(n.links)) // keep links sorted
n.data = pbn.GetData()
n.encoded = encoded
return nil
}
// Marshal encodes a *ProtoNode instance into a new byte slice.
// The conversion uses an intermediate PBNode.
func (n *ProtoNode) Marshal() ([]byte, error) {
pbn := n.getPBNode()
data, err := pbn.Marshal()
if err != nil {
return data, fmt.Errorf("marshal failed. %v", err)
}
return data, nil
}
func (n *ProtoNode) getPBNode() *pb.PBNode {
pbn := &pb.PBNode{}
if len(n.links) > 0 {
pbn.Links = make([]*pb.PBLink, len(n.links))
}
sort.Stable(LinkSlice(n.links)) // keep links sorted
for i, l := range n.links {
pbn.Links[i] = &pb.PBLink{}
pbn.Links[i].Name = &l.Name
pbn.Links[i].Tsize = &l.Size
if l.Cid != nil {
pbn.Links[i].Hash = l.Cid.Bytes()
}
}
if len(n.data) > 0 {
pbn.Data = n.data
}
return pbn
}
// EncodeProtobuf returns the encoded raw data version of a Node instance.
// It may use a cached encoded version, unless the force flag is given.
func (n *ProtoNode) EncodeProtobuf(force bool) ([]byte, error) {
sort.Stable(LinkSlice(n.links)) // keep links sorted
if n.encoded == nil || force {
n.cached = nil
var err error
n.encoded, err = n.Marshal()
if err != nil {
return nil, err
}
}
if n.cached == nil {
if n.Prefix.Codec == 0 { // unset
n.Prefix = v0CidPrefix
}
c, err := n.Prefix.Sum(n.encoded)
if err != nil { | return nil, err
}
n.cached = c
}
return n.encoded, nil
}
// DecodeProtobuf decodes raw data and returns a new Node instance.
func DecodeProtobuf(encoded []byte) (*ProtoNode, error) {
n := new(ProtoNode)
err := n.unmarshal(encoded)
if err != nil {
return nil, fmt.Errorf("incorrectly formatted merkledag node: %s", err)
}
return n, nil
}
// DecodeProtobufBlock is a block decoder for protobuf IPLD nodes conforming to
// node.DecodeBlockFunc
func DecodeProtobufBlock(b blocks.Block) (ipld.Node, error) {
c := b.Cid()
if c.Type() != cid.DagProtobuf {
return nil, fmt.Errorf("this function can only decode protobuf nodes")
}
decnd, err := DecodeProtobuf(b.RawData())
if err != nil {
		if strings.Contains(err.Error(), "unmarshal failed") {
return nil, fmt.Errorf("the block referred to by '%s' was not a valid merkledag node", c)
}
return nil, fmt.Errorf("failed to decode Protocol Buffers: %v", err)
}
decnd.cached = c
decnd.Prefix = c.Prefix()
return decnd, nil
}
// Type assertion
var _ ipld.DecodeBlockFunc = DecodeProtobufBlock | |
KeyHandler.js | function KeyHandler()
{
}
Class.create( KeyHandler,
{
init: function(database)
{
this.key = database.key;
this.keySeparator = database.keySeparator;
this.database = database;
},
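  // Builds the composite key for the given model; unless `quietly` is set,
  // throws when any key field is missing a value.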
getKey: function(model, quietly)
{
var field = this.key;
var modelKey = this.buildKey( model, field );
if ( hasFields( model, field, isValue ) )
{
return modelKey;
}
else if ( !quietly )
{
throw 'Composite key not supplied.';
}
return null;
},
buildKeyFromRelations: function(input)
{
if ( isObject( input ) )
{
var relations = this.database.relations; |
for (var relationName in relations)
{
if ( relationName in input )
{
relations[ relationName ].buildKey( input );
}
}
}
},
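  // Normalizes any supported input -- a model instance, an array of key
  // values, or a plain object -- into a single key string.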
buildKeyFromInput: function(input)
{
if ( input instanceof this.database.Model )
{
return input.$key();
}
else if ( isArray( input ) ) // && isArray( this.key )
{
return input.join( this.keySeparator );
}
else if ( isObject( input ) )
{
return this.buildKey( input );
}
return input;
}
}); | |
yaml.rs | use std::collections::BTreeMap;
use std::f64;
use std::i64;
use std::mem;
use std::ops::Index;
use std::string;
use std::vec;
use linked_hash_map::LinkedHashMap;
use crate::parser::*;
use crate::scanner::Marker;
use crate::scanner::ScanError;
use crate::scanner::TScalarStyle;
use crate::scanner::TokenType;
/// A YAML node is stored as this `Yaml` enumeration, which provides an easy way to
/// access your YAML document.
///
/// # Examples
///
/// ```
/// use yaml_rust::Yaml;
/// let foo = Yaml::from_str("-123", None); // convert the string to the appropriate YAML type
/// assert_eq!(foo.as_i64().unwrap(), -123);
///
/// // iterate over an Array
/// let vec = Yaml::Array(vec![Yaml::Integer(1, None), Yaml::Integer(2, None)], None);
/// for v in vec.as_vec().unwrap() {
/// assert!(v.as_i64().is_some());
/// }
/// ```
#[derive(Clone, PartialEq, PartialOrd, Debug, Eq, Ord, Hash)]
pub enum Yaml {
/// Float types are stored as String and parsed on demand.
/// Note that f64 does NOT implement Eq trait and can NOT be stored in BTreeMap.
Real(string::String, Option<TokenType>),
/// YAML int is stored as i64.
Integer(i64, Option<TokenType>),
/// YAML scalar.
String(string::String, Option<TokenType>),
/// YAML bool, e.g. `true` or `false`.
Boolean(bool, Option<TokenType>),
/// YAML array, can be accessed as a `Vec`.
Array(self::Array, Option<TokenType>),
/// YAML hash, can be accessed as a `LinkedHashMap`.
///
/// Insertion order will match the order of insertion into the map.
Hash(self::Hash, Option<TokenType>),
/// Alias, not fully supported yet.
Alias(usize),
/// YAML null, e.g. `null` or `~`.
Null,
/// Accessing a nonexistent node via the Index trait returns `BadValue`. This
/// simplifies error handling in the calling code. Invalid type conversion also
/// returns `BadValue`.
BadValue,
}
pub type Array = Vec<Yaml>;
pub type Hash = LinkedHashMap<Yaml, Yaml>;
// parse f64 as Core schema
// See: https://github.com/chyh1990/yaml-rust/issues/51
fn parse_f64(v: &str) -> Option<f64> {
match v {
".inf" | ".Inf" | ".INF" | "+.inf" | "+.Inf" | "+.INF" => Some(f64::INFINITY),
"-.inf" | "-.Inf" | "-.INF" => Some(f64::NEG_INFINITY),
".nan" | "NaN" | ".NAN" => Some(f64::NAN),
_ => v.parse::<f64>().ok(),
}
}
pub struct YamlLoader {
docs: Vec<Yaml>,
// states
// (current node, anchor_id) tuple
doc_stack: Vec<(Yaml, usize)>,
key_stack: Vec<Yaml>,
anchor_map: BTreeMap<usize, Yaml>,
}
impl MarkedEventReceiver for YamlLoader {
fn on_event(&mut self, ev: Event, _: Marker) {
// println!("EV {:?}", ev);
match ev {
Event::DocumentStart => {
// do nothing
}
Event::DocumentEnd => {
match self.doc_stack.len() {
// empty document
0 => self.docs.push(Yaml::BadValue),
1 => self.docs.push(self.doc_stack.pop().unwrap().0),
_ => unreachable!(),
}
}
Event::SequenceStart(aid, tag) => {
self.doc_stack.push((Yaml::Array(Vec::new(), tag), aid));
}
Event::SequenceEnd => {
let node = self.doc_stack.pop().unwrap();
self.insert_new_node(node);
}
Event::MappingStart(aid, tag) => {
self.doc_stack.push((Yaml::Hash(Hash::new(), tag), aid));
self.key_stack.push(Yaml::BadValue);
}
Event::MappingEnd => {
self.key_stack.pop().unwrap();
let node = self.doc_stack.pop().unwrap();
self.insert_new_node(node);
}
Event::Scalar(v, style, aid, tag) => {
let node = if style != TScalarStyle::Plain {
Yaml::String(v, tag)
} else if let Some(TokenType::Tag(ref handle, ref suffix)) = tag {
// XXX tag:yaml.org,2002:
if handle == "!!" {
match suffix.as_ref() {
"bool" => {
// "true" or "false"
match v.parse::<bool>() {
Err(_) => Yaml::BadValue,
Ok(v) => Yaml::Boolean(v, tag),
}
}
"int" => match v.parse::<i64>() {
Err(_) => Yaml::BadValue,
Ok(v) => Yaml::Integer(v, tag),
},
"float" => match parse_f64(&v) {
Some(_) => Yaml::Real(v, tag),
None => Yaml::BadValue,
},
"null" => match v.as_ref() {
"~" | "null" => Yaml::Null,
_ => Yaml::BadValue,
},
_ => Yaml::String(v, tag),
}
} else {
Yaml::String(v, tag)
}
} else {
// Datatype is not specified, or unrecognized
Yaml::from_str(&v, tag)
};
self.insert_new_node((node, aid));
}
Event::Alias(id) => {
let n = match self.anchor_map.get(&id) {
Some(v) => v.clone(),
None => Yaml::BadValue,
};
self.insert_new_node((n, 0));
}
_ => { /* ignore */ }
}
// println!("DOC {:?}", self.doc_stack);
}
}
impl YamlLoader {
fn insert_new_node(&mut self, node: (Yaml, usize)) {
// valid anchor id starts from 1
if node.1 > 0 {
self.anchor_map.insert(node.1, node.0.clone());
}
if self.doc_stack.is_empty() {
self.doc_stack.push(node);
} else {
let parent = self.doc_stack.last_mut().unwrap();
match *parent {
(Yaml::Array(ref mut v, _), _) => v.push(node.0),
(Yaml::Hash(ref mut h, _), _) => {
let cur_key = self.key_stack.last_mut().unwrap();
// current node is a key
if cur_key.is_badvalue() {
*cur_key = node.0;
// current node is a value
} else {
let mut newkey = Yaml::BadValue;
mem::swap(&mut newkey, cur_key);
h.insert(newkey, node.0);
}
}
_ => unreachable!(),
}
}
}
pub fn load_from_str(source: &str) -> Result<Vec<Yaml>, ScanError> {
let mut loader = YamlLoader {
docs: Vec::new(),
doc_stack: Vec::new(),
key_stack: Vec::new(),
anchor_map: BTreeMap::new(),
};
let mut parser = Parser::new(source.chars());
parser.load(&mut loader, true)?;
Ok(loader.docs)
}
}
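// The macros below generate the typed accessors (`as_*`, `into_*`) for each
// Yaml variant; the attached tag is ignored.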
macro_rules! define_as (
($name:ident, $t:ident, $yt:ident) => (
pub fn $name(&self) -> Option<$t> {
match *self {
Yaml::$yt(v,_) => Some(v),
_ => None
}
}
);
);
macro_rules! define_as_ref (
($name:ident, $t:ty, $yt:ident) => (
pub fn $name(&self) -> Option<$t> {
match *self {
Yaml::$yt(ref v,_) => Some(v),
_ => None
}
}
);
);
macro_rules! define_into (
($name:ident, $t:ty, $yt:ident) => (
pub fn $name(self) -> Option<$t> {
match self {
Yaml::$yt(v,_) => Some(v),
_ => None
}
}
);
);
impl Yaml {
define_as!(as_bool, bool, Boolean);
define_as!(as_i64, i64, Integer);
define_as_ref!(as_str, &str, String);
define_as_ref!(as_hash, &Hash, Hash);
define_as_ref!(as_vec, &Array, Array);
define_into!(into_bool, bool, Boolean);
define_into!(into_i64, i64, Integer);
define_into!(into_string, String, String);
define_into!(into_hash, Hash, Hash);
define_into!(into_vec, Array, Array);
pub fn is_null(&self) -> bool {
match *self {
Yaml::Null => true,
_ => false,
}
}
pub fn is_badvalue(&self) -> bool {
match *self {
Yaml::BadValue => true,
_ => false,
}
}
pub fn is_array(&self) -> bool {
match *self {
Yaml::Array(..) => true,
_ => false,
}
}
pub fn as_f64(&self) -> Option<f64> {
match *self {
Yaml::Real(ref v, _) => parse_f64(v),
_ => None,
}
}
pub fn into_f64(self) -> Option<f64> {
match self {
Yaml::Real(ref v, _) => parse_f64(v),
_ => None,
}
}
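    // Returns the tag suffix carried by the node (e.g. "int" for a !!int
    // tag), if any.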
pub fn get_tag(&self) -> Option<&str> {
match self {
Yaml::Real(_, tag)
| Yaml::Integer(_, tag)
| Yaml::String(_, tag)
| Yaml::Boolean(_, tag)
| Yaml::Array(_, tag)
            | Yaml::Hash(_, tag) => if let Some(TokenType::Tag(_, b)) = tag { Some(b) } else { None },
_ => None,
}
}
}
#[cfg_attr(feature = "cargo-clippy", allow(should_implement_trait))]
impl Yaml {
// Not implementing FromStr because there is no possibility of Error.
// This function falls back to Yaml::String if nothing else matches.
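    // For example, from_str("0x2A", None) yields Yaml::Integer(42, None), and
    // from_str("~", None) yields Yaml::Null.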
pub fn from_str(v: &str, tag: Option<TokenType>) -> Yaml {
if v.starts_with("0x") {
if let Ok(i) = i64::from_str_radix(&v[2..], 16) {
return Yaml::Integer(i, tag);
}
}
if v.starts_with("0o") {
if let Ok(i) = i64::from_str_radix(&v[2..], 8) {
return Yaml::Integer(i, tag);
}
}
if v.starts_with('+') {
if let Ok(i) = v[1..].parse::<i64>() {
return Yaml::Integer(i, tag);
}
}
match v {
"~" | "null" => Yaml::Null,
"true" => Yaml::Boolean(true, tag),
"false" => Yaml::Boolean(false, tag),
_ if v.parse::<i64>().is_ok() => Yaml::Integer(v.parse::<i64>().unwrap(), tag),
// try parsing as f64
_ if parse_f64(v).is_some() => Yaml::Real(v.to_owned(), tag),
_ => Yaml::String(v.to_owned(), tag),
}
}
}
static BAD_VALUE: Yaml = Yaml::BadValue;
impl<'a> Index<&'a str> for Yaml {
type Output = Yaml;
fn index(&self, idx: &'a str) -> &Yaml {
let key = Yaml::String(idx.to_owned(), None);
match self.as_hash() {
Some(h) => h.get(&key).unwrap_or(&BAD_VALUE),
None => &BAD_VALUE,
}
}
}
impl Index<usize> for Yaml {
type Output = Yaml;
fn index(&self, idx: usize) -> &Yaml {
if let Some(v) = self.as_vec() {
v.get(idx).unwrap_or(&BAD_VALUE)
} else if let Some(v) = self.as_hash() {
let key = Yaml::Integer(idx as i64, None);
v.get(&key).unwrap_or(&BAD_VALUE)
} else {
&BAD_VALUE
}
}
}
impl IntoIterator for Yaml {
type Item = Yaml;
type IntoIter = YamlIter;
fn into_iter(self) -> Self::IntoIter {
YamlIter {
yaml: self.into_vec().unwrap_or_else(Vec::new).into_iter(),
}
}
}
pub struct YamlIter {
yaml: vec::IntoIter<Yaml>,
}
impl Iterator for YamlIter {
type Item = Yaml;
fn next(&mut self) -> Option<Yaml> {
self.yaml.next()
}
}
#[cfg(test)]
mod test {
use std::f64;
use crate::yaml::*;
#[test]
fn test_coerce() {
let s = "---
a: 1
b: 2.2
c: [1, 2]
";
let out = YamlLoader::load_from_str(&s).unwrap();
let doc = &out[0];
assert_eq!(doc["a"].as_i64().unwrap(), 1i64);
assert_eq!(doc["b"].as_f64().unwrap(), 2.2f64);
assert_eq!(doc["c"][1].as_i64().unwrap(), 2i64);
assert!(doc["d"][0].is_badvalue());
}
#[test]
fn test_empty_doc() {
let s: String = "".to_owned();
YamlLoader::load_from_str(&s).unwrap();
let s: String = "---".to_owned();
assert_eq!(YamlLoader::load_from_str(&s).unwrap()[0], Yaml::Null);
}
#[test]
fn test_parser() {
let s: String = "
# comment
a0 bb: val
a1:
b1: 4
b2: d
a2: 4 # i'm comment
a3: [1, 2, 3]
a4:
- - a1
- a2
- 2
a5: 'single_quoted'
a6: \"double_quoted\"
a7: 你好
"
.to_owned();
let out = YamlLoader::load_from_str(&s).unwrap();
let doc = &out[0];
assert_eq!(doc["a7"].as_str().unwrap(), "你好");
}
#[test]
fn test_multi_doc() {
let s = "
'a scalar'
---
'a scalar'
---
'a scalar'
";
let out = YamlLoader::load_from_str(&s).unwrap();
assert_eq!(out.len(), 3);
}
#[test]
fn test_anchor() {
let s = "
a1: &DEFAULT
b1: 4
b2: d
a2: *DEFAULT
";
let out = YamlLoader::load_from_str(&s).unwrap();
let doc = &out[0];
assert_eq!(doc["a2"]["b1"].as_i64().unwrap(), 4);
}
#[test]
fn test_bad_anchor() {
let s = "
a1: &DEFAULT
b1: 4
b2: *DEFAULT
";
let out = YamlLoader::load_from_str(&s).unwrap();
let doc = &out[0];
assert_eq!(doc["a1"]["b2"], Yaml::BadValue);
}
#[test]
fn test_github_27() {
// https://github.com/chyh1990/yaml-rust/issues/27
let s = "&a";
let out = YamlLoader::load_from_str(&s).unwrap();
let doc = &out[0];
assert_eq!(doc.as_str().unwrap(), "");
}
#[test]
fn test_plain_datatype() {
let s = "
- 'string'
- \"string\"
- string
- 123
- -321
- 1.23
- -1e4
- ~
- null
- true
- false
- !!str 0
- !!int 100
- !!float 2
- !!null ~
- !!bool true
- !!bool false
- 0xFF
# bad values
- !!int string
- !!float string
- !!bool null
- !!null val
- 0o77
- [ 0xF, 0xF ]
- +12345
- [ true, false ]
";
let out = YamlLoader::load_from_str(&s).unwrap();
let doc = &out[0];
assert_eq!(doc[0].as_str().unwrap(), "string");
assert_eq!(doc[1].as_str().unwrap(), "string");
assert_eq!(doc[2].as_str().unwrap(), "string");
assert_eq!(doc[3].as_i64().unwrap(), 123);
assert_eq!(doc[4].as_i64().unwrap(), -321);
assert_eq!(doc[5].as_f64().unwrap(), 1.23);
assert_eq!(doc[6].as_f64().unwrap(), -1e4);
assert!(doc[7].is_null());
assert!(doc[8].is_null());
assert_eq!(doc[9].as_bool().unwrap(), true);
assert_eq!(doc[10].as_bool().unwrap(), false);
assert_eq!(doc[11].as_str().unwrap(), "0");
assert_eq!(doc[12].as_i64().unwrap(), 100);
assert_eq!(doc[13].as_f64().unwrap(), 2.0);
assert!(doc[14].is_null());
assert_eq!(doc[15].as_bool().unwrap(), true);
assert_eq!(doc[16].as_bool().unwrap(), false);
assert_eq!(doc[17].as_i64().unwrap(), 255);
assert!(doc[18].is_badvalue());
assert!(doc[19].is_badvalue());
assert!(doc[20].is_badvalue());
assert!(doc[21].is_badvalue());
assert_eq!(doc[22].as_i64().unwrap(), 63);
assert_eq!(doc[23][0].as_i64().unwrap(), 15);
assert_eq!(doc[23][1].as_i64().unwrap(), 15);
assert_eq!(doc[24].as_i64().unwrap(), 12345);
assert!(doc[25][0].as_bool().unwrap());
assert!(!doc[25][1].as_bool().unwrap());
}
#[test]
fn test_bad_hyphen() {
// See: https://github.com/chyh1990/yaml-rust/issues/23
let s = "{-";
assert!(YamlLoader::load_from_str(&s).is_err());
}
#[test]
fn test_issue_65() {
// See: https://github.com/chyh1990/yaml-rust/issues/65
let b = "\n\"ll\\\"ll\\\r\n\"ll\\\"ll\\\r\r\r\rU\r\r\rU";
assert!(YamlLoader::load_from_str(&b).is_err());
}
#[test]
fn test_bad_docstart() {
assert!(YamlLoader::load_from_str("---This used to cause an infinite loop").is_ok());
assert_eq!(
YamlLoader::load_from_str("----"),
Ok(vec![Yaml::String(String::from("----"), None)])
);
assert_eq!(
YamlLoader::load_from_str("--- #here goes a comment"),
Ok(vec![Yaml::Null])
);
assert_eq!(
YamlLoader::load_from_str("---- #here goes a comment"),
Ok(vec![Yaml::String(String::from("----"), None)])
);
}
#[test]
fn test_plain_datatype_with_into_methods() {
let s = "
- 'string'
- \"string\"
- string
- 123
- -321
- 1.23
- -1e4
- true
- false
- !!str 0
- !!int 100
- !!float 2
- !!bool true
- !!bool false
- 0xFF
- 0o77
- +12345
- -.INF
- .NAN
- !!float .INF
";
let mut out = YamlLoader::load_from_str(&s).unwrap().into_iter();
let mut doc = out.next().unwrap().into_iter();
assert_eq!(doc.next().unwrap().into_string().unwrap(), "string");
assert_eq!(doc.next().unwrap().into_string().unwrap(), "string");
assert_eq!(doc.next().unwrap().into_string().unwrap(), "string");
assert_eq!(doc.next().unwrap().into_i64().unwrap(), 123);
assert_eq!(doc.next().unwrap().into_i64().unwrap(), -321);
assert_eq!(doc.next().unwrap().into_f64().unwrap(), 1.23);
assert_eq!(doc.next().unwrap().into_f64().unwrap(), -1e4);
assert_eq!(doc.next().unwrap().into_bool().unwrap(), true);
assert_eq!(doc.next().unwrap().into_bool().unwrap(), false);
assert_eq!(doc.next().unwrap().into_string().unwrap(), "0");
assert_eq!(doc.next().unwrap().into_i64().unwrap(), 100);
assert_eq!(doc.next().unwrap().into_f64().unwrap(), 2.0);
assert_eq!(doc.next().unwrap().into_bool().unwrap(), true);
assert_eq!(doc.next().unwrap().into_bool().unwrap(), false);
assert_eq!(doc.next().unwrap().into_i64().unwrap(), 255);
assert_eq!(doc.next().unwrap().into_i64().unwrap(), 63);
assert_eq!(doc.next().unwrap().into_i64().unwrap(), 12345);
assert_eq!(doc.next().unwrap().into_f64().unwrap(), f64::NEG_INFINITY);
assert!(doc.next().unwrap().into_f64().is_some());
assert_eq!(doc.next().unwrap().into_f64().unwrap(), f64::INFINITY);
}
#[test]
fn test_hash_order() {
let s = "---
b: ~
a: ~
c: ~
";
let out = YamlLoader::load_from_str(&s).unwrap();
let first = out.into_iter().next().unwrap();
let mut iter = first.into_hash().unwrap().into_iter();
assert_eq!(
Some((Yaml::String("b".to_owned(), None), Yaml::Null)),
iter.next()
);
assert_eq!(
Some((Yaml::String("a".to_owned(), None), Yaml::Null)),
iter.next()
);
assert_eq!(
Some((Yaml::String("c".to_owned(), None), Yaml::Null)),
iter.next()
);
assert_eq!(None, iter.next());
}
#[test]
fn test_integer_key() {
let s = "
0:
important: true
1:
important: false
";
let out = YamlLoader::load_from_str(&s).unwrap();
let first = out.into_iter().next().unwrap();
assert_eq!(first[0]["important"].as_bool().unwrap(), true);
}
#[test]
fn test_ind | let four_spaces = YamlLoader::load_from_str(
r#"
hash:
with:
indentations
"#,
)
.unwrap()
.into_iter()
.next()
.unwrap();
let two_spaces = YamlLoader::load_from_str(
r#"
hash:
with:
indentations
"#,
)
.unwrap()
.into_iter()
.next()
.unwrap();
let one_space = YamlLoader::load_from_str(
r#"
hash:
with:
indentations
"#,
)
.unwrap()
.into_iter()
.next()
.unwrap();
let mixed_spaces = YamlLoader::load_from_str(
r#"
hash:
with:
indentations
"#,
)
.unwrap()
.into_iter()
.next()
.unwrap();
assert_eq!(four_spaces, two_spaces);
assert_eq!(two_spaces, one_space);
assert_eq!(four_spaces, mixed_spaces);
}
#[test]
fn test_two_space_indentations() {
// https://github.com/kbknapp/clap-rs/issues/965
let s = r#"
subcommands:
- server:
about: server related commands
subcommands2:
- server:
about: server related commands
subcommands3:
- server:
about: server related commands
"#;
let out = YamlLoader::load_from_str(&s).unwrap();
let doc = &out.into_iter().next().unwrap();
println!("{:#?}", doc);
assert_eq!(doc["subcommands"][0]["server"], Yaml::Null);
assert!(doc["subcommands2"][0]["server"].as_hash().is_some());
assert!(doc["subcommands3"][0]["server"].as_hash().is_some());
}
#[test]
fn test_recursion_depth_check_objects() {
let s = "{a:".repeat(10_000) + &"}".repeat(10_000);
assert!(YamlLoader::load_from_str(&s).is_err());
}
#[test]
fn test_recursion_depth_check_arrays() {
let s = "[".repeat(10_000) + &"]".repeat(10_000);
assert!(YamlLoader::load_from_str(&s).is_err());
}
}
main.go | package main
import (
"fmt"
"os"
"path"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/tendermint/tendermint/libs/cli"
"github.com/cosmos/cosmos-sdk/client"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/client/lcd"
"github.com/cosmos/cosmos-sdk/client/rpc"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/version"
"github.com/cosmos/cosmos-sdk/x/auth"
authcmd "github.com/cosmos/cosmos-sdk/x/auth/client/cli"
authrest "github.com/cosmos/cosmos-sdk/x/auth/client/rest"
"github.com/cosmos/cosmos-sdk/x/bank"
bankcmd "github.com/cosmos/cosmos-sdk/x/bank/client/cli"
"github.com/kava-labs/kava/app"
"github.com/kava-labs/kava/migrate/rest_v0_3"
)
func main() {
// Disable cobra's default command sorting so commands stay in the order they are added
cobra.EnableCommandSorting = false
// Instantiate the codec for the command line application
cdc := app.MakeCodec()
// Set the global config
// config is not sealed as the cli supports two coin types for legacy reasons.
config := sdk.GetConfig()
app.SetBech32AddressPrefixes(config)
app.SetBip44CoinType(config)
// TODO: setup keybase, viper object, etc. to be passed into
// the below functions and eliminate global vars, like we do
// with the cdc
rootCmd := &cobra.Command{
Use: "kvcli",
Short: "Command line interface for interacting with kvd",
}
// Add --chain-id to persistent flags and mark it required
rootCmd.PersistentFlags().String(flags.FlagChainID, "", "Chain ID of tendermint node")
rootCmd.PersistentPreRunE = func(_ *cobra.Command, _ []string) error {
return initConfig(rootCmd)
}
// Construct Root Command
rootCmd.AddCommand(
rpc.StatusCommand(),
client.ConfigCmd(app.DefaultCLIHome),
queryCmd(cdc),
txCmd(cdc),
flags.LineBreak,
lcd.ServeCommand(cdc, registerRoutes),
flags.LineBreak,
getModifiedKeysCmd(),
flags.LineBreak,
version.Cmd,
flags.NewCompletionCmd(rootCmd, true),
)
// Add flags and prefix all env exposed with KA
executor := cli.PrepareMainCmd(rootCmd, "KA", app.DefaultCLIHome)
err := executor.Execute()
if err != nil {
fmt.Printf("Failed executing CLI command: %s, exiting...\n", err)
os.Exit(1)
}
}
func queryCmd(cdc *codec.Codec) *cobra.Command {
queryCmd := &cobra.Command{
Use: "query",
Aliases: []string{"q"},
Short: "Querying subcommands",
}
queryCmd.AddCommand(
authcmd.GetAccountCmd(cdc),
flags.LineBreak,
rpc.ValidatorCommand(cdc),
rpc.BlockCommand(),
authcmd.QueryTxsByEventsCmd(cdc),
authcmd.QueryTxCmd(cdc),
flags.LineBreak,
)
// add modules' query commands
app.ModuleBasics.AddQueryCommands(queryCmd, cdc)
return queryCmd
}
func txCmd(cdc *codec.Codec) *cobra.Command {
txCmd := &cobra.Command{
Use: "tx",
Short: "Transactions subcommands",
}
txCmd.AddCommand(
bankcmd.SendTxCmd(cdc),
flags.LineBreak,
authcmd.GetSignCommand(cdc),
authcmd.GetMultiSignCommand(cdc),
flags.LineBreak,
authcmd.GetBroadcastCommand(cdc),
authcmd.GetEncodeCommand(cdc),
authcmd.GetDecodeCommand(cdc),
flags.LineBreak,
)
// add modules' tx commands
app.ModuleBasics.AddTxCommands(txCmd, cdc)
// remove auth and bank commands as they're mounted under the root tx command
var cmdsToRemove []*cobra.Command
for _, cmd := range txCmd.Commands() {
if cmd.Use == auth.ModuleName || cmd.Use == bank.ModuleName {
cmdsToRemove = append(cmdsToRemove, cmd)
}
}
txCmd.RemoveCommand(cmdsToRemove...)
return txCmd
}
// registerRoutes registers the routes from the different modules for the LCD.
func registerRoutes(rs *lcd.RestServer) {
client.RegisterRoutes(rs.CliCtx, rs.Mux)
authrest.RegisterTxRoutes(rs.CliCtx, rs.Mux)
app.ModuleBasics.RegisterRESTRoutes(rs.CliCtx, rs.Mux)
app.RegisterSimulateRoutes(rs.CliCtx, rs.Mux)
// register legacy endpoints compatible with v0.3.x of kava
rest_v0_3.RegisterRoutes(rs.CliCtx, rs.Mux)
}
// initConfig reads in and sets options from a config file (if one exists)
func initConfig(cmd *cobra.Command) error {
home, err := cmd.PersistentFlags().GetString(cli.HomeFlag)
if err != nil {
return err
}
cfgFile := path.Join(home, "config", "config.toml")
if _, err := os.Stat(cfgFile); err == nil {
viper.SetConfigFile(cfgFile)
if err := viper.ReadInConfig(); err != nil {
return err
}
}
if err := viper.BindPFlag(flags.FlagChainID, cmd.PersistentFlags().Lookup(flags.FlagChainID)); err != nil {
return err
}
if err := viper.BindPFlag(cli.EncodingFlag, cmd.PersistentFlags().Lookup(cli.EncodingFlag)); err != nil {
return err
}
return viper.BindPFlag(cli.OutputFlag, cmd.PersistentFlags().Lookup(cli.OutputFlag))
}
mutation_builder_fk.go | // Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package optbuilder
import (
"context"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/cat"
"github.com/cockroachdb/cockroach/pkg/sql/opt/memo"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry"
"github.com/cockroachdb/errors"
)
// This file contains methods that populate mutationBuilder.checks and cascades.
//
// -- Checks --
//
// The foreign key checks are queries that run after the statement (including
// the relevant mutation) completes. They check the integrity of the foreign key
// relations that involve modified rows; any row that is returned by these FK
// check queries indicates a foreign key violation.
//
// -- Cascades --
//
// The foreign key cascades are "potential" future queries that perform
// cascading mutations of child tables. These queries are constructed later as
// necessary; mb.cascades stores metadata that include CascadeBuilder instances
// which are used to construct these queries.
// buildFKChecksForInsert builds FK check queries for an insert.
//
// See the comment at the top of the file for general information on checks and
// cascades.
//
// In the case of insert, each FK check query is an anti-join with the left side
// being a WithScan of the mutation input and the right side being the
// referenced table. A simple example of an insert with a FK check:
//
// insert child
// ├── ...
// ├── input binding: &1
// └── f-k-checks
// └── f-k-checks-item: child(p) -> parent(p)
// └── anti-join (hash)
// ├── columns: column2:5!null
// ├── with-scan &1
// │ ├── columns: column2:5!null
// │ └── mapping:
// │ └── column2:4 => column2:5
// ├── scan parent
// │ └── columns: parent.p:6!null
// └── filters
// └── column2:5 = parent.p:6
//
// See testdata/fk-checks-insert for more examples.
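//
// As a concrete illustration (a hypothetical schema, not taken from the
// testdata files):
//
//   CREATE TABLE parent (p INT PRIMARY KEY);
//   CREATE TABLE child (c INT PRIMARY KEY, p INT REFERENCES parent (p));
//
// INSERT INTO child VALUES (100, 1) builds the anti-join above, and the
// check errors out iff no parent row with p = 1 exists.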
func (mb *mutationBuilder) buildFKChecksForInsert() {
if mb.tab.OutboundForeignKeyCount() == 0 {
// No relevant FKs.
return
}
// TODO(radu): if the input is a VALUES with constant expressions, we don't
// need to buffer it. This could be a normalization rule, but it's probably
// more efficient if we did it in here (or we'd end up building the entire FK
// subtrees twice).
mb.ensureWithID()
h := &mb.fkCheckHelper
for i, n := 0, mb.tab.OutboundForeignKeyCount(); i < n; i++ {
if h.initWithOutboundFK(mb, i) {
mb.checks = append(mb.checks, h.buildInsertionCheck())
}
}
telemetry.Inc(sqltelemetry.ForeignKeyChecksUseCounter)
}
// buildFKChecksAndCascadesForDelete builds FK check and cascades for a delete.
//
// See the comment at the top of the file for general information on checks and
// cascades.
//
// -- Checks --
//
// In the case of delete, each FK check query is a semi-join with the left side
// being a WithScan of the mutation input and the right side being the
// referencing table. For example:
// delete parent
// ├── ...
// ├── input binding: &1
// └── f-k-checks
// └── f-k-checks-item: child(p) -> parent(p)
// └── semi-join (hash)
// ├── columns: p:7!null
// ├── with-scan &1
// │ ├── columns: p:7!null
// │ └── mapping:
// │ └── parent.p:5 => p:7
// ├── scan child
// │ └── columns: child.p:9!null
// └── filters
// └── p:7 = child.p:9
//
// See testdata/fk-checks-delete for more examples.
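//
// With the same hypothetical parent/child schema sketched for the insert
// case above, DELETE FROM parent WHERE p = 1 builds this semi-join, and the
// check errors out iff some child row still references p = 1.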
//
// -- Cascades --
//
// See onDeleteCascadeBuilder, onDeleteFastCascadeBuilder, onDeleteSetBuilder
// for details.
//
func (mb *mutationBuilder) buildFKChecksAndCascadesForDelete() {
if mb.tab.InboundForeignKeyCount() == 0 {
// No relevant FKs.
return
}
for i, n := 0, mb.tab.InboundForeignKeyCount(); i < n; i++ {
h := &mb.fkCheckHelper
if !h.initWithInboundFK(mb, i) {
continue
}
// The action dictates how a foreign key reference is handled:
// - with Cascade/SetNull/SetDefault, we create a cascading mutation to
// modify or delete "orphaned" rows in the child table.
// - with Restrict/NoAction, we create a check that causes an error if
// there are any "orphaned" rows in the child table.
if a := h.fk.DeleteReferenceAction(); a != tree.Restrict && a != tree.NoAction {
telemetry.Inc(sqltelemetry.ForeignKeyCascadesUseCounter)
var builder memo.CascadeBuilder
switch a {
case tree.Cascade:
// Try the fast builder first; if it cannot be used, use the regular builder.
var ok bool
builder, ok = tryNewOnDeleteFastCascadeBuilder(
mb.b.ctx, mb.md, mb.b.catalog, h.fk, i, mb.tab, h.otherTab, mb.outScope,
)
if !ok {
mb.ensureWithID()
builder = newOnDeleteCascadeBuilder(mb.tab, i, h.otherTab)
}
case tree.SetNull, tree.SetDefault:
mb.ensureWithID()
builder = newOnDeleteSetBuilder(mb.tab, i, h.otherTab, a)
default:
panic(errors.AssertionFailedf("unhandled action type %s", a))
}
cols := make(opt.ColList, len(h.tabOrdinals))
for i, tabOrd := range h.tabOrdinals {
cols[i] = mb.fetchColIDs[tabOrd]
}
mb.cascades = append(mb.cascades, memo.FKCascade{
FKName: h.fk.Name(),
Builder: builder,
WithID: mb.withID,
OldValues: cols,
NewValues: nil,
})
continue
}
mb.ensureWithID()
fkInput, withScanCols, _ := h.makeFKInputScan(fkInputScanFetchedVals)
mb.checks = append(mb.checks, h.buildDeletionCheck(fkInput, withScanCols))
}
telemetry.Inc(sqltelemetry.ForeignKeyChecksUseCounter)
}
// buildFKChecksForUpdate builds FK check queries for an update.
//
// See the comment at the top of the file for general information on checks and
// cascades.
//
// In the case of update, there are two types of FK check queries:
//
// - insertion-side checks are very similar to the checks we issue for insert;
// they are an anti-join with the left side being a WithScan of the "new"
// values for each row. For example:
// update child
// ├── ...
// ├── input binding: &1
// └── f-k-checks
// └── f-k-checks-item: child(p) -> parent(p)
// └── anti-join (hash)
// ├── columns: column5:6!null
// ├── with-scan &1
// │ ├── columns: column5:6!null
// │ └── mapping:
// │ └── column5:5 => column5:6
// ├── scan parent
// │ └── columns: parent.p:8!null
// └── filters
// └── column5:6 = parent.p:8
//
// - deletion-side checks are similar to the checks we issue for delete; they
// are a semi-join but the left side input is more complicated: it is an
// Except between a WithScan of the "old" values and a WithScan of the "new"
// values for each row (this is the set of values that are effectively
// removed from the table). For example:
// update parent
// ├── ...
// ├── input binding: &1
// └── f-k-checks
// └── f-k-checks-item: child(p) -> parent(p)
// └── semi-join (hash)
// ├── columns: p:8!null
// ├── except
// │ ├── columns: p:8!null
// │ ├── left columns: p:8!null
// │ ├── right columns: column7:9
// │ ├── with-scan &1
// │ │ ├── columns: p:8!null
// │ │ └── mapping:
// │ │ └── parent.p:5 => p:8
// │ └── with-scan &1
// │ ├── columns: column7:9!null
// │ └── mapping:
// │ └── column7:7 => column7:9
// ├── scan child
// │ └── columns: child.p:11!null
// └── filters
// └── p:8 = child.p:11
//
// Only FK relations that involve updated columns result in FK checks.
//
func (mb *mutationBuilder) buildFKChecksForUpdate() {
if mb.tab.OutboundForeignKeyCount() == 0 && mb.tab.InboundForeignKeyCount() == 0 {
return
}
mb.ensureWithID()
// An Update can be thought of as an insertion paired with a deletion, so for an
// Update we can emit both semi-joins and anti-joins.
// Each row input to the Update operator contains both the existing and the
// new value for each updated column. From this we can construct the effective
// insertion and deletion.
// Say the table being updated is:
//
// x | y | z
// --+---+--
// 1 | 3 | 5
//
// And we are executing UPDATE t SET y = 10, then the input to the Update
// operator will look like:
//
// x | y | z | new_y
// --+---+---+------
// 1 | 3 | 5 | 10
//
// The insertion check will happen on the "new" row (x, new_y, z); the deletion
// check will happen on the "old" row (x, y, z).
h := &mb.fkCheckHelper
for i, n := 0, mb.tab.OutboundForeignKeyCount(); i < n; i++ {
// Verify that at least one FK column is actually updated.
if mb.outboundFKColsUpdated(i) {
if h.initWithOutboundFK(mb, i) {
mb.checks = append(mb.checks, h.buildInsertionCheck())
}
}
}
// The "deletion" incurred by an update is the rows deleted for a given
// inbound FK minus the rows inserted.
for i, n := 0, mb.tab.InboundForeignKeyCount(); i < n; i++ {
// Verify that at least one FK column is actually updated.
if !mb.inboundFKColsUpdated(i) {
continue
}
if !h.initWithInboundFK(mb, i) {
// The FK constraint can safely be ignored.
continue
}
if a := h.fk.UpdateReferenceAction(); a != tree.Restrict && a != tree.NoAction {
telemetry.Inc(sqltelemetry.ForeignKeyCascadesUseCounter)
builder := newOnUpdateCascadeBuilder(mb.tab, i, h.otherTab, a)
oldCols := make(opt.ColList, len(h.tabOrdinals))
newCols := make(opt.ColList, len(h.tabOrdinals))
for i, tabOrd := range h.tabOrdinals {
fetchColID := mb.fetchColIDs[tabOrd]
updateColID := mb.updateColIDs[tabOrd]
if updateColID == 0 {
updateColID = fetchColID
}
oldCols[i] = fetchColID
newCols[i] = updateColID
}
mb.cascades = append(mb.cascades, memo.FKCascade{
FKName: h.fk.Name(),
Builder: builder,
WithID: mb.withID,
OldValues: oldCols,
NewValues: newCols,
})
continue
}
// Construct an Except expression for the set difference between "old"
// FK values and "new" FK values.
//
// The simplest example to see why this is necessary is when we are
// "updating" a value to the same value, e.g:
// UPDATE child SET c = c
// Here we are not removing any values from the column, so we must not
// check for orphaned rows or we will be generating bogus FK violation
// errors.
//
// There are more complicated cases where one row replaces the value from
// another row, e.g.
// UPDATE child SET c = c+1
// when we have existing consecutive values. These cases are sketchy because
// depending on the order in which the mutations are applied, they may or
// may not result in unique index violations (but if they go through, the FK
// checks should be accurate).
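//
// As a worked illustration: if parent.p currently holds {1, 2, 3} and we run
// UPDATE parent SET p = p + 1, the old values are {1, 2, 3}, the new values
// are {2, 3, 4}, and the Except leaves just {1}, the only value whose child
// references could become orphaned.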
//
// Note that the same reasoning could be applied to the insertion checks,
// but in that case, it is not a correctness issue: it's always ok to
// recheck that an existing row is not orphan. It's not really desirable for
// performance either: we would be incurring extra cost (more complicated
// expressions, scanning the input buffer twice) for a rare case.
oldRows, colsForOldRow, _ := h.makeFKInputScan(fkInputScanFetchedVals)
newRows, colsForNewRow, _ := h.makeFKInputScan(fkInputScanNewVals)
// The rows that no longer exist are the ones that were "deleted" by virtue
// of being updated _from_, minus the ones that were "added" by virtue of
// being updated _to_.
deletedRows := mb.b.factory.ConstructExcept(
oldRows,
newRows,
&memo.SetPrivate{
LeftCols: colsForOldRow,
RightCols: colsForNewRow,
OutCols: colsForOldRow,
},
)
mb.checks = append(mb.checks, h.buildDeletionCheck(deletedRows, colsForOldRow))
}
telemetry.Inc(sqltelemetry.ForeignKeyChecksUseCounter)
}
// buildFKChecksForUpsert builds FK check queries for an upsert.
//
// See the comment at the top of the file for general information on checks and
// cascades.
//
// The case of upsert is very similar to update; see buildFKChecksForUpdate.
// The main difference is that for update, the "new" values were readily
// available, whereas for upsert, the "new" values can be the result of an
// expression of the form:
// CASE WHEN canary IS NULL THEN inserter-value ELSE updated-value END
// These expressions are already projected as part of the mutation input and are
// directly accessible through WithScan.
//
// Only FK relations that involve updated columns result in deletion-side FK
// checks. The insertion-side FK checks are always needed (similar to insert)
// because any of the rows might result in an insert rather than an update.
//
func (mb *mutationBuilder) buildFKChecksForUpsert() {
numOutbound := mb.tab.OutboundForeignKeyCount()
numInbound := mb.tab.InboundForeignKeyCount()
if numOutbound == 0 && numInbound == 0 {
return
}
mb.ensureWithID()
h := &mb.fkCheckHelper
for i := 0; i < numOutbound; i++ {
if h.initWithOutboundFK(mb, i) {
mb.checks = append(mb.checks, h.buildInsertionCheck())
}
}
for i := 0; i < numInbound; i++ {
// Verify that at least one FK column is updated by the Upsert; columns that
// are not updated can get new values (through the insert path) but existing
// values are never removed.
if !mb.inboundFKColsUpdated(i) {
continue
}
if !h.initWithInboundFK(mb, i) {
continue
}
if a := h.fk.UpdateReferenceAction(); a != tree.Restrict && a != tree.NoAction {
telemetry.Inc(sqltelemetry.ForeignKeyCascadesUseCounter)
builder := newOnUpdateCascadeBuilder(mb.tab, i, h.otherTab, a)
oldCols := make(opt.ColList, len(h.tabOrdinals))
newCols := make(opt.ColList, len(h.tabOrdinals))
for i, tabOrd := range h.tabOrdinals {
fetchColID := mb.fetchColIDs[tabOrd]
// Here we don't need to use the upsertColIDs because the rows that
// correspond to inserts will be ignored in the cascade.
updateColID := mb.updateColIDs[tabOrd]
if updateColID == 0 {
updateColID = fetchColID
}
oldCols[i] = fetchColID
newCols[i] = updateColID
}
mb.cascades = append(mb.cascades, memo.FKCascade{
FKName: h.fk.Name(),
Builder: builder,
WithID: mb.withID,
OldValues: oldCols,
NewValues: newCols,
})
continue
}
// Construct an Except expression for the set difference between "old" FK
// values and "new" FK values. See buildFKChecksForUpdate for more details.
//
// Note that technically, to get "old" values for the updated rows we should
// be selecting only the rows that correspond to updates, as opposed to
// insertions (using a "canaryCol IS NOT NULL" condition). But the rows we
// would filter out have all-null fetched values anyway and will never match
// in the semi join.
oldRows, colsForOldRow, _ := h.makeFKInputScan(fkInputScanFetchedVals)
newRows, colsForNewRow, _ := h.makeFKInputScan(fkInputScanNewVals)
// The rows that no longer exist are the ones that were "deleted" by virtue
// of being updated _from_, minus the ones that were "added" by virtue of
// being updated _to_.
deletedRows := mb.b.factory.ConstructExcept(
oldRows,
newRows,
&memo.SetPrivate{
LeftCols: colsForOldRow,
RightCols: colsForNewRow,
OutCols: colsForOldRow,
},
)
mb.checks = append(mb.checks, h.buildDeletionCheck(deletedRows, colsForOldRow))
}
telemetry.Inc(sqltelemetry.ForeignKeyChecksUseCounter)
}
// outboundFKColsUpdated returns true if any of the FK columns for an outbound
// constraint are being updated (according to updateColIDs).
func (mb *mutationBuilder) outboundFKColsUpdated(fkOrdinal int) bool {
fk := mb.tab.OutboundForeignKey(fkOrdinal)
for i, n := 0, fk.ColumnCount(); i < n; i++ {
if ord := fk.OriginColumnOrdinal(mb.tab, i); mb.updateColIDs[ord] != 0 {
return true
}
}
return false
}
// inboundFKColsUpdated returns true if any of the FK columns for an inbound
// constraint are being updated (according to updateColIDs).
func (mb *mutationBuilder) inboundFKColsUpdated(fkOrdinal int) bool {
fk := mb.tab.InboundForeignKey(fkOrdinal)
for i, n := 0, fk.ColumnCount(); i < n; i++ {
if ord := fk.ReferencedColumnOrdinal(mb.tab, i); mb.updateColIDs[ord] != 0 {
return true
}
}
return false
}
// ensureWithID makes sure that withID is initialized (and thus that the input
// to the mutation will be buffered).
//
// Assumes that outScope.expr is the input to the mutation.
func (mb *mutationBuilder) ensureWithID() {
if mb.withID != 0 {
return
}
mb.withID = mb.b.factory.Memo().NextWithID()
mb.md.AddWithBinding(mb.withID, mb.outScope.expr)
}
// fkCheckHelper is a type associated with a single FK constraint and is used to
// build the "leaves" of a FK check expression, namely the WithScan of the
// mutation input and the Scan of the other table.
type fkCheckHelper struct {
mb *mutationBuilder
fk cat.ForeignKeyConstraint
fkOrdinal int
fkOutbound bool
otherTab cat.Table
// tabOrdinals are the table ordinals of the FK columns in the table that is
// being mutated. They correspond 1-to-1 to the columns in the
// ForeignKeyConstraint.
tabOrdinals []int
// otherTabOrdinals are the table ordinals of the FK columns in the "other"
// table. They correspond 1-to-1 to the columns in the ForeignKeyConstraint.
otherTabOrdinals []int
}
// initWithOutboundFK initializes the helper with an outbound FK constraint.
//
// Returns false if the FK relation should be ignored (e.g. because the new
// values for the FK columns are known to be always NULL).
func (h *fkCheckHelper) initWithOutboundFK(mb *mutationBuilder, fkOrdinal int) bool {
*h = fkCheckHelper{
mb: mb,
fk: mb.tab.OutboundForeignKey(fkOrdinal),
fkOrdinal: fkOrdinal,
fkOutbound: true,
}
refID := h.fk.ReferencedTableID()
h.otherTab = resolveTable(mb.b.ctx, mb.b.catalog, refID)
if h.otherTab == nil {
// The other table is in the process of being added; ignore the FK relation.
return false
}
// We need SELECT privileges on the referenced table.
mb.b.checkPrivilege(opt.DepByID(refID), h.otherTab, privilege.SELECT)
numCols := h.fk.ColumnCount()
h.allocOrdinals(numCols)
for i := 0; i < numCols; i++ {
h.tabOrdinals[i] = h.fk.OriginColumnOrdinal(mb.tab, i)
h.otherTabOrdinals[i] = h.fk.ReferencedColumnOrdinal(h.otherTab, i)
}
// Check if we are setting NULL values for the FK columns, like when this
// mutation is the result of a SET NULL cascade action.
numNullCols := 0
for _, tabOrd := range h.tabOrdinals {
colID := mb.mapToReturnColID(tabOrd)
if memo.OutputColumnIsAlwaysNull(mb.outScope.expr, colID) {
numNullCols++
}
}
if numNullCols == numCols {
// All FK columns are getting NULL values; FK check not needed.
return false
}
if numNullCols > 0 && h.fk.MatchMethod() == tree.MatchSimple {
// At least one FK column is getting a NULL value and we are using MATCH
// SIMPLE; FK check not needed.
return false
}
return true
}
// initWithInboundFK initializes the helper with an inbound FK constraint.
//
// Returns false if the FK relation should be ignored (because the other table
// is in the process of being created).
func (h *fkCheckHelper) initWithInboundFK(mb *mutationBuilder, fkOrdinal int) (ok bool) {
*h = fkCheckHelper{
mb: mb,
fk: mb.tab.InboundForeignKey(fkOrdinal),
fkOrdinal: fkOrdinal,
fkOutbound: false,
}
originID := h.fk.OriginTableID()
h.otherTab = resolveTable(mb.b.ctx, mb.b.catalog, originID)
if h.otherTab == nil {
return false
}
// We need SELECT privileges on the origin table.
mb.b.checkPrivilege(opt.DepByID(originID), h.otherTab, privilege.SELECT)
numCols := h.fk.ColumnCount()
h.allocOrdinals(numCols)
for i := 0; i < numCols; i++ {
h.tabOrdinals[i] = h.fk.ReferencedColumnOrdinal(mb.tab, i)
h.otherTabOrdinals[i] = h.fk.OriginColumnOrdinal(h.otherTab, i)
}
return true
}
// resolveTable resolves a table StableID. Returns nil if the table is in the
// process of being added, in which case it is safe to ignore any FK
// relation with the table.
func resolveTable(ctx context.Context, catalog cat.Catalog, id cat.StableID) cat.Table {
ref, isAdding, err := catalog.ResolveDataSourceByID(ctx, cat.Flags{}, id)
if err != nil {
if isAdding {
// The table is in the process of being added.
return nil
}
panic(err)
}
return ref.(cat.Table)
}
type fkInputScanType uint8
const (
fkInputScanNewVals fkInputScanType = iota
fkInputScanFetchedVals
)
// makeFKInputScan constructs a WithScan that iterates over the input to the
// mutation operator. Used in expressions that generate rows for checking for FK
// violations.
//
// The WithScan expression will scan either the new values or the fetched values
// for the given table ordinals (which correspond to FK columns).
//
// Returns the output columns from the WithScan, which map 1-to-1 to
// h.tabOrdinals. Also returns the subset of these columns that can be assumed
// to be not null (either because they are not null in the mutation input or
// because they are non-nullable table columns).
//
func (h *fkCheckHelper) makeFKInputScan(
typ fkInputScanType,
) (scan memo.RelExpr, outCols opt.ColList, notNullOutCols opt.ColSet) {
mb := h.mb
// inputCols are the column IDs from the mutation input that we are scanning.
inputCols := make(opt.ColList, len(h.tabOrdinals))
// outCols will store the newly synthesized output columns for WithScan.
outCols = make(opt.ColList, len(inputCols))
for i, tabOrd := range h.tabOrdinals {
if typ == fkInputScanNewVals {
inputCols[i] = mb.mapToReturnColID(tabOrd)
} else {
inputCols[i] = mb.fetchColIDs[tabOrd]
}
if inputCols[i] == 0 {
panic(errors.AssertionFailedf("no value for FK column (tabOrd=%d)", tabOrd))
}
// Synthesize new column.
c := mb.b.factory.Metadata().ColumnMeta(inputCols[i])
outCols[i] = mb.md.AddColumn(c.Alias, c.Type)
// If a table column is not nullable, NULLs cannot be inserted (the
// mutation will fail). So for the purposes of FK checks, we can treat
// these columns as not null.
if mb.outScope.expr.Relational().NotNullCols.Contains(inputCols[i]) ||
!mb.tab.Column(tabOrd).IsNullable() {
notNullOutCols.Add(outCols[i])
}
}
scan = mb.b.factory.ConstructWithScan(&memo.WithScanPrivate{
With: mb.withID,
InCols: inputCols,
OutCols: outCols,
ID: mb.b.factory.Metadata().NextUniqueID(),
})
return scan, outCols, notNullOutCols
}
// buildOtherTableScan builds a Scan of the "other" table.
func (h *fkCheckHelper) buildOtherTableScan() (outScope *scope, tabMeta *opt.TableMeta) {
otherTabMeta := h.mb.b.addTable(h.otherTab, tree.NewUnqualifiedTableName(h.otherTab.Name()))
return h.mb.b.buildScan(
otherTabMeta,
h.otherTabOrdinals,
&tree.IndexFlags{IgnoreForeignKeys: true},
noRowLocking,
h.mb.b.allocScope(),
), otherTabMeta
}
func (h *fkCheckHelper) allocOrdinals(numCols int) {
buf := make([]int, numCols*2)
h.tabOrdinals = buf[:numCols]
h.otherTabOrdinals = buf[numCols:]
}
// buildInsertionCheck creates a FK check for rows which are added to a table.
// The input to the insertion check will be produced from the input to the
// mutation operator.
func (h *fkCheckHelper) buildInsertionCheck() memo.FKChecksItem {
fkInput, withScanCols, notNullWithScanCols := h.makeFKInputScan(fkInputScanNewVals)
numCols := len(withScanCols)
f := h.mb.b.factory
if notNullWithScanCols.Len() < numCols {
// The columns we are inserting might have NULLs. These require special
// handling, depending on the match method:
// - MATCH SIMPLE: allows any column(s) to be NULL and the row doesn't
// need to have a match in the referenced table.
// - MATCH FULL: only the case where *all* the columns are NULL is
// allowed, and the row doesn't need to have a match in the
// referenced table.
//
// Note that rows that have NULLs will never have a match in the anti
// join and will generate errors. To handle these cases, we filter the
// mutated rows (before the anti join) to remove those which don't need a
// match.
//
// For SIMPLE, we filter out any rows which have a NULL. For FULL, we
// filter out any rows where all the columns are NULL (rows which have
// NULLs on a subset of columns are let through and will generate FK errors
// because they will never have a match in the anti join).
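// Concretely (an illustration): with FK columns (a, b) where only b is
// nullable, MATCH SIMPLE adds the filter (b IS NOT NULL), while MATCH FULL
// adds no filter at all, since a row can never be all-NULL when a is
// non-nullable.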
switch m := h.fk.MatchMethod(); m {
case tree.MatchSimple:
// Filter out any rows which have a NULL; build filters of the form
// (a IS NOT NULL) AND (b IS NOT NULL) ...
filters := make(memo.FiltersExpr, 0, numCols-notNullWithScanCols.Len())
for _, col := range withScanCols {
if !notNullWithScanCols.Contains(col) {
filters = append(filters, f.ConstructFiltersItem(
f.ConstructIsNot(
f.ConstructVariable(col),
memo.NullSingleton,
),
))
}
}
fkInput = f.ConstructSelect(fkInput, filters)
case tree.MatchFull:
// Filter out any rows which have NULLs on all referencing columns.
if !notNullWithScanCols.Empty() {
// We statically know that some of the referencing columns can't be
// NULL. In this case, we don't need to filter anything (the case
// where all the origin columns are NULL is not possible).
break
}
// Build a filter of the form
// (a IS NOT NULL) OR (b IS NOT NULL) ...
var condition opt.ScalarExpr
for _, col := range withScanCols {
is := f.ConstructIsNot(
f.ConstructVariable(col),
memo.NullSingleton,
)
if condition == nil {
condition = is
} else {
condition = f.ConstructOr(condition, is)
}
}
fkInput = f.ConstructSelect(
fkInput,
memo.FiltersExpr{f.ConstructFiltersItem(condition)},
)
default:
panic(errors.AssertionFailedf("match method %s not supported", m))
}
}
// Build an anti-join, with the origin FK columns on the left and the
// referenced columns on the right.
scanScope, refTabMeta := h.buildOtherTableScan()
// Build the join filters:
// (origin_a = referenced_a) AND (origin_b = referenced_b) AND ...
antiJoinFilters := make(memo.FiltersExpr, numCols)
for j := 0; j < numCols; j++ {
antiJoinFilters[j] = f.ConstructFiltersItem(
f.ConstructEq(
f.ConstructVariable(withScanCols[j]),
f.ConstructVariable(scanScope.cols[j].id),
),
)
}
var p memo.JoinPrivate
if h.mb.b.evalCtx.SessionData.PreferLookupJoinsForFKs {
p.Flags = memo.PreferLookupJoinIntoRight
}
antiJoin := f.ConstructAntiJoin(fkInput, scanScope.expr, antiJoinFilters, &p)
return f.ConstructFKChecksItem(antiJoin, &memo.FKChecksItemPrivate{
OriginTable: h.mb.tabID,
ReferencedTable: refTabMeta.MetaID,
FKOutbound: true,
FKOrdinal: h.fkOrdinal,
KeyCols: withScanCols,
OpName: h.mb.opName,
})
}
// buildDeletionCheck creates a FK check for rows which are removed from a
// table. deletedRows is used as the input to the deletion check, and deleteCols
// is a list of the columns for the rows being deleted, containing values for
// the referenced FK columns in the table we are mutating.
func (h *fkCheckHelper) buildDeletionCheck(
deletedRows memo.RelExpr, deleteCols opt.ColList,
) memo.FKChecksItem {
// Build a semi join, with the referenced FK columns on the left and the
// origin columns on the right.
scanScope, origTabMeta := h.buildOtherTableScan()
// Note that it's impossible to orphan a row whose FK key columns contain a
// NULL, since by definition a NULL never refers to an actual row (in
// either MATCH FULL or MATCH SIMPLE).
// Build the join filters:
// (origin_a = referenced_a) AND (origin_b = referenced_b) AND ...
f := h.mb.b.factory
semiJoinFilters := make(memo.FiltersExpr, len(deleteCols))
for j := range deleteCols {
semiJoinFilters[j] = f.ConstructFiltersItem(
f.ConstructEq(
f.ConstructVariable(deleteCols[j]),
f.ConstructVariable(scanScope.cols[j].id),
),
)
}
var p memo.JoinPrivate
if h.mb.b.evalCtx.SessionData.PreferLookupJoinsForFKs {
p.Flags = memo.PreferLookupJoinIntoRight
}
semiJoin := f.ConstructSemiJoin(deletedRows, scanScope.expr, semiJoinFilters, &p)
return f.ConstructFKChecksItem(semiJoin, &memo.FKChecksItemPrivate{
OriginTable: origTabMeta.MetaID,
ReferencedTable: h.mb.tabID,
FKOutbound: false,
FKOrdinal: h.fkOrdinal,
KeyCols: deleteCols,
OpName: h.mb.opName,
})
}
app.module.ts | import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { HttpClientModule } from '@angular/common/http';
import { BrowserModule } from '@angular/platform-browser';
import { BrowserAnimationsModule } from '@angular/platform-browser/animations';
import { RouterModule } from '@angular/router';
import { StoreModule } from '@ngrx/store';
import { EffectsModule } from '@ngrx/effects';
import { DBModule } from '@ngrx/db';
import {
StoreRouterConnectingModule,
RouterStateSerializer,
} from '@ngrx/router-store';
import { StoreDevtoolsModule } from '@ngrx/store-devtools';
import { CoreModule } from './core/core.module';
import { AuthModule } from './auth/auth.module';
import { routes } from './routes';
import { reducers, metaReducers } from './reducers';
import { schema } from './db';
import { CustomRouterStateSerializer } from './shared/utils';
import { AppComponent } from './core/containers/app';
import { environment } from '../environments/environment';
@NgModule({
imports: [
CommonModule,
BrowserModule,
BrowserAnimationsModule,
HttpClientModule,
RouterModule.forRoot(routes, { useHash: true }),
/**
* StoreModule.forRoot is imported once in the root module, accepting a reducer
* function or object map of reducer functions. If passed an object of
* reducers, combineReducers will be run creating your application
* meta-reducer. This returns all providers for an @ngrx/store
* based application.
*/
StoreModule.forRoot(reducers, { metaReducers }),
/**
* @ngrx/router-store keeps router state up-to-date in the store.
*/
StoreRouterConnectingModule.forRoot({
/*
The stateKey defines the name of the state used by the router-store reducer.
This matches the key defined in the map of reducers.
*/
stateKey: 'router',
}),
/**
* Store devtools instrument the store retaining past versions of state
* and recalculating new states. This enables powerful time-travel
* debugging.
*
* To use the debugger, install the Redux Devtools extension for either
* Chrome or Firefox
*
* See: https://github.com/zalmoxisus/redux-devtools-extension
*/
StoreDevtoolsModule.instrument({
name: 'Powr Store DevTools',
logOnly: environment.production,
}),
/**
* EffectsModule.forRoot() is imported once in the root module and
* sets up the effects class to be initialized immediately when the
* application starts.
*
* See: https://github.com/ngrx/platform/blob/master/docs/effects/api.md#forroot
*/
EffectsModule.forRoot([]),
/**
* `provideDB` sets up @ngrx/db with the provided schema and makes the Database
* service available.
*/
DBModule.provideDB(schema),
CoreModule.forRoot(),
AuthModule.forRoot(),
],
providers: [
/**
* The `RouterStateSnapshot` provided by the `Router` is a large complex structure.
* A custom RouterStateSerializer is used to parse the `RouterStateSnapshot` provided
* by `@ngrx/router-store` to include only the desired pieces of the snapshot.
*/
{ provide: RouterStateSerializer, useClass: CustomRouterStateSerializer },
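    /**
     * For reference, a minimal serializer of this kind (a sketch only; the
     * real implementation lives in ./shared/utils) could keep just the url:
     *
     *   export class CustomRouterStateSerializer
     *     implements RouterStateSerializer<{ url: string }> {
     *     serialize(routerState: RouterStateSnapshot): { url: string } {
     *       return { url: routerState.url };
     *     }
     *   }
     */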
],
bootstrap: [AppComponent],
})
export class AppModule {}
hv_second_secfpn_kitti.py | voxel_size = [0.05, 0.05, 0.1]
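# Shape sanity note (derived from the numbers in this file): with the point
# cloud range [0, -40, -3, 70.4, 40, 1] below, the grid spans 70.4/0.05 = 1408
# cells in x, 80/0.05 = 1600 in y and 4/0.1 = 40 in z, which matches the
# middle encoder's sparse_shape of [41, 1600, 1408] (z listed first, with one
# extra z cell as SECOND's sparse encoder expects).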
model = dict(
type='VoxelNet',
voxel_layer=dict(max_num_points=5,
point_cloud_range=[0, -40, -3, 70.4, 40, 1],
voxel_size=voxel_size,
max_voxels=(16000, 40000)),
voxel_encoder=dict(type='HardSimpleVFE'),
middle_encoder=dict(type='SparseEncoder',
in_channels=4,
sparse_shape=[41, 1600, 1408],
order=('conv', 'norm', 'act')),
backbone=dict(type='SECOND',
in_channels=256,
layer_nums=[5, 5],
layer_strides=[1, 2],
out_channels=[128, 256]),
neck=dict(type='SECONDFPN',
in_channels=[128, 256],
upsample_strides=[1, 2],
out_channels=[256, 256]),
bbox_head=dict(type='Anchor3DHead',
num_classes=3,
in_channels=512,
feat_channels=512,
use_direction_classifier=True,
anchor_generator=dict(
type='Anchor3DRangeGenerator',
ranges=[
[0, -40.0, -0.6, 70.4, 40.0, -0.6],
[0, -40.0, -0.6, 70.4, 40.0, -0.6],
[0, -40.0, -1.78, 70.4, 40.0, -1.78],
],
sizes=[[0.6, 0.8, 1.73], [0.6, 1.76, 1.73],
[1.6, 3.9, 1.56]],
rotations=[0, 1.57],
reshape_out=False),
diff_rad_by_sin=True,
bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
loss_cls=dict(type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss',
beta=1.0 / 9.0,
loss_weight=2.0),
loss_dir=dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=0.2)),
# model training and testing settings
train_cfg=dict(
assigner=[
dict( # for Pedestrian
type='MaxIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.35,
neg_iou_thr=0.2,
min_pos_iou=0.2,
ignore_iof_thr=-1),
dict(  # for Cyclist
    type='MaxIoUAssigner',
    iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.35,
neg_iou_thr=0.2,
min_pos_iou=0.2,
ignore_iof_thr=-1),
dict( # for Car
type='MaxIoUAssigner',
iou_calculator=dict(type='BboxOverlapsNearest3D'),
pos_iou_thr=0.6,
neg_iou_thr=0.45,
min_pos_iou=0.45,
ignore_iof_thr=-1),
],
allowed_border=0,
pos_weight=-1,
debug=False),
test_cfg=dict(use_rotate_nms=True,
nms_across_levels=False,
nms_thr=0.01,
score_thr=0.1,
min_bbox_size=0,
nms_pre=100,
max_num=50))
blog.tsx | /** @jsx jsx */
import { jsx } from 'theme-ui'
import { Link } from 'gatsby'
import Layout from '@lekoarts/gatsby-theme-minimal-blog/src/components/layout'
import Listing from './listing'
import useSiteMetadata from '@lekoarts/gatsby-theme-minimal-blog/src/hooks/use-site-metadata'
import replaceSlashes from '@lekoarts/gatsby-theme-minimal-blog/src/utils/replaceSlashes'
import SEO from './seo'
type PostsProps = {
posts: {
slug: string
title: string
date: string
tags?: {
name: string
slug: string
}[]
}[]
}
const Blog = ({ posts }: PostsProps) => {
const { tagsPath, basePath } = useSiteMetadata()
return (
<Layout>
<SEO title="Blog" />
<Listing posts={posts} sx={{ mt: [4, 5] }} showTags={false} />
<Link to={replaceSlashes(`/${basePath}/${tagsPath}`)}>
View all tags
</Link>
</Layout>
)
}
export default Blog
senzori.js | window.addEventListener("deviceorientation", on_device_orientation);
function on_device_orientation(evt){
var alpha=evt.alpha;
var beta=evt.beta;
var gamma=evt.gamma;
document.getElementById("a").innerHTML ="Alpha="+Math.round(alpha).toString();
document.getElementById("b").innerHTML = "Beta="+Math.round(beta).toString();
document.getElementById("c").innerHTML = "Gamma="+Math.round(gamma).toString();
var canvas=document.getElementById("canvas");
var ctx= canvas.getContext("2d");
ctx.clearRect(0,0, canvas.width,canvas.height);
var raza=20;
var centru = {x: canvas.width/2, y: canvas.height/2};
// beta is in [-180, 180] and gamma in [-90, 90] degrees; normalize both so
// the circle stays inside the canvas instead of being scaled off-screen.
ctx.beginPath();
ctx.arc(centru.x + (gamma/90)*(canvas.width/2 - raza),
        centru.y + (beta/180)*(canvas.height/2 - raza),
        raza, 0, 2*Math.PI);
ctx.stroke();
}
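// For context: the handler above assumes a host page exposing three text
// elements and a canvas. A minimal sketch of that markup (an assumption, not
// part of this file) would be:
//
//   <p id="a"></p> <p id="b"></p> <p id="c"></p>
//   <canvas id="canvas" width="300" height="300"></canvas>
//   <script src="senzori.js"></script>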
__init__.py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import routes
class import_(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-vrf - based on the path /vrf/address-family/ip/unicast/ip/import. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__routes',)
_yang_name = 'import'
_rest_name = 'import'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__routes = YANGDynClass(base=YANGListType("src_vrf route_map",routes.routes, yang_name="routes", rest_name="routes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-vrf route-map', extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}), is_container='list', yang_name="routes", rest_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'vrf', u'address-family', u'ip', u'unicast', u'ip', u'import']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'vrf', u'address-family', u'ipv4', u'unicast', u'ip', u'import']
def _get_routes(self):
"""
Getter method for routes, mapped from YANG variable /vrf/address_family/ip/unicast/ip/import/routes (list)
YANG Description: import IPV4 routes
"""
return self.__routes
def _set_routes(self, v, load=False):
"""
Setter method for routes, mapped from YANG variable /vrf/address_family/ip/unicast/ip/import/routes (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_routes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_routes() directly.
YANG Description: import IPV4 routes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("src_vrf route_map",routes.routes, yang_name="routes", rest_name="routes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-vrf route-map', extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}), is_container='list', yang_name="routes", rest_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """routes must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("src_vrf route_map",routes.routes, yang_name="routes", rest_name="routes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-vrf route-map', extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}), is_container='list', yang_name="routes", rest_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)""",
})
self.__routes = t
if hasattr(self, '_set'):
self._set()
def _unset_routes(self):
self.__routes = YANGDynClass(base=YANGListType("src_vrf route_map",routes.routes, yang_name="routes", rest_name="routes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-vrf route-map', extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}), is_container='list', yang_name="routes", rest_name="routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'import IPV4 routes ', u'cli-suppress-mode': None, u'callpoint': u'Ipv4ImportRoutes'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='list', is_config=True)
routes = __builtin__.property(_get_routes, _set_routes)
_pyangbind_elements = {'routes': routes, }
sector.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/elojah/game_02/pkg/space/dto/sector.proto
package dto
import (
fmt "fmt"
dto "github.com/elojah/game_02/pkg/account/dto"
github_com_elojah_game_02_pkg_ulid "github.com/elojah/game_02/pkg/ulid"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type ListSector struct {
Auth dto.Auth `protobuf:"bytes,1,opt,name=Auth,proto3" json:"Auth"`
IDs []github_com_elojah_game_02_pkg_ulid.ID `protobuf:"bytes,2,rep,name=IDs,proto3,customtype=github.com/elojah/game_02/pkg/ulid.ID" json:"IDs"`
}
func (m *ListSector) Reset() { *m = ListSector{} }
func (*ListSector) ProtoMessage() {}
func (*ListSector) Descriptor() ([]byte, []int) {
return fileDescriptor_4a4d7d4a74309654, []int{0}
}
func (m *ListSector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ListSector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ListSector.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ListSector) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListSector.Merge(m, src)
}
func (m *ListSector) XXX_Size() int {
return m.Size()
}
func (m *ListSector) XXX_DiscardUnknown() {
xxx_messageInfo_ListSector.DiscardUnknown(m)
}
var xxx_messageInfo_ListSector proto.InternalMessageInfo
func (m *ListSector) GetAuth() dto.Auth {
if m != nil {
return m.Auth
}
return dto.Auth{}
}
func init() {
proto.RegisterType((*ListSector)(nil), "dto.ListSector")
}
func init() {
proto.RegisterFile("github.com/elojah/game_02/pkg/space/dto/sector.proto", fileDescriptor_4a4d7d4a74309654)
}
var fileDescriptor_4a4d7d4a74309654 = []byte{
// 274 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x49, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcd, 0xc9, 0xcf, 0x4a, 0xcc, 0xd0, 0x4f, 0x4f,
0xcc, 0x4d, 0x8d, 0x37, 0x30, 0xd2, 0x2f, 0xc8, 0x4e, 0xd7, 0x2f, 0x2e, 0x48, 0x4c, 0x4e, 0xd5,
0x4f, 0x29, 0xc9, 0xd7, 0x2f, 0x4e, 0x4d, 0x2e, 0xc9, 0x2f, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9,
0x17, 0x62, 0x4e, 0x29, 0xc9, 0x97, 0xd2, 0x45, 0xd2, 0x9a, 0x9e, 0x9f, 0x9e, 0xaf, 0x0f, 0x96,
0x4b, 0x2a, 0x4d, 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0xa2, 0x47, 0xca, 0x1c, 0xbf, 0x4d, 0x89,
0xc9, 0xc9, 0xf9, 0xa5, 0x79, 0x25, 0x60, 0xbb, 0xa0, 0x6c, 0x88, 0x46, 0xa5, 0x22, 0x2e, 0x2e,
0x9f, 0xcc, 0xe2, 0x92, 0x60, 0xb0, 0x03, 0x84, 0x94, 0xb9, 0x58, 0x1c, 0x4b, 0x4b, 0x32, 0x24,
0x18, 0x15, 0x18, 0x35, 0xb8, 0x8d, 0x38, 0xf5, 0x52, 0x4a, 0xf2, 0xf5, 0x40, 0x02, 0x4e, 0x2c,
0x27, 0xee, 0xc9, 0x33, 0x04, 0x81, 0x25, 0x85, 0xec, 0xb9, 0x98, 0x3d, 0x5d, 0x8a, 0x25, 0x98,
0x14, 0x98, 0x35, 0x78, 0x9c, 0x74, 0x41, 0x12, 0xb7, 0xee, 0xc9, 0xab, 0xe2, 0x77, 0x40, 0x69,
0x4e, 0x66, 0x8a, 0x9e, 0xa7, 0x4b, 0x10, 0x48, 0xa7, 0x93, 0xc3, 0x85, 0x87, 0x72, 0x0c, 0x37,
0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6,
0x15, 0x8f, 0xe4, 0x18, 0x77, 0x3c, 0x92, 0x63, 0x3c, 0xf0, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39,
0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23,
0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x89,
0x0d, 0xec, 0x78, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xea, 0x4e, 0x35, 0x21, 0x61, 0x01,
0x00, 0x00,
}
func (this *ListSector) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*ListSector)
if !ok {
that2, ok := that.(ListSector)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if !this.Auth.Equal(&that1.Auth) {
return false
}
if len(this.IDs) != len(that1.IDs) {
return false
}
for i := range this.IDs {
if !this.IDs[i].Equal(that1.IDs[i]) {
return false
}
}
return true
}
func (this *ListSector) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s = append(s, "&dto.ListSector{")
s = append(s, "Auth: "+strings.Replace(this.Auth.GoString(), `&`, ``, 1)+",\n")
s = append(s, "IDs: "+fmt.Sprintf("%#v", this.IDs)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringSector(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *ListSector) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ListSector) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *ListSector) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.IDs) > 0 {
for iNdEx := len(m.IDs) - 1; iNdEx >= 0; iNdEx-- {
{
size := m.IDs[iNdEx].Size()
i -= size
if _, err := m.IDs[iNdEx].MarshalTo(dAtA[i:]); err != nil {
return 0, err
}
i = encodeVarintSector(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
{
size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintSector(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func encodeVarintSector(dAtA []byte, offset int, v uint64) int {
offset -= sovSector(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func NewPopulatedListSector(r randySector, easy bool) *ListSector {
this := &ListSector{}
v1 := dto.NewPopulatedAuth(r, easy)
this.Auth = *v1
v2 := r.Intn(10)
this.IDs = make([]github_com_elojah_game_02_pkg_ulid.ID, v2)
for i := 0; i < v2; i++ {
v3 := github_com_elojah_game_02_pkg_ulid.NewPopulatedID(r)
this.IDs[i] = *v3
}
if !easy && r.Intn(10) != 0 {
}
return this
}
type randySector interface {
Float32() float32
Float64() float64
Int63() int64
Int31() int32
Uint32() uint32
Intn(n int) int
}
func randUTF8RuneSector(r randySector) rune {
ru := r.Intn(62)
if ru < 10 {
return rune(ru + 48)
} else if ru < 36 {
return rune(ru + 55)
}
return rune(ru + 61)
}
func randStringSector(r randySector) string {
v4 := r.Intn(100)
tmps := make([]rune, v4)
for i := 0; i < v4; i++ {
tmps[i] = randUTF8RuneSector(r)
}
return string(tmps)
}
func randUnrecognizedSector(r randySector, maxFieldNumber int) (dAtA []byte) {
l := r.Intn(5)
for i := 0; i < l; i++ {
wire := r.Intn(4)
if wire == 3 {
wire = 5
}
fieldNumber := maxFieldNumber + r.Intn(100)
dAtA = randFieldSector(dAtA, r, fieldNumber, wire)
}
return dAtA
}
func randFieldSector(dAtA []byte, r randySector, fieldNumber int, wire int) []byte {
key := uint32(fieldNumber)<<3 | uint32(wire)
switch wire {
case 0:
dAtA = encodeVarintPopulateSector(dAtA, uint64(key))
v5 := r.Int63()
if r.Intn(2) == 0 {
v5 *= -1
}
dAtA = encodeVarintPopulateSector(dAtA, uint64(v5))
case 1:
dAtA = encodeVarintPopulateSector(dAtA, uint64(key))
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
case 2:
dAtA = encodeVarintPopulateSector(dAtA, uint64(key))
ll := r.Intn(100)
dAtA = encodeVarintPopulateSector(dAtA, uint64(ll))
for j := 0; j < ll; j++ {
dAtA = append(dAtA, byte(r.Intn(256)))
}
default:
dAtA = encodeVarintPopulateSector(dAtA, uint64(key))
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
}
return dAtA
}
func encodeVarintPopulateSector(dAtA []byte, v uint64) []byte {
for v >= 1<<7 {
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
v >>= 7
}
dAtA = append(dAtA, uint8(v))
return dAtA
}
func (m *ListSector) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.Auth.Size()
n += 1 + l + sovSector(uint64(l))
if len(m.IDs) > 0 {
for _, e := range m.IDs {
l = e.Size()
n += 1 + l + sovSector(uint64(l))
}
}
return n
}
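// sovSector returns the number of bytes needed to varint-encode x: seven
// payload bits fit per byte, and the x|1 guards the zero case so the result
// is always at least one byte.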
func sovSector(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozSector(x uint64) (n int) {
return sovSector(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *ListSector) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ListSector{`,
`Auth:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Auth), "Auth", "dto.Auth", 1), `&`, ``, 1) + `,`,
`IDs:` + fmt.Sprintf("%v", this.IDs) + `,`,
`}`,
}, "")
return s
}
func valueToStringSector(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *ListSector) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSector
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ListSector: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ListSector: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSector
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthSector
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthSector
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IDs", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSector
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthSector
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthSector
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
var v github_com_elojah_game_02_pkg_ulid.ID
m.IDs = append(m.IDs, v)
if err := m.IDs[len(m.IDs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSector(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSector
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthSector
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipSector(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSector
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSector
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSector
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthSector
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupSector
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthSector
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthSector = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowSector = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupSector = fmt.Errorf("proto: unexpected end of group")
)
SimpleStack.js | /**
* @flow
*/
import type {
NavigationScreenProp,
NavigationState,
NavigationStateRoute,
NavigationEventSubscription,
} from 'react-navigation';
import * as React from 'react';
import { Platform, ScrollView, StatusBar } from 'react-native';
import {
createStackNavigator,
SafeAreaView,
withNavigation,
NavigationActions,
StackActions,
} from 'react-navigation';
import invariant from 'invariant';
import SampleText from './SampleText';
import { Button } from './commonComponents/ButtonWithMargin';
import { HeaderButtons } from './commonComponents/HeaderButtons';
const DEBUG = false;
type MyNavScreenProps = {
navigation: NavigationScreenProp<NavigationState>,
banner: React.Node,
};
type BackButtonProps = {
navigation: NavigationScreenProp<NavigationStateRoute>,
};
class MyBackButton extends React.Component<BackButtonProps, any> {
render() {
return (
<HeaderButtons>
<HeaderButtons.Item title="Back" onPress={this._navigateBack} />
</HeaderButtons>
);
}
_navigateBack = () => {
this.props.navigation.goBack(null);
};
}
const MyBackButtonWithNavigation = withNavigation(MyBackButton);
class MyNavScreen extends React.Component<MyNavScreenProps> {
render() {
const { navigation, banner } = this.props;
const { push, replace, popToTop, pop, dismiss } = navigation;
invariant(
push && replace && popToTop && pop && dismiss,
'missing action creators for StackNavigator'
);
return (
<SafeAreaView>
<SampleText>{banner}</SampleText>
<Button
onPress={() => push('Profile', { name: 'Jane' })}
title="Push a profile screen"
/>
<Button
onPress={() =>
navigation.dispatch(
StackActions.reset({
index: 0,
actions: [
NavigationActions.navigate({
routeName: 'Photos',
params: { name: 'Jane' },
}),
],
})
)
}
title="Reset photos"
/>
<Button
onPress={() => navigation.navigate('Photos', { name: 'Jane' })}
title="Navigate to a photos screen"
/>
<Button
onPress={() => replace('Profile', { name: 'Lucy' })}
title="Replace with profile"
/>
<Button onPress={() => popToTop()} title="Pop to top" />
<Button onPress={() => pop()} title="Pop" />
<Button
onPress={() => {
if (navigation.goBack()) {
console.log('goBack handled');
} else {
console.log('goBack unhandled');
}
}}
title="Go back"
/>
<Button onPress={() => dismiss()} title="Dismiss" />
<StatusBar barStyle="default" />
</SafeAreaView>
);
}
}
type MyHomeScreenProps = {
navigation: NavigationScreenProp<NavigationState>,
};
class MyHomeScreen extends React.Component<MyHomeScreenProps> {
static navigationOptions = {
title: 'Welcome',
};
_s0: NavigationEventSubscription;
_s1: NavigationEventSubscription;
_s2: NavigationEventSubscription;
_s3: NavigationEventSubscription;
componentDidMount() {
this._s0 = this.props.navigation.addListener('willFocus', this._onWF);
this._s1 = this.props.navigation.addListener('didFocus', this._onDF);
this._s2 = this.props.navigation.addListener('willBlur', this._onWB);
this._s3 = this.props.navigation.addListener('didBlur', this._onDB);
}
componentWillUnmount() {
this._s0.remove();
this._s1.remove();
this._s2.remove();
this._s3.remove();
}
_onWF = a => {
DEBUG && console.log('_willFocus HomeScreen', a);
};
_onDF = a => {
DEBUG && console.log('_didFocus HomeScreen', a);
};
_onWB = a => {
DEBUG && console.log('_willBlur HomeScreen', a);
};
_onDB = a => {
DEBUG && console.log('_didBlur HomeScreen', a);
};
render() {
const { navigation } = this.props;
return <MyNavScreen banner="Home Screen" navigation={navigation} />;
}
}
type MyPhotosScreenProps = {
navigation: NavigationScreenProp<NavigationState>,
};
class MyPhotosScreen extends React.Component<MyPhotosScreenProps> {
static navigationOptions = {
title: 'Photos',
headerLeft: <MyBackButtonWithNavigation />,
};
_s0: NavigationEventSubscription;
_s1: NavigationEventSubscription;
_s2: NavigationEventSubscription;
_s3: NavigationEventSubscription;
componentDidMount() {
this._s0 = this.props.navigation.addListener('willFocus', this._onWF);
this._s1 = this.props.navigation.addListener('didFocus', this._onDF);
this._s2 = this.props.navigation.addListener('willBlur', this._onWB);
this._s3 = this.props.navigation.addListener('didBlur', this._onDB);
}
componentWillUnmount() {
this._s0.remove();
this._s1.remove();
this._s2.remove();
this._s3.remove();
}
_onWF = a => {
DEBUG && console.log('_willFocus PhotosScreen', a);
};
_onDF = a => {
DEBUG && console.log('_didFocus PhotosScreen', a);
};
_onWB = a => {
DEBUG && console.log('_willBlur PhotosScreen', a);
};
_onDB = a => {
DEBUG && console.log('_didBlur PhotosScreen', a);
};
render() {
const { navigation } = this.props;
return (
<MyNavScreen
banner={`${navigation.getParam('name')}'s Photos`}
navigation={navigation}
/>
);
}
}
const MyProfileScreen = ({ navigation }) => (
<MyNavScreen
banner={`${
navigation.getParam('mode') === 'edit' ? 'Now Editing ' : ''
}${navigation.getParam('name')}'s Profile`}
navigation={navigation}
/>
);
MyProfileScreen.navigationOptions = props => {
const { navigation } = props;
const { state, setParams } = navigation;
const { params } = state;
return {
headerBackImage: params.headerBackImage,
headerTitle: `${params.name}'s Profile!`,
// Render a button on the right side of the header.
// When pressed switches the screen to edit mode.
headerRight: (
<HeaderButtons>
<HeaderButtons.Item
title={params.mode === 'edit' ? 'Done' : 'Edit'}
onPress={() =>
setParams({ mode: params.mode === 'edit' ? '' : 'edit' })
}
/>
</HeaderButtons>
),
};
};
const SimpleStack = createStackNavigator(
{
Home: {
screen: MyHomeScreen,
},
Profile: {
path: 'people/:name',
screen: MyProfileScreen,
},
Photos: {
path: 'photos/:name',
screen: MyPhotosScreen,
},
},
{
// headerLayoutPreset: 'center',
}
);
export default SimpleStack;
index.js | 'use strict';
var isPresent = require('is-present');
var hasClassSelector = require('has-class-selector');
module.exports = function classPrefix(prefix, options) {
options = options || {};
var ignored = options.ignored;
return function prefixRules(styling) {
styling.rules.forEach(function(rule) {
if (rule.rules) {
return prefixRules(rule);
}
if (!rule.selectors) return rule;
rule.selectors = rule.selectors.map(function(selector) {
var shouldIgnore = false;
if (hasClassSelector(selector)) {
// Ensure that the selector doesn't match the ignored list
      if (isPresent(ignored)) {
        shouldIgnore = ignored.some(function(opt) {
if (typeof opt == 'string') {
return selector === opt;
} else if (opt instanceof RegExp) {
return opt.exec(selector);
}
});
}
return shouldIgnore ? selector : selector.split('.').join('.' + prefix);
} else {
return selector;
}
});
});
};
}; | |
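// Illustrative usage sketch (assumes a rework/css-style AST; the variable
// names below are hypothetical, not part of this module):
//
//   var css = require('css');
//   var prefixer = classPrefix('app-', { ignored: [/^\.js-/] });
//   var ast = css.parse('.button {} .js-hook {}');
//   prefixer(ast.stylesheet); // '.button' becomes '.app-button'; '.js-hook' is left alone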
sa.py | import win32com.client as wc
from utils import vstr
from utils import vshort
from utils import vstrarr
from utils import check_error
from bc import SiebelBusObject
from ps import SiebelPropertySet
from bs import SiebelService
PROGID = 'SiebelDataServer.ApplicationObject'
class SiebelApplication(object):
def __init__(self, conf):
self._sa = wc.Dispatch(PROGID)
self._sa.LoadObjects(vstr(conf), vshort(0))
def getLastErrText(self):
return self._sa.GetLastErrText
@check_error
def getBusObject(self, name):
return SiebelBusObject(self._sa.GetBusObject(vstr(name), vshort(0)),
self._sa)
@check_error
def getProfileAttr(self, name):
return self._sa.GetProfileAttr(vstr(name), vshort(0))
@check_error
def getService(self, name):
return SiebelService(self._sa.GetService(vstr(name), vshort(0)),
self._sa)
@check_error
def getSharedGlobal(self, name):
return self._sa.GetSharedGlobal(vstr(name), vshort(0))
@check_error
def invokeMethod(self, methodName, *methodArgs):
return self._sa.InvokeMethod(vstr(methodName),
vstrarr(list(methodArgs)),
vshort(0))
@check_error
def currencyCode(self):
return self._sa.CurrencyCode(vshort(0))
@check_error
def login(self, login, password):
self._sa.Login(vstr(login), vstr(password), vshort(0))
@check_error
def loginId(self):
return self._sa.LoginId(vshort(0))
@check_error
def loginName(self):
return self._sa.LoginName(vshort(0))
@check_error
def newPropertySet(self):
return SiebelPropertySet(self._sa.NewPropertySet(vshort(0)), self._sa)
@check_error
def positionId(self):
return self._sa.PositionId(vshort(0))
@check_error
def positionName(self):
return self._sa.PositionName(vshort(0))
@check_error
def setPositionId(self, value):
self._sa.SetPositionId(vstr(value), vshort(0))
@check_error
def setPositionName(self, value):
self._sa.SetPositionName(vstr(value), vshort(0))
@check_error
def setProfileAttr(self, name, value):
self._sa.SetProfileAttr(vstr(name), vstr(value), vshort(0))
@check_error
def setSharedGlobal(self, name, value):
self._sa.SetSharedGlobal(vstr(name), vstr(value), vshort(0))
@check_error
def trace(self, msg):
self._sa.Trace(vstr(msg), vshort(0))
@check_error
def traceOff(self):
self._sa.TraceOff(vshort(0))
@check_error
def traceOn(self, file_name, category, source):
self._sa.TraceOn(vstr(file_name), vstr(
category), vstr(source), vshort(0))
def evalExpr(self, expr):
bo = self.getBusObject('Employee')
bc = bo.getBusComp('Employee')
return bc.invokeMethod('EvalExpr', expr)
def repositoryId(self):
return self.evalExpr("RepositoryId()")
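# Illustrative usage sketch (assumed; requires the Siebel COM runtime to be
# registered and a valid configuration file -- paths and credentials here are
# placeholders, not part of this module):
#
#   app = SiebelApplication(r'C:\siebel\bin\enu\siebel.cfg')
#   app.login('SADMIN', 'SADMIN')
#   bo = app.getBusObject('Contact')
#   svc = app.getService('Workflow Process Manager')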
html.go | package strutil
import (
"bytes"
"fmt"
"html"
"html/template"
"regexp"
"strings"
"github.com/PuerkitoBio/goquery"
gohtml "golang.org/x/net/html"
)
// RemoveHtmlTag strips HTML tags, replaces common entities, and escapes <>&;'" in the result.
// Note the returned text may contain entities, as it is escaped by HTMLEscapeString and most entities are not translated.
func RemoveHtmlTag(s string) (output string) {
// Shortcut strings with no tags in them
if !strings.ContainsAny(s, "<>") {
output = s
} else {
// First remove line breaks etc as these have no meaning outside html tags (except pre)
// this means pre sections will lose formatting... but will result in less unintentional paras.
s = strings.Replace(s, "\n", "", -1)
// Then replace line breaks with newlines, to preserve that formatting
s = strings.Replace(s, "</p>", "\n", -1)
s = strings.Replace(s, "<br>", "\n", -1)
s = strings.Replace(s, "</br>", "\n", -1)
s = strings.Replace(s, "<br/>", "\n", -1)
s = strings.Replace(s, "<br />", "\n", -1)
// Walk through the string removing all tags
b := bytes.NewBufferString("")
inTag := false
for _, r := range s {
switch r {
case '<':
inTag = true
case '>':
inTag = false
default:
if !inTag {
b.WriteRune(r)
}
}
}
output = b.String()
}
// Remove a few common harmless entities, to arrive at something more like plain text
	output = strings.Replace(output, "&lsquo;", "'", -1)
	output = strings.Replace(output, "&rsquo;", "'", -1)
	output = strings.Replace(output, "&ldquo;", "\"", -1)
	output = strings.Replace(output, "&rdquo;", "\"", -1)
	output = strings.Replace(output, "&nbsp;", " ", -1)
	output = strings.Replace(output, "&quot;", "\"", -1)
	output = strings.Replace(output, "&#39;", "'", -1)
// Translate some entities into their plain text equivalent (for example accents, if encoded as entities)
output = html.UnescapeString(output)
// In case we have missed any tags above, escape the text - removes <, >, &, ' and ".
output = template.HTMLEscapeString(output)
	// After processing, remove some harmless entities &amp;, &#39; and &quot; which are encoded by HTMLEscapeString
	output = strings.Replace(output, "&quot;", "\"", -1)
	output = strings.Replace(output, "&#39;", "'", -1)
	output = strings.Replace(output, "&amp;amp; ", "& ", -1) // NB space after
	output = strings.Replace(output, "&amp; ", "& ", -1)     // NB space after
	output = strings.Replace(output, "&amp;", "&", -1)
return output
}
func RemoveHtmlTagExceptBlank(s string) (output string) {
// Shortcut strings with no tags in them
if !strings.ContainsAny(s, "<>") {
output = s
} else {
		// Unlike RemoveHtmlTag, keep existing blank lines and line breaks intact.
		// Replace closing p tags and br variants with newlines, to preserve that formatting
s = strings.Replace(s, "</p>", "\n", -1)
s = strings.Replace(s, "<br>", "\n", -1)
s = strings.Replace(s, "</br>", "\n", -1)
s = strings.Replace(s, "<br/>", "\n", -1)
s = strings.Replace(s, "<br />", "\n", -1)
// Walk through the string removing all tags
b := bytes.NewBufferString("")
inTag := false
for _, r := range s {
switch r {
case '<':
inTag = true
case '>':
inTag = false
default:
if !inTag |
}
}
output = b.String()
}
// Remove a few common harmless entities, to arrive at something more like plain text
	output = strings.Replace(output, "&lsquo;", "'", -1)
	output = strings.Replace(output, "&rsquo;", "'", -1)
	output = strings.Replace(output, "&ldquo;", "\"", -1)
	output = strings.Replace(output, "&rdquo;", "\"", -1)
	output = strings.Replace(output, "&nbsp;", " ", -1)
	output = strings.Replace(output, "&quot;", "\"", -1)
	output = strings.Replace(output, "&#39;", "'", -1)
// Translate some entities into their plain text equivalent (for example accents, if encoded as entities)
output = html.UnescapeString(output)
// In case we have missed any tags above, escape the text - removes <, >, &, ' and ".
output = template.HTMLEscapeString(output)
	// After processing, remove some harmless entities &amp;, &#39; and &quot; which are encoded by HTMLEscapeString
	output = strings.Replace(output, "&quot;", "\"", -1)
	output = strings.Replace(output, "&#39;", "'", -1)
	output = strings.Replace(output, "&amp; ", "& ", -1)     // NB space after
	output = strings.Replace(output, "&amp;amp; ", "& ", -1) // NB space after
return output
}
func RemoveSpecialCharacters(s string) string {
s = strings.Replace(s, "\u2028", "", -1)
s = strings.Replace(s, "\u2029", "", -1)
return s
}
func RemoveEmptyPTag(s string) string {
s = strings.Replace(s, "<p> </p>", "", -1)
return s
}
func Html2(s *goquery.Selection) (ret string, e error) {
// Since there is no .innerHtml, the HTML content must be re-created from
// the nodes using html.Render.
var buf bytes.Buffer
if len(s.Nodes) > 0 {
for c := s.Nodes[0]; c != nil; c = c.NextSibling {
e = gohtml.Render(&buf, c)
if e != nil {
return
}
}
ret = buf.String()
}
return
}
func RemoveStylesOfHtmlTag(html string, keptStyles ...string) string {
styleTagR := regexp.MustCompile(`style="([^=>]*;?)*"`)
keptStyleR := regexp.MustCompile(fmt.Sprintf("^%s", strings.Join(keptStyles, "|")))
hasKeptStyles := len(keptStyles) != 0
matches := styleTagR.FindAllStringSubmatch(html, -1)
var toCleanStyles = make(map[string]struct{})
for _, groups := range matches {
if len(groups)!=2{
continue
}
styles := strings.Split(groups[1], ";")
for _, style := range styles{
trimStr := strings.ToLower(strings.TrimSpace(style))
if trimStr == "" || (hasKeptStyles && keptStyleR.MatchString(trimStr)) {
continue
}
toCleanStyles[style] = struct{}{}
}
}
cleanedHtml := html
for cleanStyle := range toCleanStyles{
escapedStyle := strings.Replace(cleanStyle, "(", `\(`, -1)
escapedStyle = strings.Replace(escapedStyle, ")", `\)`, -1)
cleanR := regexp.MustCompile(fmt.Sprintf("%s[;]?", escapedStyle))
cleanedHtml = cleanR.ReplaceAllString(cleanedHtml, "")
}
return cleanedHtml
} | {
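// Illustrative intent of RemoveStylesOfHtmlTag (a sketch, not a guaranteed
// output):
//
//	RemoveStylesOfHtmlTag(`<p style="color:red; font-size:12px">hi</p>`, "font-size")
//
// is meant to drop `color:red` while leaving the font-size declaration in place.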
b.WriteRune(r)
} |
loader.go | package httpfs
import (
"io"
"net/http"
"os"
"github.com/CloudyKit/jet/v4"
)
type httpFileSystemLoader struct {
fs http.FileSystem
}
// NewLoader returns an initialized loader serving the passed http.FileSystem.
func NewLoader(fs http.FileSystem) jet.Loader {
return &httpFileSystemLoader{fs: fs}
}
// Open opens the file via the internal http.FileSystem. It is the caller's duty to close the file.
func (l *httpFileSystemLoader) Open(name string) (io.ReadCloser, error) {
if l.fs == nil {
return nil, &os.PathError{Op: "open", Path: name, Err: os.ErrNotExist}
}
return l.fs.Open(name)
}
// Exists checks if the template with the given name exists by walking the list of template paths.
// It returns the full path of the template and true if the template file was found.
func (l *httpFileSystemLoader) Exists(name string) (string, bool) {
if l.fs == nil {
return "", false
}
if f, err := l.Open(name); err == nil {
f.Close()
return name, true
	}
	return "", false
}
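// Minimal usage sketch (illustrative; the directory layout is an assumption):
//
//	set := jet.NewSet(NewLoader(http.Dir("./views")))
//	tmpl, err := set.GetTemplate("index.jet")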
errorLine.ts | /*
* A simple error line entity
*/
export interface ErrorLine {
  id: number;
  label: string;
  value: string;
  valueStyle: string;
}
main.rs | // Silence some warnings so they don't distract from the exercise.
#![allow(dead_code, unused_variables)]
use ding_machine::*;
fn main() {
let coords: (f32, f32) = (6.3, 15.0);
// 1. Pass parts of `coords` to the `print_difference` function. This should show the difference
// between the two numbers in coords when you do `cargo run`. Use tuple indexing.
//
// The `print_difference` function is defined below the `main` function. It may help if you look
// at how it is defined.
//
print_difference( coords.0, coords.1 ); // Uncomment and finish this line
// 2. We want to use the `print_array` function to print coords...but coords isn't an array!
// Create an array of type [f32; 2] and initialize it to contain the
// information from coords. Uncomment the print_array line and run the code.
//
let coords_arr = [coords.0, coords.1]; // create an array literal out of parts of `coord` here
print_array(coords_arr); // and pass it in here (this line doesn't need to change)
let series = [1, 1, 2, 3, 5, 8, 13];
// 3. Make the `ding` function happy by passing it the value 13 out of the `series` array.
// Use array indexing. Done correctly, `cargo run` will produce the additional output
// "Ding, you found 13!"
//
ding(series[6]);
let mess = ([3, 2], 3.14, [(false, -3), (true, -100)], 5, "candy");
// 4. Pass the `on_off` function the value `true` from the variable `mess`. Done correctly,
// `cargo run` will produce the additional output "Lights are on!" I'll get you started:
//
on_off(mess.2[1].0);
// 5. What a mess -- functions in a binary! Let's get organized!
//
// - Make a library file (src/lib.rs)
// - Move all the functions (except main) into the library
// - Make all the functions public with `pub`
// - Bring all the functions into scope using use statements. Remember, the name of the library
// is defined in Cargo.toml. You'll need to know that to `use` it.
//
// `cargo run` should produce the same output, only now the code is more organized. 🎉
// Challenge: Uncomment the line below, run the code, and examine the
// output. Then go refactor the print_distance() function according to the
// instructions in the comments inside that function.
print_distance(coords);
}
|
|
string_utils.py | """
Utility module to manipulate strings.
"""
import re
import types
__author__ = "Jenson Jose"
__email__ = "[email protected]"
__status__ = "Alpha"
class StringUtils:
"""
Utility class containing methods for manipulation of strings.
"""
def __init__(self):
pass
@staticmethod
def is_blank(string):
"""
        Checks if the supplied string is blank.
:param string: The string to be verified.
:return: True if string is blank, False otherwise.
:rtype: bool
"""
if string.strip():
return False
return True
@staticmethod
def join_list_elements(string_list, join_char=""):
"""
Joins list elements into a single string, using the joining character.
:param string_list: The list of strings to be joined.
:param join_char: The character to use for joining the strings.
:return: The combined string of joined list elements, if string_list is a valid list, False otherwise.
:rtype: str
"""
if isinstance(string_list, types.ListType):
if len(string_list) > 0:
return str(join_char).join(string_list)
return False
@staticmethod
def remove_lines(text, line_count):
"""
Removes specified number of lines from beginning or end of supplied text.
:param text: Text from which lines are to be removed.
:param line_count: The number of lines to remove.
+ve value starts removal from beginning and -ve value start removal from end.
:return: The updated text with removed lines.
:rtype: str
"""
        text_lines = text.split("\n")
        if len(text_lines) > 0:
            if line_count > 0:
                # Drop lines from the beginning; slicing avoids mutating the
                # list while indexing into it, which skips lines.
                text_lines = text_lines[line_count:]
            elif line_count < 0:
                # Drop lines from the end.
                text_lines = text_lines[:line_count]
        updated_text = StringUtils.join_list_elements(text_lines, "\n")
        return updated_text
@staticmethod
def remove_lines_range(text, start_line_number, end_line_number):
"""
Removes a range of lines from the supplied text.
:param text: Text from which lines are to be removed.
:param start_line_number: Starting line number where removal will begin.
:param end_line_number: Ending line number where removal will end.
:return: The updated text with removed lines.
:rtype: str
""" |
        text_lines = text.split("\n")
        if len(text_lines) > 0:
            # Delete the whole range at once; removing items one by one
            # shifts the indices of the remaining lines.
            del text_lines[start_line_number:end_line_number]
        updated_text = StringUtils.join_list_elements(text_lines, "\n")
        return updated_text
@staticmethod
def remove_lines_list(text, line_list):
"""
Removes specified lines from the supplied text.
:param text: Text from which lines are to be removed.
:param line_list: List containing specific lines to remove from the text.
:return: The updated text with removed lines.
:rtype: str
"""
        text_lines = text.split("\n")
        if len(text_lines) > 0:
            # Delete from the highest index down so earlier deletions do not
            # shift the positions of lines still to be removed.
            for line_index in sorted(line_list, reverse=True):
                del text_lines[line_index]
        updated_text = StringUtils.join_list_elements(text_lines, "\n")
        return updated_text
@staticmethod
def remove_leading_blanks(string):
"""
Removes leading blank spaces from supplied string.
:param string: String from which leading blanks are to be removed.
:return: String with leading blanks removed.
:rtype: str
"""
        text_lines = string.split("\n")
        # Pop from the front instead of calling remove() while iterating,
        # which skips elements and removes by value rather than position.
        while text_lines and StringUtils.is_blank(text_lines[0]):
            text_lines.pop(0)
        updated_string = StringUtils.join_list_elements(text_lines, "\n")
        return updated_string
@staticmethod
def remove_trailing_blanks(string):
"""
Removes trailing blank spaces from supplied string.
:param string: String from which trailing blanks are to be removed.
:return: String with trailing blanks removed.
:rtype: str
"""
        text_lines = string.split("\n")
        # Pop from the back; reversing and removing while iterating both
        # mutates the list under the loop and removes by value.
        while text_lines and StringUtils.is_blank(text_lines[-1]):
            text_lines.pop()
        updated_string = StringUtils.join_list_elements(text_lines, "\n")
        return updated_string
@staticmethod
def extract_line(text, line_number):
"""
Extracts specified line from the supplied text based on line number.
:param text: Text from which line is to be extracted.
:param line_number: Line number to be extracted.
:return: The extracted line.
:rtype: str
"""
text_lines = text.split("\n")
if len(text_lines) > 0:
return text_lines[line_number - 1]
return False
@staticmethod
def get_line_number(text, string):
"""
Determines line number of first occurrence of given string within given text.
:param text: Text in which 'string' is to be searched.
:param string: Data to locate within given text.
:return: The extracted line number.
:rtype: str
"""
text_lines = text.split("\n")
# look for exact match in the supplied text
line_ctr = 1
for text_line in text_lines:
if StringUtils.equals_ignore_case(text_line, string):
return line_ctr
line_ctr += 1
line_ctr = 1
for text_line in text_lines:
if StringUtils.check_pattern(text_line, string):
return line_ctr
line_ctr += 1
return False
@staticmethod
def check_pattern(text, pattern):
"""
Looks for supplied pattern in given text.
:param text: Text in which 'pattern' is to be searched.
:param pattern: Pattern expression to locate within given text.
:return: True if match was found, False otherwise.
:rtype: bool
"""
compiled_pattern = re.compile(pattern)
if len(compiled_pattern.findall(text)) > 0:
return True
return False
@staticmethod
def equals_ignore_case(string1, string2):
"""
Compares 2 given strings for equality regardless of case.
:param string1: String to be compared for equality.
:param string2: String to be compared for equality.
:return: True, if matching, False otherwise
:rtype: bool
"""
return string1.lower() == string2.lower()
@staticmethod
def equals_match_case(string1, string2):
"""
Compares 2 given strings for equality.
:param string1: String to be compared for equality.
:param string2: String to be compared for equality.
:return: True, if matching, False otherwise
:rtype: bool
"""
return string1 == string2 | |
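# Illustrative behaviour of the helpers above (assumed sample data):
#
#   text = "one\ntwo\nthree\nfour"
#   StringUtils.remove_lines(text, 1)         # -> "two\nthree\nfour"
#   StringUtils.remove_lines(text, -1)        # -> "one\ntwo\nthree"
#   StringUtils.extract_line(text, 2)         # -> "two" (1-based line numbers)
#   StringUtils.get_line_number(text, "TWO")  # -> 2 (case-insensitive match)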
main.py | from machine import ADC, Pin
import time
class LDR:
"""This class read a value from a light dependent resistor (LDR)"""
    def __init__(self, pin, min_value=0, max_value=100):
"""
Initializes a new instance.
:parameter pin A pin that's connected to an LDR.
:parameter min_value A min value that can be returned by value() method.
:parameter max_value A max value that can be returned by value() method.
"""
if min_value >= max_value:
raise Exception('Min value is greater or equal to max value')
# initialize ADC (analog to digital conversion)
self.adc = ADC(Pin(pin))
# set 11dB input attenuation (voltage range roughly 0.0v - 3.6v)
self.adc.atten(ADC.ATTN_11DB)
self.min_value = min_value
self.max_value = max_value
def read(self):
"""
Read a raw value from the LDR.
:return A value from 0 to 4095.
"""
return self.adc.read()
def value(self):
"""
Read a value from the LDR in the specified range.
:return A value from the specified [min, max] range.
"""
        # Offset by min_value so the result really spans [min_value, max_value].
        return self.min_value + (self.max_value - self.min_value) * self.read() / 4095
# initialize an LDR
ldr = LDR(34)
while True:
# read a value from the LDR
value = ldr.value()
print('value = {}'.format(value))
# a little delay
time.sleep(3)
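# Scaling sanity check (illustrative): with the 12-bit ADC range 0..4095 and the
# defaults min_value=0, max_value=100, a raw reading of 2048 maps to
# value = min + (max - min) * raw / 4095 ~= 50.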
export.component.ts | import { Component } from '@angular/core';
import { MatBottomSheetRef } from '@angular/material/bottom-sheet';
@Component({
selector: 'magic-bean-export',
templateUrl: './export.component.html',
styleUrls: ['./export.component.scss'],
})
export class ExportComponent {
fromDateValue: Date;
toDateValue: Date;
constructor(private _bottomSheetRef: MatBottomSheetRef<ExportComponent>) { }
  openLink(): void {
    this._bottomSheetRef.dismiss({ fromDate: this.fromDateValue, toDate: this.toDateValue });
  }

  onDateChange(range: 'min' | 'max', date: Date) {
    switch (range) {
      case 'min':
        this.toDateValue = date;
        break;
      case 'max':
        this.fromDateValue = date;
        break;
    }
  }
}
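// Illustrative caller sketch (assumed; `bottomSheet` would be an injected
// MatBottomSheet instance elsewhere in the app):
//
//   this.bottomSheet.open(ExportComponent)
//     .afterDismissed()
//     .subscribe(range => {
//       // range is the { fromDate, toDate } object passed to dismiss() above
//     });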
tokens.go | package tokens
import (
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
prng "github.com/EricLagerg/go-prng/xorshift"
"github.com/golang/glog"
)
const (
HashSize = 32
// We exclude '0' here because casting a byte slice/array that contains
// a '0' to a string will cause it to have a null byte. On Unix --
// and I'm assuming Windows -- platforms a null byte denotes the end
// of the string. This causes syscalls like Create or Open to break
// because the string is interpreted incorrectly.
charTable = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ123456789"
tableLen = uint64(len(charTable) - 1)
)
var (
CSRFKey = []byte("0655A28CAAEB0448132026D863771C5D")
ErrGeneratedBadToken = errors.New("Generated a token with an invalid length.")
)
// NewAuthToken returns a hex-encoded byte slice with a length of 64.
// The token is generated by reading from the OS' PRNG source, usually
// /dev/urandom on Unix-like systems and the CryptGenRandom API on
// Windows.
func NewAuthToken() []byte {
return newSecureToken()
}
func NewSessionID() string {
return string(newSecureToken())
}
// NewCSRFToken returns a byte slice with a new base-64 encoded CSRF token,
// only valid for a user's session.
func NewCSRFToken(id string) []byte {
mac := hmac.New(sha256.New, CSRFKey)
_, err := mac.Write([]byte(id))
if err != nil {
glog.Fatalln(err)
}
src := mac.Sum(nil)
buf := make([]byte, base64.StdEncoding.EncodedLen(len(src)))
base64.URLEncoding.Encode(buf, src)
return buf
}
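// Illustrative verification sketch (an assumption -- no verify helper exists in
// this file): the server can recompute NewCSRFToken(sessionID) and compare it
// against the submitted token with hmac.Equal to avoid timing leaks.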
func newSecureToken() []byte {
buf := make([]byte, HashSize)
n, err := io.ReadFull(rand.Reader, buf)
if err != nil {
glog.Fatalln(err)
}
if n != HashSize {
glog.Fatalln(ErrGeneratedBadToken)
}
tok := make([]byte, hex.EncodedLen(len(buf)))
if hex.Encode(tok, buf) != 64 {
glog.Fatalln(ErrGeneratedBadToken)
}
return tok
}
var (
r = new(prng.Shift128Plus) // Our xorshift PRNG
_u uint64 // Prevent Go from optimizing out the warmup.
)
func init() {
r.Seed()
// Warm up the PRNG. It doesn't necessarily need it, but it won't
// hurt.
var n uint64
fmt.Println("Starting PRNG warmup. MAKE SURE TO CHANGE THIS.")
for i := 0; i < 1; i++ { //e9; i++ {
n = r.Next()
}
_u = n
fmt.Printf("Ended PRNG warmup with value of %d.\n", n)
}
// Create fast random strings for job IDs.
// Made up to 29 million unique strings before I quit testing.
func NewJobID() string {
var buf [32]byte
for i := HashSize - 1; i >= 0; i-- {
key := r.Next() % tableLen
		buf[i] = charTable[key]
	}
	return string(buf[:])
}
|
lib.rs | use swap_or_not_shuffle::shuffle_list;
use std::{slice, ptr};
use libc::{uint8_t, size_t};
#[no_mangle]
pub fn shuffle_list_c(
input_ptr: *mut usize,
input_size: size_t,
    seed_ptr: *mut uint8_t) -> bool {
let input: &[usize] = unsafe {
slice::from_raw_parts(input_ptr, input_size as usize)
};
    // NOTE: Vec::from_raw_parts takes ownership of the seed buffer, so it is
    // freed when `seed` drops; the C caller must not reuse or free seed_ptr.
    let seed = unsafe {
        Vec::from_raw_parts(seed_ptr, 32, 32)
    };
return match shuffle_list(input.to_vec(), 90, &seed, false) {
None => false,
Some(x) => {
unsafe {
ptr::copy_nonoverlapping(x.as_ptr(), input_ptr, input_size);
}
true
}
}
}
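// Illustrative C-side contract (an assumption based on the signature above):
// the caller supplies a mutable array of `input_size` usize values and a
// 32-byte seed buffer; on success the shuffled order is written back in place
// through `input_ptr` and the function returns true.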
|
create_database_and_collection.rs | #![cfg(feature = "mock_transport_framework")]
use azure_core::Context;
use azure_cosmos::prelude::*;
use futures::stream::StreamExt;
use std::error::Error;
mod setup;
type BoxedError = Box<dyn Error + Send + Sync>;
#[tokio::test]
async fn create_database_and_collection() -> Result<(), BoxedError> {
env_logger::init();
let client = setup::initialize("create_database_and_collection")?;
let database_name = "test-create-database-and-collection";
let context = Context::new();
// create database!
log::info!("Creating a database with name '{}'...", database_name);
let db = client.create_database(&database_name).into_future().await?;
log::info!("Successfully created a database");
log::debug!("The create_database response: {:#?}", db);
assert_eq!(db.database.id, database_name);
// create collection!
let db_client = client.clone().into_database_client(database_name.clone());
let collection_name = "panzadoro";
log::info!("Creating a collection with name '{}'...", collection_name);
let collection = db_client
.create_collection(
context.clone(),
collection_name,
CreateCollectionOptions::new("/id"),
)
.await?;
assert_eq!(collection.collection.id, collection_name);
log::info!("Successfully created a collection");
log::debug!("The create_collection response: {:#?}", collection);
// list collections!
log::info!("Listing all collections...");
let collections =
Box::pin(db_client.list_collections(context.clone(), ListCollectionsOptions::new()))
.next()
.await
.expect("No collection page")?;
assert_eq!(collections.count, 1);
log::info!("Successfully listed collections");
log::debug!("The list_collection response: {:#?}", collections);
// delete database
log::info!("Deleting the database...");
let deleted_database = db_client
.delete_database(context.clone(), DeleteDatabaseOptions::new())
.await?;
log::info!("Successfully deleted database");
log::debug!("The delete_database response: {:#?}", deleted_database);
Ok(())
}
ae7ca721953bebc6e814c651a0786102.js | load("201224b0d1c296b45befd2285e95dd42.js");
// |jit-test| --ion-offthread-compile=off;
// We disable any off-main thread compilation, and set a definite trigger for
// Ion compilation, such that we can garantee that we would OSR into the inner
// loop before we reach the end of the loop.
setJitCompilerOption("ion.warmup.trigger", 30);
function f(n) {
while (!inIon()) {
var inner = 0;
let x = {};
for (var i = 0; i < n; i++) {
inner += inIon() == true ? 1 : 0;
if (inner <= 1)
bailout();
}
assertEq(inner != 1, true);
}
}
// Iterate enough to ensure that we OSR in this inner loop.
f(300);
OneSpanAnalysis_Mdl.py | import numpy as np
import numpy.linalg as la
from MdlUtilities import Field, FieldList
import MdlUtilities as mdl
def get_osaCasing_fields():
OD = Field(2030)
ID = Field(2031)
Weight = Field(2032)
Density = Field(2039)
E = Field(2040)
osaCasing_fields = FieldList()
osaCasing_fields.append( OD )
osaCasing_fields.append( ID )
osaCasing_fields.append( Weight )
osaCasing_fields.append( Density )
osaCasing_fields.append( E )
return osaCasing_fields
def get_osaCent_fields():
Type = Field(2049)
IPOD = Field(2009)
CentOD = Field(2011)
#CentID = Field(2012)
ResF_SO67 = Field(2018)
minResF = Field(2017)
SO_minResF = Field(2019)
ResF_SO67.set_representation('Res. Force @ SO=67%')
minResF.set_representation('minimum Res. Force')
SO_minResF.set_representation('StandOff @ min. Res. F.')
osaCent_fields = FieldList()
osaCent_fields.append( Type )
osaCent_fields.append( IPOD )
osaCent_fields.append( CentOD )
#osaCent_fields.append( CentID )
osaCent_fields.append( ResF_SO67 )
osaCent_fields.append( minResF )
osaCent_fields.append( SO_minResF )
return osaCent_fields
def get_osaWellbore_fields():
HoleID = Field(2010)
MaxSpan = Field(2061)
MudIPDensity = Field(2077)
MudOPDensity = Field(2077)
HoleID.set_representation('Hole ID')
HoleID.set_abbreviation('HoleID')
MaxSpan.set_representation('Max span')
MaxSpan.set_abbreviation('MaxSpan')
MudIPDensity.set_representation('Mud inside pipe')
MudIPDensity.set_abbreviation('MudIPDensity')
MudOPDensity.set_representation('Mud in annulus')
MudOPDensity.set_abbreviation('MudOPDensity')
osaWellbore_fields = FieldList()
osaWellbore_fields.append( HoleID )
osaWellbore_fields.append( MaxSpan )
osaWellbore_fields.append( MudIPDensity )
osaWellbore_fields.append( MudOPDensity )
return osaWellbore_fields
def get_osaOutputdata1_fields():
clearanceA = Field(2073, altBg=True, altFg=True)
clearanceB = Field(2073, altBg=True, altFg=True)
clearanceM = Field(2073, altBg=True, altFg=True)
sideForceA = Field(2074, altBg=True, altFg=True)
sideForceB = Field(2074, altBg=True, altFg=True)
sideForceM = Field(2074, altBg=True, altFg=True)
standoffA = Field(2078, altBg=True, altFg=True)
standoffB = Field(2078, altBg=True, altFg=True)
standoffM = Field(2078, altBg=True, altFg=True)
clearanceA.set_representation('Annular clearance @ cent. A')
clearanceA.set_abbreviation('ClearanceA')
clearanceB.set_representation('Annular clearance @ cent. B')
clearanceB.set_abbreviation('ClearanceB')
clearanceM.set_representation('Annular clearance @ mid span')
clearanceM.set_abbreviation('ClearanceM')
sideForceA.set_representation('Side force @ cent. A')
sideForceA.set_abbreviation('SideForceA')
sideForceB.set_representation('Side force @ cent. B')
sideForceB.set_abbreviation('SideForceB')
sideForceM.set_representation('Side force @ mid span')
sideForceM.set_abbreviation('SideForceM')
standoffA.set_representation('Standoff @ cent. A')
standoffA.set_abbreviation('StandoffA')
standoffB.set_representation('Standoff @ cent. B')
standoffB.set_abbreviation('StandoffB')
standoffM.set_representation('Standoff @ mid span')
standoffM.set_abbreviation('StandoffM')
osaOutputdata1_fields = FieldList()
osaOutputdata1_fields.append( clearanceA )
osaOutputdata1_fields.append( clearanceB )
osaOutputdata1_fields.append( clearanceM )
osaOutputdata1_fields.append( sideForceA )
osaOutputdata1_fields.append( sideForceB )
osaOutputdata1_fields.append( sideForceM )
osaOutputdata1_fields.append( standoffA )
osaOutputdata1_fields.append( standoffB )
osaOutputdata1_fields.append( standoffM )
return osaOutputdata1_fields
def get_osaOutputdata2_fields():
axialForce = Field(2075, altBg=True, altFg=True)
deflection = Field(2076, altBg=True, altFg=True)
wClearance = Field(2073, altBg=True, altFg=True)
wStandoff = Field(2078, altBg=True, altFg=True)
axialForce.set_representation('Axial extra force @ top')
axialForce.set_abbreviation('AxialForce')
deflection.set_representation('Max. pipe deflection')
deflection.set_abbreviation('MaxDeflection')
wClearance.set_representation('Mean wellbore clearance')
wClearance.set_abbreviation('WellboreClearance')
wStandoff.set_representation('Mean wellbore standoff')
wStandoff.set_abbreviation('WellboreStandoff')
osaOutputdata2_fields = FieldList()
osaOutputdata2_fields.append( axialForce )
osaOutputdata2_fields.append( deflection )
osaOutputdata2_fields.append( wClearance )
osaOutputdata2_fields.append( wStandoff )
return osaOutputdata2_fields
def get_casingDeflectionCurve(self):
# Equation(s) Reference 1:
# Hans C. Juvkam-Wold, Jiang Wu. Casing Deflection and Centralizer Spacing Calculations.
# SPE Drilling Engineering (December 1992).
# Equation(s) Reference 2:
# Hans C. Juvkam-Wold, Richard L. Baxter. Discussion of Optimal Spacing for Casing Centralizers.
# SPE Drilling Engineering (December 1988).
# Equation(s) Reference 3:
# Carlos F. H. Fonseca, Jacques Braile. Optimizing of Centralizer Distribution.
# SPE Latin American Petroleum Engineering Conference (October 1990).
self.osaCasing_fields.referenceUnitConvert_fields()
self.osaCentA_fields.referenceUnitConvert_fields()
self.osaCentB_fields.referenceUnitConvert_fields()
self.osaWellbore_fields.referenceUnitConvert_fields()
Rot = lambda φ: np.array( [[np.cos(φ),-np.sin(φ)],[np.sin(φ),np.cos(φ)]] )
dH = self.osaWellbore_fields.HoleID[0]
L = self.osaWellbore_fields.MaxSpan[0]*self.osaSpacing_slider.sliderPosition()/100
ρe = self.osaWellbore_fields.MudOPDensity[0]
ρi = self.osaWellbore_fields.MudIPDensity[0]
ρs = self.osaCasing_fields.Density[0]
E = self.osaCasing_fields.E[0]
w = self.osaCasing_fields.PW[0]
D = self.osaCasing_fields.OD[0]
d = self.osaCasing_fields.ID[0]
Type_A = self.osaCentA_fields.Type[0]
F_So67_A = self.osaCentA_fields.ResF_SO67[0]
minF_A = self.osaCentA_fields.minResF[0]
So_minF_A = self.osaCentA_fields.SO_minResF[0]
DA = self.osaCentA_fields.COD[0]
dA = self.osaCentA_fields.IPOD[0]
Type_B = self.osaCentB_fields.Type[0]
F_So67_B = self.osaCentB_fields.ResF_SO67[0]
minF_B = self.osaCentB_fields.minResF[0]
So_minF_B = self.osaCentB_fields.SO_minResF[0]
DB = self.osaCentB_fields.COD[0]
dB = self.osaCentB_fields.IPOD[0]
	#kA = ResFA/(DA/2-0.335*(DA-D)) # This is how the spring coefficients are computed ( 0.335=0.67/2 )
#kB = ResFB/(DB/2-0.335*(DB-D))
for field in self.osaWellbore_fields:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
for field in self.osaCasing_fields:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
for field in self.osaCentA_fields[1:]:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
for field in self.osaCentB_fields[1:]:
if field[0]<0:
raise mdl.LogicalError('Every parameter should be greater than zero.')
if dA!=D or dB!=D or dH<=D:
raise mdl.LogicalError('The selected devices are not size-consistent.')
θ = np.pi*self.osaInclination_slider.sliderPosition()/180
	I = np.pi/64*(D**4-d**4) # [Ref.3] Moment of inertia, as distinct from the polar moment of inertia.
F = 30000 # [Ref.1]
Radio = L*1e6
aspr = L*0.02
buoyancyFactor = mdl.calculate_buoyancyFactor( OD=D, ID=d, ρs=ρs, ρe=ρe, ρi=ρi ) # [Ref.2]
w *= buoyancyFactor
fC = w*L*np.sin(θ)/2
if Type_A=='Resin': #mdl.isNoneEntry(ResFA):
yA = 0
dA = d
else:
kA = 2*(F_So67_A-minF_A)/(So_minF_A-0.67)/(DA-dA)
yA = fC/kA if (DA<dH) else fC/kA/2
if Type_B=='Resin': #mdl.isNoneEntry(ResFB):
yB = 0
dB = d
else:
kB = 2*(F_So67_B-minF_B)/(So_minF_B-0.67)/(DB-dB)
yB = fC/kB if (DB<dH) else fC/kB/2
R = D/2
rH = dH/2
rA_min = R+(DA/2-R)*0.1
rB_min = R+(DB/2-R)*0.1
rA = (DA/2-yA) if (DA<dH) else (rH-yA)
rB = (DB/2-yB) if (DB<dH) else (rH-yB)
rA = rA_min if (rA<=rA_min) else rA
rB = rB_min if (rB<=rB_min) else rB
α = np.arctan( (rB-rA)/L )
Lα = L/np.cos(α)
x = np.linspace( 0, Lα, 101 )
K = np.sqrt(F/E/I)
y = (Lα/2/Radio/K + w*Lα*np.sin(θ)/2/K/F)*( (np.cosh(K*x)-1)/np.tanh(K*Lα/2) + K*x - np.sinh(K*x) ) - w*np.sin(θ)/2/F*x**2 # [Ref.1]
Rα = Rot(α)
xy = np.array([x,y])
x,y = np.dot(Rα,xy)
Δy = rH-rB
y += Δy
cH = rH-R
cA = rA-R
cB = rB-R
indexes = y>cH
y[indexes] = cH
indexes = y<-cH
y[indexes] =-cH
cy = cH-y
rM = rH-y[50]
if y[50]==cH:
fM = fC
fC = 0
else:
fM = 0
cM = rM-R
x -= L/2
yoh = y*0
ohc = np.array([x, yoh])
ohp = np.array([x, (yoh+rH)*aspr])
ohm = np.array([x, (yoh-rH)*aspr])
xyc = np.array([x, y*aspr])
xyp = np.array([x, (y+R)*aspr])
xym = np.array([x, (y-R)*aspr])
φ = θ + np.pi/2
Rφ = Rot(φ)
OHc = np.dot(Rφ,ohc)
OHp = np.dot(Rφ,ohp)
OHm = np.dot(Rφ,ohm)
XYc = np.dot(Rφ,xyc)
XYp = np.dot(Rφ,xyp)
XYm = np.dot(Rφ,xym)
SA = cA/cH
SB = cB/cH
SM = cM/cH
Sy = cy/cH
δ = (cA+cB)/2-cM
self.osaOutputdata1_fields.clear_content()
self.osaOutputdata2_fields.clear_content()
self.osaOutputdata1_fields.ClearanceA.append( mdl.physicalValue( cA, self.osaOutputdata1_fields.ClearanceA.referenceUnit ) )
self.osaOutputdata1_fields.ClearanceB.append( mdl.physicalValue( cB, self.osaOutputdata1_fields.ClearanceB.referenceUnit ) )
self.osaOutputdata1_fields.ClearanceM.append( mdl.physicalValue( cM, self.osaOutputdata1_fields.ClearanceM.referenceUnit ) )
self.osaOutputdata1_fields.SideForceA.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceA.referenceUnit ) )
self.osaOutputdata1_fields.SideForceB.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceB.referenceUnit ) )
self.osaOutputdata1_fields.SideForceM.append( mdl.physicalValue( fM, self.osaOutputdata1_fields.SideForceM.referenceUnit ) )
self.osaOutputdata1_fields.StandoffA.append( mdl.physicalValue( SA, self.osaOutputdata1_fields.StandoffA.referenceUnit ) )
self.osaOutputdata1_fields.StandoffB.append( mdl.physicalValue( SB, self.osaOutputdata1_fields.StandoffB.referenceUnit ) )
self.osaOutputdata1_fields.StandoffM.append( mdl.physicalValue( SM, self.osaOutputdata1_fields.StandoffM.referenceUnit ) )
self.osaOutputdata2_fields.AxialForce.append( mdl.physicalValue( w*L*np.cos(θ), self.osaOutputdata2_fields.AxialForce.referenceUnit ) )
self.osaOutputdata2_fields.MaxDeflection.append( mdl.physicalValue( δ, self.osaOutputdata2_fields.MaxDeflection.referenceUnit ) )
self.osaOutputdata2_fields.WellboreClearance.append( mdl.physicalValue( np.mean(cy), self.osaOutputdata2_fields.WellboreClearance.referenceUnit ) )
self.osaOutputdata2_fields.WellboreStandoff.append( mdl.physicalValue( np.mean(Sy), self.osaOutputdata2_fields.WellboreStandoff.referenceUnit ) )
self.osaCasing_fields.inverseReferenceUnitConvert_fields()
self.osaCentA_fields.inverseReferenceUnitConvert_fields()
self.osaCentB_fields.inverseReferenceUnitConvert_fields()
self.osaWellbore_fields.inverseReferenceUnitConvert_fields()
self.osaOutputdata1_fields.inverseReferenceUnitConvert_fields()
self.osaOutputdata2_fields.inverseReferenceUnitConvert_fields()
lim = L/2*1.05
return OHc, OHp, OHm, XYc, XYp, XYm, lim, rA, rB, rM
0012_delete_questionnaire.py | # Generated by Django 3.0.4 on 2020-03-26 11:08
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        ('base', '0011_auto_20200326_1107'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Questionnaire',
        ),
    ]
__init__.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from .FocalLoss import FocalLoss
from .CRFLoss import CRFLoss
from .Loss import Loss
from torch.nn import CrossEntropyLoss, L1Loss, MSELoss, NLLLoss, PoissonNLLLoss, NLLLoss2d, KLDivLoss, BCELoss, BCEWithLogitsLoss, MarginRankingLoss, HingeEmbeddingLoss, MultiLabelMarginLoss, SmoothL1Loss, SoftMarginLoss, MultiLabelSoftMarginLoss, CosineEmbeddingLoss, MultiMarginLoss, TripletMarginLoss
|
perm.go | package auth
import (
"strings"
)
// Perm the basic permission info
type Perm string
const (
// ReadPerm the permission of read
ReadPerm = "r"
// WritePerm the permission of write
WritePerm = "w"
// ExecutePerm the permission of execute
ExecutePerm = "x"
// DefaultPerm the default permission
DefaultPerm = ReadPerm
// FullPerm the permission of read, write and execute
FullPerm = "rwx"
)
// String returns the permission string
func (p Perm) String() string {
return string(p)
}
// R reports whether the permission includes read
func (p Perm) R() bool {
return strings.Contains(p.String(), ReadPerm)
}
// W reports whether the permission includes write
func (p Perm) W() bool {
return strings.Contains(p.String(), WritePerm)
}
// X reports whether the permission includes execute
func (p Perm) X() bool {
return strings.Contains(p.String(), ExecutePerm)
}
// CheckTo reports whether the target permission t includes every permission of p;
// if the current permission is invalid, it always returns false
func (p Perm) CheckTo(t Perm) bool {
if !p.IsValid() {
return false
}
if !t.IsValid() || (p.R() && !t.R()) || (p.W() && !t.W()) || (p.X() && !t.X()) {
return false
}
return true
}
// IsValid reports whether p is a valid permission
func (p Perm) IsValid() bool {
return len(p.String()) > 0 && (p.R() || p.W() || p.X())
}
// ToPermWithDefault converts a perm string to a Perm;
// if perm is empty, defaultPerm is used instead
func ToPermWithDefault(perm string, defaultPerm string) (p Perm) {
perm = strings.TrimSpace(perm)
if len(perm) == 0 {
perm = defaultPerm
}
return ToPerm(perm)
}
// ToPerm convert a perm string to Perm
func ToPerm(perm string) (p Perm) {
perm = strings.TrimSpace(perm)
permLen := len(perm)
if permLen == 0 || permLen > 3 {
return p
}
perm = strings.ToLower(perm)
r, w, x := false, false, false
for i := 0; i < permLen; i++ {
c := perm[i : i+1]
switch c {
case ReadPerm:
r = true
case WritePerm:
w = true
case ExecutePerm:
x = true
default:
return p
}
}
if r {
p += ReadPerm
}
if w {
p += WritePerm
}
if x {
p += ExecutePerm
}
return p
} | |
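// Illustrative usage sketch (assumed caller code, not part of this package):
//
//	owner := ToPermWithDefault("rw", DefaultPerm) // "rw"
//	guest := ToPerm("r")                          // "r"
//	guest.CheckTo(owner)                          // true: "rw" covers "r"
//	owner.CheckTo(guest)                          // false: guest lacks write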
train_dalle.py | import argparse
from random import choice
from pathlib import Path
# torch
import torch
from torch.optim import Adam
from torch.nn.utils import clip_grad_norm_
# vision imports
from PIL import Image
from torchvision import transforms as T
from torch.utils.data import DataLoader, Dataset
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid, save_image
# dalle related classes and utils
from dalle_pytorch import OpenAIDiscreteVAE, DiscreteVAE, DALLE
from dalle_pytorch.simple_tokenizer import tokenize, tokenizer, VOCAB_SIZE
# argument parsing
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required = False)
group.add_argument('--vae_path', type = str,
help='path to your trained discrete VAE')
group.add_argument('--dalle_path', type = str,
help='path to your partially trained DALL-E')
parser.add_argument('--image_text_folder', type = str, required = True,
help='path to your folder of images and text for learning the DALL-E')
args = parser.parse_args()
# helpers
def exists(val):
return val is not None
# constants
VAE_PATH = args.vae_path
DALLE_PATH = args.dalle_path
RESUME = exists(DALLE_PATH)
EPOCHS = 20
BATCH_SIZE = 4
LEARNING_RATE = 3e-4
GRAD_CLIP_NORM = 0.5
MODEL_DIM = 512
TEXT_SEQ_LEN = 256
DEPTH = 2
HEADS = 4
DIM_HEAD = 64
# reconstitute vae
if RESUME:
dalle_path = Path(DALLE_PATH)
assert dalle_path.exists(), 'DALL-E model file does not exist'
loaded_obj = torch.load(str(dalle_path))
dalle_params, vae_params, weights = loaded_obj['hparams'], loaded_obj['vae_params'], loaded_obj['weights']
vae = DiscreteVAE(**vae_params)
dalle_params = dict(
vae = vae,
**dalle_params
)
IMAGE_SIZE = vae_params['image_size']
else:
if exists(VAE_PATH):
vae_path = Path(VAE_PATH)
assert vae_path.exists(), 'VAE model file does not exist'
loaded_obj = torch.load(str(vae_path))
vae_params, weights = loaded_obj['hparams'], loaded_obj['weights']
vae = DiscreteVAE(**vae_params)
vae.load_state_dict(weights)
else:
print('using OpenAIs pretrained VAE for encoding images to tokens')
vae_params = None
vae = OpenAIDiscreteVAE()
IMAGE_SIZE = vae.image_size
dalle_params = dict(
vae = vae,
num_text_tokens = VOCAB_SIZE,
text_seq_len = TEXT_SEQ_LEN,
dim = MODEL_DIM,
depth = DEPTH,
heads = HEADS,
dim_head = DIM_HEAD
)
# helpers
def save_model(path):
save_obj = {
'hparams': dalle_params,
'vae_params': vae_params,
'weights': dalle.state_dict()
}
torch.save(save_obj, path)
# dataset loading
class TextImageDataset(Dataset):
def __init__(self, folder, text_len = 256, image_size = 128):
super().__init__()
path = Path(folder)
text_files = [*path.glob('**/*.txt')]
image_files = [
*path.glob('**/*.png'),
*path.glob('**/*.jpg'),
*path.glob('**/*.jpeg')
]
text_files = {t.stem: t for t in text_files}
image_files = {i.stem: i for i in image_files}
keys = (image_files.keys() & text_files.keys())
self.keys = list(keys)
self.text_files = {k: v for k, v in text_files.items() if k in keys}
self.image_files = {k: v for k, v in image_files.items() if k in keys}
        self.image_transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.CenterCrop(image_size),
T.Resize(image_size),
T.ToTensor(),
T.Lambda(lambda t: t.expand(3, -1, -1)),
T.Normalize((0.5,) * 3, (0.5,) * 3)
])
def __len__(self):
return len(self.keys)
def __getitem__(self, ind):
key = self.keys[ind]
text_file = self.text_files[key]
image_file = self.image_files[key]
image = Image.open(image_file)
descriptions = text_file.read_text().split('\n')
descriptions = list(filter(lambda t: len(t) > 0, descriptions))
description = choice(descriptions)
tokenized_text = tokenize(description).squeeze(0)
mask = tokenized_text != 0
        image_tensor = self.image_transform(image)
return tokenized_text, image_tensor, mask
# create dataset and dataloader
ds = TextImageDataset(
args.image_text_folder,
text_len = TEXT_SEQ_LEN,
image_size = IMAGE_SIZE
)
assert len(ds) > 0, 'dataset is empty'
print(f'{len(ds)} image-text pairs found for training')
dl = DataLoader(ds, batch_size = BATCH_SIZE, shuffle = True, drop_last = True)
# initialize DALL-E
dalle = DALLE(**dalle_params).cuda()
if RESUME:
    dalle.load_state_dict(weights)

# optimizer
opt = Adam(dalle.parameters(), lr = LEARNING_RATE)
# experiment tracker
import wandb

wandb.init(project = 'dalle_train_transformer', resume = RESUME)

# record hyperparameters after init so the values attach to this run
wandb.config.depth = DEPTH
wandb.config.heads = HEADS
wandb.config.dim_head = DIM_HEAD
# training
for epoch in range(EPOCHS):
for i, (text, images, mask) in enumerate(dl):
text, images, mask = map(lambda t: t.cuda(), (text, images, mask))
loss = dalle(text, images, mask = mask, return_loss = True)
loss.backward()
clip_grad_norm_(dalle.parameters(), GRAD_CLIP_NORM)
opt.step()
opt.zero_grad()
log = {}
if i % 10 == 0:
print(epoch, i, f'loss - {loss.item()}')
log = {
**log,
'epoch': epoch,
'iter': i,
'loss': loss.item()
}
if i % 100 == 0:
sample_text = text[:1]
token_list = sample_text.masked_select(sample_text != 0).tolist()
decoded_text = tokenizer.decode(token_list)
image = dalle.generate_images(
text[:1],
mask = mask[:1],
filter_thres = 0.9 # topk sampling at 0.9
)
save_model(f'./dalle.pt')
wandb.save(f'./dalle.pt')
log = {
**log,
'image': wandb.Image(image, caption = decoded_text)
}
wandb.log(log)
save_model(f'./dalle-final.pt')
wandb.save('./dalle-final.pt')
wandb.finish()
|
model_total_credits.go | /*
Sumo Logic API
# Getting Started Welcome to the Sumo Logic API reference. You can use these APIs to interact with the Sumo Logic platform. For information on the collector and search APIs see our [API home page](https://help.sumologic.com/APIs). ## API Endpoints Sumo Logic has several deployments in different geographic locations. You'll need to use the Sumo Logic API endpoint corresponding to your geographic location. See the table below for the different API endpoints by deployment. For details determining your account's deployment see [API endpoints](https://help.sumologic.com/?cid=3011). <table> <tr> <td> <strong>Deployment</strong> </td> <td> <strong>Endpoint</strong> </td> </tr> <tr> <td> AU </td> <td> https://api.au.sumologic.com/api/ </td> </tr> <tr> <td> CA </td> <td> https://api.ca.sumologic.com/api/ </td> </tr> <tr> <td> DE </td> <td> https://api.de.sumologic.com/api/ </td> </tr> <tr> <td> EU </td> <td> https://api.eu.sumologic.com/api/ </td> </tr> <tr> <td> FED </td> <td> https://api.fed.sumologic.com/api/ </td> </tr> <tr> <td> IN </td> <td> https://api.in.sumologic.com/api/ </td> </tr> <tr> <td> JP </td> <td> https://api.jp.sumologic.com/api/ </td> </tr> <tr> <td> US1 </td> <td> https://api.sumologic.com/api/ </td> </tr> <tr> <td> US2 </td> <td> https://api.us2.sumologic.com/api/ </td> </tr> </table> ## Authentication Sumo Logic supports the following options for API authentication: - Access ID and Access Key - Base64 encoded Access ID and Access Key See [Access Keys](https://help.sumologic.com/Manage/Security/Access-Keys) to generate an Access Key. Make sure to copy the key you create, because it is displayed only once. When you have an Access ID and Access Key you can execute requests such as the following: ```bash curl -u \"<accessId>:<accessKey>\" -X GET https://api.<deployment>.sumologic.com/api/v1/users ``` Where `deployment` is either `au`, `ca`, `de`, `eu`, `fed`, `in`, `jp`, `us1`, or `us2`. See [API endpoints](#section/API-Endpoints) for details. If you prefer to use basic access authentication, you can do a Base64 encoding of your `<accessId>:<accessKey>` to authenticate your HTTPS request. The following is an example request, replace the placeholder `<encoded>` with your encoded Access ID and Access Key string: ```bash curl -H \"Authorization: Basic <encoded>\" -X GET https://api.<deployment>.sumologic.com/api/v1/users ``` Refer to [API Authentication](https://help.sumologic.com/?cid=3012) for a Base64 example. ## Status Codes Generic status codes that apply to all our APIs. See the [HTTP status code registry](https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml) for reference. <table> <tr> <td> <strong>HTTP Status Code</strong> </td> <td> <strong>Error Code</strong> </td> <td> <strong>Description</strong> </td> </tr> <tr> <td> 301 </td> <td> moved </td> <td> The requested resource SHOULD be accessed through returned URI in Location Header. See [troubleshooting](https://help.sumologic.com/APIs/Troubleshooting-APIs/API-301-Error-Moved) for details.</td> </tr> <tr> <td> 401 </td> <td> unauthorized </td> <td> Credential could not be verified.</td> </tr> <tr> <td> 403 </td> <td> forbidden </td> <td> This operation is not allowed for your account type or the user doesn't have the role capability to perform this action. 
See [troubleshooting](https://help.sumologic.com/APIs/Troubleshooting-APIs/API-403-Error-This-operation-is-not-allowed-for-your-account-type) for details.</td> </tr> <tr> <td> 404 </td> <td> notfound </td> <td> Requested resource could not be found. </td> </tr> <tr> <td> 405 </td> <td> method.unsupported </td> <td> Unsupported method for URL. </td> </tr> <tr> <td> 415 </td> <td> contenttype.invalid </td> <td> Invalid content type. </td> </tr> <tr> <td> 429 </td> <td> rate.limit.exceeded </td> <td> The API request rate is higher than 4 request per second or inflight API requests are higher than 10 request per second. </td> </tr> <tr> <td> 500 </td> <td> internal.error </td> <td> Internal server error. </td> </tr> <tr> <td> 503 </td> <td> service.unavailable </td> <td> Service is currently unavailable. </td> </tr> </table> ## Filtering Some API endpoints support filtering results on a specified set of fields. Each endpoint that supports filtering will list the fields that can be filtered. Multiple fields can be combined by using an ampersand `&` character. For example, to get 20 users whose `firstName` is `John` and `lastName` is `Doe`: ```bash api.sumologic.com/v1/users?limit=20&firstName=John&lastName=Doe ``` ## Sorting Some API endpoints support sorting fields by using the `sortBy` query parameter. The default sort order is ascending. Prefix the field with a minus sign `-` to sort in descending order. For example, to get 20 users sorted by their `email` in descending order: ```bash api.sumologic.com/v1/users?limit=20&sort=-email ``` ## Asynchronous Request Asynchronous requests do not wait for results, instead they immediately respond back with a job identifier while the job runs in the background. You can use the job identifier to track the status of the asynchronous job request. Here is a typical flow for an asynchronous request. 1. Start an asynchronous job. On success, a job identifier is returned. The job identifier uniquely identifies your asynchronous job. 2. Once started, use the job identifier from step 1 to track the status of your asynchronous job. An asynchronous request will typically provide an endpoint to poll for the status of asynchronous job. A successful response from the status endpoint will have the following structure: ```json { \"status\": \"Status of asynchronous request\", \"statusMessage\": \"Optional message with additional information in case request succeeds\", \"error\": \"Error object in case request fails\" } ``` The `status` field can have one of the following values: 1. `Success`: The job succeeded. The `statusMessage` field might have additional information. 2. `InProgress`: The job is still running. 3. `Failed`: The job failed. The `error` field in the response will have more information about the failure. 3. Some asynchronous APIs may provide a third endpoint (like [export result](#operation/getAsyncExportResult)) to fetch the result of an asynchronous job. ### Example Let's say we want to export a folder with the identifier `0000000006A2E86F`. We will use the [async export](#operation/beginAsyncExport) API to export all the content under the folder with `id=0000000006A2E86F`. 1. Start an export job for the folder ```bash curl -X POST -u \"<accessId>:<accessKey>\" https://api.<deployment>.sumologic.com/api/v2/content/0000000006A2E86F/export ``` See [authentication section](#section/Authentication) for more details about `accessId`, `accessKey`, and `deployment`. On success, you will get back a job identifier. 
In the response below, `C03E086C137F38B4` is the job identifier. ```bash { \"id\": \"C03E086C137F38B4\" } ``` 2. Now poll for the status of the asynchronous job with the [status](#operation/getAsyncExportStatus) endpoint. ```bash curl -X GET -u \"<accessId>:<accessKey>\" https://api.<deployment>.sumologic.com/api/v2/content/0000000006A2E86F/export/C03E086C137F38B4/status ``` You may get a response like ```json { \"status\": \"InProgress\", \"statusMessage\": null, \"error\": null } ``` It implies the job is still in progress. Keep polling till the status is either `Success` or `Failed`. 3. When the asynchronous job completes (`status != \"InProgress\"`), you can fetch the results with the [export result](#operation/getAsyncExportResult) endpoint. ```bash curl -X GET -u \"<accessId>:<accessKey>\" https://api.<deployment>.sumologic.com/api/v2/content/0000000006A2E86F/export/C03E086C137F38B4/result ``` The asynchronous job may fail (`status == \"Failed\"`). You can look at the `error` field for more details. ```json { \"status\": \"Failed\", \"errors\": { \"code\": \"content1:too_many_items\", \"message\": \"Too many objects: object count(1100) was greater than limit 1000\" } } ``` ## Rate Limiting * A rate limit of four API requests per second (240 requests per minute) applies to all API calls from a user. * A rate limit of 10 concurrent requests to any API endpoint applies to an access key. If a rate is exceeded, a rate limit exceeded 429 status code is returned. ## Generating Clients You can use [OpenAPI Generator](https://openapi-generator.tech) to generate clients from the YAML file to access the API. ### Using [NPM](https://www.npmjs.com/get-npm) 1. Install [NPM package wrapper](https://github.com/openapitools/openapi-generator-cli) globally, exposing the CLI on the command line: ```bash npm install @openapitools/openapi-generator-cli -g ``` You can see detailed instructions [here](https://openapi-generator.tech/docs/installation#npm). 2. Download the [YAML file](/docs/sumologic-api.yaml) and save it locally. Let's say the file is saved as `sumologic-api.yaml`. 3. Use the following command to generate `python` client inside the `sumo/client/python` directory: ```bash openapi-generator generate -i sumologic-api.yaml -g python -o sumo/client/python ``` ### Using [Homebrew](https://brew.sh/) 1. Install OpenAPI Generator ```bash brew install openapi-generator ``` 2. Download the [YAML file](/docs/sumologic-api.yaml) and save it locally. Let's say the file is saved as `sumologic-api.yaml`. 3. Use the following command to generate `python` client side code inside the `sumo/client/python` directory: ```bash openapi-generator generate -i sumologic-api.yaml -g python -o sumo/client/python ```
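## Retrying on 429 If a request returns a `429` status code, the client has exceeded the rate limits above; waiting briefly and retrying is usually sufficient. The snippet below is an illustrative sketch (not an official recommendation) and reuses the same `accessId`, `accessKey`, and `deployment` placeholders as the earlier examples: ```bash # Retry a GET up to 5 times, sleeping 1 second after each 429 response. for attempt in 1 2 3 4 5; do code=$(curl -s -o /dev/null -w '%{http_code}' -u "<accessId>:<accessKey>" "https://api.<deployment>.sumologic.com/api/v1/users") if [ "$code" != "429" ]; then break; fi sleep 1 done ```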
API version: 1.0.0
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package openapi
import (
"encoding/json"
)
// TotalCredits Total amount of credits to be deducted from the parent organization corresponding to the baselines
type TotalCredits struct {
// Numerical value of the amount of credits
TotalCredits float64 `json:"totalCredits"`
Breakdown *CreditsBreakdown `json:"breakdown,omitempty"`
}
// NewTotalCredits instantiates a new TotalCredits object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewTotalCredits(totalCredits float64) *TotalCredits {
this := TotalCredits{}
this.TotalCredits = totalCredits
return &this
}
// NewTotalCreditsWithDefaults instantiates a new TotalCredits object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewTotalCreditsWithDefaults() *TotalCredits {
this := TotalCredits{}
return &this
}
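// Example (illustrative sketch, not generated code): construct a value with
// the required field and marshal it to JSON.
//
//	tc := NewTotalCredits(42.5)
//	b, _ := tc.MarshalJSON() // {"totalCredits":42.5}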
// GetTotalCredits returns the TotalCredits field value
func (o *TotalCredits) GetTotalCredits() float64 {
if o == nil {
var ret float64
return ret
}
return o.TotalCredits
}
// GetTotalCreditsOk returns a tuple with the TotalCredits field value
// and a boolean to check if the value has been set.
func (o *TotalCredits) GetTotalCreditsOk() (*float64, bool) {
if o == nil {
return nil, false
}
return &o.TotalCredits, true
}
// SetTotalCredits sets field value
func (o *TotalCredits) SetTotalCredits(v float64) {
o.TotalCredits = v
}
// GetBreakdown returns the Breakdown field value if set, zero value otherwise.
func (o *TotalCredits) GetBreakdown() CreditsBreakdown {
if o == nil || o.Breakdown == nil {
var ret CreditsBreakdown
return ret
}
return *o.Breakdown
}
// GetBreakdownOk returns a tuple with the Breakdown field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TotalCredits) GetBreakdownOk() (*CreditsBreakdown, bool) {
if o == nil || o.Breakdown == nil {
return nil, false
}
return o.Breakdown, true
}
// HasBreakdown returns a boolean if a field has been set.
func (o *TotalCredits) HasBreakdown() bool {
if o != nil && o.Breakdown != nil {
return true
}
return false
}
// SetBreakdown gets a reference to the given CreditsBreakdown and assigns it to the Breakdown field.
func (o *TotalCredits) SetBreakdown(v CreditsBreakdown) {
o.Breakdown = &v
}
func (o TotalCredits) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if true {
toSerialize["totalCredits"] = o.TotalCredits
}
if o.Breakdown != nil {
toSerialize["breakdown"] = o.Breakdown
}
return json.Marshal(toSerialize)
}
type NullableTotalCredits struct {
value *TotalCredits
isSet bool
}
func (v NullableTotalCredits) Get() *TotalCredits {
return v.value
}
func (v *NullableTotalCredits) Set(val *TotalCredits) {
v.value = val
v.isSet = true
}
func (v NullableTotalCredits) IsSet() bool {
return v.isSet
}
func (v *NullableTotalCredits) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableTotalCredits(val *TotalCredits) *NullableTotalCredits {
return &NullableTotalCredits{value: val, isSet: true}
}
func (v NullableTotalCredits) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableTotalCredits) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
}
robot.py
import os
import time
import numpy as np
import yaml
import utils
from . import vrep
from ..robot import Robot as BaseRobot
from ..robot import Reward
from ..data import Data as TextData
import random
from bisect import bisect_right
import cv2
class SimRobot(BaseRobot):
def __init__(self, obj_mesh_dir, num_obj, *args):
BaseRobot.__init__(self, *args)
self.text_data = TextData()
# Define colors for object meshes (Tableau palette)
self.color_name = ['blue', 'green', 'brown', 'orange', 'yellow', 'gray', 'red', 'purple', 'cyan', 'pink']
self.color_space = np.asarray([[78.0, 121.0, 167.0], # blue
[89.0, 161.0, 79.0], # green
[156, 117, 95], # brown
[242, 142, 43], # orange
[237.0, 201.0, 72.0], # yellow
[186, 176, 172], # gray
[255.0, 87.0, 89.0], # red
[176, 122, 161], # purple
[118, 183, 178], # cyan
[255, 157, 167]]) / 255.0 # pink
# Read files in object mesh directory
self.obj_mesh_dir = obj_mesh_dir
self.num_obj = num_obj
self.mesh_list = list(filter(lambda x: x.endswith('.obj'), os.listdir(self.obj_mesh_dir)))
try:
with open(os.path.join(obj_mesh_dir, 'blocks.yml')) as f:
yaml_dict = yaml.safe_load(f)
self.groups = yaml_dict['groups']
self.mesh_name = yaml_dict['names']
for obj in self.mesh_list:
if obj not in self.mesh_name.keys():
raise Exception
except Exception:
print('Failed to read block names/groups')
exit(1)
# Make sure to have the server side running in V-REP:
# in a child script of a V-REP scene, add following command
# to be executed just once, at simulation start:
#
# simExtRemoteApiStart(19999)
#
# then start simulation, and run this program.
#
# IMPORTANT: for each successful call to simxStart, there
# should be a corresponding call to simxFinish at the end!
# MODIFY remoteApiConnections.txt
# Connect to simulator
vrep.simxFinish(-1) # Just in case, close all opened connections
# Connect to V-REP on port 19997
self.sim_client = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)
if self.sim_client == -1:
print('Failed to connect to simulation (V-REP remote API server). Exiting.')
exit()
else:
print('Connected to simulation.')
# self.restart_sim()
self.MODE = vrep.simx_opmode_blocking
# Setup virtual camera in simulation
self.setup_sim_camera()
self.object_handles = []
self.object_left_handles = []
self.target_handle = None
# Add objects to simulation environment
# self.add_objects()
def setup_sim_camera(self):
# Get handle to camera
sim_ret, self.cam_handle = vrep.simxGetObjectHandle(self.sim_client, 'Vision_sensor_persp', self.MODE)
_, self.up_cam_handle = vrep.simxGetObjectHandle(self.sim_client, 'Vision_sensor_ortho', self.MODE)
# Get camera pose and intrinsics in simulation
sim_ret, cam_position = vrep.simxGetObjectPosition(self.sim_client, self.cam_handle, -1, self.MODE)
sim_ret, cam_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.cam_handle, -1, self.MODE)
cam_trans = np.eye(4, 4)
cam_trans[0:3, 3] = np.asarray(cam_position)
cam_orientation = [-cam_orientation[0], -cam_orientation[1], -cam_orientation[2]]
cam_rotm = np.eye(4, 4)
cam_rotm[0:3, 0:3] = np.linalg.inv(utils.euler2rotm(cam_orientation))
# Compute rigid transformation representating camera pose
self.cam_pose = np.dot(cam_trans, cam_rotm)
self.cam_intrinsics = np.asarray([[618.62, 0, 320], [0, 618.62, 240], [0, 0, 1]])
self.cam_depth_scale = 1
# Get background image
self.bg_color_img, self.bg_depth_img = self.get_camera_data()
self.bg_depth_img = self.bg_depth_img * self.cam_depth_scale
def add_objects(self, mesh_idx=-1, mesh_color=-1):
# TODO
# handle <-> ind <-> obj -> name
# Just for debug
# print([self.mesh_list[ind] for ind in self.obj_mesh_ind])
# self.obj_mesh_ind = np.array(range(len(self.mesh_list)))
# self.obj_mesh_color = self.color_space[np.asarray(range(self.num_obj)) % 10, :]
# Randomly choose objects to add to scene
if mesh_idx == -1:
group_chosen = np.random.choice(self.groups, size=self.num_obj, replace=False)
self.obj_mesh_ind = np.array([self.mesh_list.index(np.random.choice(obj)) for obj in group_chosen])
self.obj_mesh_color = self.color_space[np.random.choice(np.arange(self.color_space.shape[0]), size=self.num_obj, replace=False)]
else:
self.obj_mesh_ind = np.array([mesh_idx])
self.obj_mesh_color = np.array([mesh_color])
# import pdb; pdb.set_trace()
# Add each object to robot workspace at x,y location and orientation (random or pre-loaded)
self.object_handles = []
for object_idx in range(len(self.obj_mesh_ind)):
curr_mesh_file = os.path.join(self.obj_mesh_dir, self.mesh_list[self.obj_mesh_ind[object_idx]])
curr_shape_name = 'shape_%02d' % object_idx
drop_x = (self.workspace_limits[0][1] - self.workspace_limits[0][0] - 0.2) * np.random.random_sample() + self.workspace_limits[0][0] + 0.1
drop_y = (self.workspace_limits[1][1] - self.workspace_limits[1][0] - 0.2) * np.random.random_sample() + self.workspace_limits[1][0] + 0.1
object_position = [drop_x, drop_y, 0.15]
object_orientation = [2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample()]
object_color = [self.obj_mesh_color[object_idx][0], self.obj_mesh_color[object_idx][1], self.obj_mesh_color[object_idx][2]]
ret_resp, ret_ints, ret_floats, ret_strings, ret_buffer = vrep.simxCallScriptFunction(self.sim_client, 'remoteApiCommandServer', vrep.sim_scripttype_childscript, 'importShape', [0, 0, 255, 0], object_position + object_orientation + object_color, [curr_mesh_file, curr_shape_name], bytearray(), vrep.simx_opmode_blocking)
if ret_resp == 8:
print('Failed to add new objects to simulation. Please restart.')
exit()
# print(ret_ints, ret_ints[0])
curr_shape_handle = ret_ints[0]
self.object_handles.append(curr_shape_handle)
time.sleep(2)
self.object_left_handles = self.object_handles.copy()
self.prev_obj_positions = []
self.obj_positions = []
self.get_instruction() # nb
# import pdb; pdb.set_trace()
def restart_sim(self):
sim_ret, self.UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_target', vrep.simx_opmode_blocking)
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (-0.5, 0, 0.3), vrep.simx_opmode_blocking)
vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
time.sleep(1)
sim_ret, self.RG2_tip_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_tip', vrep.simx_opmode_blocking)
sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
# V-REP bug requiring multiple starts and stops to restart
while gripper_position[2] > 0.4:
vrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)
vrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)
time.sleep(1)
sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
def is_stable(self):
# Check if simulation is stable by checking if gripper is within workspace
sim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)
sim_is_ok = gripper_position[0] > self.workspace_limits[0][0] - 0.1 and \
gripper_position[0] < self.workspace_limits[0][1] + 0.1 and \
gripper_position[1] > self.workspace_limits[1][0] - 0.1 and \
gripper_position[1] < self.workspace_limits[1][1] + 0.1 and \
gripper_position[2] > self.workspace_limits[2][0] and \
gripper_position[2] < self.workspace_limits[2][1]
if not sim_is_ok:
print('Simulation unstable, resetting.')
return sim_is_ok
def reset(self):
self.restart_sim()
self.add_objects()
# def stop_sim(self):
# if self.is_sim:
# # Now send some data to V-REP in a non-blocking fashion:
# # vrep.simxAddStatusbarMessage(sim_client,'Hello V-REP!',vrep.simx_opmode_oneshot)
# # # Start the simulation
# # vrep.simxStartSimulation(sim_client,vrep.simx_opmode_oneshot_wait)
# # # Stop simulation:
# # vrep.simxStopSimulation(sim_client,vrep.simx_opmode_oneshot_wait)
# # Before closing the connection to V-REP, make sure that the last command sent out had time to arrive. You can guarantee this with (for example):
# vrep.simxGetPingTime(self.sim_client)
# # Now close the connection to V-REP:
# vrep.simxFinish(self.sim_client)
def get_task_score(self):
key_positions = np.asarray([[-0.625, 0.125, 0.0], # red
[-0.625, -0.125, 0.0], # blue
[-0.375, 0.125, 0.0], # green
[-0.375, -0.125, 0.0]]) # yellow
obj_positions = np.asarray(self.get_obj_positions())
obj_positions.shape = (1, obj_positions.shape[0], obj_positions.shape[1])
obj_positions = np.tile(obj_positions, (key_positions.shape[0], 1, 1))
key_positions.shape = (key_positions.shape[0], 1, key_positions.shape[1])
key_positions = np.tile(key_positions, (1, obj_positions.shape[1], 1))
key_dist = np.sqrt(np.sum(np.power(obj_positions - key_positions, 2), axis=2))
key_nn_idx = np.argmin(key_dist, axis=0)
return np.sum(key_nn_idx == np.asarray(range(self.num_obj)) % 4)
def check_goal_reached(self, handle):
# goal_reached = self.get_task_score() == self.num_obj
goal_reached = self.target_handle == handle
return goal_reached
def get_obj_positions(self):
obj_positions = []
for object_handle in self.object_handles:
sim_ret, object_position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, vrep.simx_opmode_blocking)
obj_positions.append(object_position)
return obj_positions
def get_obj_positions_and_orientations(self):
obj_positions = []
obj_orientations = []
for object_handle in self.object_handles:
sim_ret, object_position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, self.MODE)
sim_ret, object_orientation = vrep.simxGetObjectOrientation(self.sim_client, object_handle, -1, self.MODE)
obj_positions.append(object_position)
obj_orientations.append(object_orientation)
return obj_positions, obj_orientations
def reposition_objects(self, workspace_limits):
# Move gripper out of the way
self.move_to([-0.1, 0, 0.3], None)
# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target', self.MODE)
# vrep.simxSetObjectPosition(self.sim_client, UR5_target_handle, -1, (-0.5,0,0.3), self.MODE)
# time.sleep(1)
for object_handle in self.object_handles:
# Drop object at random x,y location and random orientation in robot workspace
drop_x = (workspace_limits[0][1] - workspace_limits[0][0] - 0.2) * np.random.random_sample() + workspace_limits[0][0] + 0.1
drop_y = (workspace_limits[1][1] - workspace_limits[1][0] - 0.2) * np.random.random_sample() + workspace_limits[1][0] + 0.1
object_position = [drop_x, drop_y, 0.15]
object_orientation = [2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample()]
vrep.simxSetObjectPosition(self.sim_client, object_handle, -1, object_position, self.MODE)
vrep.simxSetObjectOrientation(self.sim_client, object_handle, -1, object_orientation, self.MODE)
time.sleep(2)
def get_camera_data(self, handle=-1):
if handle == -1:
handle = self.cam_handle
# Get color image from simulation
sim_ret, resolution, raw_image = vrep.simxGetVisionSensorImage(self.sim_client, handle, 0, self.MODE)
color_img = np.asarray(raw_image)
color_img.shape = (resolution[1], resolution[0], 3)
color_img = color_img.astype(np.float64) / 255
color_img[color_img < 0] += 1
color_img *= 255
color_img = np.fliplr(color_img)
color_img = color_img.astype(np.uint8)
# Get depth image from simulation
sim_ret, resolution, depth_buffer = vrep.simxGetVisionSensorDepthBuffer(self.sim_client, handle, self.MODE)
depth_img = np.asarray(depth_buffer)
depth_img.shape = (resolution[1], resolution[0])
depth_img = np.fliplr(depth_img)
zNear = 0.01
zFar = 10
depth_img = depth_img * (zFar - zNear) + zNear
return color_img, depth_img
def get_instruction(self):
# TODO
# add more template
instruction_template = "pick up the {color} {shape}."
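# Illustrative example (actual color/shape names come from the palette above
# and from blocks.yml): this template yields e.g. "pick up the red cube."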
ind = np.random.randint(0, self.num_obj)
color = utils.get_mush_color_name(self.obj_mesh_color[ind])
shape = np.random.choice(self.mesh_name[self.mesh_list[self.obj_mesh_ind[ind]]])
self.target_handle = self.object_handles[ind]
self.instruction_str = instruction_template.format(color=color, shape=shape) # nb
self.instruction = self.text_data.get_tensor(self.instruction_str)
return self.instruction
def close_gripper(self, _async=False):
gripper_motor_velocity = -0.5
gripper_motor_force = 100
sim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint', vrep.simx_opmode_blocking)
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
vrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)
vrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity, vrep.simx_opmode_blocking)
gripper_fully_closed = False
while gripper_joint_position > -0.047: # Block until gripper is fully closed
sim_ret, new_gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
# print(gripper_joint_position)
if new_gripper_joint_position >= gripper_joint_position:
return gripper_fully_closed
gripper_joint_position = new_gripper_joint_position
gripper_fully_closed = True
return gripper_fully_closed
def open_gripper(self, _async=False):
gripper_motor_velocity = 0.5
gripper_motor_force = 20
sim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint', vrep.simx_opmode_blocking)
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
vrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)
vrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity, vrep.simx_opmode_blocking)
while gripper_joint_position < 0.0536: # Block until gripper is fully open
sim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)
def move_to(self, tool_position, tool_orientation):
# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
move_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])
move_magnitude = np.linalg.norm(move_direction)
move_step = 0.02 * move_direction / move_magnitude
num_move_steps = int(np.floor(move_magnitude / 0.02))
for step_iter in range(num_move_steps):
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0], UR5_target_position[1] + move_step[1], UR5_target_position[2] + move_step[2]), vrep.simx_opmode_blocking)
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)
# Primitives ----------------------------------------------------------
def random_grasp_action(self):
'''
angles = []
for i in range(8):
angle = np.deg2rad(i * (360.0 / 16))
tool_rotation_angle = (angle % np.pi) - np.pi / 2
angles.append(tool_rotation_angle)
print(angles)
'''
# assert len(self.object_left_handles) > 0
object_handle = random.sample(self.object_left_handles, 1)[0]
_, orientation = vrep.simxGetObjectOrientation(self.sim_client, object_handle, -1, self.MODE)
all_angles = [-1.5708, -1.1781, -0.7854, -0.3927, 0.0, 0.3927, 0.7854, 1.1781]
possible_angles = [orientation[1], orientation[1] - np.pi/2.0]
angle_sample = random.sample(possible_angles, 1)[0]  # randomly choose one of the two candidate orientations (currently unused)
angle = max(0, bisect_right(all_angles, orientation[1]) - 1)
_, position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, self.MODE)
action_x = (position[1] - self.workspace_limits[1][0]) / self.heightmap_resolution
action_y = (position[0] - self.workspace_limits[0][0]) / self.heightmap_resolution
action_x = min(action_x, 223)
action_y = min(action_y, 223)
action = (angle, int(action_x), int(action_y))
# print(object_handle, action)
# import pdb; pdb.set_trace()
return action
def step(self, action, valid_depth_heightmap, num_rotations, heightmap_resolution):
# Compute 3D position of pixel
angle = np.deg2rad(action[0] * (360.0 / num_rotations))
best_pix_x = action[2]
best_pix_y = action[1]
primitive_position = [
best_pix_x * heightmap_resolution + self.workspace_limits[0][0],
best_pix_y * heightmap_resolution + self.workspace_limits[1][0],
valid_depth_heightmap[best_pix_y][best_pix_x] + self.workspace_limits[2][0]
]
reward = self.grasp(primitive_position, angle)
done = (reward == Reward.SUCCESS)
# print(reward, done)
return reward.value, done
def grasp(self, position, heightmap_rotation_angle):
# print('Executing: grasp at (%f, %f, %f)' % (position[0], position[1], position[2]))
# Compute tool orientation from heightmap rotation angle
tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi / 2
# Avoid collision with floor
position = np.asarray(position).copy()
position[2] = max(position[2] - 0.04, self.workspace_limits[2][0] + 0.02)
# Move gripper to location above grasp target
grasp_location_margin = 0.15
# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
location_above_grasp_target = (position[0], position[1], position[2] + grasp_location_margin)
# Compute gripper position and linear movement increments
tool_position = location_above_grasp_target
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
move_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])
move_magnitude = np.linalg.norm(move_direction)
move_step = 0.05 * move_direction / move_magnitude
# if np.floor(move_direction[0] / move_step[0]) == np.nan or move_step[0] == 0: import pdb; pdb.set_trace()
num_move_steps = int(np.floor(move_direction[0] / move_step[0])) if move_step[0] != 0 else 1
# Compute gripper orientation and rotation increments
sim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
rotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3
num_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1]) / rotation_step))
# Simultaneously move and rotate gripper
for step_iter in range(max(num_move_steps, num_rotation_steps)):
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0] * min(step_iter, num_move_steps), UR5_target_position[1] + move_step[1] * min(step_iter, num_move_steps), UR5_target_position[2] + move_step[2] * min(step_iter, num_move_steps)), vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, gripper_orientation[1] + rotation_step * min(step_iter, num_rotation_steps), np.pi / 2), vrep.simx_opmode_blocking)
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, tool_rotation_angle, np.pi / 2), vrep.simx_opmode_blocking)
# Ensure gripper is open
self.open_gripper()
# Approach grasp target
self.move_to(position, None)
# Close gripper to grasp target
gripper_full_closed = self.close_gripper()
# Move gripper to location above grasp target
self.move_to(location_above_grasp_target, None)
# Check if grasp is successful
gripper_full_closed = self.close_gripper()
grasp_sth = not gripper_full_closed
# Move the grasped object elsewhere
if grasp_sth:
object_positions = np.asarray(self.get_obj_positions())
object_positions = object_positions[:, 2]
grasped_object_ind = np.argmax(object_positions)
grasped_object_handle = self.object_handles[grasped_object_ind]
vrep.simxSetObjectPosition(self.sim_client, grasped_object_handle, -1, (-0.5, 0.5 + 0.05 * float(grasped_object_ind), 0.1), self.MODE)
self.object_left_handles.remove(grasped_object_handle)
if grasped_object_handle == self.target_handle:
return Reward.SUCCESS
else:
return Reward.WRONG
else:
return Reward.FAIL
def push(self, position, heightmap_rotation_angle, workspace_limits):
# print('Executing: push at (%f, %f, %f)' % (position[0], position[1], position[2]))
# Compute tool orientation from heightmap rotation angle
tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi / 2
# Adjust pushing point to be on tip of finger
position[2] = position[2] + 0.026
# Compute pushing direction
push_orientation = [1.0, 0.0]
push_direction = np.asarray([push_orientation[0] * np.cos(heightmap_rotation_angle) - push_orientation[1] * np.sin(heightmap_rotation_angle), push_orientation[0] * np.sin(heightmap_rotation_angle) + push_orientation[1] * np.cos(heightmap_rotation_angle)])
# Move gripper to location above pushing point
pushing_point_margin = 0.1
location_above_pushing_point = (position[0], position[1], position[2] + pushing_point_margin)
# Compute gripper position and linear movement increments
tool_position = location_above_pushing_point
sim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
move_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])
move_magnitude = np.linalg.norm(move_direction)
move_step = 0.05 * move_direction / move_magnitude
num_move_steps = int(np.floor(move_direction[0] / move_step[0]))
# Compute gripper orientation and rotation increments
sim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)
rotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3
num_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1]) / rotation_step))
# Simultaneously move and rotate gripper
for step_iter in range(max(num_move_steps, num_rotation_steps)):
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0] * min(step_iter, num_move_steps), UR5_target_position[1] + move_step[1] * min(step_iter, num_move_steps), UR5_target_position[2] + move_step[2] * min(step_iter, num_move_steps)), vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, gripper_orientation[1] + rotation_step * min(step_iter, num_rotation_steps), np.pi / 2), vrep.simx_opmode_blocking)
vrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)
vrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, tool_rotation_angle, np.pi / 2), vrep.simx_opmode_blocking)
# Ensure gripper is closed
self.close_gripper()
# Approach pushing point
self.move_to(position, None)
# Compute target location (push to the right)
push_length = 0.1
target_x = min(max(position[0] + push_direction[0] * push_length, workspace_limits[0][0]), workspace_limits[0][1])
target_y = min(max(position[1] + push_direction[1] * push_length, workspace_limits[1][0]), workspace_limits[1][1])
push_length = np.sqrt(np.power(target_x - position[0], 2) + np.power(target_y - position[1], 2))
# Move in pushing direction towards target location
self.move_to([target_x, target_y, position[2]], None)
# Move gripper to location above grasp target
self.move_to([target_x, target_y, location_above_pushing_point[2]], None)
push_success = True
return push_success
# def place(self, position, heightmap_rotation_angle, workspace_limits):
# print('Executing: place at (%f, %f, %f)' % (position[0], position[1], position[2]))
# # Compute tool orientation from heightmap rotation angle
# tool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi/2
# # Avoid collision with floor
# position[2] = max(position[2] + 0.04 + 0.02, workspace_limits[2][0] + 0.02)
# # Move gripper to location above place target
# place_location_margin = 0.1
# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)
# location_above_place_target = (position[0], position[1], position[2] + place_location_margin)
# self.move_to(location_above_place_target, None)
# sim_ret,gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, UR5_target_handle, -1, vrep.simx_opmode_blocking)
# if tool_rotation_angle - gripper_orientation[1] > 0:
# increment = 0.2
# else:
# increment = -0.2
# while abs(tool_rotation_angle - gripper_orientation[1]) >= 0.2:
# vrep.simxSetObjectOrientation(self.sim_client, UR5_target_handle, -1, (np.pi/2, gripper_orientation[1] + increment, np.pi/2), vrep.simx_opmode_blocking)
# time.sleep(0.01)
# sim_ret,gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, UR5_target_handle, -1, vrep.simx_opmode_blocking)
# vrep.simxSetObjectOrientation(self.sim_client, UR5_target_handle, -1, (np.pi/2, tool_rotation_angle, np.pi/2), vrep.simx_opmode_blocking)
# # Approach place target
# self.move_to(position, None)
# # Ensure gripper is open
# self.open_gripper()
# # Move gripper to location above place target
# self.move_to(location_above_place_target, None)
nes.rs
use crate::apu::Apu;
use crate::dynamic_nes::DynamicNes;
use crate::mapper::{Mapper, PersistentState, PersistentStateError};
use crate::ppu::{Oam, Ppu, ScanlineIter};
use crate::timing;
use mos6502_model::debug::InstructionWithOperand;
use mos6502_model::machine::{Address, Cpu, Memory, MemoryReadOnly};
use nes_name_table_debug::NameTableFrame;
use nes_render_output::RenderOutput;
use serde::{Deserialize, Serialize};
use serde_big_array::big_array;
use std::io::{self, Write};
const RAM_BYTES: usize = 0x800;
big_array! { BigArray; }
#[derive(Clone, Serialize, Deserialize)]
struct NesDevices<M: Mapper> {
#[serde(with = "BigArray")]
ram: [u8; RAM_BYTES],
ppu: Ppu,
apu: Apu,
controller1: Controller,
mapper: M,
}
#[derive(Clone, Serialize, Deserialize)]
pub struct NesDevicesWithOam<M: Mapper> {
devices: NesDevices<M>,
oam: Oam,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Controller {
current_state: u8,
shift_register: u8,
strobe: bool,
}
mod controller {
pub mod bit {
pub const A: u8 = 0;
pub const B: u8 = 1;
pub const SELECT: u8 = 2;
pub const START: u8 = 3;
pub const UP: u8 = 4;
pub const DOWN: u8 = 5;
pub const LEFT: u8 = 6;
pub const RIGHT: u8 = 7;
}
pub mod flag {
use super::bit;
pub const A: u8 = 1 << bit::A;
pub const B: u8 = 1 << bit::B;
pub const SELECT: u8 = 1 << bit::SELECT;
pub const START: u8 = 1 << bit::START;
pub const UP: u8 = 1 << bit::UP;
pub const DOWN: u8 = 1 << bit::DOWN;
pub const LEFT: u8 = 1 << bit::LEFT;
pub const RIGHT: u8 = 1 << bit::RIGHT;
}
}
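// NES controller protocol, as implemented below: writing 1 to $4016 strobes
// (latches) the current button state into the shift register, writing 0 ends
// the strobe, and each subsequent read of $4016 returns one button bit in the
// order A, B, Select, Start, Up, Down, Left, Right.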
impl Controller {
fn new() -> Self {
Self {
current_state: 0,
shift_register: 0,
strobe: false,
}
}
fn set_strobe(&mut self) {
self.shift_register = self.current_state;
self.strobe = true;
}
fn clear_strobe(&mut self) {
self.strobe = false;
}
fn shift_read(&mut self) -> u8 {
let masked = self.shift_register & 1;
self.shift_register = self.shift_register.wrapping_shr(1);
masked
}
pub fn set_a(&mut self) {
self.current_state |= controller::flag::A;
}
pub fn set_b(&mut self) {
self.current_state |= controller::flag::B;
}
pub fn set_select(&mut self) {
self.current_state |= controller::flag::SELECT;
}
pub fn set_start(&mut self) {
self.current_state |= controller::flag::START;
}
pub fn set_left(&mut self) {
self.current_state |= controller::flag::LEFT;
}
pub fn set_right(&mut self) {
self.current_state |= controller::flag::RIGHT;
}
pub fn set_up(&mut self) {
self.current_state |= controller::flag::UP;
}
pub fn set_down(&mut self) {
self.current_state |= controller::flag::DOWN;
}
pub fn clear_a(&mut self) {
self.current_state &= !controller::flag::A;
}
pub fn clear_b(&mut self) {
self.current_state &= !controller::flag::B;
}
pub fn clear_select(&mut self) {
self.current_state &= !controller::flag::SELECT;
}
pub fn clear_start(&mut self) {
self.current_state &= !controller::flag::START;
}
pub fn clear_left(&mut self) {
self.current_state &= !controller::flag::LEFT;
}
pub fn clear_right(&mut self) {
self.current_state &= !controller::flag::RIGHT;
}
pub fn clear_up(&mut self) {
self.current_state &= !controller::flag::UP;
}
pub fn clear_down(&mut self) {
self.current_state &= !controller::flag::DOWN;
}
}
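// CPU memory map, as handled by the match arms below: $0000-$1FFF is the 2KiB
// internal RAM mirrored every $0800, $2000-$3FFF are the PPU registers
// mirrored every 8 bytes, $4016 is controller 1, the rest of $4000-$401F
// (APU/IO) reads as zero here, and all other addresses are routed to the
// cartridge mapper.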
impl<M: Mapper> Memory for NesDevices<M> {
fn read_u8(&mut self, address: Address) -> u8 {
let data = match address {
0..=0x1FFF => self.ram[address as usize % RAM_BYTES],
0x2000..=0x3FFF => match address % 8 {
0 => 0,
1 => 0,
2 => self.ppu.read_status(),
3 => 0,
5 => 0,
6 => 0,
7 => self.ppu.read_data(&self.mapper),
_ => unreachable!(),
},
0x4016 => self.controller1.shift_read(),
0x4000..=0x401F => 0,
cartridge_address => self.mapper.cpu_read_u8(cartridge_address),
};
data
}
fn read_u8_zero_page(&mut self, address: u8) -> u8 {
self.ram[address as usize]
}
fn read_u8_stack(&mut self, stack_pointer: u8) -> u8 {
self.ram[0x0100 | stack_pointer as usize]
}
fn write_u8(&mut self, address: Address, data: u8) {
match address {
0..=0x1FFF => self.ram[address as usize % RAM_BYTES] = data,
0x2000..=0x3FFF => match address % 8 {
0 => self.ppu.write_control(data),
1 => self.ppu.write_mask(data),
2 => (),
3 => self.ppu.write_oam_address(data),
5 => self.ppu.write_scroll(data),
6 => self.ppu.write_address(data),
7 => self.ppu.write_data(&mut self.mapper, data),
_ => unreachable!(),
},
0x4016 => {
if data & 1 != 0 {
self.controller1.set_strobe();
} else {
self.controller1.clear_strobe();
}
}
0x4000..=0x401F => {}
cartridge_address => self.mapper.cpu_write_u8(cartridge_address, data),
}
}
fn write_u8_zero_page(&mut self, address: u8, data: u8) {
self.ram[address as usize] = data;
}
fn write_u8_stack(&mut self, stack_pointer: u8, data: u8) {
self.ram[0x0100 | stack_pointer as usize] = data;
}
}
impl<M: Mapper> Memory for NesDevicesWithOam<M> {
fn read_u8(&mut self, address: Address) -> u8 {
match address {
0x2004 => self.devices.ppu.read_oam_data(&self.oam),
other => self.devices.read_u8(other),
}
}
fn write_u8(&mut self, address: Address, data: u8) {
match address {
0x4014 => self.oam.dma(&mut self.devices, data),
0x2004 => self.devices.ppu.write_oam_data(data, &mut self.oam),
other => self.devices.write_u8(other, data),
}
}
fn read_u8_zero_page(&mut self, address: u8) -> u8 {
self.devices.read_u8_zero_page(address)
}
fn read_u8_stack(&mut self, stack_pointer: u8) -> u8 {
self.devices.read_u8_stack(stack_pointer)
}
fn write_u8_zero_page(&mut self, address: u8, data: u8) {
self.devices.write_u8_zero_page(address, data);
}
fn write_u8_stack(&mut self, stack_pointer: u8, data: u8) {
self.devices.write_u8_stack(stack_pointer, data);
}
}
impl<M: Mapper> MemoryReadOnly for NesDevices<M> {
fn read_u8_read_only(&self, address: Address) -> u8 {
let data = match address {
0..=0x1FFF => self.ram[address as usize % RAM_BYTES],
0x2000..=0x401F => 0,
cartridge_address => self.mapper.cpu_read_u8_read_only(cartridge_address),
};
data
}
}
impl<M: Mapper> MemoryReadOnly for NesDevicesWithOam<M> {
fn read_u8_read_only(&self, address: Address) -> u8 {
self.devices.read_u8_read_only(address)
}
}
pub trait RunForCycles {
fn run_for_cycles<M: Memory + MemoryReadOnly>(
&mut self,
cpu: &mut Cpu,
memory: &mut M,
num_cycles: u32,
);
}
pub struct RunForCyclesRegular;
pub struct RunForCyclesDebug;
impl RunForCycles for RunForCyclesRegular {
fn run_for_cycles<M: Memory + MemoryReadOnly>(
&mut self,
cpu: &mut Cpu,
memory: &mut M,
num_cycles: u32,
) {
cpu.run_for_cycles(memory, num_cycles as usize).unwrap();
}
}
impl RunForCycles for RunForCyclesDebug {
fn run_for_cycles<M: Memory + MemoryReadOnly>(
&mut self,
cpu: &mut Cpu,
memory: &mut M,
num_cycles: u32,
) {
let mut count = 0;
while count < num_cycles {
if let Ok(instruction_with_operand) = InstructionWithOperand::next(cpu, memory) {
let stdout = io::stdout();
let mut handle = stdout.lock();
let _ = writeln!(handle, "{}", instruction_with_operand);
}
count += cpu.step(memory).unwrap() as u32;
}
}
}
#[derive(Clone, Serialize, Deserialize)]
pub struct Nes<M: Mapper> {
cpu: Cpu,
devices: NesDevicesWithOam<M>,
}
impl<M: Mapper> Nes<M> {
fn start(&mut self) {
self.cpu.start(&mut self.devices);
}
pub fn new(mapper: M) -> Self {
let mut nes = Nes {
cpu: Cpu::new(),
devices: NesDevicesWithOam {
devices: NesDevices {
ram: [0; RAM_BYTES],
ppu: Ppu::new(),
apu: Apu::new(),
controller1: Controller::new(),
mapper,
},
oam: Oam::new(),
},
};
nes.start();
nes
}
pub fn run_for_frame_general<R: RunForCycles, O: RenderOutput>(
&mut self,
run: &mut R,
pixels: &mut O,
mut name_table_frame: Option<&mut NameTableFrame>,
) {
// pre-render scanline
run.run_for_cycles(
&mut self.cpu,
&mut self.devices,
timing::ntsc::APPROX_CPU_CYCLES_PER_SCANLINE,
);
self.devices.devices.ppu.render_sprites(
&self.devices.devices.mapper,
&self.devices.oam,
pixels,
);
if let Some(ref mut name_table_frame) = name_table_frame {
self.devices
.devices
.ppu
.debug_render_name_table_frame(&self.devices.devices.mapper, name_table_frame);
}
let sprite_zero = self
.devices
.devices
.ppu
.sprite_zero(&self.devices.oam, &mut self.devices.devices.mapper);
for scanline in ScanlineIter::new() {
if let Some(ref mut name_table_frame) = name_table_frame {
name_table_frame.set_scroll(
scanline.index(),
self.devices.devices.ppu.scroll_x(),
self.devices.devices.ppu.scroll_y(),
);
}
run.run_for_cycles(
&mut self.cpu,
&mut self.devices,
timing::ntsc::APPROX_CPU_CYCLES_PER_SCANLINE,
);
if let Some(sprite_zero_hit) = self.devices.devices.ppu.render_background_scanline(
scanline,
&sprite_zero,
&self.devices.devices.mapper,
pixels,
) {
let pixels_after_sprite_zero_hit =
nes_specs::SCREEN_WIDTH_PX - sprite_zero_hit.screen_pixel_x() as u16;
let approx_cpu_cycles_after_sprite_zero_hit = pixels_after_sprite_zero_hit as u32
/ timing::ntsc::NUM_PPU_CYCLES_PER_CPU_CYCLE;
run.run_for_cycles(
&mut self.cpu,
&mut self.devices,
approx_cpu_cycles_after_sprite_zero_hit,
);
}
}
// post-render scanline
run.run_for_cycles(
&mut self.cpu,
&mut self.devices,
timing::ntsc::APPROX_CPU_CYCLES_PER_SCANLINE,
);
if self.devices.devices.ppu.is_vblank_nmi_enabled() {
self.cpu.nmi(&mut self.devices);
}
self.devices.devices.ppu.before_vblank();
run.run_for_cycles(
&mut self.cpu,
&mut self.devices,
timing::ntsc::APPROX_CPU_CYCLES_PER_VBLANK,
);
self.devices.devices.ppu.after_vblank();
}
pub fn run_for_frame<O: RenderOutput>(
&mut self,
pixels: &mut O,
name_table_frame: Option<&mut NameTableFrame>,
) {
self.run_for_frame_general(&mut RunForCyclesRegular, pixels, name_table_frame);
}
pub fn run_for_frame_debug<O: RenderOutput>(
&mut self,
pixels: &mut O,
name_table_frame: Option<&mut NameTableFrame>,
) {
self.run_for_frame_general(&mut RunForCyclesDebug, pixels, name_table_frame);
}
pub fn clone_dynamic_nes(&self) -> DynamicNes {
M::clone_dynamic_nes(self)
}
pub fn save_persistent_state(&self) -> Option<PersistentState> {
self.devices.devices.mapper.save_persistent_state()
}
pub fn load_persistent_state(
&mut self,
persistent_state: &PersistentState,
) -> Result<(), PersistentStateError> {
self.devices
.devices
.mapper
.load_persistent_state(persistent_state)
}
pub fn ppu(&self) -> &Ppu {
&self.devices.devices.ppu
}
pub fn controller1_mut(&mut self) -> &mut Controller {
&mut self.devices.devices.controller1
}
pub fn mapper(&self) -> &M {
&self.devices.devices.mapper
}
pub fn devices_with_oam(&self) -> &NesDevicesWithOam<M> {
&self.devices
}
}
pub mod controller1 {
use super::*;
pub mod press {
use super::*;
pub fn left<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.set_left();
}
pub fn right<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.set_right();
}
pub fn up<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.set_up();
}
pub fn down<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.set_down();
}
pub fn start<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.set_start();
}
pub fn select<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.set_select();
}
pub fn a<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.set_a();
}
pub fn b<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.set_b();
}
}
pub mod release {
use super::*;
pub fn left<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.clear_left();
}
pub fn right<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.clear_right();
}
pub fn up<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.clear_up();
}
pub fn down<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.clear_down();
}
pub fn start<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.clear_start();
}
pub fn select<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.clear_select();
}
pub fn a<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.clear_a();
}
pub fn b<M: Mapper>(nes: &mut Nes<M>) {
nes.devices.devices.controller1.clear_b();
}
}
}
style.js
import { makeStyles, Theme, createStyles } from "@material-ui/core/styles";
export default makeStyles((theme: Theme) =>
createStyles({
editPanelInput: {
marginBottom: 30
},
}),
);
carpe.js
/**
* Simple module definition library.
*
* Sample usage:
* <pre>
* module(["view", "model.core", "model.account"], function (view, modelCore, modelAccount) {
* var myView = view.MyView(new modelAccount.UserAccount(new modelCore.Money(100), "bob"));
* // ...
* });
* </pre>
*
* @author Alexander Shabanov
*/
window.module = (function () {
/* module root */
var fetchRootNs = (function () {
var rootNs;
return function fetchRootNs() {
if (rootNs !== undefined) {
return rootNs;
}
// search for namespace
if (typeof(window.app) === "undefined") {
rootNs = {};
console.log("Introducing new root namespace");
} else {
rootNs = window.app;
console.log("Using existing root namespace");
}
return rootNs;
}
}());
/**
* Fetches a specified dependency from the dependencies list
*/
function fetchDependency(dependency, dependencies) {
var depType = typeof(dependency);
if (depType === "string") {
// fetch a dependency from root namespace
var mods = dependency.split('.');
var result = fetchRootNs();
for (var i = 0; i < mods.length; ++i) {
var mod = mods[i];
if (mod in result) {
// refer to the existing module
result = result[mod];
} else {
// introduce new module
var newModule = {};
result[mod] = newModule;
result = newModule;
}
}
return result;
}
return dependency; // return dependency 'as is'
}
function module(dependencies, definitionFn) {
var fetchedDependencies = [];
for (var i = 0; i < dependencies.length; ++i) {
fetchedDependencies.push(fetchDependency(dependencies[i], dependencies));
}
return definitionFn.apply(this, fetchedDependencies);
}
return module;
} ());
/**
* Declares base domain object.
* Minimalistic backbone-alike definition of the domain objects without bells and whistles.
* Oriented to REST API with "shortened" field names.
*
* @author Alexander Shabanov
*/
module(["model"], function (model) {
function DomainObject(options) {
}
function createCtorFn(fieldToPayloadKeys, payloadToFieldKeys) {
// create object constructor
return function (options) {
if (!options) {
return;
}
// init-by-payload
if (typeof(options.payload) !== "undefined") {
for (var payloadKey in payloadToFieldKeys) {
if (payloadKey in options.payload) {
this[payloadKey] = options.payload[payloadKey];
}
}
}
// init-by-model
if (typeof(options.model) !== "undefined") {
for (var fieldKey in fieldToPayloadKeys) {
if (fieldKey in options.model) {
this[fieldToPayloadKeys[fieldKey]] = options.model[fieldKey];
}
}
}
};
}
DomainObject.define = function DomainObject_define(domainNamespace, objectName, mapping) {
var parameterMapping = mapping.parameters;
// prepare keys
var fieldToPayloadKeys = {};
var payloadToFieldKeys = {};
for (var key in parameterMapping) {
if (parameterMapping.hasOwnProperty(key)) {
var payloadKey = parameterMapping[key];
fieldToPayloadKeys[key] = payloadKey;
payloadToFieldKeys[payloadKey] = key;
}
}
var ctor = createCtorFn(fieldToPayloadKeys, payloadToFieldKeys);
ctor.name = objectName;
domainNamespace[objectName] = ctor;
// initialize getters
for (var fieldKey in fieldToPayloadKeys) {
var payloadKey = fieldToPayloadKeys[fieldKey];
// Create getter name, i.e. title => getTitle
var getterName = "get" + fieldKey.charAt(0).toUpperCase() + fieldKey.substring(1);
var getterFn = (function (payloadKey) {
return function (defaultValue) {
if (this.hasOwnProperty(payloadKey)) {
return this[payloadKey];
}
return defaultValue;
};
} (payloadKey));
getterFn.name = getterName;
ctor.prototype[getterName] = getterFn;
}
}
//
// Export
//
model.DomainObject = DomainObject;
});
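/**
 * Example (illustrative names): define a domain object whose "title" field is
 * stored under the shortened payload key "t".
 *
 * <pre>
 * module(["model", "model.domain"], function (model, domain) {
 *   model.DomainObject.define(domain, "Article", { parameters: { title: "t" } });
 *   var article = new domain.Article({ payload: { t: "Hello" } });
 *   article.getTitle(); // => "Hello"
 * });
 * </pre>
 */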
/**
* Simple Marionette-alike template-based view.
*
* @author Alexander Shabanov
*/
module(["view", "$"], function (view, $) {
function View(create) {
this.create = create;
this.ui = {};
}
View.prototype.prependTo = function ($target) {
this.create();
$target.prepend(this.$el);
return this;
}
View.prototype.appendTo = function ($target) {
this.create();
$target.append(this.$el);
return this;
}
View.prototype.onRender = function () {
// do nothing
}
View.prototype.remove = function () {
this.$el.remove();
}
function makeCreateViewFn(template, el, ui, events) {
var $template;
var $elem;
if (el) {
$elem = $(el);
if ($elem.size() === 0) {
throw new Error("There is no element associated with selector " + el);
}
}
if (!$elem) {
$template = $(template);
if ($template.size() !== 1) {
throw new Error("one template expected, got " + $template + " for " + template);
}
}
var eventBinderFns = []; // event binder functions
$.each(events, function (event, handler) {
var selectorIndex = event.indexOf(" ") + 1;
var selector = (selectorIndex > 0 ? event.substring(selectorIndex) : undefined);
event = (selectorIndex > 0 ? event.substring(0, selectorIndex - 1) : event);
eventBinderFns.push(function () {
var self = this;
var el = (selector ? $(selector, this.$el): this.$el);
el.on(event, function () {
return self[handler].apply(self, arguments);
});
});
});
return function createView() {
this.$el = $elem || $($template.text());
// activate ui
for (var e in ui) {
if (!ui.hasOwnProperty(e)) { continue; }
this.ui[e] = $(ui[e], this.$el);
}
// bind events
for (var e in eventBinderFns) {
if (!eventBinderFns.hasOwnProperty(e)) { continue; }
eventBinderFns[e].call(this);
}
this.onRender();
return this;
};
}
View.extend = function View_extend(viewOptions) {
if (typeof viewOptions !== "object") {
throw new Error("options argument is not an object");
}
viewOptions = $.extend({
ui: {},
events: {}
}, viewOptions); // safe copy with defaults
// make 'create view' lambda
var createView = makeCreateViewFn(viewOptions.template, viewOptions.el, viewOptions.ui, viewOptions.events);
// return view object
var newViewClass = function (options) {
options = options || {};
View.call(this, createView);
if (typeof options.model !== "undefined") {
this.model = options.model;
}
};
if (typeof viewOptions.name !== "undefined") {
newViewClass.name = viewOptions.name;
}
newViewClass.prototype.__proto__ = View.prototype;
// copy member functions - except for 'ui', 'template'
delete viewOptions.ui;
delete viewOptions.events;
delete viewOptions.template;
for (var memberName in viewOptions) {
if (viewOptions.hasOwnProperty(memberName)) {
newViewClass.prototype[memberName] = viewOptions[memberName];
}
}
return newViewClass;
}
//
// Export
//
view.View = View;
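// Example (illustrative template/selectors): a template-based view with a
// bound ui element and a click handler.
//
//   var ButtonView = View.extend({
//     template: "#button-template",
//     ui: { label: ".label" },
//     events: { "click .btn": "onClick" },
//     onClick: function () { this.ui.label.text("clicked"); }
//   });
//   new ButtonView().appendTo($("body"));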
});
printyamljson.py
#!/usr/bin/env python
import yaml, json
with open("testlist.yml", "r") as f:
y = yaml.load(f)
print "Here's the pretty YAML:"
print yaml.dump(y)
with open("testlist.json", "r") as f: | print json.dumps(j, indent=4) | j = json.load(f)
print "Here's the pretty JSON:" |
Tracing.py
# Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Outputs to the user.
Printing with indents or plain, mostly a compensation for the print strangeness.
We want to avoid "from __future__ import print_function" in every file out
there, which makes adding another debug print rather tedious. This should
cover all calls/uses of "print" we have to do, and make it easy to simply
"print" for debug without much hassle (braces).
"""
from __future__ import print_function
import os
import sys
import traceback
from contextlib import contextmanager
from nuitka.utils.ThreadedExecutor import RLock
# Written by Options module.
is_quiet = False
# We have to interact with displayed progress bars when doing our trace outputs.
progress = None
def printIndented(level, *what):
print(" " * level, *what)
def printSeparator(level=0):
print(" " * level, "*" * 10)
def printLine(*what):
print(*what)
def printError(message):
print(message, file=sys.stderr)
def flushStandardOutputs():
sys.stdout.flush()
sys.stderr.flush()
def getEnableStyleCode(style):
if style == "pink":
style = "\033[95m"
elif style == "blue":
style = "\033[94m"
elif style == "green":
style = "\033[92m"
elif style == "yellow":
style = "\033[93m"
elif style == "red":
style = "\033[91m"
elif style == "bold":
style = "\033[1m"
elif style == "underline":
style = "\033[4m"
else:
style = None
return style
_enabled_ansi = False
def _enableAnsi():
# singleton, pylint: disable=global-statement
global _enabled_ansi
if not _enabled_ansi:
# Only necessary on Windows, as a side effect of this, ANSI colors get enabled
# for the terminal and never deactivated, so we are free to use them after
# this.
if os.name == "nt":
os.system("")
_enabled_ansi = True
def getDisableStyleCode():
return "\033[0m"
# Locking seems necessary to avoid colored output split up.
trace_lock = RLock()
@contextmanager
def withTraceLock():
""" Hold a lock, so traces cannot be output at the same time mixing them up. """
trace_lock.acquire()
yield
trace_lock.release()
def my_print(*args, **kwargs):
"""Make sure we flush after every print.
Not even the "-u" option does more than that and this is easy enough.
Use the keyword argument style=<option> to print in one of the styles handled by getEnableStyleCode
"""
if progress:
progress.hideProgressBar()
with withTraceLock():
if "style" in kwargs:
style = kwargs["style"]
del kwargs["style"]
if "end" in kwargs:
end = kwargs["end"]
del kwargs["end"]
else:
end = "\n"
if style is not None and sys.stdout.isatty():
enable_style = getEnableStyleCode(style)
if enable_style is None:
raise ValueError(
"%r is an invalid value for keyword argument style" % style
)
_enableAnsi()
print(enable_style, end="", **kwargs)
print(*args, end=end, **kwargs)
if style is not None and sys.stdout.isatty():
print(getDisableStyleCode(), end="", **kwargs)
else:
print(*args, **kwargs)
# Flush the output.
kwargs.get("file", sys.stdout).flush()
if progress:
progress.resumeProgressBar()
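# Example usage (illustrative): my_print("Compiled successfully.", style="green")
# prints in green when stdout is a terminal, and falls back to plain output otherwise.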
class OurLogger(object):
def __init__(self, name, quiet=False, base_style=None):
self.name = name
self.base_style = base_style
self.is_quiet = quiet
def my_print(self, message, **kwargs):
# For overload, pylint: disable=no-self-use
my_print(message, **kwargs)
def warning(self, message, style="red"):
if self.name:
message = "%s:WARNING: %s" % (self.name, message)
else:
message = "WARNING: %s" % message
style = style or self.base_style
self.my_print(message, style=style, file=sys.stderr)
def sysexit(self, message, exit_code=1):
self.my_print("FATAL: %s" % message, style="red", file=sys.stderr)
sys.exit(exit_code)
def sysexit_exception(self, message, exception, exit_code=1):
self.my_print("FATAL: %s" % message, style="red", file=sys.stderr)
traceback.print_exc()
self.sysexit("FATAL:" + repr(exception), exit_code=exit_code)
def isQuiet(self):
return is_quiet or self.is_quiet
def info(self, message, style=None):
if not self.isQuiet():
if self.name:
message = "%s:INFO: %s" % (self.name, message)
style = style or self.base_style
self.my_print(message, style=style)
class FileLogger(OurLogger):
def __init__(self, name, quiet=False, base_style=None, file_handle=sys.stdout):
OurLogger.__init__(self, name=name, quiet=quiet, base_style=base_style)
self.file_handle = file_handle
def my_print(self, message, **kwargs):
message = message + "\n"
self.file_handle.write(message)
self.file_handle.flush()
def setFileHandle(self, file_handle):
self.file_handle = file_handle
def info(self, message, style=None):
if not self.isQuiet() or self.file_handle is not sys.stdout:
message = "%s:INFO: %s" % (self.name, message)
style = style or self.base_style
self.my_print(message, style=style)
def debug(self, message, style=None):
if self.file_handle is not sys.stdout:
message = "%s:DEBUG: %s" % (self.name, message)
style = style or self.base_style
self.my_print(message, style=style)
def info_fileoutput(self, message, other_logger, style=None):
if self.file_handle is not sys.stdout:
self.info(message, style=style)
other_logger.info(message, style=style)
general = OurLogger("Nuitka")
codegen_missing = OurLogger("Nuitka-codegen-missing")
plugins_logger = OurLogger("Nuitka-Plugins")
recursion_logger = OurLogger("Nuitka-Recursion")
progress_logger = OurLogger("Nuitka-Progress", quiet=True)
memory_logger = OurLogger("Nuitka-Memory")
dependencies_logger = OurLogger("Nuitka-Dependencies")
optimization_logger = FileLogger("Nuitka-Optimization")
codegen_logger = OurLogger("Nuitka-Codegen")
inclusion_logger = FileLogger("Nuitka-Inclusion")
scons_logger = OurLogger("Nuitka-Scons")
scons_details_logger = OurLogger("Nuitka-Scons")
postprocessing_logger = OurLogger("Nuitka-Postprocessing")
options_logger = OurLogger("Nuitka-Options")
unusual_logger = OurLogger("Nuitka-Unusual")
datacomposer_logger = OurLogger("Nuitka-Datacomposer")
files_test.go | // Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package files
import (
"reflect"
"sort"
"testing"
"github.com/coreos/ignition/v2/config/v3_3_experimental/types"
"github.com/coreos/ignition/v2/internal/exec/util"
)
func TestEntrySort(t *testing.T) {
type in struct {
data []types.Directory
}
type out struct {
data []types.Directory
}
tests := []struct {
in in | in: in{data: []types.Directory{
{
Node: types.Node{
Path: "/a/b/c/d/e/",
},
},
{
Node: types.Node{
Path: "/a/b/c/d/",
},
},
{
Node: types.Node{
Path: "/a/b/c/",
},
},
{
Node: types.Node{
Path: "/a/b/",
},
},
{
Node: types.Node{
Path: "/a/",
},
},
}},
out: out{data: []types.Directory{
{
Node: types.Node{
Path: "/a/",
},
},
{
Node: types.Node{
Path: "/a/b/",
},
},
{
Node: types.Node{
Path: "/a/b/c/",
},
},
{
Node: types.Node{
Path: "/a/b/c/d/",
},
},
{
Node: types.Node{
Path: "/a/b/c/d/e/",
},
},
}},
},
{
in: in{data: []types.Directory{
{
Node: types.Node{
Path: "/a////b/c/d/e/",
},
},
{
Node: types.Node{
Path: "/",
},
},
{
Node: types.Node{
Path: "/a/b/c//d/",
},
},
{
Node: types.Node{
Path: "/a/b/c/",
},
},
{
Node: types.Node{
Path: "/a/b/",
},
},
{
Node: types.Node{
Path: "/a/",
},
},
}},
out: out{data: []types.Directory{
{
Node: types.Node{
Path: "/",
},
},
{
Node: types.Node{
Path: "/a/",
},
},
{
Node: types.Node{
Path: "/a/b/",
},
},
{
Node: types.Node{
Path: "/a/b/c/",
},
},
{
Node: types.Node{
Path: "/a/b/c//d/",
},
},
{
Node: types.Node{
Path: "/a////b/c/d/e/",
},
},
}},
},
{
in: in{data: []types.Directory{
{
Node: types.Node{
Path: "/a/",
},
},
{
Node: types.Node{
Path: "/a/../a/b",
},
},
{
Node: types.Node{
Path: "/",
},
},
}},
out: out{data: []types.Directory{
{
Node: types.Node{
Path: "/",
},
},
{
Node: types.Node{
Path: "/a/",
},
},
{
Node: types.Node{
Path: "/a/../a/b",
},
},
}},
},
}
for i, test := range tests {
entries := []filesystemEntry{}
for _, entry := range test.in.data {
entries = append(entries, dirEntry(entry))
}
sort.Slice(entries, func(i, j int) bool { return util.Depth(entries[i].node().Path) < util.Depth(entries[j].node().Path) })
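// Ordering sketch (assuming util.Depth counts path components): shallow
// paths sort before deep ones, e.g. "/" < "/a/" < "/a/b/", regardless of
// input order. Note sort.Slice is not stable, so equal-depth entries have
// no guaranteed relative order.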
outpaths := make([]types.Directory, len(test.in.data))
for j, dir := range entries {
outpaths[j].Node.Path = dir.node().Path
}
if !reflect.DeepEqual(test.out.data, outpaths) {
t.Errorf("#%d: bad error: want %v, got %v", i, test.out.data, outpaths)
}
}
} | out out
}{
{ |
communication.ts | import {
Observable,
Subject,
Subscription,
of,
BehaviorSubject
} from "rxjs";
import {
map,
flatMap,
catchError,
withLatestFrom,
tap
} from "rxjs/operators";
// import { createStore, StoreEnhancer } from "redux";
import {
WsMessage,
makeChatMessage,
CHAT_MESSAGE_ADD,
WsCommand,
MessageEvent as MsgEvent
} from "./message-types";
import {
createLoginAction,
chatMessageAction,
webcamCamAction,
remoteVideoAction
} from "./action-creators";
import { USER_CONNECTION_EVT } from "./types";
import { shuffle, take } from "../utils/utils";
// import { reducers } from "../state/store";
// type WindowWithDevTools = Window & {
// __REDUX_DEVTOOLS_EXTENSION__: () => StoreEnhancer<unknown, {}>
// };
// const isReduxDevtoolsExtensionExist = (
// arg: Window | WindowWithDevTools
// ): arg is WindowWithDevTools => {
// return "__REDUX_DEVTOOLS_EXTENSION__" in arg;
// };
// const foo = isReduxDevtoolsExtensionExist(window) ?
// window.__REDUX_DEVTOOLS_EXTENSION__() : undefined;
// const store = createStore(reducers, foo);
// type StoreType = typeof store;
const logger = console;
const log = logger.log;
export interface WSSetup {
auth: any,
loginAction: typeof createLoginAction,
chatAction: typeof chatMessageAction,
}
export interface ConnectionEvent {
connected_users: string[]
}
/** Type alias for a function that handles incoming WsMessages */
type Hdlr = (msg: WsMessage<any>) => void;
/** Type alias for the remote media streams */
type RemoteMediaStreams = Map<string, MediaStream | null>
export class | {
stream: MediaStream | null;
constructor(stream: MediaStream | null) {
this.stream = stream;
}
}
/**
* This class holds all the data and functionality for communication between clients
*
* @field user$: Holds username
* @field targets$: usernames that user clicks to do video chat with
* @field socket$: websocket object
* @field send$: Helper to let other clients send to websocket
* @field peer: FIXME make this a map of RTCPeerConnections
* @field #commHandlers: Map of handlers for CommandRequest messages
* @field streamLocal$: stream of local videocam streams
* @field streamRemotes$: stream of remote videocam streams
* @field cmdHandler: A map of string to handler functions to handle websocket messages
* @field transceiver: transceiver to control send/rcv messages
* @field videoOfferSubscription: Subscription to cancel Video Offer
* @field webcamDispatch: dispatch function to set webcam state in redux
* @field remoteVideoDispatch: typeof remoteVideoAction;
* @field evtMediaStream$: stream of MediaStream objects emitted by track events
* @field evtNegotiation$: Subject<Event>;
* @field evtVideoOffer$: Subject<string>;
* @field evtIceCandidate$: Subject<RTCPeerConnectionIceEvent>
*/
export class WebComm {
user$: Subject<string>;
targets$: Subject<string>;
socket$: Subject<WebSocket>;
send$: Subject<string>;
peer: RTCPeerConnection | null;
#commHandlers: Map<string, Hdlr>;
streamLocal$: BehaviorSubject<LocalMediaStream>;
streamRemotes$: BehaviorSubject<RemoteMediaStreams>;
cmdHandler: CommandHandler;
transceiver: RTCRtpTransceiver | null;
videoOfferSubscription: Subscription | null;
webcamDispatch: typeof webcamCamAction;
remoteVideoDispatch: typeof remoteVideoAction;
evtMediaStream$: Subject<MediaStream>;
evtNegotiation$: Subject<Event>;
evtVideoOffer$: Subject<WsMessage<WsCommand<string>>>;
evtIceCandidate$: Subject<RTCIceCandidate | null>
iceEvtSub: Subscription;
signout$: Subject<boolean>;
ping$: Subject<WsMessage<any>>;
currentUser: string;
testSock: WebSocket;
constructor(
webcamDispatch: typeof webcamCamAction,
remoteVideoDispatch: typeof remoteVideoAction,
//store: StoreType
) {
this.user$ = new Subject();
this.targets$ = new Subject(); // Stream of remote usernames
this.socket$ = new Subject();
this.peer = null;
this.#commHandlers = new Map();
this.streamLocal$ = new BehaviorSubject(new LocalMediaStream(null));
this.transceiver = null;
this.videoOfferSubscription = null;
this.signout$ = new Subject();
this.ping$ = new Subject();
this.currentUser = "";
this.testSock = new WebSocket("ws://localhost:13172")
/** Helper for sending over the websocket */
const { sock$, subscription: socksub } = this.makeSocketStream();
this.send$ = sock$;
/** Dispatches to hook into redux */
this.webcamDispatch = webcamDispatch;
this.remoteVideoDispatch = remoteVideoDispatch;
/** Streams that have events from various RTC state */
this.evtMediaStream$ = new Subject();
this.evtNegotiation$ = new Subject();
this.evtVideoOffer$ = new Subject();
this.evtIceCandidate$ = new Subject();
this.iceEvtSub = this.configIceCandidateEventStream();
const initRemoteStream: RemoteMediaStreams = new Map();
this.streamRemotes$ = new BehaviorSubject(initRemoteStream);
this.cmdHandler = new CommandHandler(this);
this.setupCmdHandlers();
this.initHandleTrackEvent();
this.initWebsockOnUser();
this.initSignoutStream();
}
initSignoutStream = () => {
this.signout$.pipe(
withLatestFrom(this.socket$)
).subscribe({
next: ([res, socket]) => {
if (!res) return
socket.close();
this.user$.next("");
}
});
}
/**
* Once we get a new user pushed to user$, we need to create a websocket
*/
initWebsockOnUser = () => {
this.user$.subscribe({
next: (user) => {
logger.log("In user$, got user", user)
if (user === "") {
this.currentUser = user;
return;
}
if (user !== this.currentUser) {
logger.log("Setting user to ", user);
this.currentUser = user;
this.createSocket(user);
} else {
logger.warn("Same user, reusing existing socket");
}
}
});
}
/**
* When we get event from handleTrackEvent, it will push a MediaStream into evtMediaStream$. We
* combine this with our latest target value. Then, we push the map of {target:stream} to
* streamRemotes$.
*/
initHandleTrackEvent = () => {
this.evtMediaStream$.pipe(
withLatestFrom(this.targets$)
).subscribe({
next: ([stream, target]) => {
const localStream = this.streamLocal$.value.stream;
if (localStream && localStream.id === stream.id) {
logger.log("Event was for local stream, returning");
return;
}
logger.info("Got track event for", target, stream);
let obj: Map<string, MediaStream> = new Map();
obj.set(target, stream);
// Send event to dispatch so that the VideoStream component will update
this.remoteVideoDispatch(obj, "REMOTE_EVENT");
}
});
}
createSocket = (user: string) => {
const origin = window.location.host;
// FIXME: Add JWT token
const url = `wss://${origin}/chat/${user}`;
logger.log(`Connecting to ${url}`);
this.socket$.next(new WebSocket(url));
}
/**
* Creates a Subject that takes strings that are pre-subscribed to be sent by socket
*
* Instead of handing out references to the socket, we hand this out instead and user can do this:
*
* ```typescript
* let { sock$, subscription } = webcomm.makeSocketStream();
* sock$.next("hello world");
* subscription.unsubscribe();
* ```
*/
makeSocketStream = () => {
let sock$: Subject<string> = new Subject();
let msgSock$ = sock$.pipe(
withLatestFrom(this.socket$)
)
let subscription = msgSock$.subscribe(([msg, socket]) => socket.send(msg));
return {sock$, subscription};
}
/**
* Handles Websocket intialization when the user selects Menu -> Chat
*
* This is where we sort out the messages received by the websocket.
*/
socketSetup = (props: WSSetup) => {
this.socket$.subscribe({
next: (socket) => {
socket.onmessage = (evt: MessageEvent) => {
const msg: WsMessage<any> = JSON.parse(evt.data);
const auth = props.auth;
// Log the events separately, so we don't get a flood of messages from Ping/Pong events
switch (msg.event_type) {
case "Disconnect":
case "Connect":
logger.log(`Got ${msg.event_type} websocket event`, msg);
const {connected_users} = msg.body as ConnectionEvent;
logger.log("Connected users: ", connected_users);
props.loginAction(connected_users, "", auth, USER_CONNECTION_EVT);
logger.log(`loginAction is`, props.loginAction);
break;
case "Data":
logger.log(`Got ${msg.event_type} websocket event`, msg);
break;
case "Message":
logger.log(`Got ${msg.event_type} websocket event`, msg);
props.chatAction(makeChatMessage(msg), CHAT_MESSAGE_ADD);
break;
case "CommandRequest":
// Pass it to the commandHandler
this.commandHandler(msg).catch(logger.error);
break;
default:
logger.log("Unknown message type", msg.event_type);
}
};
},
error: (err) => logger.error(err),
complete: () => logger.info("this.socket$ got complete event")
})
}
/**
* Dynamically adds handlers for commands
*/
addCmdHdlr = (action: string, handler: Hdlr) => {
this.#commHandlers.set(action, handler);
}
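// Usage sketch (hypothetical op name): handlers may be registered at any
// time before the matching CommandRequest arrives, e.g.
//
//   webcomm.addCmdHdlr("EditorSync", (msg) => logger.log(msg.body));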
commandHandler = async (msg: WsMessage<any>) => {
const cmd = msg.body as WsCommand<any>;
logger.debug("command is =", cmd);
const hdlr = this.#commHandlers.get(cmd.cmd.op);
if (!hdlr) {
logger.warn(`No handler for ${cmd.cmd.op}. No action taken`);
return;
}
hdlr(msg);
}
/**
* Sets up our initial commandHandlers
*
* Later, we can dynamically add handlers. This is why we do it this way instead of as a switch
* statement.
*/
setupCmdHandlers = () => {
this.addCmdHdlr("Ping", this.cmdHandler.initPingRequestHandler);
this.addCmdHdlr("SDPOffer", this.cmdHandler.handleVideoOfferMsg);
this.addCmdHdlr("SDPAnswer", this.cmdHandler.handleVideoAnswerMsg);
this.addCmdHdlr("IceCandidate", this.cmdHandler.handleNewICECandidateMsg);
}
/**
* This is the main function that sets up the RTCPeerConnection, which in turn sets up our ICE
* establishment
*/
createPeerConnection = () => {
let urls = [
"stun:stun.l.google.com:19305",
"stun:stun1.l.google.com:19305",
"stun:stun2.l.google.com:19305",
"stun:stun3.l.google.com:19305",
"stun:stun4.l.google.com:19305",
];
// pick 2 at random since the browser logs a console warning if more than 2 are used
shuffle(urls);
urls = take(urls)(2)
const peer = new RTCPeerConnection({
iceServers: [
{
urls
}
]
});
if (!peer) {
throw new Error("Unable to create RTCPeerConnection");
}
peer.onicecandidate = this.handleICECandidateEvent;
peer.oniceconnectionstatechange = this.handleICEConnectionStateChangeEvent;
peer.onicegatheringstatechange = this.handleICEGatheringStateChangeEvent;
peer.onsignalingstatechange = this.handleSignalingStateChangeEvent;
peer.onnegotiationneeded = this.handleNegotiationNeededEvent;
peer.ontrack = this.handleTrackEvent;
this.negotiationTargetSetup(peer);
return peer;
}
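// take() is assumed to be a curried helper from ../utils/utils, so
// take(urls)(2) returns the first two entries of the shuffled list, e.g.
// take(["a", "b", "c"])(2) -> ["a", "b"].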
configIceCandidateEventStream = () => {
let userAndTarget$ = this.targets$.pipe(
withLatestFrom(this.user$)
);
const iceHandler$ = this.evtIceCandidate$.pipe(
withLatestFrom(userAndTarget$),
map(([candidate, [receiver, user]]) => {
if (receiver === "") {
logger.debug("Dummy value for target");
return null;
}
if (candidate === null) {
logger.debug("No candidate in event");
return null;
}
const mesg = makeWsICECandMsg(user, receiver, {
type: "new-ice-candidate",
candidate: JSON.stringify(candidate)
});
return mesg
})
)
return iceHandler$.subscribe({
next: (msg) => {
if (msg === null) {
logger.debug("Unable to create new-ice-candidate message");
return;
}
this.send$.next(JSON.stringify(msg));
},
error: err => logger.error(err),
complete: () => logger.log("User subject has completed")
});
}
handleICECandidateEvent = (event: RTCPeerConnectionIceEvent) => {
if (event.candidate) {
logger.debug("*** Outgoing ICE candidate: " + event.candidate.candidate);
this.evtIceCandidate$.next(event.candidate)
} else {
logger.warn("no candidate in event", event);
}
};
negotiationTargetSetup = (peer: RTCPeerConnection) => {
const { send$ } = this;
const userAndTarget$ = this.targets$.pipe(
withLatestFrom(this.user$)
)
const handle$ = this.evtNegotiation$.pipe(
withLatestFrom(userAndTarget$),
flatMap(([_, [sender, user]]) => {
logger.log("Creating offer for: ", sender);
return peer.createOffer().then((offer) => {
return { sender, user, offer };
});
}),
map((state) => {
const { offer } = state;
if (peer.signalingState !== "stable") {
logger.log(" -- The connection isn't stable yet; postponing...");
return of(state);
}
// Establish the offer as the local peer's current description.
logger.log("---> Setting local description to the offer");
return peer.setLocalDescription(offer).then((_) => {
return state;
});
}),
flatMap(state => state),
map((state) => {
const { sender, user } = state;
// Send the offer to the remote peer. This will be received by the remote websocket
logger.log(`---> Sending the offer to the remote peer ${sender}`);
logger.debug("---> peer.localDescription", peer.localDescription);
let sdp = new RTCSessionDescription({
type: "offer",
sdp: JSON.stringify(peer.localDescription)
});
logger.debug("---> sdp is ", sdp);
const msg = makeWsSDPMessage(user, sender, sdp);
send$.next(JSON.stringify(msg));
return true;
}),
catchError((err) => {
logger.error("Error occurred while handling the negotiationneeded event:", err);
return of(false);
})
);
handle$.subscribe({
next: res => logger.info(`Negotiation success was ${res}`)
});
}
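// Pipeline sketch for the subscription above: a negotiationneeded event is
// paired with the latest user/target, turned into an offer via
// createOffer(), applied with setLocalDescription() when signaling is
// "stable" (otherwise passed through unchanged), and finally wrapped in an
// SDPOffer WsMessage pushed onto send$.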
handleNegotiationNeededEvent = (evt: Event) => {
const {peer} = this;
if (!peer) {
logger.error("RTCPeerConnection not setup yet");
return;
}
logger.log("*** Negotiation needed");
this.evtNegotiation$.next(evt);
}
handleICEConnectionStateChangeEvent = (event: Event) => {
const {peer} = this;
if (!peer) {
logger.error("RTCPeerConnection not created yet");
return;
}
logger.log("*** ICE connection state changed to " + peer.iceConnectionState);
switch (peer.iceConnectionState) {
case "closed":
case "failed":
case "disconnected":
this.closeVideoCall();
break;
}
}
/**
* Handle the |icegatheringstatechange| event. This lets us know what the ICE engine is currently
* working on: "new" means no networking has happened yet, "gathering" means the ICE engine is
* currently gathering candidates, and "complete" means gathering is complete. Note that the
* engine can alternate between "gathering" and "complete" repeatedly as needs and
* circumstances change.
*
* We don't need to do anything when this happens, but we log it to the console so you can see
* what's going on when playing with the sample.
*/
handleICEGatheringStateChangeEvent = (event: Event) => {
if (!this.peer) {
logger.error("No RTCPeerConnection yet");
return;
}
logger.log("*** ICE gathering state changed to: " + this.peer.iceGatheringState);
};
/**
* Set up a |signalingstatechange| event handler. This will detect when
* the signaling connection is closed
*
* NOTE: This will actually move to the new RTCPeerConnectionState enum
* returned in the property RTCPeerConnection.connectionState when
* browsers catch up with the latest version of the specification!
* @param event
*/
handleSignalingStateChangeEvent = (event: Event) => {
if (!this.peer) {
logger.error("No RTCPeerConnection yet");
return;
}
logger.log("*** WebRTC signaling state changed to: " + this.peer.signalingState);
switch (this.peer.signalingState) {
case "closed":
this.closeVideoCall();
break;
}
};
/**
* Called by the WebRTC layer when events occur on the media tracks
* on our WebRTC call. This includes when streams are added to and
* removed from the call.
*
* track events include the following fields
*
* RTCRtpReceiver receiver
* MediaStreamTrack track
* MediaStream[] streams
* RTCRtpTransceiver transceiver
*
* In our case, we're just taking the first stream found and attaching
* it to the <video> element for incoming media.
*/
handleTrackEvent = (event: RTCTrackEvent) => {
// Here, we add the stream to the remote-video html element. We need to tell the chat container
// to add the remote-video element and display it and add the stream
logger.log("Handling track event. Sending MediaStream to remote", event.type);
event.streams.forEach(stream => {
this.evtMediaStream$.next(stream);
});
}
/**
* Closes the RTCPeerConnection
*/
closeVideoCall = () => {
const localVideo = document.getElementById("local_video") as HTMLVideoElement;
logger.log("Closing the call");
// Close the RTCPeerConnection
if (!this.peer) {
logger.error("RTCPeerConnection was null");
return;
}
logger.log("--> Closing the peer connection");
// Disconnect all our event listeners; we don't want stray events
// to interfere with the hangup while it's ongoing.
this.peer.ontrack = null;
this.peer.onicecandidate = null;
this.peer.oniceconnectionstatechange = null;
this.peer.onsignalingstatechange = null;
this.peer.onicegatheringstatechange = null;
this.peer.onnegotiationneeded = null;
// Stop all transceivers on the connection
this.peer.getTransceivers().forEach((transceiver) => {
transceiver.stop();
});
// Stop the webcam preview as well by pausing the <video>
// element, then stopping each of the getUserMedia() tracks
// on it.
if (localVideo && localVideo.srcObject) {
localVideo.pause();
const stream = localVideo.srcObject as MediaStream;
stream.getTracks().forEach((track) => {
track.stop();
});
}
// Close the peer connection
this.peer.close();
this.peer = null;
this.webcamDispatch({ active: false }, "WEBCAM_DISABLE");
};
handleGetUserMediaError = (e: Error) => {
logger.error(e);
switch (e.name) {
case "NotFoundError":
alert("Unable to open your call because no camera and/or microphone" +
"were found.");
break;
case "SecurityError":
case "PermissionDeniedError":
// Do nothing; this is the same as the user canceling the call.
break;
default:
alert("Error opening your camera and/or microphone: " + e.message);
break;
}
// Make sure we shut down our end of the RTCPeerConnection so we're
// ready to try again.
this.closeVideoCall();
}
}
export interface ICECandidateMessage {
type: "new-ice-candidate",
candidate: string // sdp candidate string describing offered protocol
}
export const makeWsICECandMsg = (sender: string, receiver: string, cand: ICECandidateMessage) => {
const msg: WsMessage<string> = {
sender,
recipients: [ receiver ],
time: Date.now(),
body: JSON.stringify({
cmd: {
op: "IceCandidate",
id: "",
ack: true
},
args: JSON.stringify(cand)
}),
event_type: "CommandRequest"
};
return msg;
};
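// Shape sketch of the produced message (values hypothetical); note that
// body and the candidate inside args are each JSON-encoded, so consumers
// parse twice:
//
//   {
//     sender: "alice", recipients: ["bob"], time: 1600000000000,
//     event_type: "CommandRequest",
//     body: '{"cmd":{"op":"IceCandidate","id":"","ack":true},"args":"..."}'
//   }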
/**
* Creates Websocket messages to be sent to remote user
*
* @param sender
* @param receiver
* @param sdp
* @param kind
*/
export const makeWsSDPMessage = (
sender: string,
receiver: string,
sdp: RTCSessionDescription,
kind: "SDPOffer" | "SDPAnswer" = "SDPOffer"
) => {
const msg: WsMessage<string> = {
sender,
recipients: [ receiver ],
event_type: "CommandRequest",
body: JSON.stringify({
cmd: {
op: kind,
id: "",
ack: kind === "SDPOffer" ? true : false
},
args: JSON.stringify(sdp)
}),
time: Date.now()
};
logger.debug("Created SDPOffer message", msg);
return msg;
};
export const makeGenericMsg = <T>(
sender: string,
receiver: string,
event_type: MsgEvent,
args: T,
op: string,
ack: boolean
) => {
const msg: WsMessage<string> = {
sender,
recipients: [ receiver ],
event_type,
body: JSON.stringify({
cmd: {
op,
id: "",
ack,
},
args
}),
time: Date.now()
};
return msg;
}
/**
* Handler for websocket messages
*/
class CommandHandler {
webcomm: WebComm;
streamLocalConfigured: boolean;
constructor(wc: WebComm) {
this.webcomm = wc;
this.streamLocalConfigured = false;
this.setupPingSubscription();
}
/**
* Sets up the stream that receives CommandRequest of Ping type
*
* We do all the setup of what the stream does here to avoid cluttering the constructor
*/
private setupPingSubscription = () => {
this.webcomm.ping$.pipe(
withLatestFrom(this.webcomm.user$)
).subscribe({
next: ([msg, user]) => {
const cmd = msg.body as WsCommand<any>;
const args = cmd.args as string[];
const replyMsg: WsMessage<string> = {
sender: msg.sender,
recipients: msg.recipients,
event_type: "CommandReply",
time: Date.now(),
body: JSON.stringify({
cmd: {
op: "pong",
ack: false,
id: user
},
args
})
};
this.webcomm.send$.next(JSON.stringify(replyMsg));
logger.debug("Sent reply: ", replyMsg);
}
});
}
/**
* Handles a Ping type of WsCommand, used as a keep alive mechanism
*
* @param socket
*/
initPingRequestHandler = async (msg: WsMessage<any>) => {
this.webcomm.ping$.next(msg);
}
/**
* Configures the webcomm.streamLocal$ stream for handling incoming SDPOffer messages
*/
private streamLocalConfig = (peer: RTCPeerConnection | null) => {
// This is all some ugliness to make the compiler happy
if (peer === null) {
throw new Error("Unable to create RTCPeerConnection");
}
this.streamLocalConfigured = true;
const finalPeer = peer;
// The WebComm dynamically gets MediaStream's as they are created and destroyed. So we have to
// subscribe to the stream of them.
const sdp$: Observable<{
msg: WsMessage<WsCommand<string>>,
success: boolean
}> = this.webcomm.evtVideoOffer$.pipe(
withLatestFrom(this.webcomm.streamLocal$),
// At this point, we should have a valid MediaStream. Create the remote session description from the offer
flatMap(([msg, { stream }]) => {
let args = JSON.parse(msg.body.args);
let sdp = JSON.parse(args.sdp);
logger.debug("Args in message from SDPOffer", args);
const desc = new RTCSessionDescription(sdp);
if (peer.signalingState !== "stable") {
// Set the local and remote descriptions for rollback; don't proceed until both have returned
logger.log(" - But the signaling state isn't stable, so triggering rollback");
return Promise.all([
peer.setLocalDescription({type: "rollback"}),
peer.setRemoteDescription(desc)
]).then(_ => {
return { stream, msg }
});
} else {
logger.log(" - Setting remote description");
return peer.setRemoteDescription(desc).then(() => {
return { stream , msg }
});
}
}),
tap(({ stream, msg }) => {
logger.log("streamLocalConfig: message is", msg, "stream is ", stream);
}),
// Check if our current MediaStream is null. If it is, create one, and restart
flatMap(({ stream, msg }) => {
if (stream === null) {
logger.log("Creating local media stream");
return navigator.mediaDevices.getUserMedia({ audio: true, video: true })
.then(stream => {
return { preset: false, stream, msg }
});
} else {
return of({ preset: true, stream, msg });
}
}),
map((res) => {
if (!res.preset) {
// Add this to our streamLocal$ so that <VideoStream /> can pick it up
this.webcomm.streamLocal$.next(new LocalMediaStream(res.stream));
this.webcomm.webcamDispatch({ active: true }, "WEBCAM_ENABLE");
logger.debug("Resetting media stream");
}
return res
}),
map(({ stream, msg }) => {
try {
logger.log("Adding track for transceiver");
stream.getTracks().forEach((track) => {
this.webcomm.transceiver = finalPeer.addTransceiver(track, {streams: [ stream ] });
});
} catch (err) {
this.webcomm.handleGetUserMediaError(err);
return {
msg,
success: false
};
}
return {
msg,
success: true
};
}),
flatMap((res) => {
const { msg, success} = res;
if (!success) {
logger.log("Unable to add tracks to transceiver");
return of({ msg, description: null});
}
log("---> Creating and sending answer to caller");
return finalPeer.createAnswer().then(description => {
return { msg, description }
});
}),
flatMap(({ msg, description}) => {
if (description === null) {
return of({ msg, success: false});
}
return finalPeer.setLocalDescription(description).then(() => {
return { msg, success: true}
});
})
);
const userAndSdp$ = sdp$.pipe(
withLatestFrom(this.webcomm.user$)
)
this.webcomm.videoOfferSubscription = userAndSdp$.subscribe({
next: ([{msg, success}, user]) => {
if (!success) {
logger.error("Unable to make SDP Message");
return;
}
if ( msg.sender === "") {
logger.error("No targets added yet. Waiting for real target");
return;
}
if (!finalPeer.localDescription) {
logger.error("RTCPeerConnection does not have a local description yet");
return;
}
logger.debug("Sending SDPAnswer with peer description: ", finalPeer.localDescription)
// Create the SDPMessage with the SDPAnswer
const mesg = makeWsSDPMessage(
user,
msg.sender,
finalPeer.localDescription,
"SDPAnswer");
this.webcomm.send$.next(JSON.stringify(mesg));
},
error: logger.error,
complete: () => logger.info("videoRefLocal$ is complete")
});
}
/**
* This is a handler for a WsCommand of SDPOffer
*
* We will get this when the remote client sends us an SDPOffer message over the websocket. Upon
* receipt we will do several things:
*
* - Create a RTCPeerConnection if one does not already exist
* - Add the sender of the message to our target$ stream
* - Create a RTCSessionDescription and set it to our peer
*/
handleVideoOfferMsg = async (msg: WsMessage<WsCommand<string>>) => {
let {peer} = this.webcomm;
if (!peer) {
logger.info("No RTCPeerConnection yet...creating");
peer = this.webcomm.createPeerConnection();
}
if (!this.streamLocalConfigured) {
logger.debug("Calling streamLocalConfig()");
this.streamLocalConfig(peer)
}
// This is all some ugliness to make the compiler happy
if (peer === null) {
throw new Error("Unable to create RTCPeerConnection");
}
this.webcomm.peer = peer;
logger.log("Received video chat offer from ", msg.sender);
// Trigger our stream handler by pushing in the sender to the evtVideoOffer$ stream
this.webcomm.evtVideoOffer$.next(msg);
// Update the latest target which is the message sender
this.webcomm.targets$.next(msg.sender);
}
/**
* Handler for when the callee has returned back an SDPAnswer message
*/
handleVideoAnswerMsg = async (msg: WsMessage<WsCommand<string>>) => {
if (!this.webcomm.peer) {
logger.error("No RTCPeerConnection yet");
return;
}
log("*** Call recipient has accepted our call");
// Configure the remote description, which is the SDP payload in our "video-answer" message.
const sdp: RTCSessionDescription = JSON.parse(msg.body.args) as RTCSessionDescription;
logger.debug("SDP Answer is: ", sdp);
var desc = new RTCSessionDescription(sdp);
await this.webcomm.peer.setRemoteDescription(desc).catch(logger.error);
}
handleNewICECandidateMsg = async (msg: WsMessage<WsCommand<string>>) => {
if (!this.webcomm.peer) {
logger.error("No RTCPeerConnection yet");
return;
}
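// args decodes to an object whose candidate field is itself a JSON string
// (see makeWsICECandMsg), hence the second parse below.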
let candidate = JSON.parse(msg.body.args);
candidate = JSON.parse(candidate.candidate);
candidate = new RTCIceCandidate(candidate);
logger.debug("*** Adding received ICE candidate: ", candidate);
try {
await this.webcomm.peer.addIceCandidate(candidate)
} catch(err) {
logger.warn(err);
}
}
handleEditorMsg = (msg: WsMessage<WsCommand<string>>) => {
}
}
/**
this.evtTrack$.pipe(
withLatestFrom(this.targets$)
).subscribe({
next: ([event, target]) => {
const { track } = event;
const fn = (s: MediaStream, kind: string) => {
logger.log(`Got track event for ${kind} MediaStream`, s.id);
log("Checking stream", s.id);
if(s.getTracks().filter(t => t.id === track.id).length) {
logger.log("Track already exists on ", s.id);
return false;
}
if (this.peer) {
logger.log("Adding track to peer", track);
this.peer.addTrack(track);
return true
} else {
logger.error("No RTCPeerConnection to add Track to");
return false
}
}
event.streams.forEach(stream => {
const localStream = this.streamLocal$.value.stream;
if (localStream && localStream.id === stream.id) {
// FIXME: Do we need to update the VideoStream component if fn returns true?
fn(stream, "local");
} else {
// Check the new track, and if we added it, send a new REMOTE_EVENT to update component
if (fn(stream, "remote")) {
let obj: Map<string, MediaStream> = new Map();
obj.set(target, stream);
// Send event to dispatch so that the VideoStream component will update
this.remoteVideoDispatch(obj, "REMOTE_EVENT");
}
}
})
}
});
*/ | LocalMediaStream |
model_query.go | // generated by qbg -output misc/fixture/f/model_query.go misc/fixture/f; DO NOT EDIT
package f
import (
"github.com/favclip/qbg/qbgutils"
"google.golang.org/appengine/datastore"
)
// SampleQueryBuilder build query for Sample.
type SampleQueryBuilder struct {
q *datastore.Query
plugin qbgutils.Plugin
Kind *SampleQueryProperty
Foo *SampleQueryProperty
}
// SampleQueryProperty has property information for SampleQueryBuilder.
type SampleQueryProperty struct {
bldr *SampleQueryBuilder
name string
}
// NewSampleQueryBuilder create new SampleQueryBuilder.
func NewSampleQueryBuilder() *SampleQueryBuilder {
return NewSampleQueryBuilderWithKind("sample_kind")
}
// NewSampleQueryBuilderWithKind create new SampleQueryBuilder with specific kind.
func NewSampleQueryBuilderWithKind(kind string) *SampleQueryBuilder {
q := datastore.NewQuery(kind)
bldr := &SampleQueryBuilder{q: q}
bldr.Kind = &SampleQueryProperty{
bldr: bldr,
name: "Kind",
}
bldr.Foo = &SampleQueryProperty{
bldr: bldr,
name: "Foo",
}
if plugger, ok := interface{}(bldr).(qbgutils.Plugger); ok {
bldr.plugin = plugger.Plugin()
bldr.plugin.Init("Sample")
}
return bldr
}
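// Usage sketch (hypothetical filter values):
//
//	q := NewSampleQueryBuilder().
//		Foo.GreaterThan(10).
//		Kind.Asc().
//		Limit(5).
//		Query()
//	_ = q // hand off to datastore.Run / datastore.GetAll as usual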
// Ancestor sets parent key to ancestor query.
func (bldr *SampleQueryBuilder) Ancestor(parentKey *datastore.Key) *SampleQueryBuilder {
bldr.q = bldr.q.Ancestor(parentKey)
if bldr.plugin != nil {
bldr.plugin.Ancestor(parentKey)
}
return bldr
}
// KeysOnly sets keys only option to query.
func (bldr *SampleQueryBuilder) KeysOnly() *SampleQueryBuilder {
bldr.q = bldr.q.KeysOnly()
if bldr.plugin != nil {
bldr.plugin.KeysOnly()
}
return bldr
}
// Start setup to query.
func (bldr *SampleQueryBuilder) Start(cur datastore.Cursor) *SampleQueryBuilder {
bldr.q = bldr.q.Start(cur)
if bldr.plugin != nil {
bldr.plugin.Start(cur)
}
return bldr
}
// Offset setup to query.
func (bldr *SampleQueryBuilder) Offset(offset int) *SampleQueryBuilder {
bldr.q = bldr.q.Offset(offset)
if bldr.plugin != nil {
bldr.plugin.Offset(offset)
}
return bldr
}
// Limit setup to query.
func (bldr *SampleQueryBuilder) Limit(limit int) *SampleQueryBuilder {
bldr.q = bldr.q.Limit(limit)
if bldr.plugin != nil {
bldr.plugin.Limit(limit)
}
return bldr
}
// Query returns *datastore.Query.
func (bldr *SampleQueryBuilder) Query() *datastore.Query {
return bldr.q
}
// Filter with op & value.
func (p *SampleQueryProperty) Filter(op string, value interface{}) *SampleQueryBuilder {
switch op {
case "<=":
p.LessThanOrEqual(value)
case ">=":
p.GreaterThanOrEqual(value)
case "<":
p.LessThan(value)
case ">":
p.GreaterThan(value)
case "=":
p.Equal(value)
default:
p.bldr.q = p.bldr.q.Filter(p.name+" "+op, value) // error raised by native query
}
if p.bldr.plugin != nil {
p.bldr.plugin.Filter(p.name, op, value)
}
return p.bldr
}
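// e.g. bldr.Foo.Filter(">=", 10) routes to GreaterThanOrEqual, while an
// unrecognized op such as "IN" is passed straight through to
// datastore.Query.Filter, which reports the error natively.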
// LessThanOrEqual filter with value.
func (p *SampleQueryProperty) LessThanOrEqual(value interface{}) *SampleQueryBuilder {
p.bldr.q = p.bldr.q.Filter(p.name+" <=", value)
if p.bldr.plugin != nil { |
// GreaterThanOrEqual filter with value.
func (p *SampleQueryProperty) GreaterThanOrEqual(value interface{}) *SampleQueryBuilder {
p.bldr.q = p.bldr.q.Filter(p.name+" >=", value)
if p.bldr.plugin != nil {
p.bldr.plugin.Filter(p.name, ">=", value)
}
return p.bldr
}
// LessThan filter with value.
func (p *SampleQueryProperty) LessThan(value interface{}) *SampleQueryBuilder {
p.bldr.q = p.bldr.q.Filter(p.name+" <", value)
if p.bldr.plugin != nil {
p.bldr.plugin.Filter(p.name, "<", value)
}
return p.bldr
}
// GreaterThan filter with value.
func (p *SampleQueryProperty) GreaterThan(value interface{}) *SampleQueryBuilder {
p.bldr.q = p.bldr.q.Filter(p.name+" >", value)
if p.bldr.plugin != nil {
p.bldr.plugin.Filter(p.name, ">", value)
}
return p.bldr
}
// Equal filter with value.
func (p *SampleQueryProperty) Equal(value interface{}) *SampleQueryBuilder {
p.bldr.q = p.bldr.q.Filter(p.name+" =", value)
if p.bldr.plugin != nil {
p.bldr.plugin.Filter(p.name, "=", value)
}
return p.bldr
}
// Asc order.
func (p *SampleQueryProperty) Asc() *SampleQueryBuilder {
p.bldr.q = p.bldr.q.Order(p.name)
if p.bldr.plugin != nil {
p.bldr.plugin.Asc(p.name)
}
return p.bldr
}
// Desc order.
func (p *SampleQueryProperty) Desc() *SampleQueryBuilder {
p.bldr.q = p.bldr.q.Order("-" + p.name)
if p.bldr.plugin != nil {
p.bldr.plugin.Desc(p.name)
}
return p.bldr
} | p.bldr.plugin.Filter(p.name, "<=", value)
}
return p.bldr
} |
select.js | /**
* angular-strap
* @version v2.0.0-rc.1 - 2014-01-28
* @link http://mgcrea.github.io/angular-strap
* @author mgcrea
* @license MIT License, http://www.opensource.org/licenses/MIT
*/
'use strict';
angular.module('mgcrea.ngStrap.select', [
'mgcrea.ngStrap.tooltip',
'mgcrea.ngStrap.helpers.parseOptions'
]).provider('$select', function () {
var defaults = this.defaults = {
animation: 'animation-fade',
prefixClass: 'select',
placement: 'bottom-left',
template: 'select/select.tpl.html',
trigger: 'focus',
container: false,
keyboard: true,
html: false,
delay: 0,
multiple: false,
sort: true,
caretHtml: ' <span class="caret"></span>',
placeholder: 'Choose among the following...'
};
this.$get = [
'$window',
'$document',
'$rootScope',
'$tooltip',
function ($window, $document, $rootScope, $tooltip) {
var bodyEl = angular.element($window.document.body);
var isTouch = 'createTouch' in $window.document;
function | (element, controller, config) {
var $select = {};
var options = angular.extend({}, defaults, config);
$select = $tooltip(element, options);
var parentScope = config.scope;
var scope = $select.$scope;
scope.$matches = [];
scope.$activeIndex = 0;
scope.$isMultiple = options.multiple;
scope.$activate = function (index) {
scope.$$postDigest(function () {
$select.activate(index);
});
};
scope.$select = function (index, evt) {
scope.$$postDigest(function () {
$select.select(index);
});
};
scope.$isVisible = function () {
return $select.$isVisible();
};
scope.$isActive = function (index) {
return $select.$isActive(index);
};
$select.update = function (matches) {
scope.$matches = matches;
if (controller.$modelValue && matches.length) {
if (options.multiple && angular.isArray(controller.$modelValue)) {
scope.$activeIndex = controller.$modelValue.map(function (value) {
return $select.$getIndex(value);
});
} else {
scope.$activeIndex = $select.$getIndex(controller.$modelValue);
}
} else if (scope.$activeIndex >= matches.length) {
scope.$activeIndex = options.multiple ? [] : 0;
}
};
$select.activate = function (index) {
if (options.multiple) {
scope.$activeIndex.sort();
$select.$isActive(index) ? scope.$activeIndex.splice(scope.$activeIndex.indexOf(index), 1) : scope.$activeIndex.push(index);
if (options.sort)
scope.$activeIndex.sort();
} else {
scope.$activeIndex = index;
}
return scope.$activeIndex;
};
$select.select = function (index) {
var value = scope.$matches[index].value;
$select.activate(index);
if (options.multiple) {
controller.$setViewValue(scope.$activeIndex.map(function (index) {
return scope.$matches[index].value;
}));
} else {
controller.$setViewValue(value);
}
controller.$render();
if (parentScope)
parentScope.$digest();
if (!options.multiple) {
if (options.trigger === 'focus')
element[0].blur();
else if ($select.$isShown)
$select.hide();
}
scope.$emit('$select.select', value, index);
};
$select.$isVisible = function () {
if (!options.minLength || !controller) {
return scope.$matches.length;
}
return scope.$matches.length && controller.$viewValue.length >= options.minLength;
};
$select.$isActive = function (index) {
if (options.multiple) {
return scope.$activeIndex.indexOf(index) !== -1;
} else {
return scope.$activeIndex === index;
}
};
$select.$getIndex = function (value) {
var l = scope.$matches.length, i = l;
if (!l)
return;
for (i = l; i--;) {
if (scope.$matches[i].value === value)
break;
}
if (i < 0)
return;
return i;
};
$select.$onElementMouseDown = function (evt) {
evt.preventDefault();
evt.stopPropagation();
if ($select.$isShown) {
element[0].blur();
} else {
element[0].focus();
}
};
$select.$onMouseDown = function (evt) {
evt.preventDefault();
evt.stopPropagation();
if (isTouch) {
var targetEl = angular.element(evt.target);
targetEl.triggerHandler('click');
}
};
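// Key handling sketch for $onKeyDown below: keyCode 13 (enter) selects the
// active match, 38 (up arrow) and 40 (down arrow) move the active index;
// all other keys fall through untouched via the early return.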
$select.$onKeyDown = function (evt) {
if (!/(38|40|13)/.test(evt.keyCode))
return;
evt.preventDefault();
evt.stopPropagation();
if (evt.keyCode === 13) {
return $select.select(scope.$activeIndex);
}
if (evt.keyCode === 38 && scope.$activeIndex > 0)
scope.$activeIndex--;
else if (evt.keyCode === 40 && scope.$activeIndex < scope.$matches.length - 1)
scope.$activeIndex++;
else if (angular.isUndefined(scope.$activeIndex))
scope.$activeIndex = 0;
scope.$digest();
};
var _init = $select.init;
$select.init = function () {
_init();
element.on(isTouch ? 'touchstart' : 'mousedown', $select.$onElementMouseDown);
};
var _destroy = $select.destroy;
$select.destroy = function () {
_destroy();
element.off(isTouch ? 'touchstart' : 'mousedown', $select.$onElementMouseDown);
};
var _show = $select.show;
$select.show = function () {
_show();
if (options.multiple) {
$select.$element.addClass('select-multiple');
}
setTimeout(function () {
$select.$element.on(isTouch ? 'touchstart' : 'mousedown', $select.$onMouseDown);
if (options.keyboard) {
element.on('keydown', $select.$onKeyDown);
}
});
};
var _hide = $select.hide;
$select.hide = function () {
$select.$element.off(isTouch ? 'touchstart' : 'mousedown', $select.$onMouseDown);
if (options.keyboard) {
element.off('keydown', $select.$onKeyDown);
}
_hide();
};
return $select;
}
SelectFactory.defaults = defaults;
return SelectFactory;
}
];
}).directive('bsSelect', [
'$window',
'$parse',
'$q',
'$select',
'$parseOptions',
function ($window, $parse, $q, $select, $parseOptions) {
var defaults = $select.defaults;
return {
restrict: 'EAC',
require: 'ngModel',
link: function postLink(scope, element, attr, controller) {
var options = { scope: scope };
angular.forEach([
'placement',
'container',
'delay',
'trigger',
'keyboard',
'html',
'animation',
'template',
'placeholder',
'multiple'
], function (key) {
if (angular.isDefined(attr[key]))
options[key] = attr[key];
});
var parsedOptions = $parseOptions(attr.ngOptions);
var select = $select(element, controller, options);
scope.$watch(parsedOptions.$match[7], function (newValue, oldValue) {
parsedOptions.valuesFn(scope, controller).then(function (values) {
select.update(values);
controller.$render();
});
});
controller.$render = function () {
var selected, index;
if (options.multiple && angular.isArray(controller.$modelValue)) {
selected = controller.$modelValue.map(function (value) {
index = select.$getIndex(value);
return angular.isDefined(index) ? select.$scope.$matches[index].label : false;
}).filter(angular.isDefined).join(', ');
} else {
index = select.$getIndex(controller.$modelValue);
selected = angular.isDefined(index) ? select.$scope.$matches[index].label : false;
}
element.html((selected ? selected : attr.placeholder || defaults.placeholder) + defaults.caretHtml);
};
scope.$on('$destroy', function () {
select.destroy();
options = null;
select = null;
});
}
};
}
]); | SelectFactory |
mod.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The main parser interface
use rustc_data_structures::sync::{Lrc, Lock};
use ast::{self, CrateConfig};
use codemap::{CodeMap, FilePathMapping};
use syntax_pos::{self, Span, FileMap, NO_EXPANSION, FileName};
use errors::{Handler, ColorConfig, DiagnosticBuilder};
use feature_gate::UnstableFeatures;
use parse::parser::Parser;
use ptr::P;
use str::char_at;
use symbol::Symbol;
use tokenstream::{TokenStream, TokenTree};
use diagnostics::plugin::ErrorMap;
use std::cell::RefCell;
use std::collections::HashSet;
use std::iter;
use std::path::{Path, PathBuf};
use std::str;
pub type PResult<'a, T> = Result<T, DiagnosticBuilder<'a>>;
#[macro_use]
pub mod parser;
pub mod lexer;
pub mod token;
pub mod attr;
pub mod common;
pub mod classify;
pub mod obsolete;
/// Info about a parsing session.
pub struct ParseSess {
pub span_diagnostic: Handler,
pub unstable_features: UnstableFeatures,
pub config: CrateConfig,
pub missing_fragment_specifiers: RefCell<HashSet<Span>>,
/// The registered diagnostics codes
pub registered_diagnostics: Lock<ErrorMap>,
// Spans where a `mod foo;` statement was included in a non-mod.rs file.
// These are used to issue errors if the non_modrs_mods feature is not enabled.
pub non_modrs_mods: RefCell<Vec<(ast::Ident, Span)>>,
/// Used to determine and report recursive mod inclusions
included_mod_stack: RefCell<Vec<PathBuf>>,
code_map: Lrc<CodeMap>,
}
impl ParseSess {
pub fn new(file_path_mapping: FilePathMapping) -> Self {
let cm = Lrc::new(CodeMap::new(file_path_mapping));
let handler = Handler::with_tty_emitter(ColorConfig::Auto,
true,
false,
Some(cm.clone()));
ParseSess::with_span_handler(handler, cm)
}
pub fn with_span_handler(handler: Handler, code_map: Lrc<CodeMap>) -> ParseSess {
ParseSess {
span_diagnostic: handler,
unstable_features: UnstableFeatures::from_environment(),
config: HashSet::new(),
missing_fragment_specifiers: RefCell::new(HashSet::new()),
registered_diagnostics: Lock::new(ErrorMap::new()),
included_mod_stack: RefCell::new(vec![]),
code_map,
non_modrs_mods: RefCell::new(vec![]),
}
}
pub fn codemap(&self) -> &CodeMap {
&self.code_map
}
}
#[derive(Clone)]
pub struct Directory {
pub path: PathBuf,
pub ownership: DirectoryOwnership,
}
#[derive(Copy, Clone)]
pub enum DirectoryOwnership {
Owned {
// None if `mod.rs`, `Some("foo")` if we're in `foo.rs`
relative: Option<ast::Ident>,
},
UnownedViaBlock,
UnownedViaMod(bool /* legacy warnings? */),
}
// a bunch of utility functions of the form parse_<thing>_from_<source>
// where <thing> includes crate, expr, item, stmt, tts, and one that
// uses a HOF to parse anything, and <source> includes file and
// source_str.
pub fn parse_crate_from_file<'a>(input: &Path, sess: &'a ParseSess) -> PResult<'a, ast::Crate> {
let mut parser = new_parser_from_file(sess, input);
parser.parse_crate_mod()
}
pub fn parse_crate_attrs_from_file<'a>(input: &Path, sess: &'a ParseSess)
-> PResult<'a, Vec<ast::Attribute>> {
let mut parser = new_parser_from_file(sess, input);
parser.parse_inner_attributes()
}
pub fn parse_crate_from_source_str(name: FileName, source: String, sess: &ParseSess)
-> PResult<ast::Crate> {
new_parser_from_source_str(sess, name, source).parse_crate_mod()
}
pub fn parse_crate_attrs_from_source_str(name: FileName, source: String, sess: &ParseSess)
-> PResult<Vec<ast::Attribute>> {
new_parser_from_source_str(sess, name, source).parse_inner_attributes()
}
pub fn parse_expr_from_source_str(name: FileName, source: String, sess: &ParseSess)
-> PResult<P<ast::Expr>> {
new_parser_from_source_str(sess, name, source).parse_expr()
}
/// Parses an item.
///
/// Returns `Ok(Some(item))` when successful, `Ok(None)` when no item was found, and `Err`
/// when a syntax error occurred.
pub fn parse_item_from_source_str(name: FileName, source: String, sess: &ParseSess)
-> PResult<Option<P<ast::Item>>> {
new_parser_from_source_str(sess, name, source).parse_item()
}
pub fn parse_meta_from_source_str(name: FileName, source: String, sess: &ParseSess)
-> PResult<ast::MetaItem> {
new_parser_from_source_str(sess, name, source).parse_meta_item()
}
pub fn parse_stmt_from_source_str(name: FileName, source: String, sess: &ParseSess)
-> PResult<Option<ast::Stmt>> {
new_parser_from_source_str(sess, name, source).parse_stmt()
}
pub fn parse_stream_from_source_str(name: FileName, source: String, sess: &ParseSess,
override_span: Option<Span>)
-> TokenStream {
filemap_to_stream(sess, sess.codemap().new_filemap(name, source), override_span)
}
// Create a new parser from a source string
pub fn new_parser_from_source_str(sess: &ParseSess, name: FileName, source: String)
-> Parser {
let mut parser = filemap_to_parser(sess, sess.codemap().new_filemap(name, source));
parser.recurse_into_file_modules = false;
parser
}
/// Create a new parser, handling errors as appropriate
/// if the file doesn't exist
pub fn new_parser_from_file<'a>(sess: &'a ParseSess, path: &Path) -> Parser<'a> {
filemap_to_parser(sess, file_to_filemap(sess, path, None))
}
/// Given a session, a crate config, a path, and a span, add
/// the file at the given path to the codemap, and return a parser.
/// On an error, use the given span as the source of the problem.
pub fn new_sub_parser_from_file<'a>(sess: &'a ParseSess,
path: &Path,
directory_ownership: DirectoryOwnership,
module_name: Option<String>,
sp: Span) -> Parser<'a> {
let mut p = filemap_to_parser(sess, file_to_filemap(sess, path, Some(sp)));
p.directory.ownership = directory_ownership;
p.root_module_name = module_name;
p
}
/// Given a filemap and config, return a parser
pub fn filemap_to_parser(sess: & ParseSess, filemap: Lrc<FileMap>) -> Parser |
// must preserve old name for now, because quote! from the *existing*
// compiler expands into it
pub fn new_parser_from_tts(sess: &ParseSess, tts: Vec<TokenTree>) -> Parser {
stream_to_parser(sess, tts.into_iter().collect())
}
// base abstractions
/// Given a session and a path and an optional span (for error reporting),
/// add the path to the session's codemap and return the new filemap.
fn file_to_filemap(sess: &ParseSess, path: &Path, spanopt: Option<Span>)
-> Lrc<FileMap> {
match sess.codemap().load_file(path) {
Ok(filemap) => filemap,
Err(e) => {
let msg = format!("couldn't read {:?}: {}", path.display(), e);
match spanopt {
Some(sp) => sess.span_diagnostic.span_fatal(sp, &msg).raise(),
None => sess.span_diagnostic.fatal(&msg).raise()
}
}
}
}
/// Given a filemap, produce a sequence of token-trees
pub fn filemap_to_stream(sess: &ParseSess, filemap: Lrc<FileMap>, override_span: Option<Span>)
-> TokenStream {
let mut srdr = lexer::StringReader::new(sess, filemap);
srdr.override_span = override_span;
srdr.real_token();
panictry!(srdr.parse_all_token_trees())
}
/// Given stream and the `ParseSess`, produce a parser
pub fn stream_to_parser(sess: &ParseSess, stream: TokenStream) -> Parser {
Parser::new(sess, stream, None, true, false)
}
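// Pipeline sketch: file or source string -> FileMap (via the codemap) ->
// TokenStream (filemap_to_stream) -> Parser (stream_to_parser); the
// parse_*_from_* helpers above are thin wrappers over this chain.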
/// Parse a string representing a character literal into its final form.
/// Rather than just accepting/rejecting a given literal, unescapes it as
/// well. Can take any slice prefixed by a character escape. Returns the
/// character and the number of characters consumed.
pub fn char_lit(lit: &str, diag: Option<(Span, &Handler)>) -> (char, isize) {
use std::char;
// Handle non-escaped chars first.
if lit.as_bytes()[0] != b'\\' {
// If the first byte isn't '\\' it might be part of a multi-byte char, so
// get the char with chars().
let c = lit.chars().next().unwrap();
return (c, 1);
}
// Handle escaped chars.
match lit.as_bytes()[1] as char {
'"' => ('"', 2),
'n' => ('\n', 2),
'r' => ('\r', 2),
't' => ('\t', 2),
'\\' => ('\\', 2),
'\'' => ('\'', 2),
'0' => ('\0', 2),
'x' => {
let v = u32::from_str_radix(&lit[2..4], 16).unwrap();
let c = char::from_u32(v).unwrap();
(c, 4)
}
'u' => {
assert_eq!(lit.as_bytes()[2], b'{');
let idx = lit.find('}').unwrap();
let s = &lit[3..idx].chars().filter(|&c| c != '_').collect::<String>();
let v = u32::from_str_radix(&s, 16).unwrap();
let c = char::from_u32(v).unwrap_or_else(|| {
if let Some((span, diag)) = diag {
let mut diag = diag.struct_span_err(span, "invalid unicode character escape");
if v > 0x10FFFF {
diag.help("unicode escape must be at most 10FFFF").emit();
} else {
diag.help("unicode escape must not be a surrogate").emit();
}
}
'\u{FFFD}'
});
(c, (idx + 1) as isize)
}
_ => panic!("lexer should have rejected a bad character escape {}", lit)
}
}
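// Behaviour sketch (inputs written as raw strings):
//   char_lit("a", None)          == ('a', 1)
//   char_lit(r"\n", None)        == ('\n', 2)
//   char_lit(r"\x41", None)      == ('A', 4)
//   char_lit(r"\u{2603}", None)  == ('☃', 8)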
pub fn escape_default(s: &str) -> String {
s.chars().map(char::escape_default).flat_map(|x| x).collect()
}
/// Parse a string representing a string literal into its final form. Does
/// unescaping.
pub fn str_lit(lit: &str, diag: Option<(Span, &Handler)>) -> String {
debug!("parse_str_lit: given {}", escape_default(lit));
let mut res = String::with_capacity(lit.len());
// FIXME #8372: This could be a for-loop if it didn't borrow the iterator
let error = |i| format!("lexer should have rejected {} at {}", lit, i);
/// Eat everything up to a non-whitespace
fn eat<'a>(it: &mut iter::Peekable<str::CharIndices<'a>>) {
loop {
match it.peek().map(|x| x.1) {
Some(' ') | Some('\n') | Some('\r') | Some('\t') => {
it.next();
},
_ => { break; }
}
}
}
let mut chars = lit.char_indices().peekable();
while let Some((i, c)) = chars.next() {
match c {
'\\' => {
let ch = chars.peek().unwrap_or_else(|| {
panic!("{}", error(i))
}).1;
if ch == '\n' {
eat(&mut chars);
} else if ch == '\r' {
chars.next();
let ch = chars.peek().unwrap_or_else(|| {
panic!("{}", error(i))
}).1;
if ch != '\n' {
panic!("lexer accepted bare CR");
}
eat(&mut chars);
} else {
// otherwise, a normal escape
let (c, n) = char_lit(&lit[i..], diag);
for _ in 0..n - 1 { // we don't need to move past the first \
chars.next();
}
res.push(c);
}
},
'\r' => {
let ch = chars.peek().unwrap_or_else(|| {
panic!("{}", error(i))
}).1;
if ch != '\n' {
panic!("lexer accepted bare CR");
}
chars.next();
res.push('\n');
}
c => res.push(c),
}
}
res.shrink_to_fit(); // probably not going to do anything, unless there was an escape.
debug!("parse_str_lit: returning {}", res);
res
}
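// e.g. str_lit(r"a\nb", None) == "a\nb" (the escape is decoded), and a
// backslash-newline pair plus any following indentation is dropped.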
/// Parse a string representing a raw string literal into its final form. The
/// only operation this does is convert embedded CRLF into a single LF.
pub fn raw_str_lit(lit: &str) -> String {
debug!("raw_str_lit: given {}", escape_default(lit));
let mut res = String::with_capacity(lit.len());
let mut chars = lit.chars().peekable();
while let Some(c) = chars.next() {
if c == '\r' {
if *chars.peek().unwrap() != '\n' {
panic!("lexer accepted bare CR");
}
chars.next();
res.push('\n');
} else {
res.push(c);
}
}
res.shrink_to_fit();
res
}
// check if `s` looks like i32 or u1234 etc.
fn looks_like_width_suffix(first_chars: &[char], s: &str) -> bool {
s.len() > 1 &&
first_chars.contains(&char_at(s, 0)) &&
s[1..].chars().all(|c| '0' <= c && c <= '9')
}
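// e.g. looks_like_width_suffix(&['i', 'u'], "i32") == true, while
// looks_like_width_suffix(&['i', 'u'], "f32") == false.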
macro_rules! err {
($opt_diag:expr, |$span:ident, $diag:ident| $($body:tt)*) => {
match $opt_diag {
Some(($span, $diag)) => { $($body)* }
None => return None,
}
}
}
pub fn lit_token(lit: token::Lit, suf: Option<Symbol>, diag: Option<(Span, &Handler)>)
-> (bool /* suffix illegal? */, Option<ast::LitKind>) {
use ast::LitKind;
match lit {
token::Byte(i) => (true, Some(LitKind::Byte(byte_lit(&i.as_str()).0))),
token::Char(i) => (true, Some(LitKind::Char(char_lit(&i.as_str(), diag).0))),
// There are some valid suffixes for integer and float literals,
// so all the handling is done internally.
token::Integer(s) => (false, integer_lit(&s.as_str(), suf, diag)),
token::Float(s) => (false, float_lit(&s.as_str(), suf, diag)),
token::Str_(s) => {
let s = Symbol::intern(&str_lit(&s.as_str(), diag));
(true, Some(LitKind::Str(s, ast::StrStyle::Cooked)))
}
token::StrRaw(s, n) => {
let s = Symbol::intern(&raw_str_lit(&s.as_str()));
(true, Some(LitKind::Str(s, ast::StrStyle::Raw(n))))
}
token::ByteStr(i) => {
(true, Some(LitKind::ByteStr(byte_str_lit(&i.as_str()))))
}
token::ByteStrRaw(i, _) => {
(true, Some(LitKind::ByteStr(Lrc::new(i.to_string().into_bytes()))))
}
}
}
fn filtered_float_lit(data: Symbol, suffix: Option<Symbol>, diag: Option<(Span, &Handler)>)
-> Option<ast::LitKind> {
debug!("filtered_float_lit: {}, {:?}", data, suffix);
let suffix = match suffix {
Some(suffix) => suffix,
None => return Some(ast::LitKind::FloatUnsuffixed(data)),
};
Some(match &*suffix.as_str() {
"f32" => ast::LitKind::Float(data, ast::FloatTy::F32),
"f64" => ast::LitKind::Float(data, ast::FloatTy::F64),
suf => {
err!(diag, |span, diag| {
if suf.len() >= 2 && looks_like_width_suffix(&['f'], suf) {
// if it looks like a width, let's try to be helpful.
let msg = format!("invalid width `{}` for float literal", &suf[1..]);
diag.struct_span_err(span, &msg).help("valid widths are 32 and 64").emit()
} else {
let msg = format!("invalid suffix `{}` for float literal", suf);
diag.struct_span_err(span, &msg)
.help("valid suffixes are `f32` and `f64`")
.emit();
}
});
ast::LitKind::FloatUnsuffixed(data)
}
})
}
pub fn float_lit(s: &str, suffix: Option<Symbol>, diag: Option<(Span, &Handler)>)
-> Option<ast::LitKind> {
debug!("float_lit: {:?}, {:?}", s, suffix);
// FIXME #2252: bounds checking float literals is deferred until trans
let s = s.chars().filter(|&c| c != '_').collect::<String>();
filtered_float_lit(Symbol::intern(&s), suffix, diag)
}
/// Parse a string representing a byte literal into its final form. Similar to `char_lit`
pub fn byte_lit(lit: &str) -> (u8, usize) {
let err = |i| format!("lexer accepted invalid byte literal {} step {}", lit, i);
if lit.len() == 1 {
(lit.as_bytes()[0], 1)
} else {
assert_eq!(lit.as_bytes()[0], b'\\', "{}", err(0));
let b = match lit.as_bytes()[1] {
b'"' => b'"',
b'n' => b'\n',
b'r' => b'\r',
b't' => b'\t',
b'\\' => b'\\',
b'\'' => b'\'',
b'0' => b'\0',
_ => {
match u64::from_str_radix(&lit[2..4], 16).ok() {
Some(c) =>
if c > 0xFF {
panic!(err(2))
} else {
return (c as u8, 4)
},
None => panic!(err(3))
}
}
};
(b, 2)
}
}
pub fn byte_str_lit(lit: &str) -> Lrc<Vec<u8>> {
let mut res = Vec::with_capacity(lit.len());
// FIXME #8372: This could be a for-loop if it didn't borrow the iterator
let error = |i| format!("lexer should have rejected {} at {}", lit, i);
/// Eat everything up to a non-whitespace
fn eat<I: Iterator<Item=(usize, u8)>>(it: &mut iter::Peekable<I>) {
loop {
match it.peek().map(|x| x.1) {
Some(b' ') | Some(b'\n') | Some(b'\r') | Some(b'\t') => {
it.next();
},
_ => { break; }
}
}
}
// byte string literals *must* be ASCII, but the escapes don't have to be
let mut chars = lit.bytes().enumerate().peekable();
loop {
match chars.next() {
Some((i, b'\\')) => {
let em = error(i);
match chars.peek().expect(&em).1 {
b'\n' => eat(&mut chars),
b'\r' => {
chars.next();
if chars.peek().expect(&em).1 != b'\n' {
panic!("lexer accepted bare CR");
}
eat(&mut chars);
}
_ => {
// otherwise, a normal escape
let (c, n) = byte_lit(&lit[i..]);
// we don't need to move past the first \
for _ in 0..n - 1 {
chars.next();
}
res.push(c);
}
}
},
Some((i, b'\r')) => {
let em = error(i);
if chars.peek().expect(&em).1 != b'\n' {
panic!("lexer accepted bare CR");
}
chars.next();
res.push(b'\n');
}
Some((_, c)) => res.push(c),
None => break,
}
}
Lrc::new(res)
}
pub fn integer_lit(s: &str, suffix: Option<Symbol>, diag: Option<(Span, &Handler)>)
-> Option<ast::LitKind> {
// s can only be ascii, byte indexing is fine
let s2 = s.chars().filter(|&c| c != '_').collect::<String>();
let mut s = &s2[..];
debug!("integer_lit: {}, {:?}", s, suffix);
let mut base = 10;
let orig = s;
let mut ty = ast::LitIntType::Unsuffixed;
if char_at(s, 0) == '0' && s.len() > 1 {
match char_at(s, 1) {
'x' => base = 16,
'o' => base = 8,
'b' => base = 2,
_ => { }
}
}
// 1f64 and 2f32 etc. are valid float literals.
if let Some(suf) = suffix {
if looks_like_width_suffix(&['f'], &suf.as_str()) {
let err = match base {
16 => Some("hexadecimal float literal is not supported"),
8 => Some("octal float literal is not supported"),
2 => Some("binary float literal is not supported"),
_ => None,
};
if let Some(err) = err {
err!(diag, |span, diag| diag.span_err(span, err));
}
return filtered_float_lit(Symbol::intern(s), Some(suf), diag)
}
}
if base != 10 {
s = &s[2..];
}
if let Some(suf) = suffix {
if suf.as_str().is_empty() {
err!(diag, |span, diag| diag.span_bug(span, "found empty literal suffix in Some"));
}
ty = match &*suf.as_str() {
"isize" => ast::LitIntType::Signed(ast::IntTy::Isize),
"i8" => ast::LitIntType::Signed(ast::IntTy::I8),
"i16" => ast::LitIntType::Signed(ast::IntTy::I16),
"i32" => ast::LitIntType::Signed(ast::IntTy::I32),
"i64" => ast::LitIntType::Signed(ast::IntTy::I64),
"i128" => ast::LitIntType::Signed(ast::IntTy::I128),
"usize" => ast::LitIntType::Unsigned(ast::UintTy::Usize),
"u8" => ast::LitIntType::Unsigned(ast::UintTy::U8),
"u16" => ast::LitIntType::Unsigned(ast::UintTy::U16),
"u32" => ast::LitIntType::Unsigned(ast::UintTy::U32),
"u64" => ast::LitIntType::Unsigned(ast::UintTy::U64),
"u128" => ast::LitIntType::Unsigned(ast::UintTy::U128),
suf => {
// i<digits> and u<digits> look like widths, so let's
// give an error message along those lines
err!(diag, |span, diag| {
if looks_like_width_suffix(&['i', 'u'], suf) {
let msg = format!("invalid width `{}` for integer literal", &suf[1..]);
diag.struct_span_err(span, &msg)
.help("valid widths are 8, 16, 32, 64 and 128")
.emit();
} else {
let msg = format!("invalid suffix `{}` for numeric literal", suf);
diag.struct_span_err(span, &msg)
.help("the suffix must be one of the integral types \
(`u32`, `isize`, etc)")
.emit();
}
});
ty
}
}
}
debug!("integer_lit: the type is {:?}, base {:?}, the new string is {:?}, the original \
string was {:?}, the original suffix was {:?}", ty, base, s, orig, suffix);
Some(match u128::from_str_radix(s, base) {
Ok(r) => ast::LitKind::Int(r, ty),
Err(_) => {
// small bases are lexed as if they were base 10, e.g., the string
// might be `0b10201`. This will cause the conversion above to fail,
// but these cases have errors in the lexer: we don't want to emit
// two errors, and we especially don't want to emit this error since
// it isn't necessarily true.
let already_errored = base < 10 &&
s.chars().any(|c| c.to_digit(10).map_or(false, |d| d >= base));
if !already_errored {
err!(diag, |span, diag| diag.span_err(span, "int literal is too large"));
}
ast::LitKind::Int(0, ty)
}
})
}
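// Illustrative examples (added for clarity, not in the original source):
// integer_lit("1_000", None, None) filters the underscores and yields
// Some(LitKind::Int(1000, Unsuffixed)); integer_lit("0b101", None, None)
// strips the "0b" prefix and parses 5 in base 2.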
#[cfg(test)]
mod tests {
use super::*;
use syntax_pos::{self, Span, BytePos, Pos, NO_EXPANSION};
use codemap::{respan, Spanned};
use ast::{self, Ident, PatKind};
use abi::Abi;
use attr::first_attr_value_str_by_name;
use parse;
use parse::parser::Parser;
use print::pprust::item_to_string;
use ptr::P;
use tokenstream::{self, TokenTree};
use util::parser_testing::{string_to_stream, string_to_parser};
use util::parser_testing::{string_to_expr, string_to_item, string_to_stmt};
use util::ThinVec;
use with_globals;
// produce a syntax_pos::span
fn sp(a: u32, b: u32) -> Span {
Span::new(BytePos(a), BytePos(b), NO_EXPANSION)
}
fn str2seg(s: &str, lo: u32, hi: u32) -> ast::PathSegment {
ast::PathSegment::from_ident(Ident::from_str(s), sp(lo, hi))
}
#[test] fn path_exprs_1() {
with_globals(|| {
assert!(string_to_expr("a".to_string()) ==
P(ast::Expr{
id: ast::DUMMY_NODE_ID,
node: ast::ExprKind::Path(None, ast::Path {
span: sp(0, 1),
segments: vec![str2seg("a", 0, 1)],
}),
span: sp(0, 1),
attrs: ThinVec::new(),
}))
})
}
#[test] fn path_exprs_2 () {
with_globals(|| {
assert!(string_to_expr("::a::b".to_string()) ==
P(ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprKind::Path(None, ast::Path {
span: sp(0, 6),
segments: vec![ast::PathSegment::crate_root(sp(0, 0)),
str2seg("a", 2, 3),
str2seg("b", 5, 6)]
}),
span: sp(0, 6),
attrs: ThinVec::new(),
}))
})
}
#[should_panic]
#[test] fn bad_path_expr_1() {
with_globals(|| {
string_to_expr("::abc::def::return".to_string());
})
}
// check the token-tree-ization of macros
#[test]
fn string_to_tts_macro () {
with_globals(|| {
let tts: Vec<_> =
string_to_stream("macro_rules! zip (($a)=>($a))".to_string()).trees().collect();
let tts: &[TokenTree] = &tts[..];
match (tts.len(), tts.get(0), tts.get(1), tts.get(2), tts.get(3)) {
(
4,
Some(&TokenTree::Token(_, token::Ident(name_macro_rules))),
Some(&TokenTree::Token(_, token::Not)),
Some(&TokenTree::Token(_, token::Ident(name_zip))),
Some(&TokenTree::Delimited(_, ref macro_delimed)),
)
if name_macro_rules.name == "macro_rules"
&& name_zip.name == "zip" => {
let tts = ¯o_delimed.stream().trees().collect::<Vec<_>>();
match (tts.len(), tts.get(0), tts.get(1), tts.get(2)) {
(
3,
Some(&TokenTree::Delimited(_, ref first_delimed)),
Some(&TokenTree::Token(_, token::FatArrow)),
Some(&TokenTree::Delimited(_, ref second_delimed)),
)
if macro_delimed.delim == token::Paren => {
let tts = &first_delimed.stream().trees().collect::<Vec<_>>();
match (tts.len(), tts.get(0), tts.get(1)) {
(
2,
Some(&TokenTree::Token(_, token::Dollar)),
Some(&TokenTree::Token(_, token::Ident(ident))),
)
if first_delimed.delim == token::Paren && ident.name == "a" => {},
_ => panic!("value 3: {:?}", *first_delimed),
}
let tts = &second_delimed.stream().trees().collect::<Vec<_>>();
match (tts.len(), tts.get(0), tts.get(1)) {
(
2,
Some(&TokenTree::Token(_, token::Dollar)),
Some(&TokenTree::Token(_, token::Ident(ident))),
)
if second_delimed.delim == token::Paren
&& ident.name == "a" => {},
_ => panic!("value 4: {:?}", *second_delimed),
}
},
_ => panic!("value 2: {:?}", *macro_delimed),
}
},
_ => panic!("value: {:?}",tts),
}
})
}
#[test]
fn string_to_tts_1() {
with_globals(|| {
let tts = string_to_stream("fn a (b : i32) { b; }".to_string());
let expected = TokenStream::concat(vec![
TokenTree::Token(sp(0, 2), token::Ident(Ident::from_str("fn"))).into(),
TokenTree::Token(sp(3, 4), token::Ident(Ident::from_str("a"))).into(),
TokenTree::Delimited(
sp(5, 14),
tokenstream::Delimited {
delim: token::DelimToken::Paren,
tts: TokenStream::concat(vec![
TokenTree::Token(sp(6, 7), token::Ident(Ident::from_str("b"))).into(),
TokenTree::Token(sp(8, 9), token::Colon).into(),
TokenTree::Token(sp(10, 13),
token::Ident(Ident::from_str("i32"))).into(),
]).into(),
}).into(),
TokenTree::Delimited(
sp(15, 21),
tokenstream::Delimited {
delim: token::DelimToken::Brace,
tts: TokenStream::concat(vec![
TokenTree::Token(sp(17, 18), token::Ident(Ident::from_str("b"))).into(),
TokenTree::Token(sp(18, 19), token::Semi).into(),
]).into(),
}).into()
]);
assert_eq!(tts, expected);
})
}
#[test] fn ret_expr() {
with_globals(|| {
assert!(string_to_expr("return d".to_string()) ==
P(ast::Expr{
id: ast::DUMMY_NODE_ID,
node:ast::ExprKind::Ret(Some(P(ast::Expr{
id: ast::DUMMY_NODE_ID,
node:ast::ExprKind::Path(None, ast::Path{
span: sp(7, 8),
segments: vec![str2seg("d", 7, 8)],
}),
span:sp(7,8),
attrs: ThinVec::new(),
}))),
span:sp(0,8),
attrs: ThinVec::new(),
}))
})
}
#[test] fn parse_stmt_1 () {
with_globals(|| {
assert!(string_to_stmt("b;".to_string()) ==
Some(ast::Stmt {
node: ast::StmtKind::Expr(P(ast::Expr {
id: ast::DUMMY_NODE_ID,
node: ast::ExprKind::Path(None, ast::Path {
span:sp(0,1),
segments: vec![str2seg("b", 0, 1)],
}),
span: sp(0,1),
attrs: ThinVec::new()})),
id: ast::DUMMY_NODE_ID,
span: sp(0,1)}))
})
}
fn parser_done(p: Parser){
assert_eq!(p.token.clone(), token::Eof);
}
#[test] fn parse_ident_pat () {
with_globals(|| {
let sess = ParseSess::new(FilePathMapping::empty());
let mut parser = string_to_parser(&sess, "b".to_string());
assert!(panictry!(parser.parse_pat())
== P(ast::Pat{
id: ast::DUMMY_NODE_ID,
node: PatKind::Ident(ast::BindingMode::ByValue(ast::Mutability::Immutable),
Spanned{ span:sp(0, 1),
node: Ident::from_str("b")
},
None),
span: sp(0,1)}));
parser_done(parser);
})
}
// check the contents of the tt manually:
#[test] fn parse_fundecl () {
with_globals(|| {
// this test depends on the intern order of "fn" and "i32"
let item = string_to_item("fn a (b : i32) { b; }".to_string()).map(|m| {
m.map(|mut m| {
m.tokens = None;
m
})
});
assert_eq!(item,
Some(
P(ast::Item{ident:Ident::from_str("a"),
attrs:Vec::new(),
id: ast::DUMMY_NODE_ID,
tokens: None,
node: ast::ItemKind::Fn(P(ast::FnDecl {
inputs: vec![ast::Arg{
ty: P(ast::Ty{id: ast::DUMMY_NODE_ID,
node: ast::TyKind::Path(None, ast::Path{
span:sp(10,13),
segments: vec![str2seg("i32", 10, 13)],
}),
span:sp(10,13)
}),
pat: P(ast::Pat {
id: ast::DUMMY_NODE_ID,
node: PatKind::Ident(
ast::BindingMode::ByValue(
ast::Mutability::Immutable),
Spanned{
span: sp(6,7),
node: Ident::from_str("b")},
None
),
span: sp(6,7)
}),
id: ast::DUMMY_NODE_ID
}],
output: ast::FunctionRetTy::Default(sp(15, 15)),
variadic: false
}),
ast::Unsafety::Normal,
Spanned {
span: sp(0,2),
node: ast::Constness::NotConst,
},
Abi::Rust,
ast::Generics{
params: Vec::new(),
where_clause: ast::WhereClause {
id: ast::DUMMY_NODE_ID,
predicates: Vec::new(),
span: syntax_pos::DUMMY_SP,
},
span: syntax_pos::DUMMY_SP,
},
P(ast::Block {
stmts: vec![ast::Stmt {
node: ast::StmtKind::Semi(P(ast::Expr{
id: ast::DUMMY_NODE_ID,
node: ast::ExprKind::Path(None,
ast::Path{
span:sp(17,18),
segments: vec![str2seg("b", 17, 18)],
}),
span: sp(17,18),
attrs: ThinVec::new()})),
id: ast::DUMMY_NODE_ID,
span: sp(17,19)}],
id: ast::DUMMY_NODE_ID,
rules: ast::BlockCheckMode::Default, // no idea
span: sp(15,21),
recovered: false,
})),
vis: respan(sp(0, 0), ast::VisibilityKind::Inherited),
span: sp(0,21)})));
})
}
#[test] fn parse_use() {
with_globals(|| {
let use_s = "use foo::bar::baz;";
let vitem = string_to_item(use_s.to_string()).unwrap();
let vitem_s = item_to_string(&vitem);
assert_eq!(&vitem_s[..], use_s);
let use_s = "use foo::bar as baz;";
let vitem = string_to_item(use_s.to_string()).unwrap();
let vitem_s = item_to_string(&vitem);
assert_eq!(&vitem_s[..], use_s);
})
}
#[test] fn parse_extern_crate() {
with_globals(|| {
let ex_s = "extern crate foo;";
let vitem = string_to_item(ex_s.to_string()).unwrap();
let vitem_s = item_to_string(&vitem);
assert_eq!(&vitem_s[..], ex_s);
let ex_s = "extern crate foo as bar;";
let vitem = string_to_item(ex_s.to_string()).unwrap();
let vitem_s = item_to_string(&vitem);
assert_eq!(&vitem_s[..], ex_s);
})
}
fn get_spans_of_pat_idents(src: &str) -> Vec<Span> {
let item = string_to_item(src.to_string()).unwrap();
struct PatIdentVisitor {
spans: Vec<Span>
}
impl<'a> ::visit::Visitor<'a> for PatIdentVisitor {
fn visit_pat(&mut self, p: &'a ast::Pat) {
match p.node {
PatKind::Ident(_ , ref spannedident, _) => {
self.spans.push(spannedident.span.clone());
}
_ => {
::visit::walk_pat(self, p);
}
}
}
}
let mut v = PatIdentVisitor { spans: Vec::new() };
::visit::walk_item(&mut v, &item);
return v.spans;
}
#[test] fn span_of_self_arg_pat_idents_are_correct() {
with_globals(|| {
let srcs = ["impl z { fn a (&self, &myarg: i32) {} }",
"impl z { fn a (&mut self, &myarg: i32) {} }",
"impl z { fn a (&'a self, &myarg: i32) {} }",
"impl z { fn a (self, &myarg: i32) {} }",
"impl z { fn a (self: Foo, &myarg: i32) {} }",
];
for &src in &srcs {
let spans = get_spans_of_pat_idents(src);
let (lo, hi) = (spans[0].lo(), spans[0].hi());
assert!("self" == &src[lo.to_usize()..hi.to_usize()],
"\"{}\" != \"self\". src=\"{}\"",
&src[lo.to_usize()..hi.to_usize()], src)
}
})
}
#[test] fn parse_exprs () {
with_globals(|| {
// just make sure that they parse....
string_to_expr("3 + 4".to_string());
string_to_expr("a::z.froob(b,&(987+3))".to_string());
})
}
#[test] fn attrs_fix_bug () {
with_globals(|| {
string_to_item("pub fn mk_file_writer(path: &Path, flags: &[FileFlag])
-> Result<Box<Writer>, String> {
#[cfg(windows)]
fn wb() -> c_int {
(O_WRONLY | libc::consts::os::extra::O_BINARY) as c_int
}
#[cfg(unix)]
fn wb() -> c_int { O_WRONLY as c_int }
let mut fflags: c_int = wb();
}".to_string());
})
}
#[test] fn crlf_doc_comments() {
with_globals(|| {
let sess = ParseSess::new(FilePathMapping::empty());
let name = FileName::Custom("source".to_string());
let source = "/// doc comment\r\nfn foo() {}".to_string();
let item = parse_item_from_source_str(name.clone(), source, &sess)
.unwrap().unwrap();
let doc = first_attr_value_str_by_name(&item.attrs, "doc").unwrap();
assert_eq!(doc, "/// doc comment");
let source = "/// doc comment\r\n/// line 2\r\nfn foo() {}".to_string();
let item = parse_item_from_source_str(name.clone(), source, &sess)
.unwrap().unwrap();
let docs = item.attrs.iter().filter(|a| a.path == "doc")
.map(|a| a.value_str().unwrap().to_string()).collect::<Vec<_>>();
let b: &[_] = &["/// doc comment".to_string(), "/// line 2".to_string()];
assert_eq!(&docs[..], b);
let source = "/** doc comment\r\n * with CRLF */\r\nfn foo() {}".to_string();
let item = parse_item_from_source_str(name, source, &sess).unwrap().unwrap();
let doc = first_attr_value_str_by_name(&item.attrs, "doc").unwrap();
assert_eq!(doc, "/** doc comment\n * with CRLF */");
});
}
#[test]
fn ttdelim_span() {
with_globals(|| {
let sess = ParseSess::new(FilePathMapping::empty());
let expr = parse::parse_expr_from_source_str(PathBuf::from("foo").into(),
"foo!( fn main() { body } )".to_string(), &sess).unwrap();
let tts: Vec<_> = match expr.node {
ast::ExprKind::Mac(ref mac) => mac.node.stream().trees().collect(),
_ => panic!("not a macro"),
};
let span = tts.iter().rev().next().unwrap().span();
match sess.codemap().span_to_snippet(span) {
Ok(s) => assert_eq!(&s[..], "{ body }"),
Err(_) => panic!("could not get snippet"),
}
});
}
// This tests that when parsing a string (rather than a file) we don't try
// and read in a file for a module declaration and just parse a stub.
// See `recurse_into_file_modules` in the parser.
#[test]
fn out_of_line_mod() {
with_globals(|| {
let sess = ParseSess::new(FilePathMapping::empty());
let item = parse_item_from_source_str(
PathBuf::from("foo").into(),
"mod foo { struct S; mod this_does_not_exist; }".to_owned(),
&sess,
).unwrap().unwrap();
if let ast::ItemKind::Mod(ref m) = item.node {
assert!(m.items.len() == 2);
} else {
panic!();
}
});
}
}
| {
let end_pos = filemap.end_pos;
let mut parser = stream_to_parser(sess, filemap_to_stream(sess, filemap, None));
if parser.token == token::Eof && parser.span == syntax_pos::DUMMY_SP {
parser.span = Span::new(end_pos, end_pos, NO_EXPANSION);
}
parser
} |
utility.ts | import {spawnSync} from "child_process";
import fs from "fs";
import path from "path";
import glob from "glob";
import console from "./log";
export function replace_all(str: string, search: string, replacement: string): string {
return str.split(search).join(replacement);
}
export function read_text(filepath: string): string {
return fs.readFileSync(filepath, "utf8");
}
export function write_text(filepath: string, text: string): void {
fs.writeFileSync(filepath, text, "utf8");
}
export function replace_in_file(filepath: string, dict: { [s: string]: string }) {
let text = read_text(filepath);
for (const [k, v] of Object.entries(dict)) {
text = replace_all(text, k, v);
}
write_text(filepath, text);
}
export function search_files(pattern: string, search_path: string, out_files_list: string[]) {
const files = glob.sync(pattern, {
cwd: search_path
});
for (let file of files) {
out_files_list.push(path.join(search_path, file));
}
}
export function copyFolderRecursiveSync(source: string, target: string) {
makeDirs(target);
// copy the directory contents recursively
if (fs.lstatSync(source).isDirectory()) {
fs.readdirSync(source).forEach(function (file) {
const curSource = path.join(source, file);
if (fs.lstatSync(curSource).isDirectory()) {
copyFolderRecursiveSync(curSource, path.join(target, file));
} else {
fs.copyFileSync(curSource, path.join(target, file));
}
});
}
}
export function deleteFolderRecursive(p: string) {
if (fs.existsSync(p)) {
fs.readdirSync(p).forEach(function (file) {
const curPath = p + "/" + file;
if (fs.lstatSync(curPath).isDirectory()) { // recurse
deleteFolderRecursive(curPath);
} else { // delete file
fs.unlinkSync(curPath);
}
});
fs.rmdirSync(p);
}
}
export function copyFile(src: string, dest: string) {
fs.copyFileSync(src, dest);
}
export function isFile(p: string) {
return fs.existsSync(p) && fs.lstatSync(p).isFile();
}
export function isDir(p: string) {
return fs.existsSync(p) && fs.lstatSync(p).isDirectory();
}
export function makeDirs(p: string) {
if (!isDir(p)) { | }
}
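// Example usage (illustrative sketch, not part of the original file):
//   const status = execute("echo", ["hello"], "/tmp");
//   if (status !== 0) console.error("command failed");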
export function execute(cmd: string, args: string[] = [], wd: undefined | string = undefined) {
console.debug(">> " + [cmd].concat(args).join(" "));
const cwd = wd ?? process.cwd();
console.debug(`CWD: ${cwd}`);
const child = spawnSync(cmd, args, {
stdio: 'pipe',
encoding: 'utf-8',
cwd: cwd
}
);
console.log("stderr", child.stderr ? child.stderr.toString() : null);
console.log("stdout", child.stdout ? child.stdout.toString() : null);
console.log("exit code", child.status);
if (child.error) {
console.error(child.error);
}
return child.status;
} | fs.mkdirSync(p, {recursive: true}); |
roman_numerals.py | """ A program that is able to convert a number between 1 and 4999 (inclusive)
to a roman numeral with variables, arithmetic operators, and functions."""
def main():
# Input the number
|
if __name__ == '__main__':
main()
| number = original_number = int(input('Enter number:'))
# The quotient gotten from 'number // 1000' is the number of 'M'
# in the roman numeral system.
num1 = number // 1000
roman_num1 = 'M' * num1
number = number - num1 * 1000
# The quotient gotten from 'number // 500' is
# the number of 'D' in the roman numeral system.
num2 = number // 500
roman_num2 = roman_num1 + 'D' * num2
number = number - num2 * 500
# The quotient gotten from 'number // 100' is
# the number of 'C' in the roman numeral system.
num3 = number // 100
roman_num3 = roman_num2 + 'C' * num3
number = number - num3 * 100
# The quotient gotten from 'number // 50' is the number of 'L'
# in the roman numeral system.
num4 = number // 50
roman_num4 = roman_num3 + 'L' * num4
number = number - num4 * 50
# Similarly, 'num5', 'num6', and 'num7' are the counts of
# 'X', 'V', and 'I' in the roman numeral system.
num5 = number // 10
roman_num5 = roman_num4 + 'X' * num5
number = number - 10 * num5
num6 = number // 5
roman_num6 = roman_num5 + 'V' * num6
number = number - num6 * 5
num7 = number // 1
roman_num7 = roman_num6 + 'I' * num7
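# Worked example (added for clarity; this greedy scheme never emits
# subtractive forms such as 'CM' or 'IV'):
#   1999 -> 'M' + 'D' + 'CCCC' + 'L' + 'XXXX' + 'V' + 'IIII'
#        -> 'MDCCCCLXXXXVIIII'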
# Print the final string, using '+' to join the runs of
# 'M', 'D', 'C', 'L', 'X', 'V', and 'I'.
print(original_number, 'is', roman_num7)
utils.go | // Copyright 2019 MSolution.IO
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package instanceCount
import (
"context"
"crypto/md5"
"encoding/base64"
"encoding/json"
"time"
"github.com/trackit/jsonlog"
taws "github.com/trackit/trackit/aws"
"github.com/trackit/trackit/aws/usageReports"
"github.com/trackit/trackit/es"
)
type (
// InstanceCountReport is saved in ES to hold all the information of an InstanceCount
InstanceCountReport struct {
utils.ReportBase
InstanceCount InstanceCount `json:"instanceCount"`
}
// InstanceCount contains all the information of an InstanceCount
InstanceCount struct {
Type string `json:"instanceType"`
Region string `json:"region"`
Hours []InstanceCountHours `json:"hours"`
}
InstanceCountHours struct {
Hour time.Time `json:"hour"`
Count float64 `json:"count"`
}
)
// importInstanceCountToEs imports Instance Count reports into ElasticSearch.
// It calls createIndexEs if the index doesn't exist.
func | (ctx context.Context, aa taws.AwsAccount, reports []InstanceCountReport) error {
logger := jsonlog.LoggerFromContextOrDefault(ctx)
logger.Info("Updating InstanceCount for AWS account.", map[string]interface{}{
"awsAccount": aa,
})
index := es.IndexNameForUserId(aa.UserId, IndexPrefixInstanceCountReport)
bp, err := utils.GetBulkProcessor(ctx)
if err != nil {
logger.Error("Failed to get bulk processor.", err.Error())
return err
}
for _, report := range reports {
id, err := generateId(report)
if err != nil {
logger.Error("Error when marshaling instanceCount var", err.Error())
return err
}
bp = utils.AddDocToBulkProcessor(bp, report, TypeInstanceCountReport, index, id)
}
bp.Flush()
err = bp.Close()
if err != nil {
logger.Error("Fail to put InstanceCount in ES", err.Error())
return err
}
logger.Info("InstanceCount put in ES", nil)
return nil
}
func generateId(report InstanceCountReport) (string, error) {
ji, err := json.Marshal(struct {
Account string `json:"account"`
ReportDate time.Time `json:"reportDate"`
Type string `json:"type"`
Region string `json:"region"`
ReportType string `json:"reportType"`
}{
report.Account,
report.ReportDate,
report.InstanceCount.Type,
report.InstanceCount.Region,
report.ReportType,
})
if err != nil {
return "", err
}
hash := md5.Sum(ji)
hash64 := base64.URLEncoding.EncodeToString(hash[:])
return hash64, nil
}
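// Note (added for clarity): because the id is an md5 hash of the stable report
// key (account, report date, instance type, region, report type), re-importing
// the same report produces the same ES document id and overwrites the previous
// document instead of creating a duplicate.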
| importInstanceCountToEs |
version.go | /*
* @Description: version numbers
* @Date: 2021-11-03 16:03:06
*/
package srt
import (
"strconv"
"strings"
)
/**
* @description: version of a component dependency
*/
type Version struct {
Org string `json:"org"`
Nums []int `json:"nums,omitempty"`
Suffix string `json:"suffix,omitempty"`
}
/**
* @description: get the suffix weight of the current version
* @return {int} suffix weight; a larger weight means a higher version
*/
func (ver *Version) weight() (weight int) {
if len(ver.Suffix) > 0 {
// suffix weight table
suffixs := map[string]int{"alpha": 1, "beta": 2, "milestone": 3, "rc": 4, "cr": 4, "snapshot": 5, "release": 6, "final": 6, "ga": 6, "sp": 7}
if w, ok := suffixs[ver.Suffix]; ok {
// known suffix: use its weight from the table
weight = w
} else {
// unknown suffix: weight 5
weight = 5
}
} else {
// no suffix: weight 6 (same as a release)
weight = 6
}
return weight
}
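// Illustrative examples (added for clarity): NewVersion("1.0-alpha").weight() == 1,
// NewVersion("1.0-rc").weight() == 4, NewVersion("1.0").weight() == 6, and an
// unlisted suffix such as "build7" falls back to weight 5.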
/**
* @description: parse a version string
* @param {string} verStr version string
* @return {*Version} the parsed *Version struct
*/
func NewVersion(verStr string) *Version {
verStr = strings.TrimSpace(verStr)
ver := &Version{Nums: []int{}, Org: verStr}
verStr = strings.TrimLeft(verStr, "vV^")
// extract the suffix
index := strings.Index(verStr, "-")
if index != -1 {
ver.Suffix = verStr[index+1:]
verStr = verStr[:index]
}
// parse the numeric components
tags := strings.Split(verStr, ".")
for i, numStr := range tags {
if num, err := strconv.Atoi(numStr); err == nil {
ver.Nums = append(ver.Nums, num)
} else {
ver.Suffix = strings.Join(tags[i:], ".")
break
}
}
// strip trailing zero components
for len(ver.Nums) > 1 {
length := len(ver.Nums)
if ver.Nums[length-1] == 0 {
ver.Nums = ver.Nums[:length-1]
} else {
break
}
}
return ver
}
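// Illustrative example (added for clarity): NewVersion("v1.2.0-rc1") keeps
// Org "v1.2.0-rc1", parses Nums as [1 2] (the trailing zero is stripped), and
// stores Suffix "rc1".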
/**
* @description: report whether this version is strictly less than another version
* @param {*Version} other the other version
* @return {bool} true if the current version is less than the other version
*/
func (ver *Version) Less(other *Version) bool {
length := len(ver.Nums)
if length > len(other.Nums) {
length = len(other.Nums)
}
// compare numeric components
for i := 0; i < length; i++ {
if ver.Nums[i] < other.Nums[i] {
return true
} else if ver.Nums[i] > other.Nums[i] {
return false
}
}
// if the other version has more components, any non-zero extra one makes it larger
if len(ver.Nums) < len(other.Nums) {
for i := len(other.Nums) - 1; i >= len(ver.Nums); i-- {
if other.Nums[i] != 0 {
return true
}
}
}
// compare suffixes
vw, ow := ver.weight(), other.weight()
if vw == ow {
return ver.Suffix < other.Suffix
} else {
return vw < ow
}
}
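// Illustrative examples (added for clarity):
// NewVersion("1.2").Less(NewVersion("1.10")) == true (numeric, not lexicographic),
// and NewVersion("1.0-alpha").Less(NewVersion("1.0")) == true (alpha < release).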
/**
* @description: report whether this version equals another version
* @param {*Version} other the other version
* @return {bool} true if the two versions are equal
*/
func (ver *Version) Equal(other *Version) bool {
if len(ver.Nums) != len(other.Nums) {
return false
}
// compare numeric components
for i, n := range ver.Nums {
if other.Nums[i] != n {
return false
}
}
// compare suffixes
vw, ow := ver.weight(), other.weight()
return vw == ow
}
/**
* @description: report whether a version lies inside a version interval
* @param {*Version} ver the version to test
* @param {string} interval the version interval
* @return {bool} true if the version is inside the interval
*/
func InRangeInterval(ver *Version, interval string) bool {
// iterate over all of the "||"-separated intervals
for _, interval := range strings.Split(interval, "||") {
if len(interval) < 2 {
continue
}
// is the left bound closed?
left := interval[0] == '['
// is the right bound closed?
right := interval[len(interval)-1] == ']'
// position of the comma
index := strings.Index(interval, ",")
if index == -1 {
return false
}
// left endpoint of the interval
leftValue := NewVersion(interval[1:index])
// right endpoint of the interval
rightValue := NewVersion(interval[index+1 : len(interval)-1])
// on a closed boundary?
if (left && ver.Equal(leftValue)) || (right && ver.Equal(rightValue)) {
return true
}
// check whether the version lies strictly inside the interval:
// greater than the left endpoint and (right endpoint empty or less than it)
// leftValue < version && ( isempty(rightValue) || version < rightValue )
if leftValue.Less(ver) && (len(rightValue.Nums) == 0 || ver.Less(rightValue)) {
return true
}
}
// not inside any interval: return false
return false
}
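// Illustrative examples (added for clarity):
// InRangeInterval(NewVersion("1.5"), "[1.0,2.0)") == true, while
// InRangeInterval(NewVersion("2.0"), "[1.0,2.0)") == false because the right
// bound is open.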
/**
* @des | * @return {bool} returns true for a valid version number
*/
func (v *Version) Ok() bool {
return !strings.Contains(v.Org, "$") && len(v.Nums) > 0
}
| cription: check whether this is a valid version number
|
RP1DClustering.py | import numpy as np
from .PCASmallestEig import pca_smallest_eig, pca_smallest_eig_powermethod
from .Withness import withness
from .CalculateAngle import get_angle
#RP1D clustering from
#Han, Sangchun, and Mireille Boutin. "The hidden structure of image datasets." 2015 IEEE International Conference on Image Processing (ICIP). IEEE, 2015.
############################################
def ClusteringMeanRP1D(P,N,T,A=0,UsePCA=True,UsePower=False):
n = N.shape[0]
d = N.shape[1]
v = np.random.rand(T,d)
#u = np.mean(N,axis=0)
if UsePower:
N1 = pca_smallest_eig_powermethod(N, center=False)
N1 = np.reshape(N1,(3,))
else:
N1 = pca_smallest_eig(N, center=False)
N2 = np.sum(N,axis=0)
v = np.cross(N1,N2)
v = v/np.linalg.norm(v)
m = np.mean(P,axis=0)
dist = np.sqrt(np.sum((P - m)**2,axis=1))
i = np.argmin(dist)
radius = np.max(dist)
D = (P - P[i,:])/radius
#The A=2 is just hand tuned. Larger A encourages the clustering to split the patch in half
#A=0 is the previous version of the virtual goniometer
x = np.sum(v*N,axis=1) + A*np.sum(v*D,axis=1)
#Clustering
_, m = withness(x)
C = np.zeros(n,)
C[x>m] = 1
C[x<=m] = 2
P1 = P[C==1,:]
P2 = P[C==2,:]
N1 = N[C==1,:]
N2 = N[C==2,:]
theta, n1, n2 = get_angle(P1,P2,N1,N2,UsePCA = UsePCA, UsePower = UsePower)
return C,n1,n2,theta
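# Example call (illustrative; shapes are assumptions, not from the original
# module): with P and N as (n, 3) arrays of patch points and unit normals,
#   C, n1, n2, theta = ClusteringMeanRP1D(P, N, T=30)
# returns per-point labels in {1, 2}, the two fitted normals, and the angle.
# Note that in this mean-based variant the T random directions drawn above are
# immediately overwritten by the cross-product construction.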
def ClusteringRandomRP1D(X,T):
| n = X.shape[0]
d = X.shape[1]
v = np.random.rand(T,d)
u = np.mean(X,axis=0)
wmin = float("inf")
imin = 0
#w_list = []
#m_list = []
for i in range(T):
x = np.sum((v[i,:]-(np.dot(v[i,:],u)/np.dot(v[i,:],v[i,:]))*u)*X,axis=1)
w,m = withness(x)
if w < wmin:
wmin = w
imin = i
x = np.sum((v[imin,:]-(np.dot(v[imin,:],u)/np.dot(v[imin,:],v[imin,:]))*u)*X,axis=1)
_,m = withness(x)
C = np.zeros(n,)
C[x>m] = 1
C[x<=m] = 2
return C |
|
split_fragnet_candidates.py | #!/usr/bin/env python
# Copyright 2020 Informatics Matters Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse, os, sys, json, traceback
from pipelines_utils import utils
def gen_filename(id, count, generate_filenames):
if generate_filenames:
return str(count).zfill(3)
else:
return id
def execute(candidates_json, generate_filenames):
with open(candidates_json, 'r') as f:
candidates = json.load(f)
queries = candidates['queries']['molecules']
results = candidates['results']
hitCounts = candidates['hitCounts']
utils.log('Processing', len(queries), 'queries and', len(results), 'results')
num_mols = 0
num_hits = 0
count = 0
ids2Filenames = {}
for query in queries:
id = query['id']
if id in hitCounts:
molfile = query['originalMol']
if generate_filenames:
fname = str(count).zfill(3)
else:
fname = id
utils.log('Using file name of', fname)
with open(fname + '.mol', 'w') as f:
f.write(molfile)
num_hits += 1
ids2Filenames[id] = fname
count += 1
writers = {}
for result in results:
num_mols += 1
for id in result['sourceMols']:
if id in writers:
writer = writers[id]
else:
fname = ids2Filenames[id]
writer = open(fname + '.smi', 'w')
writers[id] = writer
smiles = result['smiles']
#utils.log('Processing', smiles)
writer.write(smiles + '\n')
for w in writers.values():
w.close()
utils.log('Totals - hits:', num_hits, 'outputs:', num_mols)
def main():
|
if __name__ == "__main__":
main()
| """
Example usage:
python -m pipelines.xchem.split-fragnet-candidates -i ../../data/mpro/expanded-17.json
:return:
"""
parser = argparse.ArgumentParser(description='Split fragnet candidates - Split fragment network expansion into individual sets')
parser.add_argument('-i', '--input', help='JSON containing the expanded candidates)')
parser.add_argument('-g', '--generate-filenames', action='store_true', help='Use automatically generated file names instead of the title field)')
args = parser.parse_args()
utils.log("Split fragnet candidates args: ", args)
infile = args.input
execute(infile, args.generate_filenames) |
interface.go | /*
Copyright The Kubernetes Authors.
Copyright 2020 Authors of Arktos - file modified.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// CronJobs returns a CronJobInformer.
CronJobs() CronJobInformer
}
type version struct {
factory internalinterfaces.SharedInformerFactory
tenant string
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func | (f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &version{factory: f, tenant: "system", namespace: namespace, tweakListOptions: tweakListOptions}
}
func NewWithMultiTenancy(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc, tenant string) Interface {
return &version{factory: f, tenant: tenant, namespace: namespace, tweakListOptions: tweakListOptions}
}
// CronJobs returns a CronJobInformer.
func (v *version) CronJobs() CronJobInformer {
return &cronJobInformer{factory: v.factory, namespace: v.namespace, tenant: v.tenant, tweakListOptions: v.tweakListOptions}
}
| New |
game.py | import numpy as np
from copy import copy
from rlcard.games.leducholdem.dealer import LeducholdemDealer as Dealer
from rlcard.games.leducholdem.player import LeducholdemPlayer as Player
from rlcard.games.leducholdem.judger import LeducholdemJudger as Judger
from rlcard.games.leducholdem.round import LeducholdemRound as Round
from rlcard.games.limitholdem.game import LimitholdemGame
class | (LimitholdemGame):
def __init__(self, allow_step_back=False):
''' Initialize the class leducholdem Game
'''
self.allow_step_back = allow_step_back
''' No big/small blind
# Some configurations of the game
# These arguments are fixed in Leduc Hold'em Game
# Raise amount and allowed times
self.raise_amount = 2
self.allowed_raise_num = 2
self.num_players = 2
'''
# Some configurations of the game
# These arguments can be specified for creating new games
# Small blind and big blind
self.small_blind = 1
self.big_blind = 2 * self.small_blind
# Raise amount and allowed times
self.raise_amount = self.big_blind
self.allowed_raise_num = 2
self.num_players = 2
def init_game(self):
''' Initialize the game of Leduc Hold'em
This version supports two-player Leduc Hold'em
Returns:
(tuple): Tuple containing:
(dict): The first state of the game
(int): Current player's id
'''
# Initialize a dealer that can deal cards
self.dealer = Dealer()
# Initialize two players to play the game
self.players = [Player(i) for i in range(self.num_players)]
# Initialize a judger class which will decide who wins in the end
self.judger = Judger()
# Prepare for the first round
for i in range(self.num_players):
self.players[i].hand = self.dealer.deal_card()
# Randomly choose a small blind and a big blind
s = np.random.randint(0, self.num_players)
b = (s + 1) % self.num_players
self.players[b].in_chips = self.big_blind
self.players[s].in_chips = self.small_blind
self.public_card = None
# The player with small blind plays the first
self.game_pointer = s
# Initialize a bidding round. In the first round, the big blind and the small blind need to
# be passed to the round for processing.
self.round = Round(raise_amount=self.raise_amount,
allowed_raise_num=self.allowed_raise_num,
num_players=self.num_players)
self.round.start_new_round(game_pointer=self.game_pointer, raised=[p.in_chips for p in self.players])
# Count the round. There are 2 rounds in each game.
self.round_counter = 0
# Save the history for stepping back to the last state.
self.history = []
state = self.get_state(self.game_pointer)
return state, self.game_pointer
def step(self, action):
''' Get the next state
Args:
action (str): a specific action. (call, raise, fold, or check)
Returns:
(tuple): Tuple containing:
(dict): next player's state
(int): next player's id
'''
if self.allow_step_back:
# First snapshot the current state
r = copy(self.round)
r_raised = copy(self.round.raised)
gp = self.game_pointer
r_c = self.round_counter
d_deck = copy(self.dealer.deck)
p = copy(self.public_card)
ps = [copy(self.players[i]) for i in range(self.num_players)]
ps_hand = [copy(self.players[i].hand) for i in range(self.num_players)]
self.history.append((r, r_raised, gp, r_c, d_deck, p, ps, ps_hand))
# Then we proceed to the next round
self.game_pointer = self.round.proceed_round(self.players, action)
# If a round is over, we deal more public cards
if self.round.is_over():
# For the first round, we deal 1 card as public card. Double the raise amount for the second round
if self.round_counter == 0:
self.public_card = self.dealer.deal_card()
self.round.raise_amount = 2 * self.raise_amount
self.round_counter += 1
self.round.start_new_round(self.game_pointer)
state = self.get_state(self.game_pointer)
return state, self.game_pointer
def get_state(self, player):
''' Return player's state
Args:
player_id (int): player id
Returns:
(dict): The state of the player
'''
chips = [self.players[i].in_chips for i in range(self.num_players)]
legal_actions = self.get_legal_actions()
state = self.players[player].get_state(self.public_card, chips, legal_actions)
state['current_player'] = self.game_pointer
return state
def is_over(self):
''' Check if the game is over
Returns:
(boolean): True if the game is over
'''
alive_players = [1 if p.status=='alive' else 0 for p in self.players]
# If only one player is alive, the game is over.
if sum(alive_players) == 1:
return True
# If all rounds are finished
if self.round_counter >= 2:
return True
return False
def get_payoffs(self):
''' Return the payoffs of the game
Returns:
(list): Each entry corresponds to the payoff of one player
'''
chips_payoffs = self.judger.judge_game(self.players, self.public_card)
payoffs = np.array(chips_payoffs) / (self.big_blind)
return payoffs
def step_back(self):
''' Return to the previous state of the game
Returns:
(bool): True if the game steps back successfully
'''
if len(self.history) > 0:
self.round, r_raised, self.game_pointer, self.round_counter, d_deck, self.public_card, self.players, ps_hand = self.history.pop()
self.round.raised = r_raised
self.dealer.deck = d_deck
for i, hand in enumerate(ps_hand):
self.players[i].hand = hand
return True
return False
# Test the game
#if __name__ == "__main__":
# game = LeducholdemGame(allow_step_back=True)
# while True:
# print('New Game')
# state, game_pointer = game.init_game()
# print(game_pointer, state)
# i = 1
# while not game.is_over():
# i += 1
# legal_actions = game.get_legal_actions()
# if i == 4:
# print('Step back')
# print(game.step_back())
# game_pointer = game.get_player_id()
# print(game_pointer)
# state = game.get_state(game_pointer)
# legal_actions = game.get_legal_actions()
# # action = input()
# action = np.random.choice(legal_actions)
# print(game_pointer, action, legal_actions, state)
# state, game_pointer = game.step(action)
# print(game_pointer, state)
#
# print(game.get_payoffs())
| LeducholdemGame |
clk_peri_ctrl.rs | #[doc = "Reader of register CLK_PERI_CTRL"]
pub type R = crate::R<u32, super::CLK_PERI_CTRL>;
#[doc = "Writer for register CLK_PERI_CTRL"]
pub type W = crate::W<u32, super::CLK_PERI_CTRL>;
#[doc = "Register CLK_PERI_CTRL `reset()`'s with value 0"]
impl crate::ResetValue for super::CLK_PERI_CTRL {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `ENABLE`"]
pub type ENABLE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ENABLE`"]
pub struct ENABLE_W<'a> {
w: &'a mut W,
}
impl<'a> ENABLE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11);
self.w
}
}
#[doc = "Reader of field `KILL`"]
pub type KILL_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `KILL`"]
pub struct KILL_W<'a> {
w: &'a mut W,
}
impl<'a> KILL_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
self.w
}
}
#[doc = "Selects the auxiliary clock source, will glitch when switching\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum AUXSRC_A {
#[doc = "0: `0`"]
CLK_SYS = 0,
#[doc = "1: `1`"]
CLKSRC_PLL_SYS = 1,
#[doc = "2: `10`"]
CLKSRC_PLL_USB = 2,
#[doc = "3: `11`"]
ROSC_CLKSRC_PH = 3,
#[doc = "4: `100`"]
XOSC_CLKSRC = 4,
#[doc = "5: `101`"]
CLKSRC_GPIN0 = 5,
#[doc = "6: `110`"]
CLKSRC_GPIN1 = 6,
}
impl From<AUXSRC_A> for u8 {
#[inline(always)]
fn from(variant: AUXSRC_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `AUXSRC`"]
pub type AUXSRC_R = crate::R<u8, AUXSRC_A>;
impl AUXSRC_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> crate::Variant<u8, AUXSRC_A> {
use crate::Variant::*;
match self.bits {
0 => Val(AUXSRC_A::CLK_SYS),
1 => Val(AUXSRC_A::CLKSRC_PLL_SYS),
2 => Val(AUXSRC_A::CLKSRC_PLL_USB),
3 => Val(AUXSRC_A::ROSC_CLKSRC_PH),
4 => Val(AUXSRC_A::XOSC_CLKSRC),
5 => Val(AUXSRC_A::CLKSRC_GPIN0),
6 => Val(AUXSRC_A::CLKSRC_GPIN1),
i => Res(i),
}
}
#[doc = "Checks if the value of the field is `CLK_SYS`"]
#[inline(always)]
pub fn is_clk_sys(&self) -> bool {
*self == AUXSRC_A::CLK_SYS
}
#[doc = "Checks if the value of the field is `CLKSRC_PLL_SYS`"]
#[inline(always)]
pub fn is_clksrc_pll_sys(&self) -> bool {
*self == AUXSRC_A::CLKSRC_PLL_SYS
}
#[doc = "Checks if the value of the field is `CLKSRC_PLL_USB`"]
#[inline(always)]
pub fn is_clksrc_pll_usb(&self) -> bool {
*self == AUXSRC_A::CLKSRC_PLL_USB
}
#[doc = "Checks if the value of the field is `ROSC_CLKSRC_PH`"]
#[inline(always)]
pub fn is_rosc_clksrc_ph(&self) -> bool {
*self == AUXSRC_A::ROSC_CLKSRC_PH
}
#[doc = "Checks if the value of the field is `XOSC_CLKSRC`"]
#[inline(always)]
pub fn is_xosc_clksrc(&self) -> bool {
*self == AUXSRC_A::XOSC_CLKSRC
}
#[doc = "Checks if the value of the field is `CLKSRC_GPIN0`"]
#[inline(always)]
pub fn is_clksrc_gpin0(&self) -> bool {
*self == AUXSRC_A::CLKSRC_GPIN0
}
#[doc = "Checks if the value of the field is `CLKSRC_GPIN1`"]
#[inline(always)]
pub fn is_clksrc_gpin1(&self) -> bool {
*self == AUXSRC_A::CLKSRC_GPIN1
}
}
#[doc = "Write proxy for field `AUXSRC`"]
pub struct AUXSRC_W<'a> {
w: &'a mut W,
}
impl<'a> AUXSRC_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: AUXSRC_A) -> &'a mut W {
unsafe { self.bits(variant.into()) }
}
#[doc = "`0`"]
#[inline(always)]
pub fn clk_sys(self) -> &'a mut W |
#[doc = "`1`"]
#[inline(always)]
pub fn clksrc_pll_sys(self) -> &'a mut W {
self.variant(AUXSRC_A::CLKSRC_PLL_SYS)
}
#[doc = "`10`"]
#[inline(always)]
pub fn clksrc_pll_usb(self) -> &'a mut W {
self.variant(AUXSRC_A::CLKSRC_PLL_USB)
}
#[doc = "`11`"]
#[inline(always)]
pub fn rosc_clksrc_ph(self) -> &'a mut W {
self.variant(AUXSRC_A::ROSC_CLKSRC_PH)
}
#[doc = "`100`"]
#[inline(always)]
pub fn xosc_clksrc(self) -> &'a mut W {
self.variant(AUXSRC_A::XOSC_CLKSRC)
}
#[doc = "`101`"]
#[inline(always)]
pub fn clksrc_gpin0(self) -> &'a mut W {
self.variant(AUXSRC_A::CLKSRC_GPIN0)
}
#[doc = "`110`"]
#[inline(always)]
pub fn clksrc_gpin1(self) -> &'a mut W {
self.variant(AUXSRC_A::CLKSRC_GPIN1)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x07 << 5)) | (((value as u32) & 0x07) << 5);
self.w
}
}
impl R {
#[doc = "Bit 11 - Starts and stops the clock generator cleanly"]
#[inline(always)]
pub fn enable(&self) -> ENABLE_R {
ENABLE_R::new(((self.bits >> 11) & 0x01) != 0)
}
#[doc = "Bit 10 - Asynchronously kills the clock generator"]
#[inline(always)]
pub fn kill(&self) -> KILL_R {
KILL_R::new(((self.bits >> 10) & 0x01) != 0)
}
#[doc = "Bits 5:7 - Selects the auxiliary clock source, will glitch when switching"]
#[inline(always)]
pub fn auxsrc(&self) -> AUXSRC_R {
AUXSRC_R::new(((self.bits >> 5) & 0x07) as u8)
}
}
impl W {
#[doc = "Bit 11 - Starts and stops the clock generator cleanly"]
#[inline(always)]
pub fn enable(&mut self) -> ENABLE_W {
ENABLE_W { w: self }
}
#[doc = "Bit 10 - Asynchronously kills the clock generator"]
#[inline(always)]
pub fn kill(&mut self) -> KILL_W {
KILL_W { w: self }
}
#[doc = "Bits 5:7 - Selects the auxiliary clock source, will glitch when switching"]
#[inline(always)]
pub fn auxsrc(&mut self) -> AUXSRC_W {
AUXSRC_W { w: self }
}
}
| {
self.variant(AUXSRC_A::CLK_SYS)
} |
utils.rs | pub(crate) mod duration;
use serde::de::{Deserialize, MapAccess, SeqAccess};
use std::marker::PhantomData;
/// Re-Implementation of `serde::private::de::size_hint::cautious`
#[inline]
pub(crate) fn size_hint_cautious(hint: Option<usize>) -> usize {
std::cmp::min(hint.unwrap_or(0), 4096)
}
pub(crate) const NANOS_PER_SEC: u32 = 1_000_000_000;
// pub(crate) const NANOS_PER_MILLI: u32 = 1_000_000;
// pub(crate) const NANOS_PER_MICRO: u32 = 1_000;
// pub(crate) const MILLIS_PER_SEC: u64 = 1_000;
// pub(crate) const MICROS_PER_SEC: u64 = 1_000_000;
pub(crate) struct MapIter<'de, A, K, V> {
pub(crate) access: A,
marker: PhantomData<(&'de (), K, V)>,
}
impl<'de, A, K, V> MapIter<'de, A, K, V> {
pub(crate) fn new(access: A) -> Self
where
A: MapAccess<'de>,
{
Self {
access,
marker: PhantomData,
}
}
}
impl<'de, A, K, V> Iterator for MapIter<'de, A, K, V>
where
A: MapAccess<'de>,
K: Deserialize<'de>,
V: Deserialize<'de>,
{
type Item = Result<(K, V), A::Error>;
fn next(&mut self) -> Option<Self::Item> {
self.access.next_entry().transpose()
}
fn size_hint(&self) -> (usize, Option<usize>) {
match self.access.size_hint() {
Some(size) => (size, Some(size)),
None => (0, None),
}
}
}
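// Typical use (illustrative sketch, assuming a `Visitor::visit_map` body):
//   let map: std::collections::BTreeMap<K, V> =
//       MapIter::new(access).collect::<Result<_, _>>()?;
// which forwards any deserialization error from `next_entry`.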
pub(crate) struct SeqIter<'de, A, T> {
access: A,
marker: PhantomData<(&'de (), T)>,
}
impl<'de, A, T> SeqIter<'de, A, T> {
pub(crate) fn new(access: A) -> Self
where
A: SeqAccess<'de>,
{
Self {
access,
marker: PhantomData,
}
}
}
impl<'de, A, T> Iterator for SeqIter<'de, A, T>
where
A: SeqAccess<'de>,
T: Deserialize<'de>,
{
type Item = Result<T, A::Error>;
fn next(&mut self) -> Option<Self::Item> {
self.access.next_element().transpose()
}
fn size_hint(&self) -> (usize, Option<usize>) |
}
pub(crate) fn duration_as_secs_f64(dur: &std::time::Duration) -> f64 {
(dur.as_secs() as f64) + (dur.subsec_nanos() as f64) / (NANOS_PER_SEC as f64)
}
pub(crate) fn duration_signed_from_secs_f64(
secs: f64,
) -> Result<self::duration::DurationSigned, String> {
const MAX_NANOS_F64: f64 = ((u64::max_value() as u128 + 1) * (NANOS_PER_SEC as u128)) as f64;
// TODO why are the seconds converted to nanoseconds first?
// Does it make sense to just truncate the value?
let mut nanos = secs * (NANOS_PER_SEC as f64);
if !nanos.is_finite() {
return Err("got non-finite value when converting float to duration".into());
}
if nanos >= MAX_NANOS_F64 {
return Err("overflow when converting float to duration".into());
}
let mut sign = self::duration::Sign::Positive;
if nanos < 0.0 {
nanos = -nanos;
sign = self::duration::Sign::Negative;
}
let nanos = nanos as u128;
Ok(self::duration::DurationSigned::new(
sign,
(nanos / (NANOS_PER_SEC as u128)) as u64,
(nanos % (NANOS_PER_SEC as u128)) as u32,
))
}
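// Illustrative example (added for clarity): duration_signed_from_secs_f64(-1.25)
// yields a DurationSigned with Sign::Negative, 1 second, and 250_000_000
// nanoseconds; non-finite or overflowing inputs return the Err strings above.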
| {
match self.access.size_hint() {
Some(size) => (size, Some(size)),
None => (0, None),
}
} |
s.go | package s // Subroutines
import (
"bufio"
"log"
"os"
"regexp"
"runtime/debug"
)
type ChannelBuf struct {
Str string
channel chan string
eof bool
}
func (this *ChannelBuf) Next() bool {
if !this.eof {
if line, ok := <-this.channel; ok {
this.Str = line
return true
} else {
this.eof = true
}
}
return false
}
func Open(fname string) *ChannelBuf {
ch := make(chan string, 32) // Some arbitrary amount of readahead
file, err := os.Open(fname)
if err != nil {
debug.PrintStack()
log.Fatal(err)
}
scanner := bufio.NewScanner(file)
go func() {
defer func() {
file.Close()
close(ch)
}()
for scanner.Scan() {
ch <- scanner.Text()
}
if err := scanner.Err(); err != nil {
debug.PrintStack()
log.Fatal(err)
}
}()
return &ChannelBuf{"", ch, false}
}
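// Example usage (illustrative, not in the original source):
//   buf := Open("input.txt")
//   for buf.Next() {
//       process(buf.Str) // process is a hypothetical per-line handler
//   }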
func Match(target, re string) []string {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
return match
}
func Match1(target, re string) (bool, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 2 {
return true, match[1]
}
return false, ""
}
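// Illustrative example (added for clarity):
//   ok, name := Match1("user=alice", `user=(\w+)`)
//   // ok == true, name == "alice"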
func Match2(target, re string) (bool, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 3 {
return true, match[1], match[2]
}
return false, "", ""
}
func Match3(target, re string) (bool, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 4 {
return true, match[1], match[2], match[3]
}
return false, "", "", ""
}
func Match4(target, re string) (bool, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 5 {
return true, match[1], match[2], match[3], match[4]
}
return false, "", "", "", ""
}
func Match5(target, re string) (bool, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 6 {
return true, match[1], match[2], match[3], match[4], match[5]
}
return false, "", "", "", "", ""
}
func Match6(target, re string) (bool, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil |
match := r.FindStringSubmatch(target)
if len(match) == 7 {
return true, match[1], match[2], match[3], match[4], match[5], match[6]
}
return false, "", "", "", "", "", ""
}
func Match7(target, re string) (bool, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 8 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7]
}
return false, "", "", "", "", "", "", ""
}
func Match8(target, re string) (bool, string, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 9 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7], match[8]
}
return false, "", "", "", "", "", "", "", ""
}
func Match9(target, re string) (bool, string, string, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 10 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7], match[8], match[9]
}
return false, "", "", "", "", "", "", "", "", ""
}
func Match10(target, re string) (bool, string, string, string, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 11 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7], match[8], match[9], match[10]
}
return false, "", "", "", "", "", "", "", "", "", ""
}
func Match11(target, re string) (bool, string, string, string, string, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 12 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7], match[8], match[9], match[10], match[11]
}
return false, "", "", "", "", "", "", "", "", "", "", ""
}
func Match12(target, re string) (bool, string, string, string, string, string, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 13 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7], match[8], match[9], match[10], match[11], match[12]
}
return false, "", "", "", "", "", "", "", "", "", "", "", ""
}
func Match13(target, re string) (bool, string, string, string, string, string, string, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 14 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7], match[8], match[9], match[10], match[11], match[12], match[13]
}
return false, "", "", "", "", "", "", "", "", "", "", "", "", ""
}
func Match14(target, re string) (bool, string, string, string, string, string, string, string, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 15 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7], match[8], match[9], match[10], match[11], match[12], match[13], match[14]
}
return false, "", "", "", "", "", "", "", "", "", "", "", "", "", ""
}
func Match15(target, re string) (bool, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 16 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7], match[8], match[9], match[10], match[11], match[12], match[13], match[14], match[15]
}
return false, "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""
}
func Match16(target, re string) (bool, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 17 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7], match[8], match[9], match[10], match[11], match[12], match[13], match[14], match[15], match[16]
}
return false, "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""
}
func Match17(target, re string) (bool, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 18 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7], match[8], match[9], match[10], match[11], match[12], match[13], match[14], match[15], match[16], match[17]
}
return false, "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""
}
func Match18(target, re string) (bool, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 19 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7], match[8], match[9], match[10], match[11], match[12], match[13], match[14], match[15], match[16], match[17], match[18]
}
return false, "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""
}
func Match19(target, re string) (bool, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 20 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7], match[8], match[9], match[10], match[11], match[12], match[13], match[14], match[15], match[16], match[17], match[18], match[19]
}
return false, "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""
}
func Match20(target, re string) (bool, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string, string) {
r, error := regexp.Compile(re)
if error != nil {
debug.PrintStack()
log.Fatal(error)
}
match := r.FindStringSubmatch(target)
if len(match) == 21 {
return true, match[1], match[2], match[3], match[4], match[5], match[6], match[7], match[8], match[9], match[10], match[11], match[12], match[13], match[14], match[15], match[16], match[17], match[18], match[19], match[20]
}
return false, "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""
}
// vim:ts=4:sw=4:et
| {
debug.PrintStack()
log.Fatal(error)
} |
regexlib_1484.py | # 1484
# <a[a-zA-Z0-9 ="'.:;?]*(href=[\"\'](http:\/\/|\.\/|\/)?\w+(\.\w+)*(\/\w+(\.\w+)?)*(\/|\?\w*=\w*(&\w*=\w*)*)?[\"\'])*(>[a-zA-Z0-9 ="'<>.:;?]*</a>)
# POLYNOMIAL
# nums:4
# POLYNOMIAL AttackString:"<a"+"href="0""*5000+"! _1_POA(i)" |
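# Note (added for clarity): both the leading [a-zA-Z0-9 ="'.:;?]* class and the
# starred (href=...)* group can consume the repeated href="0" tokens, so the
# engine must backtrack over polynomially many split points before the final
# "! _1_POA(i)" suffix forces a mismatch.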
regex = """<a[a-zA-Z0-9 ="'.:;?]*(href=[\"\'](http:\/\/|\.\/|\/)?\w+(\.\w+)*(\/\w+(\.\w+)?)*(\/|\?\w*=\w*(&\w*=\w*)*)?[\"\'])*(>[a-zA-Z0-9 ="'<>.:;?]*</a>)"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "<a" + "href=\"0\"" * i * 10000 + "! _1_POA(i)"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!") |
import re
from time import perf_counter |
types.ts | /*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { Line } from '../../../../geoms/types';
import { Dimensions } from '../../../../utils/dimensions';
import { LineAnnotationDatum } from '../../utils/specs';
import { AnnotationMarker } from '../types';
/** @internal */
export interface AnnotationLineProps {
specId: string;
id: string;
datum: LineAnnotationDatum; | * The path points of a line annotation
*/
linePathPoints: Line;
markers: Array<AnnotationMarker>;
panel: Dimensions;
} | /** |
index.js | import React from 'react';
import { Container } from '../../styles/global';
import { ContainerItens } from './styles';
import SquareContainer from '../../components/SquareContainer';
import CircleContainer from '../../components/CircleContainer';
import IndexText from '../../components/IndexText';
export default function Main() {
return (
<Container>
{/* Recently Played */}
<IndexText Text="Tocadas Recentemente" />
<ContainerItens>
<SquareContainer
LinkAdress="Playlist"
Title="Lorem Ipsum Playlist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
Description="Lorem ipsum dolor sit amet, consectetur adipiscing elit."
/>
<SquareContainer
LinkAdress="Playlist"
Title="Lorem Ipsum Playlist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
Description="Lorem ipsum dolor sit amet, consectetur adipiscing elit."
/>
<SquareContainer
LinkAdress="Playlist"
Title="Lorem Ipsum Playlist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
Description="Lorem ipsum dolor sit amet, consectetur adipiscing elit."
/>
<SquareContainer
LinkAdress="Playlist"
Title="Lorem Ipsum Playlist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
Description="Lorem ipsum dolor sit amet, consectetur adipiscing elit."
/>
<SquareContainer
LinkAdress="Playlist"
Title="Lorem Ipsum Playlist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
Description="Lorem ipsum dolor sit amet, consectetur adipiscing elit."
/>
<SquareContainer
LinkAdress="Playlist"
Title="Lorem Ipsum Playlist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
Description="Lorem ipsum dolor sit amet, consectetur adipiscing elit."
/>
</ContainerItens>
{/* Favorites */}
<IndexText Text="Favoritos" />
<ContainerItens>
<CircleContainer
LinkAdress="Artist"
Title="Lorem Ipsum Artist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
/>
<CircleContainer
LinkAdress="Artist"
Title="Lorem Ipsum Artist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
/>
<CircleContainer
LinkAdress="Artist"
Title="Lorem Ipsum Artist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
/>
<CircleContainer
LinkAdress="Artist"
Title="Lorem Ipsum Artist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
/>
</ContainerItens>
{/* Recommendation */}
<IndexText Text="Recomendações" />
<ContainerItens>
<SquareContainer
LinkAdress="Playlist"
Title="Lorem Ipsum Playlist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
Description="Lorem ipsum dolor sit amet, consectetur adipiscing elit."
/> | Title="Lorem Ipsum Playlist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
Description="Lorem ipsum dolor sit amet, consectetur adipiscing elit."
/>
<SquareContainer
LinkAdress="Playlist"
Title="Lorem Ipsum Playlist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
Description="Lorem ipsum dolor sit amet, consectetur adipiscing elit."
/>
<SquareContainer
LinkAdress="Playlist"
Title="Lorem Ipsum Playlist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
Description="Lorem ipsum dolor sit amet, consectetur adipiscing elit."
/>
<SquareContainer
LinkAdress="Playlist"
Title="Lorem Ipsum Playlist"
Image="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAA1BMVEWIiIhYZW6zAAAASElEQVR4nO3BgQAAAADDoPlTX+AIVQEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADwDcaiAAFXD1ujAAAAAElFTkSuQmCC"
Description="Lorem ipsum dolor sit amet, consectetur adipiscing elit."
/>
</ContainerItens>
</Container>
);
} | <SquareContainer
LinkAdress="Playlist" |
pint_sim.py | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import astropy.units as u
from astropy.time import Time, TimeDelta
from pint.residuals import resids
import pint.toa as toa
from pint import models
__all__ = ['make_ideal',
'createfourierdesignmatrix_red',
'add_rednoise',
'add_dm_rednoise',
'add_efac',
'add_equad',
'add_ecorr']
def make_ideal(toas, model, iterations=2):
'''
Takes a pint.toas and pint.model object and effectively zeros out the residuals.
'''
for ii in range(iterations):
rs=resids(toas, model)
toas.adjust_TOAs(TimeDelta(-1.0*rs.time_resids))
def createfourierdesignmatrix_red(toas, nmodes=30, Tspan=None,
logf=False, fmin=None, fmax=None,
pshift=False, modes=None):
"""
    Construct Fourier design matrix from Eq. 11 of Lentati et al., 2013.
Parameters
----------
toas : array
Vector of time series in seconds.
nmodes : int
Number of fourier coefficients to use.
    Tspan : float
        Option to use some other Tspan [s]
logf : bool
Use log frequency spacing.
fmin : float
Lower sampling frequency.
fmax : float
Upper sampling frequency.
pshift : bool
Option to add random phase shift.
modes : array
Option to provide explicit list or array of sampling frequencies.
Returns
-------
    F : array
        Fourier design matrix, [NTOAs x 2 nfreqs].
    f : array
        Sampling frequencies, [2 nfreqs].
"""
T = Tspan if Tspan is not None else toas.max() - toas.min()
# define sampling frequencies
if modes is not None:
nmodes = len(modes)
f = modes
elif fmin is None and fmax is None and not logf:
# make sure partially overlapping sets of modes
# have identical frequencies
f = 1.0 * np.arange(1, nmodes + 1) / T
else:
# more general case
if fmin is None:
fmin = 1 / T
if fmax is None:
fmax = nmodes / T
if logf:
f = np.logspace(np.log10(fmin), np.log10(fmax), nmodes)
else:
f = np.linspace(fmin, fmax, nmodes)
# add random phase shift to basis functions
ranphase = (np.random.uniform(0.0, 2 * np.pi, nmodes)
if pshift else np.zeros(nmodes))
Ffreqs = np.repeat(f, 2)
N = len(toas)
F = np.zeros((N, 2 * nmodes))
# The sine/cosine modes
F[:,::2] = np.sin(2*np.pi*toas[:,None]*f[None,:] +
ranphase[None,:])
F[:,1::2] = np.cos(2*np.pi*toas[:,None]*f[None,:] +
ranphase[None,:])
return F, Ffreqs
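# Illustrative usage sketch (toy TOA array in seconds; the values below are
# assumptions for demonstration, not real pulsar data):
#
#     toas_sec = np.linspace(0.0, 3.15e7, 100)   # ~1 yr of evenly spaced TOAs
#     F, freqs = createfourierdesignmatrix_red(toas_sec, nmodes=30)
#     assert F.shape == (100, 60) and freqs.size == 60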
def add_rednoise(TOAs, A, gamma, components=30,
seed=None, modes=None, Tspan=None):
"""Add red noise with P(f) = A^2 / (12 pi^2) (f * year)^-gamma,
using `components` Fourier bases.
Optionally take a pseudorandom-number-generator seed."""
# nobs=len(psr.toas)
nobs = len(TOAs.table)
day_in_sec = 86400
year_in_sec = 365.25*day_in_sec
fyr = 1 / year_in_sec
if seed is not None:
np.random.seed(seed)
if modes is not None:
print('Must use linear spacing.')
toas = np.array(TOAs.table['tdbld'], dtype='float64') * day_in_sec #to sec
Tspan = toas.max() - toas.min()
F, freqs = createfourierdesignmatrix_red(toas,Tspan=Tspan,modes=modes)
prior = A**2 * (freqs/fyr)**(-gamma) / (12 * np.pi**2 * Tspan) * year_in_sec**3
y = np.sqrt(prior) * np.random.randn(freqs.size)
dt = np.dot(F,y) * u.s
TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def add_dm_rednoise(TOAs, A, gamma, components=30, rf_ref=1400,
seed=None, modes=None, Tspan=None, useDM=False):
"""Add red noise with P(f) = A^2 / (12 pi^2) (f year)^-gamma,
using `components` Fourier bases.
Optionally take a pseudorandom-number-generator seed."""
# nobs=len(psr.toas)
nobs = len(TOAs.table)
radio_freqs = TOAs.table['freq']
if useDM:
rf_ref = 4.15e3
chrom = rf_ref**2 / radio_freqs**2
day_in_sec = 86400
year_in_sec = 365.25*day_in_sec
fyr = 1 / year_in_sec
if seed is not None:
np.random.seed(seed)
toas = np.array(TOAs.table['tdbld'], dtype='float64') * day_in_sec #to sec
Tspan = toas.max() - toas.min()
F, freqs = createfourierdesignmatrix_red(toas,Tspan=Tspan,modes=modes)
prior = A**2 * (freqs/fyr)**(-gamma) / (12 * np.pi**2 * Tspan) * year_in_sec**3
y = np.sqrt(prior) * np.random.randn(freqs.size)
dt = chrom.quantity.value * np.dot(F,y) * u.s
TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
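# Illustrative sketch: because the delays scale as (rf_ref / radio_freq)^2, the
# same red-noise realization imprints larger offsets on lower-frequency TOAs.
# A hypothetical call (TOAs object `t` assumed loaded elsewhere):
#
#     add_dm_rednoise(t, A=1e-14, gamma=3.0, useDM=True, seed=1)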
def add_equad(TOAs, equad, flagid=None, flags=None, seed=None):
"""Add quadrature noise of rms `equad` [s].
Optionally take a pseudorandom-number-generator seed."""
if seed is not None:
np.random.seed(seed)
# default equadvec
equadvec = np.zeros(TOAs.ntoas)
# check that equad is scalar if flags is None
if flags is None:
if not np.isscalar(equad):
raise ValueError('ERROR: If flags is None, equad must be a scalar')
else:
equadvec = np.ones(TOAs.ntoas) * equad
if flags is not None and flagid is not None and not np.isscalar(equad):
if len(equad) == len(flags):
for ct, flag in enumerate(flags):
ind = flag == np.array([f['f'] for f
in TOAs.table['flags'].data])
equadvec[ind] = equad[ct]
equadvec = equadvec * u.s * np.random.randn(TOAs.ntoas)
TOAs.adjust_TOAs(TimeDelta(equadvec.to('day')))
def add_efac(TOAs, efac, flagid=None, flags=None, seed=None):
"""Add quadrature noise of rms `equad` [s].
Optionally take a pseudorandom-number-generator seed."""
if seed is not None:
np.random.seed(seed)
    # default efacvec
    efacvec = np.zeros(TOAs.ntoas)
    # check that efac is scalar if flags is None
if flags is None:
if not np.isscalar(efac):
raise ValueError('ERROR: If flags is None, efac must be a scalar')
else:
efacvec = np.ones(TOAs.ntoas) * efac
if flags is not None and flagid is not None and not np.isscalar(efac):
if len(efac) == len(flags):
for ct, flag in enumerate(flags):
ind = flag == np.array([f['f'] for f
in TOAs.table['flags'].data])
efacvec[ind] = efac[ct]
dt = efacvec * TOAs.get_errors().to('s') * np.random.randn(TOAs.ntoas)
TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def | (times, flags=None, dt=1.0):
isort = np.argsort(times)
bucket_ref = [times[isort[0]]]
bucket_ind = [[isort[0]]]
for i in isort[1:]:
if times[i] - bucket_ref[-1] < dt:
bucket_ind[-1].append(i)
else:
bucket_ref.append(times[i])
bucket_ind.append([i])
avetoas = np.array([np.mean(times[l]) for l in bucket_ind],'d')
if flags is not None:
aveflags = np.array([flags[l[0]] for l in bucket_ind])
U = np.zeros((len(times),len(bucket_ind)),'d')
for i,l in enumerate(bucket_ind):
U[l,i] = 1
if flags is not None:
return avetoas, aveflags, U
else:
return avetoas, U
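# Illustrative sketch: TOAs closer together than `dt` share an epoch bucket.
#
#     t = np.array([0.0, 0.2, 5.0])
#     avetoas, U = quantize(t, dt=1.0)   # two buckets: {0.0, 0.2} and {5.0}
#     # U has shape (3, 2); each row maps a TOA to its bucket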
def add_ecorr(TOAs, ecorr, flagid=None, flags=None, coarsegrain=1*u.s, seed=None):
"""Add correlated quadrature noise of rms `ecorr` [s],
    with coarse-graining time `coarsegrain` (an astropy time quantity).
Optionally take a pseudorandom-number-generator seed."""
if seed is not None:
np.random.seed(seed)
times = np.array(TOAs.table['tdbld'], dtype='float64')
if flags is None:
t, U = quantize(times, dt=coarsegrain.to('day').value)
elif flags is not None and flagid is not None:
flagvals = np.array([f[flagid] for f in TOAs.table['flags'].data])
t, f, U = quantize(times, flagvals, dt=coarsegrain.to('day').value)
# default ecorr value
ecorrvec = np.zeros(len(t))
# check that ecorr is scalar if flags is None
if flags is None:
if not np.isscalar(ecorr):
raise ValueError('ERROR: If flags is None, ecorr must be a scalar')
else:
ecorrvec = np.ones(len(t)) * ecorr
if flags is not None and flagid is not None and not np.isscalar(ecorr):
if len(ecorr) == len(flags):
for ct, flag in enumerate(flags):
ind = flag == np.array(f)
ecorrvec[ind] = ecorr[ct]
ecorrvec = np.dot(U * ecorrvec, np.random.randn(U.shape[1])) * u.s
TOAs.adjust_TOAs(TimeDelta(ecorrvec.to('day')))
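# End-to-end usage sketch (file names are hypothetical; any PINT-compatible
# .par/.tim pair would do):
#
#     m = models.get_model('J0613-0200.par')
#     t = toa.get_TOAs('J0613-0200.tim')
#     make_ideal(t, m)
#     add_efac(t, efac=1.1, seed=42)
#     add_rednoise(t, A=1e-14, gamma=4.33, seed=42)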
| quantize |
classifier_cov_pow_toy_pvalue.py | from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import argparse
import pandas as pd
from tqdm.auto import tqdm
from datetime import datetime
from sklearn.metrics import log_loss
import seaborn as sns
import matplotlib.pyplot as plt
from utils.functions import train_clf, compute_statistics_single_t0, clf_prob_value, compute_bayesfactor_single_t0, \
odds_ratio_loss, train_pvalue_clf
from models.toy_poisson import ToyPoissonLoader
from models.toy_gmm import ToyGMMLoader
from models.toy_gamma import ToyGammaLoader
from or_classifiers.toy_example_list import classifier_dict, classifier_dict_mlpcomp, classifier_pvalue_dict
model_dict = {
'poisson': ToyPoissonLoader,
'gmm': ToyGMMLoader,
'gamma': ToyGammaLoader
}
def | (run, rep, b, b_prime, alpha, t0_val, sample_size_obs, test_statistic, mlp_comp=False,
monte_carlo_samples=500, debug=False, seed=7, size_check=1000, verbose=False, marginal=False,
size_marginal=1000, guided_sim=False, guided_sample=1000, empirical_marginal=True):
# Changing values if debugging
b = b if not debug else 100
b_prime = b_prime if not debug else 100
size_check = size_check if not debug else 100
rep = rep if not debug else 2
model_obj = model_dict[run](marginal=marginal, size_marginal=size_marginal, empirical_marginal=empirical_marginal)
classifier_dict_run = classifier_dict_mlpcomp if mlp_comp else classifier_dict
# Get the correct functions
msnh_sampling_func = model_obj.sample_msnh_algo5
grid_param = model_obj.grid
gen_obs_func = model_obj.sample_sim
gen_sample_func = model_obj.generate_sample
gen_param_fun = model_obj.sample_param_values
t0_grid = model_obj.pred_grid
tp_func = model_obj.compute_exact_prob
    # Create a sample on which to estimate the entropy and cross-entropy losses
np.random.seed(seed)
sample_check = gen_sample_func(sample_size=size_check, marginal=marginal)
theta_vec = sample_check[:, :model_obj.d]
x_vec = sample_check[:, (model_obj.d + 1):]
bern_vec = sample_check[:, model_obj.d]
true_prob_vec = tp_func(theta_vec=theta_vec, x_vec=x_vec)
entropy_est = -np.average([np.log(true_prob_vec[kk]) if el == 1
else np.log(1 - true_prob_vec[kk])
for kk, el in enumerate(bern_vec)])
# Loop over repetitions and classifiers
# Each time we train the different classifiers, we build the intervals and we record
# whether the point is in or not.
out_val = []
out_cols = ['test_statistic', 'b_prime', 'b', 'classifier', 'classifier_pvalue', 'run', 'rep', 'sample_size_obs',
'cross_entropy_loss', 'cross_entropy_loss_pvalue', 't0_true_val', 'theta_0_current', 'on_true_t0',
'estimated_pvalue', 'in_confint', 'out_confint', 'size_CI', 'true_entropy', 'or_loss_value',
'monte_carlo_samples', 'guided_sim', 'empirical_marginal', 'guided_sample']
pbar = tqdm(total=rep, desc='Toy Example for Simulations, n=%s, b=%s' % (sample_size_obs, b))
rep_counter = 0
not_update_flag = False
while rep_counter < rep:
# Generates samples for each t0 values, so to be able to check both coverage and power
x_obs = gen_obs_func(sample_size=sample_size_obs, true_param=t0_val)
# Train the classifier for the odds
clf_odds_fitted = {}
clf_pvalue_fitted = {}
for clf_name, clf_model in sorted(classifier_dict_run.items(), key=lambda x: x[0]):
clf_odds = train_clf(sample_size=b, clf_model=clf_model, gen_function=gen_sample_func,
clf_name=clf_name, nn_square_root=True)
if verbose:
print('----- %s Trained' % clf_name)
if test_statistic == 'acore':
tau_obs = np.array([
compute_statistics_single_t0(
clf=clf_odds, obs_sample=x_obs, t0=theta_0, grid_param_t1=grid_param,
d=model_obj.d, d_obs=model_obj.d_obs) for theta_0 in t0_grid])
elif test_statistic == 'avgacore':
tau_obs = np.array([
compute_bayesfactor_single_t0(
clf=clf_odds, obs_sample=x_obs, t0=theta_0, gen_param_fun=gen_param_fun,
d=model_obj.d, d_obs=model_obj.d_obs, log_out=False) for theta_0 in t0_grid])
elif test_statistic == 'logavgacore':
tau_obs = np.array([
compute_bayesfactor_single_t0(
clf=clf_odds, obs_sample=x_obs, t0=theta_0, gen_param_fun=gen_param_fun,
d=model_obj.d, d_obs=model_obj.d_obs, log_out=True) for theta_0 in t0_grid])
else:
raise ValueError('The variable test_statistic needs to be either acore, avgacore, logavgacore.'
' Currently %s' % test_statistic)
# Calculating cross-entropy
est_prob_vec = clf_prob_value(clf=clf_odds, x_vec=x_vec, theta_vec=theta_vec, d=model_obj.d,
d_obs=model_obj.d_obs)
loss_value = log_loss(y_true=bern_vec, y_pred=est_prob_vec)
# Calculating or loss
or_loss_value = odds_ratio_loss(clf=clf_odds, x_vec=x_vec, theta_vec=theta_vec,
bern_vec=bern_vec, d=1, d_obs=1)
clf_odds_fitted[clf_name] = (tau_obs, loss_value, or_loss_value)
# Train the P-value regression algorithm for confidence levels
if guided_sim:
            # Guided simulation: sample a set of thetas (of size guided_sample) from the
            # parameter budget, resample them according to the odds values, fit a Gaussian,
            # and then sample the B' datasets from that.
theta_mat_sample = gen_param_fun(sample_size=guided_sample)
if test_statistic == 'acore':
stats_sample = np.apply_along_axis(arr=theta_mat_sample.reshape(-1, 1), axis=1,
func1d=lambda row: compute_statistics_single_t0(
clf=clf_odds,
obs_sample=x_obs,
t0=row,
grid_param_t1=grid_param,
d=model_obj.d,
d_obs=model_obj.d_obs
))
elif test_statistic == 'avgacore':
stats_sample = np.apply_along_axis(arr=theta_mat_sample.reshape(-1, 1), axis=1,
func1d=lambda row: compute_bayesfactor_single_t0(
clf=clf_odds,
obs_sample=x_obs,
t0=row,
gen_param_fun=gen_param_fun,
d=model_obj.d,
d_obs=model_obj.d_obs,
monte_carlo_samples=monte_carlo_samples
))
elif test_statistic == 'logavgacore':
stats_sample = np.apply_along_axis(arr=theta_mat_sample.reshape(-1, 1), axis=1,
func1d=lambda row: compute_bayesfactor_single_t0(
clf=clf_odds,
obs_sample=x_obs,
t0=row,
gen_param_fun=gen_param_fun,
d=model_obj.d,
d_obs=model_obj.d_obs,
monte_carlo_samples=monte_carlo_samples,
log_out=True
))
else:
raise ValueError('The variable test_statistic needs to be either acore, avgacore, logavgacore.'
' Currently %s' % test_statistic)
            # If the statistics are log-odds, some of the values may be negative, so we
            # exponentiate them to make sure large negative numbers are counted correctly
            # (i.e. as very low probabilities, not probabilities with large magnitudes).
if test_statistic in ['acore', 'logavgacore']:
stats_sample = np.exp(stats_sample)
stats_sample = stats_sample/np.sum(stats_sample)
theta_mat_gaussian_fit = np.random.choice(a=theta_mat_sample, p=stats_sample.reshape(-1, ),
size=guided_sample)
            # Guard against zero spread: fall back to unit scale when all resampled thetas coincide
            std_gaussian_fit = np.std(theta_mat_gaussian_fit) if np.std(theta_mat_gaussian_fit) != 0.0 else 1.0
theta_mat = np.clip(
a=np.random.normal(size=b_prime, loc=np.mean(theta_mat_gaussian_fit),
scale=std_gaussian_fit),
a_min=model_obj.low_int, a_max=model_obj.high_int)
sample_mat = np.apply_along_axis(arr=theta_mat.reshape(-1, 1), axis=1,
func1d=lambda row: gen_obs_func(sample_size=sample_size_obs,
true_param=row))
else:
# Generate a matrix with values for both the sampled thetas as the actual samples
theta_mat, sample_mat = msnh_sampling_func(b_prime=b_prime, sample_size=sample_size_obs)
full_mat = np.hstack((theta_mat.reshape(-1, 1), sample_mat))
if test_statistic == 'acore':
stats_mat_generated = np.apply_along_axis(arr=full_mat, axis=1,
func1d=lambda row: compute_statistics_single_t0(
clf=clf_odds,
obs_sample=row[model_obj.d:],
t0=row[:model_obj.d],
grid_param_t1=grid_param,
d=model_obj.d,
d_obs=model_obj.d_obs
))
stats_mat_observed = np.apply_along_axis(arr=full_mat, axis=1,
func1d=lambda row: compute_statistics_single_t0(
clf=clf_odds,
obs_sample=x_obs,
t0=row[:model_obj.d],
grid_param_t1=grid_param,
d=model_obj.d,
d_obs=model_obj.d_obs
))
elif test_statistic == 'avgacore':
stats_mat_generated = np.apply_along_axis(arr=full_mat, axis=1,
func1d=lambda row: compute_bayesfactor_single_t0(
clf=clf_odds,
obs_sample=row[model_obj.d:],
t0=row[:model_obj.d],
gen_param_fun=gen_param_fun,
d=model_obj.d,
d_obs=model_obj.d_obs,
monte_carlo_samples=monte_carlo_samples
))
stats_mat_observed = np.apply_along_axis(arr=full_mat, axis=1,
func1d=lambda row: compute_bayesfactor_single_t0(
clf=clf_odds,
obs_sample=x_obs,
t0=row[:model_obj.d],
gen_param_fun=gen_param_fun,
d=model_obj.d,
d_obs=model_obj.d_obs,
monte_carlo_samples=monte_carlo_samples
))
elif test_statistic == 'logavgacore':
stats_mat_generated = np.apply_along_axis(arr=full_mat, axis=1,
func1d=lambda row: compute_bayesfactor_single_t0(
clf=clf_odds,
obs_sample=row[model_obj.d:],
t0=row[:model_obj.d],
gen_param_fun=gen_param_fun,
d=model_obj.d,
d_obs=model_obj.d_obs,
monte_carlo_samples=monte_carlo_samples,
log_out=True
))
stats_mat_observed = np.apply_along_axis(arr=full_mat, axis=1,
func1d=lambda row: compute_bayesfactor_single_t0(
clf=clf_odds,
obs_sample=x_obs,
t0=row[:model_obj.d],
gen_param_fun=gen_param_fun,
d=model_obj.d,
d_obs=model_obj.d_obs,
monte_carlo_samples=monte_carlo_samples,
log_out=True
))
else:
raise ValueError('The variable test_statistic needs to be either acore, avgacore, logavgacore.'
' Currently %s' % test_statistic)
if np.any(np.isnan(stats_mat_generated)) or not np.all(np.isfinite(stats_mat_generated)) or \
np.any(np.isnan(stats_mat_observed)) or not np.all(np.isfinite(stats_mat_observed)):
not_update_flag = True
break
# Comparing the two vectors of values
clf_pvalue_fitted[clf_name] = {}
indicator_vec = np.greater(stats_mat_observed, stats_mat_generated).astype(int)
for clf_name_pvalue, clf_model_pvalue in sorted(classifier_pvalue_dict.items(), key=lambda x: x[0]):
                # If the indicator_vec is all 0s or all 1s, do not fit a classifier, since
                # sklearn would throw an error. Just return the observed class proportion.
if sum(indicator_vec) <= 1 or sum(indicator_vec) >= len(indicator_vec) - 1:
pval_pred = np.repeat(sum(indicator_vec) / len(indicator_vec), b_prime)
loss_value_pval = np.nan
else:
clf_pvalue = train_pvalue_clf(clf_model=clf_model_pvalue, X=theta_mat.reshape(-1, model_obj.d),
y=indicator_vec.reshape(-1, ), clf_name=clf_name_pvalue,
nn_square_root=True)
pval_pred = clf_pvalue.predict_proba(t0_grid.reshape(-1, model_obj.d))[:, 1]
theta_mat_pred = clf_pvalue.predict_proba(theta_mat.reshape(-1, model_obj.d))[:, 1]
loss_value_pval = log_loss(y_true=indicator_vec, y_pred=theta_mat_pred)
clf_pvalue_fitted[clf_name][clf_name_pvalue] = (pval_pred, loss_value_pval)
        # If there were problems calculating the statistics, retry this repetition without recording it
if not_update_flag:
not_update_flag = False
continue
# At this point all it's left is to record
for clf_name, (tau_obs_val, cross_ent_loss, or_loss_value) in clf_odds_fitted.items():
for clf_name_qr, (pvalue_val, pvalue_celoss_val) in clf_pvalue_fitted[clf_name].items():
size_temp = np.mean((pvalue_val > alpha).astype(int))
for kk, theta_0_current in enumerate(t0_grid):
out_val.append([
test_statistic, b_prime, b, clf_name, clf_name_qr, run, rep_counter, sample_size_obs,
cross_ent_loss, pvalue_celoss_val, t0_val, theta_0_current, int(t0_val == theta_0_current),
pvalue_val[kk], int(pvalue_val[kk] > alpha),
int(pvalue_val[kk] <= alpha), size_temp, entropy_est, or_loss_value,
monte_carlo_samples, int(guided_sim), int(empirical_marginal), guided_sample
])
pbar.update(1)
rep_counter += 1
# Saving the results
out_df = pd.DataFrame.from_records(data=out_val, index=range(len(out_val)), columns=out_cols)
out_dir = 'sims/classifier_cov_pow_toy/'
out_filename = 'classifier_reps_cov_pow_toy_pvalues_%steststats_%s_%sB_%sBprime_%s_%srep_alpha%s_sampleobs%s_t0val%s%s_%s.csv' % (
test_statistic, 'mlp_comp' if mlp_comp else 'toyclassifiers', b, b_prime, run, rep,
str(alpha).replace('.', '-'), sample_size_obs,
str(t0_val).replace('.', '-'),
'_empirmarg' if empirical_marginal else '',
datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
out_df.to_csv(out_dir + out_filename)
# Print results
cov_df = out_df[out_df['on_true_t0'] == 1][['classifier', 'classifier_pvalue', 'in_confint',
'cross_entropy_loss', 'cross_entropy_loss_pvalue', 'size_CI']]
print(cov_df.groupby(['classifier', 'classifier_pvalue']).agg({'in_confint': [np.average],
'size_CI': [np.average, np.std],
'cross_entropy_loss': [np.average],
'cross_entropy_loss_pvalue': [np.average]}))
# Power plots
    out_df['class_combo'] = out_df[['classifier', 'classifier_pvalue']].apply(lambda x: x[0] + '---' + x[1], axis=1)
plot_df = out_df[['class_combo', 'theta_0_current', 'out_confint']].groupby(
['class_combo', 'theta_0_current']).mean().reset_index()
fig = plt.figure(figsize=(20, 10))
sns.lineplot(x='theta_0_current', y='out_confint', hue='class_combo', data=plot_df, palette='cubehelix')
plt.legend(loc='best', fontsize=25)
plt.xlabel(r'$\theta$', fontsize=25)
plt.ylabel('Power', fontsize=25)
plt.title("Power of Hypothesis Test, B=%s, B'=%s, n=%s, %s" % (
b, b_prime, sample_size_obs, run.title()), fontsize=25)
out_dir = 'images/classifier_cov_pow_toy/'
outfile_name = 'power_classifier_reps_pvalue_%steststats_%sB_%sBprime_%s_%srep_alpha%s_sampleobs%s_t0val%s_%s.pdf' % (
test_statistic, b, b_prime, run, rep, str(alpha).replace('.', '-'), sample_size_obs,
str(t0_val).replace('.', '-'),
datetime.strftime(datetime.today(), '%Y-%m-%d')
)
plt.tight_layout()
plt.savefig(out_dir + outfile_name)
plt.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', action="store", type=int, default=7,
help='Random State')
parser.add_argument('--rep', action="store", type=int, default=10,
help='Number of Repetitions for calculating the Pinball loss')
parser.add_argument('--b', action="store", type=int, default=5000,
help='Sample size to train the classifier for calculating odds')
parser.add_argument('--b_prime', action="store", type=int, default=1000,
help='Sample size to train the quantile regression algorithm')
parser.add_argument('--marginal', action='store_true', default=False,
help='Whether we are using a parametric approximation of the marginal or'
'the baseline reference G')
parser.add_argument('--alpha', action="store", type=float, default=0.1,
help='Statistical confidence level')
parser.add_argument('--run', action="store", type=str, default='poisson',
help='Problem to run')
parser.add_argument('--debug', action='store_true', default=False,
help='If true, a very small value for the sample sizes is fit to make sure the'
'file can run quickly for debugging purposes')
parser.add_argument('--verbose', action='store_true', default=False,
help='If true, logs are printed to the terminal')
parser.add_argument('--sample_size_obs', action="store", type=int, default=10,
help='Sample size of the actual observed data.')
parser.add_argument('--t0_val', action="store", type=float, default=10.0,
help='True parameter which generates the observed dataset')
parser.add_argument('--size_marginal', action="store", type=int, default=1000,
help='Sample size of the actual marginal distribution, if marginal is True.')
parser.add_argument('--monte_carlo_samples', action="store", type=int, default=500,
help='Sample size for the calculation of the avgacore and logavgacore statistic.')
parser.add_argument('--test_statistic', action="store", type=str, default='acore',
help='Test statistic to compute confidence intervals. Can be acore|avgacore|logavgacore')
parser.add_argument('--mlp_comp', action='store_true', default=False,
help='If true, we compare different MLP training algorithm.')
parser.add_argument('--empirical_marginal', action='store_true', default=False,
help='Whether we are sampling directly from the empirical marginal for G')
parser.add_argument('--guided_sim', action='store_true', default=False,
help='If true, we guided the sampling for the B prime in order to get meaningful results.')
parser.add_argument('--guided_sample', action="store", type=int, default=2500,
help='The sample size to be used for the guided simulation. Only used if guided_sim is True.')
argument_parsed = parser.parse_args()
# b_vec = [100, 500, 1000]
# for b_val in b_vec:
main(
run=argument_parsed.run,
rep=argument_parsed.rep,
marginal=argument_parsed.marginal,
b=argument_parsed.b,
b_prime=argument_parsed.b_prime,
alpha=argument_parsed.alpha,
debug=argument_parsed.debug,
sample_size_obs=argument_parsed.sample_size_obs,
t0_val=argument_parsed.t0_val,
seed=argument_parsed.seed,
verbose=argument_parsed.verbose,
size_marginal=argument_parsed.size_marginal,
monte_carlo_samples=argument_parsed.monte_carlo_samples,
test_statistic=argument_parsed.test_statistic,
mlp_comp=argument_parsed.mlp_comp,
empirical_marginal=argument_parsed.empirical_marginal,
guided_sim=argument_parsed.guided_sim,
guided_sample=argument_parsed.guided_sample
)
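# Example invocation (argument values are illustrative defaults, not prescriptive):
#     python classifier_cov_pow_toy_pvalue.py --run poisson --b 5000 --b_prime 1000 \
#         --alpha 0.1 --sample_size_obs 10 --t0_val 10.0 --test_statistic logavgacore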
| main |
trunk_test.go | //go:build e2e
package trunk
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/AliyunContainerService/terway/pkg/apis/network.alibabacloud.com/v1beta1"
"github.com/AliyunContainerService/terway/tests/utils"
terwayTypes "github.com/AliyunContainerService/terway/types"
"github.com/AliyunContainerService/terway/types/controlplane"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/intstr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/e2e-framework/klient"
"sigs.k8s.io/e2e-framework/klient/k8s"
"sigs.k8s.io/e2e-framework/klient/wait"
"sigs.k8s.io/e2e-framework/klient/wait/conditions"
"sigs.k8s.io/e2e-framework/pkg/envconf"
"sigs.k8s.io/e2e-framework/pkg/features"
)
func TestDefaultConfigPodNetworking(t *testing.T) {
defaultConfig := features.New("PodNetworking/DefaultConfig").WithLabel("env", "trunking").
Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
pn := newPodNetworking(defaultPodNetworkingName, nil, nil, &metav1.LabelSelector{
MatchLabels: map[string]string{"trunking-pod": "true"},
}, nil)
if err := config.Client().Resources().Create(ctx, pn); err != nil {
t.Fatal(err)
}
return ctx
}).
Assess("with default config", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
var pn v1beta1.PodNetworking
err := config.Client().Resources().Get(ctx, defaultPodNetworkingName, config.Namespace(), &pn)
if err != nil {
t.Fatal(err)
}
if len(pn.Spec.VSwitchOptions) == 0 {
t.Errorf("vSwitchOptions not set")
}
if len(pn.Spec.SecurityGroupIDs) == 0 {
t.Errorf("securityGroupIDs not set")
}
return ctx
}).
Teardown(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
pn := &v1beta1.PodNetworking{
ObjectMeta: metav1.ObjectMeta{Name: defaultPodNetworkingName, Namespace: config.Namespace()},
}
_ = config.Client().Resources().Delete(ctx, pn)
return ctx
}).
Feature()
defaultVSwitch := features.New("PodNetworking/DefaultVSwitch").WithLabel("env", "trunking").
Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
pn := newPodNetworking(defaultPodNetworkingName, []string{"foo"}, nil, &metav1.LabelSelector{
MatchLabels: map[string]string{"trunking-pod": "true"},
}, nil)
if err := config.Client().Resources().Create(ctx, pn); err != nil {
t.Fatal(err)
}
return ctx
}).
Assess("vSwitchOptions is foo", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
var pn v1beta1.PodNetworking
err := config.Client().Resources().Get(ctx, defaultPodNetworkingName, config.Namespace(), &pn)
if err != nil {
t.Fatal(err)
}
if len(pn.Spec.VSwitchOptions) == 0 {
t.Errorf("vSwitchOptions not set")
}
if pn.Spec.VSwitchOptions[0] != "foo" {
t.Errorf("vSwitchOptions not equal foo")
}
if len(pn.Spec.SecurityGroupIDs) == 0 {
t.Errorf("securityGroupIDs not set")
}
return ctx
}).
Teardown(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
pn := &v1beta1.PodNetworking{
ObjectMeta: metav1.ObjectMeta{Name: defaultPodNetworkingName, Namespace: config.Namespace()},
}
_ = config.Client().Resources().Delete(ctx, pn)
return ctx
}).
Feature()
defaultSecurityGroup := features.New("PodNetworking/DefaultSecurityGroup").WithLabel("env", "trunking").
Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
pn := newPodNetworking(defaultPodNetworkingName, nil, []string{"foo"}, &metav1.LabelSelector{
MatchLabels: map[string]string{"trunking-pod": "true"},
}, nil)
if err := config.Client().Resources().Create(ctx, pn); err != nil {
t.Fatal(err)
}
return ctx
}).
Assess("SecurityGroup is foo", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
var pn v1beta1.PodNetworking
err := config.Client().Resources().Get(ctx, defaultPodNetworkingName, config.Namespace(), &pn)
if err != nil {
t.Fatal(err)
}
if len(pn.Spec.VSwitchOptions) == 0 {
t.Errorf("vSwitchOptions not set")
}
if len(pn.Spec.SecurityGroupIDs) == 0 {
t.Errorf("securityGroupIDs not set")
}
if pn.Spec.SecurityGroupIDs[0] != "foo" {
t.Errorf("securityGroupIDs not equal foo")
}
return ctx
}).
Teardown(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
pn := &v1beta1.PodNetworking{
ObjectMeta: metav1.ObjectMeta{Name: defaultPodNetworkingName, Namespace: config.Namespace()},
}
_ = config.Client().Resources().Delete(ctx, pn)
return ctx
}).
Feature()
defaultAnnotationConfig := features.New("Annotation/DefaultAnnoConfig").WithLabel("env", "trunking").
Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
p := newPod(config.Namespace(), podName, nil, map[string]string{terwayTypes.PodENI: "true"})
err := config.Client().Resources().Create(ctx, p)
if err != nil {
t.Error(err)
}
return ctx
}).
Assess("with default config", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
var p corev1.Pod
err := config.Client().Resources().Get(ctx, podName, config.Namespace(), &p)
if err != nil {
t.Fatal(err)
}
pn, err := controlplane.ParsePodNetworksFromAnnotation(&p)
if err != nil {
t.Fatal(err)
}
if len(pn.PodNetworks) != 1 {
t.Errorf("annotation have invalid config")
}
return ctx
}).
Teardown(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: config.Namespace()},
}
_ = config.Client().Resources().Delete(ctx, pod)
return ctx
}).
Feature()
defaultVSwitchAnnotationConfig := features.New("Annotation/DefaultVSwitchAnnoConfig").WithLabel("env", "trunking").
Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
p := newPod(config.Namespace(), podName, nil, map[string]string{terwayTypes.PodENI: "true", terwayTypes.PodNetworks: "{\"podNetworks\":[{\"vSwitchOptions\":[\"foo\"],\"interface\":\"eth0\"}]}"})
err := config.Client().Resources().Create(ctx, p)
if err != nil {
t.Error(err)
}
return ctx
}).
Assess("vSwitchOptions is foo", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
var p corev1.Pod
err := config.Client().Resources().Get(ctx, podName, config.Namespace(), &p)
if err != nil {
t.Fatal(err)
}
pn, err := controlplane.ParsePodNetworksFromAnnotation(&p)
if err != nil {
t.Fatal(err)
}
if len(pn.PodNetworks) != 1 {
t.Errorf("annotation have invalid config")
}
if len(pn.PodNetworks[0].VSwitchOptions) != 1 {
t.Errorf("annotation have invalid config")
}
if pn.PodNetworks[0].VSwitchOptions[0] != "foo" {
t.Errorf("VSwitchOptions not equal foo")
}
return ctx
}).
Teardown(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: config.Namespace()},
}
_ = config.Client().Resources().Delete(ctx, pod)
return ctx
}).
Feature()
testenv.Test(t, defaultConfig, defaultVSwitch, defaultSecurityGroup, defaultAnnotationConfig, defaultVSwitchAnnotationConfig)
}
func TestSelector(t *testing.T) {
podSelector := features.New("PodNetworking/PodSelector").WithLabel("env", "trunking").
Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
pn := newPodNetworking(defaultPodNetworkingName, nil, nil, &metav1.LabelSelector{
MatchLabels: map[string]string{"trunking-pod": "true"},
}, nil)
if err := config.Client().Resources().Create(ctx, pn); err != nil {
t.Fatal(err)
}
return ctx
}).
Assess("podNetworking status ready", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
err := WaitPodNetworkingReady(defaultPodNetworkingName, config.Client())
if err != nil {
t.Fatal(err)
}
return ctx
}).
Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
p := newPod("default", "pod-use-trunking", map[string]string{"trunking-pod": "true"}, nil)
err := config.Client().Resources().Create(ctx, p)
if err != nil {
t.Error(err)
}
return ctx
}).
Assess("pod have trunking config", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
pod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-use-trunking", Namespace: "default"},
}
err := wait.For(conditions.New(config.Client().Resources()).ResourceMatch(&pod, func(object k8s.Object) bool {
p := object.(*corev1.Pod)
if !terwayTypes.PodUseENI(p) {
return false
}
if p.Annotations[terwayTypes.PodNetworking] != defaultPodNetworkingName {
return false
}
return true
}), wait.WithTimeout(time.Second*5))
if err != nil {
t.Fatal(err)
}
return ctx
}).
Teardown(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
p := newPod("default", podName, map[string]string{"trunking-pod": "true"}, nil)
_ = config.Client().Resources().Delete(ctx, p)
pn := &v1beta1.PodNetworking{
ObjectMeta: metav1.ObjectMeta{Name: defaultPodNetworkingName, Namespace: config.Namespace()},
}
_ = config.Client().Resources().Delete(ctx, pn)
return ctx
}).
Feature()
nsSelector := features.New("PodNetworking/NamespaceSelector").WithLabel("env", "trunking").
Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
pn := newPodNetworking(defaultPodNetworkingName, nil, nil, nil, &metav1.LabelSelector{
MatchLabels: map[string]string{"trunking-pod": "true"},
})
if err := config.Client().Resources().Create(ctx, pn); err != nil {
t.Fatal(err)
}
return ctx
}).
Assess("podNetworking status ready", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
err := WaitPodNetworkingReady(defaultPodNetworkingName, config.Client())
if err != nil {
t.Fatal(err)
}
return ctx
}).
Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
p := newPod(config.Namespace(), "any-pod", nil, nil)
err := config.Client().Resources().Create(ctx, p)
if err != nil {
t.Error(err)
}
return ctx
}).
Assess("pod have trunking config", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
err := WaitPodHaveValidateConfig(config.Namespace(), "any-pod", config.Client(), defaultPodNetworkingName)
if err != nil {
t.Error(err)
}
return ctx
}).
Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
p := newPod("default", "different-ns", nil, nil)
err := config.Client().Resources().Create(ctx, p)
if err != nil {
t.Error(err)
}
return ctx
}).
Assess("default ns pod should not using trunking", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
var pod corev1.Pod
err := config.Client().Resources().Get(ctx, "different-ns", "default", &pod)
if err != nil {
t.Error(err)
}
if terwayTypes.PodUseENI(&pod) {
t.Error(fmt.Errorf("pod in namespace default should not use trunking"))
}
return ctx
}).
Teardown(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
p1 := newPod(config.Namespace(), "any-pod", nil, nil)
p2 := newPod("default", "different-ns", nil, nil)
_ = config.Client().Resources().Delete(ctx, p1)
_ = config.Client().Resources().Delete(ctx, p2)
pn := &v1beta1.PodNetworking{
ObjectMeta: metav1.ObjectMeta{Name: defaultPodNetworkingName, Namespace: config.Namespace()},
}
_ = config.Client().Resources().Delete(ctx, pn)
return ctx
}).
Feature()
testenv.Test(t, podSelector, nsSelector)
}
func TestZoneLimit(t *testing.T) {
zoneLimit := features.New("PodNetworking/ZoneLimit").WithLabel("env", "trunking").
Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
podENI := &v1beta1.PodENI{
ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: config.Namespace()},
Spec: v1beta1.PodENISpec{
Allocations: []v1beta1.Allocation{
{
AllocationType: v1beta1.AllocationType{
Type: v1beta1.IPAllocTypeFixed,
ReleaseStrategy: v1beta1.ReleaseStrategyNever,
},
IPv4: "127.0.0.1",
},
},
Zone: "foo",
},
Status: v1beta1.PodENIStatus{},
}
if err := config.Client().Resources().Create(ctx, podENI); err != nil {
t.Fatal(err)
}
t.Logf("podENI created %#v", podENI)
pn := newPodNetworking(defaultPodNetworkingName, nil, nil, &metav1.LabelSelector{
MatchLabels: map[string]string{"trunking-pod": "true"},
}, nil)
pn.Spec.AllocationType = v1beta1.AllocationType{
Type: v1beta1.IPAllocTypeFixed,
ReleaseStrategy: v1beta1.ReleaseStrategyNever,
}
if err := config.Client().Resources().Create(ctx, pn); err != nil {
t.Fatal(err)
}
t.Logf("podNetworking created %#v", pn)
return ctx
}).
Assess("podNetworking status ready", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
err := WaitPodNetworkingReady(defaultPodNetworkingName, config.Client())
if err != nil {
t.Fatal(err)
}
t.Logf("podNetworking %s status is ready", defaultPodNetworkingName)
return ctx
}).
Setup(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
p := newPod(config.Namespace(), podName, map[string]string{"trunking-pod": "true"}, nil)
p.OwnerReferences = append(p.OwnerReferences, metav1.OwnerReference{
Kind: "StatefulSet",
Name: "foo",
UID: "foo",
APIVersion: "foo",
})
p.Spec.Affinity = &corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{
{
MatchExpressions: []corev1.NodeSelectorRequirement{
{
Key: "user-config",
Operator: corev1.NodeSelectorOpIn,
Values: []string{"bar1", "bar2"},
},
{
Key: "topology.kubernetes.io/zone",
Operator: corev1.NodeSelectorOpIn,
Values: []string{"bar1"},
},
},
},
{
MatchFields: []corev1.NodeSelectorRequirement{
{
Key: "metadata.name",
Operator: corev1.NodeSelectorOpIn,
Values: []string{"bar1"},
},
},
},
},
},
PreferredDuringSchedulingIgnoredDuringExecution: nil,
},
}
err := config.Client().Resources().Create(ctx, p)
if err != nil {
t.Fatal(err)
}
t.Logf("pod created %#v", p)
return ctx
}).
Assess("pod have NodeSelectorTerms", func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
pod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: config.Namespace()},
}
err := wait.For(conditions.New(config.Client().Resources()).ResourceMatch(&pod, func(object k8s.Object) bool {
p := object.(*corev1.Pod)
if !terwayTypes.PodUseENI(p) {
return false
}
if p.Annotations[terwayTypes.PodNetworking] != defaultPodNetworkingName {
return false
}
return true
}), wait.WithTimeout(time.Second*5))
if err != nil {
t.Fatal(err)
}
for _, term := range pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
t.Logf("MatchFields %d MatchExpressions %d", len(term.MatchFields), len(term.MatchExpressions))
found := false
for _, match := range term.MatchExpressions {
if match.Key == "topology.kubernetes.io/zone" && match.Values[0] == "foo" {
found = true
}
}
if !found {
t.Errorf("node affinity config is not satisfy")
}
}
return ctx
}).
Teardown(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
_ = config.Client().Resources().Delete(ctx, &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: config.Namespace()},
})
_ = config.Client().Resources().Delete(ctx, &v1beta1.PodNetworking{
ObjectMeta: metav1.ObjectMeta{Name: defaultPodNetworkingName, Namespace: config.Namespace()},
})
_ = config.Client().Resources().Delete(ctx, &v1beta1.PodENI{
ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: config.Namespace()},
})
return ctx
}).
Feature()
testenv.Test(t, zoneLimit)
}
func TestFixedIP(t *testing.T) {
fixedIP := features.New("FixedIP").WithLabel("env", "trunking").
Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
_ = cfg.Client().Resources().Delete(ctx, &v1beta1.PodNetworking{ObjectMeta: metav1.ObjectMeta{Name: "fixed-ip"}})
pn := newPodNetworking("fixed-ip", nil, nil, &metav1.LabelSelector{
MatchLabels: map[string]string{"fixed-ip": "true"},
}, nil)
pn.Spec.AllocationType = v1beta1.AllocationType{
Type: v1beta1.IPAllocTypeFixed,
ReleaseStrategy: v1beta1.ReleaseStrategyTTL,
ReleaseAfter: "10m",
}
if err := cfg.Client().Resources().Create(ctx, pn); err != nil {
t.Error(err)
}
err := WaitPodNetworkingReady("fixed-ip", cfg.Client())
if err != nil {
t.Fatal(err)
}
return ctx
}).
Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
ports := []corev1.ServicePort{
{
Name: "http",
Protocol: corev1.ProtocolTCP,
Port: int32(80),
TargetPort: intstr.FromInt(80),
},
}
for _, args := range [][]interface{}{
{
"sts-1",
"connective-test",
"l1b0k/echo:v0.0.1",
},
{
"sts-2",
"connective-test",
"l1b0k/echo:v0.0.1",
},
} {
sts := utils.NewSts(args[0].(string), cfg.Namespace(), args[1].(string), args[2].(string), 1)
sts.Sts.Spec.Template.Labels["fixed-ip"] = "true"
err := cfg.Client().Resources().Create(ctx, sts.Sts)
if err != nil {
t.Fatal(err)
}
svc := sts.Expose("")
svc.Spec.Ports = ports
err = cfg.Client().Resources().Create(ctx, svc)
if err != nil {
t.Fatal(err)
}
}
return ctx
}).
Assess("wait for pod ready", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
for _, name := range []string{"sts-1-0", "sts-2-0"} {
pod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: cfg.Namespace()},
}
err := wait.For(conditions.New(cfg.Client().Resources()).ResourceMatch(&pod, func(object k8s.Object) bool {
p := object.(*corev1.Pod)
if !terwayTypes.PodUseENI(p) {
return false
}
if p.Annotations[terwayTypes.PodNetworking] != "fixed-ip" {
return false
}
if p.Status.Phase != corev1.PodRunning {
return false
}
ctx = context.WithValue(ctx, fmt.Sprintf("%s-ip", pod.Name), pod.Status.PodIP)
return true
}), wait.WithInterval(time.Second), wait.WithTimeout(time.Minute*2))
if err != nil {
t.Fatal(err)
}
}
return ctx
}).
Assess("test connective", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
pod := utils.NewPod("client", cfg.Namespace(), "client", "l1b0k/echo:v0.0.1")
pod.Pod.Spec.Containers[0].Command = []string{"/usr/bin/echo", "-mode", "client", "-cases", "dns://aliyun.com,http://sts-1,http://sts-2,tcp://100.100.100.200:80"}
pod.Pod.Labels["fixed-ip"] = "true"
pod.Pod.Spec.RestartPolicy = corev1.RestartPolicyNever
err := cfg.Client().Resources().Create(ctx, pod.Pod)
if err != nil {
t.Fatal(err)
}
p := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "client", Namespace: cfg.Namespace()},
}
stopChan := make(chan struct{})
err = wait.For(conditions.New(cfg.Client().Resources()).ResourceMatch(&p, func(object k8s.Object) bool {
p := object.(*corev1.Pod)
switch p.Status.Phase {
case corev1.PodSucceeded:
return true
case corev1.PodFailed:
stopChan <- struct{}{}
t.Fatal("pod status failed")
}
return false
}), wait.WithInterval(time.Second), wait.WithTimeout(time.Minute*2), wait.WithStopChannel(stopChan))
if err != nil {
t.Fatal(err)
}
err = cfg.Client().Resources().Delete(ctx, pod.Pod)
return ctx
}).
Assess("recreate pod", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
for _, name := range []string{"sts-1-0", "sts-2-0"} {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: cfg.Namespace()},
}
err := cfg.Client().Resources().Delete(ctx, pod)
if err != nil {
t.Fatal(err)
}
}
return ctx
}).
Assess("wait for pod ready", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
for _, name := range []string{"sts-1-0", "sts-2-0"} {
pod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: cfg.Namespace()},
}
err := wait.For(conditions.New(cfg.Client().Resources()).ResourceMatch(&pod, func(object k8s.Object) bool {
p := object.(*corev1.Pod)
if !terwayTypes.PodUseENI(p) {
return false
}
if !p.DeletionTimestamp.IsZero() {
return false
}
if p.Annotations[terwayTypes.PodNetworking] != "fixed-ip" {
return false
}
if p.Status.Phase != corev1.PodRunning {
return false
}
ip := ctx.Value(fmt.Sprintf("%s-ip", pod.Name)).(string)
if ip != pod.Status.PodIP {
t.Fatalf("pod ip changed from %s to %s", ip, pod.Status.PodIP)
}
return true
}), wait.WithInterval(time.Second), wait.WithTimeout(time.Minute*2))
if err != nil {
t.Fatal(err)
}
}
return ctx
}).
Assess("re-test connective", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
pod := utils.NewPod("client", cfg.Namespace(), "client", "l1b0k/echo:v0.0.1")
pod.Pod.Spec.Containers[0].Command = []string{"/usr/bin/echo", "-mode", "client", "-cases", "dns://aliyun.com,http://sts-1,http://sts-2,tcp://100.100.100.200:80"}
pod.Pod.Labels["fixed-ip"] = "true"
pod.Pod.Spec.RestartPolicy = corev1.RestartPolicyNever
err := cfg.Client().Resources().Create(ctx, pod.Pod)
if err != nil {
t.Fatal(err)
}
p := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "client", Namespace: cfg.Namespace()},
}
stopChan := make(chan struct{})
err = wait.For(conditions.New(cfg.Client().Resources()).ResourceMatch(&p, func(object k8s.Object) bool {
p := object.(*corev1.Pod)
switch p.Status.Phase {
case corev1.PodSucceeded:
return true
case corev1.PodFailed:
stopChan <- struct{}{}
t.Fatal("pod status failed")
}
return false
}), wait.WithInterval(time.Second), wait.WithTimeout(time.Minute*2), wait.WithStopChannel(stopChan))
if err != nil {
t.Fatal(err)
}
err = cfg.Client().Resources().Delete(ctx, pod.Pod)
if err != nil {
t.Fatal(err)
}
return ctx
}).
Teardown(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
_ = config.Client().Resources().Delete(ctx, &v1beta1.PodNetworking{
ObjectMeta: metav1.ObjectMeta{Name: "fixed-ip"},
})
for _, arg := range []string{"sts-1", "sts-2"} {
_ = config.Client().Resources().Delete(ctx, &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{Name: arg},
})
_ = config.Client().Resources().Delete(ctx, &corev1.Service{
ObjectMeta: metav1.ObjectMeta{Name: arg},
})
}
return ctx
}).Feature()
testenv.Test(t, fixedIP)
}
// TestConnective runs tests covering several cases:
// pod to pod, pod to service, and DNS resolution.
func TestConnective(t *testing.T) {
	// this test requires at least 2 vSwitches and 2 nodes
vsw := strings.Split(vSwitchIDs, ",")
if len(vsw) < 2 {
return
}
crossVSwitch := features.New("Connective/MultiVSwitch").WithLabel("env", "trunking").
Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
for _, args := range [][]interface{}{
{
"use-vsw-1",
[]string{vsw[0]},
},
{
"use-vsw-2",
[]string{vsw[1]},
},
} {
pn := newPodNetworking(args[0].(string), args[1].([]string), nil, &metav1.LabelSelector{
MatchLabels: map[string]string{args[0].(string): ""},
}, nil)
err := cfg.Client().Resources().Create(ctx, pn)
if err != nil && !errors.IsAlreadyExists(err) {
t.Fatal(err)
}
}
return ctx
}).
Assess("podNetworking status ready", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
for _, arg := range []string{"use-vsw-1", "use-vsw-2"} {
err := WaitPodNetworkingReady(arg, cfg.Client())
if err != nil {
t.Fatal(err)
}
}
return ctx
}).
Setup(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
ports := []corev1.ServicePort{
{
Name: "http",
Protocol: corev1.ProtocolTCP,
Port: int32(80),
TargetPort: intstr.FromInt(80),
},
}
for _, args := range [][]interface{}{
{
"pod-1",
"connective-test",
"l1b0k/echo:v0.0.1",
"use-vsw-1",
},
{
"pod-2",
"connective-test",
"l1b0k/echo:v0.0.1",
"use-vsw-2",
},
} {
pod := utils.NewPod(args[0].(string), cfg.Namespace(), args[1].(string), args[2].(string))
pod.Pod.Labels[args[3].(string)] = ""
err := cfg.Client().Resources().Create(ctx, pod.Pod)
if err != nil {
t.Error(err)
}
svc := pod.Expose("")
svc.Spec.Ports = ports
err = cfg.Client().Resources().Create(ctx, svc)
if err != nil {
t.Error(err)
}
}
return ctx
}).
Assess("wait for pod ready", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
for _, name := range []string{"pod-1", "pod-2"} {
pod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: cfg.Namespace()},
}
err := wait.For(conditions.New(cfg.Client().Resources()).ResourceMatch(&pod, func(object k8s.Object) bool {
p := object.(*corev1.Pod)
if !terwayTypes.PodUseENI(p) {
return false
}
return p.Status.Phase == corev1.PodRunning
}), wait.WithInterval(time.Second), wait.WithTimeout(time.Minute*2))
if err != nil {
t.Fatal(err)
}
}
return ctx
}).
Assess("test connective", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context {
pod := utils.NewPod("client", cfg.Namespace(), "client", "l1b0k/echo:v0.0.1")
pod.Pod.Spec.Containers[0].Command = []string{"/usr/bin/echo", "-mode", "client", "-cases", "dns://aliyun.com,http://pod-1,http://pod-2,tcp://100.100.100.200:80"}
pod.Pod.Labels["use-vsw-1"] = ""
pod.Pod.Spec.RestartPolicy = corev1.RestartPolicyNever
err := cfg.Client().Resources().Create(ctx, pod.Pod)
if err != nil {
t.Fatal(err)
}
p := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "client", Namespace: cfg.Namespace()},
}
err = wait.For(conditions.New(cfg.Client().Resources()).ResourceMatch(&p, func(object k8s.Object) bool {
p := object.(*corev1.Pod)
return p.Status.Phase == corev1.PodSucceeded
}), wait.WithInterval(time.Second), wait.WithTimeout(time.Minute*2))
if err != nil {
t.Fatal(err)
}
err = cfg.Client().Resources().Delete(ctx, pod.Pod)
if err != nil {
t.Fatal(err)
}
return ctx
}).
Teardown(func(ctx context.Context, t *testing.T, config *envconf.Config) context.Context {
for _, arg := range []string{"use-vsw-1", "use-vsw-2"} {
pn1 := &v1beta1.PodNetworking{
ObjectMeta: metav1.ObjectMeta{Name: arg},
}
_ = config.Client().Resources().Delete(ctx, pn1)
}
return ctx
}).Feature()
testenv.Test(t, crossVSwitch)
}
func WaitPodNetworkingReady(name string, client klient.Client) error {
pn := v1beta1.PodNetworking{
ObjectMeta: metav1.ObjectMeta{Name: name},
}
err := wait.For(conditions.New(client.Resources()).ResourceMatch(&pn, func(object k8s.Object) bool {
p := object.(*v1beta1.PodNetworking)
if len(p.Status.VSwitches) != len(p.Spec.VSwitchOptions) {
return false
}
for _, s := range p.Status.VSwitches {
if s.Zone == "" {
return false
}
}
return p.Status.Status == v1beta1.NetworkingStatusReady
}), wait.WithTimeout(time.Minute*1))
time.Sleep(5 * time.Second)
return err
}
func WaitPodHaveValidateConfig(namespace, name string, client klient.Client, podNetworkingName string) error {
pod := corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
}
return wait.For(conditions.New(client.Resources()).ResourceMatch(&pod, func(object k8s.Object) bool {
p := object.(*corev1.Pod)
if !terwayTypes.PodUseENI(p) {
return false
}
if p.Annotations[terwayTypes.PodNetworking] != podNetworkingName {
return false
}
return true
}), wait.WithTimeout(time.Second*5))
}
func newPodNetworking(name string, vSwitchOptions, securityGroupIDs []string, podSelector, namespaceSelector *metav1.LabelSelector) *v1beta1.PodNetworking {
return &v1beta1.PodNetworking{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1beta1.PodNetworkingSpec{
Selector: v1beta1.Selector{
PodSelector: podSelector,
NamespaceSelector: namespaceSelector,
},
VSwitchOptions: vSwitchOptions,
SecurityGroupIDs: securityGroupIDs,
},
}
}
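// exampleCrossVSwitchPodNetworking is a usage sketch for the newPodNetworking
// helper above; it is not called by the tests, and the vSwitch IDs, security
// group, and label used here are illustrative assumptions, not fixtures.
func exampleCrossVSwitchPodNetworking() *v1beta1.PodNetworking {
return newPodNetworking(
"use-vsw-1",
[]string{"vsw-aaa", "vsw-bbb"},
[]string{"sg-xxx"},
&metav1.LabelSelector{MatchLabels: map[string]string{"use-vsw-1": ""}},
nil,
)
}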
func | (namespace, name string, label, anno map[string]string) *corev1.Pod {
return &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, Labels: label, Annotations: anno},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "foo",
Image: "registry.cn-hangzhou.aliyuncs.com/acs/pause:3.2",
ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{"/pause"},
},
},
TerminationGracePeriodSeconds: func(a int64) *int64 { return &a }(0),
},
}
}
| newPod |
search.tsx | import { useRouter } from 'next/dist/client/router';
import CustomLink from '../../../components/CustomLink';
export default function Search(): JSX.Element { | return (
<div>
<h1>routes / {code} / search</h1>
<CustomLink />
</div>
);
} | const router = useRouter();
const code = router.query.code;
|
client.go | package signal
import (
"bufio"
"encoding/json"
"io/ioutil"
"mime"
"os"
"os/exec"
"path"
"strings"
"github.com/whatsapp-signal-bridge/bridge"
"github.com/whatsapp-signal-bridge/logger"
"github.com/whatsapp-signal-bridge/signal/message"
)
type SignalClient interface {
bridge.Client
receiveMessages()
}
type client struct {
bridge.Client
botNumber string
receiverNumber string
logger logger.Logger
}
type SignalClientOptions struct {
Queue bridge.Queue
BotNumber string
ReceiverNumber string
}
func | (options SignalClientOptions) {
c := &client{
bridge.NewClient(options.Queue, "signal"),
options.BotNumber,
options.ReceiverNumber,
logger.NewLogger("signal", logger.LOG_LEVEL_DEBUG),
}
c.Subscribe(bridge.WHATSAPP_QUEUE, func(msg bridge.Message) {
c.Send(msg)
})
go c.receiveMessages()
}
func (c *client) Send(msg bridge.Message) (executed bool, err error) {
textMessage := ""
// if msg.ID() != "" {
// textMessage += "id:" + msg.ID() + "\n"
// }
if msg.ChatID() != "" {
textMessage += "chatid:" + msg.ChatID() + "\n"
}
if msg.ChatName() != "" {
textMessage += "chat:" + msg.ChatName() + "\n"
}
if msg.Sender() != "" {
textMessage += "sender:" + msg.Sender() + "\n"
}
if textMessage != "" {
textMessage += "---\n"
}
if msg.Quote() != nil {
textMessage += "▒ type: " + string(msg.Quote().MessageType+"\n")
if quoteText := msg.Quote().Body; quoteText != nil {
quoteTextParts := []string{}
for _, p := range strings.Split(*quoteText, "\n") {
quoteTextParts = append(quoteTextParts, "▒ "+p)
}
textMessage += strings.Join(quoteTextParts, "\n")
}
textMessage += "\n"
}
textMessage += msg.Body()
cmd := exec.Command("signal-cli", "-u", c.botNumber, "send", c.receiverNumber, "-m", textMessage)
if msg.Attachment() != nil && msg.Attachment().Bytes != nil {
extensions, err := mime.ExtensionsByType(msg.Attachment().Type)
if extensions != nil && err == nil {
filePath := path.Join(os.TempDir(), msg.ID()+extensions[0])
if err := ioutil.WriteFile(filePath, msg.Attachment().Bytes, 0755); err != nil {
c.logger.LogError("error writing attachment file:", filePath)
} else {
cmd.Args = append(cmd.Args, "-a", filePath)
defer func() {
if err := os.Remove(filePath); err != nil {
c.logger.LogError("error removing file:", filePath)
}
}()
}
}
}
if err := cmd.Start(); err != nil {
return false, err
}
if err := cmd.Wait(); err != nil {
return false, err
}
return true, nil
}
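// Example of a message rendered by Send above (all values illustrative):
//
// chatid:49123456789@s.whatsapp.net
// chat:Family
// sender:Alice
// ---
// ▒ type: text
// ▒ quoted line from the original message
// Hello back!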
func (c *client) receiveMessages() {
cmd := exec.Command("signal-cli", "-u", c.botNumber, "receive", "--json")
stdout, err := cmd.StdoutPipe()
if err != nil {
c.logger.LogError(err)
}
if err := cmd.Start(); err != nil {
c.logger.LogError(err)
}
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
row := scanner.Text()
c.logger.LogDebug(row)
var signalCLIMessage message.SignalCLIMessage
err := json.Unmarshal([]byte(row), &signalCLIMessage)
if err != nil {
c.logger.LogError(err)
}
if msg, err := message.NewSignalBridgeMessage(&signalCLIMessage).Build(); err == nil {
if executed, err := c.ExecuteSkill(msg); !executed || err != nil {
c.Publish(bridge.SIGNAL_QUEUE, msg)
}
}
}
cmd.Wait()
go c.receiveMessages()
}
| StartClient |
enumeration.rs | /*
* Copyright 2018 The Starlark in Rust Authors.
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! Implementation of `enum` function.
use crate as starlark;
use crate::{
environment::GlobalsBuilder,
values::{enumeration::EnumType, Value},
};
#[starlark_module]
pub fn global(builder: &mut GlobalsBuilder) |
#[cfg(test)]
mod tests {
use crate::assert;
#[test]
fn test_enum() {
assert::pass(
r#"
enum_type = enum("option1", "option2", True)
x = enum_type("option1")
assert_eq(x.value, "option1")
assert_eq(enum_type(True).value, True)
"#,
);
assert::fails(
r#"
enum_type = enum("option1", "option2", True)
enum_type(False)"#,
&["Unknown enum element", "`False`", "option1"],
);
assert::fails(
r#"
enum_type = enum("option1", "option2", True)
enum_type("option3")"#,
&["Unknown enum element", "`option3`"],
);
assert::fails(
r#"
enum_type = enum("option1", "option1")
enum_type("option3")"#,
&["distinct", "option1"],
);
assert::pass(
r#"
enum_type = enum("option1","option2")
def foo(x: enum_type.type) -> "enum_type":
return x
foo(enum_type("option1"))"#,
);
assert::pass(
r#"
v = [enum("option1","option2")]
def foo(x: v[0].type) -> "enum":
return x
foo(v[0]("option1"))"#,
);
assert::pass(
r#"
enum_type = enum("option1","option2")
assert_eq([enum_type[i].value for i in range(len(enum_type))], ["option1","option2"])
assert_eq(enum_type("option2").index, 1)"#,
);
assert::pass(
r#"
enum_type = enum("option1","option2")
x = enum_type("option1")
assert_eq(str(enum_type), "enum(\"option1\", \"option2\")")
assert_eq(str(x), "\"option1\"")
"#,
);
}
}
| {
/// Creates an enumeration.
///
/// `enum` creates an enumeration type, listing the possible values.
///
/// Examples:
///
/// ```
/// # starlark::assert::is_true(r#"
/// enum_type = enum("Red", "Green", "Blue")
/// val = enum_type("Red")
/// val.value == "Red"
/// # "#);
/// ```
fn r#enum(args: Vec<Value>) -> Value<'v> {
// Every Value must either be a field or a value (the type)
EnumType::new(args, heap)
}
} |
dnn.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
_CENTERED_BIAS_WEIGHT = "centered_bias_weight"
# The default learning rate of 0.05 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.05
def _get_feature_dict(features):
if isinstance(features, dict):
return features
return {"": features}
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
def _add_hidden_layer_summary(value, tag):
summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
summary.histogram("%s_activation" % tag, value)
def _dnn_model_fn(features, labels, mode, params, config=None):
"""Deep Neural Net model_fn.
Args:
features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `_Head` instance.
* hidden_units: List of hidden units per layer.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training. If `None`, will use the Adagrad
optimizer with a default learning rate of 0.05.
* activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
* dropout: When not `None`, the probability we will drop out a given
coordinate.
* gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio.
* embedding_lr_multipliers: Optional. A dictionary from
`EmbeddingColumn` to a `float` multiplier. Multiplier will be used to
multiply with learning rate for the embedding variables.
config: `RunConfig` object to configure the runtime settings.
Returns:
predictions: A dict of `Tensor` objects.
loss: A scalar containing the loss of the step.
train_op: The op for training.
"""
head = params["head"]
hidden_units = params["hidden_units"]
feature_columns = params["feature_columns"]
optimizer = params.get("optimizer") or "Adagrad"
activation_fn = params.get("activation_fn")
dropout = params.get("dropout")
gradient_clip_norm = params.get("gradient_clip_norm")
num_ps_replicas = config.num_ps_replicas if config else 0
embedding_lr_multipliers = params.get("embedding_lr_multipliers", {})
features = _get_feature_dict(features)
parent_scope = "dnn"
input_layer_partitioner = (partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas, min_slice_size=64 << 20))
input_layer_scope = parent_scope + "/input_from_feature_columns"
with variable_scope.variable_scope(
input_layer_scope,
values=list(six.itervalues(features)),
partitioner=input_layer_partitioner) as scope:
net = layers.input_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
weight_collections=[parent_scope],
scope=scope)
hidden_layer_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas))
for layer_id, num_hidden_units in enumerate(hidden_units):
with variable_scope.variable_scope(
parent_scope + "/hiddenlayer_%d" % layer_id,
values=[net],
partitioner=hidden_layer_partitioner) as scope:
net = layers.fully_connected(
net,
num_hidden_units,
activation_fn=activation_fn,
variables_collections=[parent_scope],
scope=scope)
if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
net = layers.dropout(net, keep_prob=(1.0 - dropout))
_add_hidden_layer_summary(net, scope.name)
with variable_scope.variable_scope(
parent_scope + "/logits",
values=[net],
partitioner=hidden_layer_partitioner) as scope:
logits = layers.fully_connected(
net,
head.logits_dimension,
activation_fn=None,
variables_collections=[parent_scope],
scope=scope)
_add_hidden_layer_summary(logits, scope.name)
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
return optimizers.optimize_loss(
loss=loss,
global_step=contrib_variables.get_global_step(),
learning_rate=_LEARNING_RATE,
optimizer=_get_optimizer(optimizer),
gradient_multipliers=(
dnn_linear_combined._extract_embedding_lr_multipliers( # pylint: disable=protected-access
embedding_lr_multipliers, parent_scope, input_layer_scope)),
clip_gradients=gradient_clip_norm,
name=parent_scope,
# Empty summaries to prevent optimizers from logging the training_loss.
summaries=[])
return head.head_ops(features, labels, mode, _train_op_fn, logits)
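# Sketch of a `params` dict that satisfies _dnn_model_fn above; the values are
# illustrative and `my_feature_columns` is an assumed, externally built list:
#
# params = {
#     "head": head_lib._multi_class_head(3),  # pylint: disable=protected-access
#     "hidden_units": [128, 64],
#     "feature_columns": my_feature_columns,
#     "optimizer": "Adagrad",
#     "activation_fn": nn.relu,
#     "dropout": 0.5,
# }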
class DNNClassifier(evaluable.Evaluable, trainable.Trainable):
"""A classifier for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNClassifier(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNClassifier(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
def input_fn_train():  # returns x, y (where y represents label's class index).
  pass
estimator.fit(input_fn=input_fn_train)
def input_fn_eval():  # returns x, y (where y represents label's class index).
  pass
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x) # returns predicted labels (i.e. label's class index).
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
activation_fn=nn.relu,
dropout=None,
gradient_clip_norm=None,
enable_centered_bias=False,
config=None,
feature_engineering_fn=None,
embedding_lr_multipliers=None):
"""Initializes a DNNClassifier instance.
Args:
hidden_units: List of hidden units per layer. All layers are fully
connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
n_classes: number of label classes. Default is binary classification.
It must be greater than 1. Note: Class labels are integers representing
the class index (i.e. values from 0 to n_classes-1). For arbitrary
label values (e.g. string labels), convert to class indices first.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and | learning rate for the embedding variables.
Returns:
A `DNNClassifier` estimator.
Raises:
ValueError: If `n_classes` < 2.
"""
self._hidden_units = hidden_units
self._feature_columns = tuple(feature_columns or [])
self._enable_centered_bias = enable_centered_bias
self._estimator = estimator.Estimator(
model_fn=_dnn_model_fn,
model_dir=model_dir,
config=config,
params={
"head":
head_lib._multi_class_head( # pylint: disable=protected-access
n_classes,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias),
"hidden_units":
hidden_units,
"feature_columns":
self._feature_columns,
"optimizer":
optimizer,
"activation_fn":
activation_fn,
"dropout":
dropout,
"gradient_clip_norm":
gradient_clip_norm,
"embedding_lr_multipliers":
embedding_lr_multipliers,
},
feature_engineering_fn=feature_engineering_fn)
def fit(self,
x=None,
y=None,
input_fn=None,
steps=None,
batch_size=None,
monitors=None,
max_steps=None):
"""See trainable.Trainable. Note: Labels must be integer class indices."""
# TODO(roumposg): Remove when deprecated monitors are removed.
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
self._estimator.fit(x=x,
y=y,
input_fn=input_fn,
steps=steps,
batch_size=batch_size,
monitors=hooks,
max_steps=max_steps)
return self
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None):
"""See evaluable.Evaluable. Note: Labels must be integer class indices."""
return self._estimator.evaluate(
x=x,
y=y,
input_fn=input_fn,
feed_fn=feed_fn,
batch_size=batch_size,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
"""Returns predicted classes for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes with shape [batch_size] (or an iterable
of predicted classes if as_iterable is True). Each predicted class is
represented by its class index (i.e. integer from 0 to n_classes-1).
"""
key = prediction_key.PredictionKey.CLASSES
preds = self._estimator.predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return (pred[key] for pred in preds)
return preds[key].reshape(-1)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(self,
x=None,
input_fn=None,
batch_size=None,
as_iterable=True):
"""Returns prediction probabilities for given features.
Args:
x: features.
input_fn: Input function. If set, x and y must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted probabilities with shape [batch_size, n_classes]
(or an iterable of predicted probabilities if as_iterable is True).
"""
key = prediction_key.PredictionKey.PROBABILITIES
preds = self._estimator.predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return (pred[key] for pred in preds)
return preds[key]
def _get_predict_ops(self, features):
"""See `Estimator` class."""
# This method exists to support some models that use the legacy interface.
# pylint: disable=protected-access
return self._estimator._get_predict_ops(features)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return self._estimator.get_variable_names()
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
`Tensor` object.
"""
return self._estimator.get_variable_value(name)
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(examples,
self._feature_columns)
return self._estimator.export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(signature_fn or
export.classification_signature_fn_with_prob),
prediction_key=prediction_key.PredictionKey.PROBABILITIES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
@experimental
def export_savedmodel(self,
export_dir_base,
input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
exports_to_keep=None):
return self._estimator.export_savedmodel(
export_dir_base,
input_fn,
default_output_alternative_key=default_output_alternative_key,
assets_extra=assets_extra,
as_text=as_text,
exports_to_keep=exports_to_keep)
@property
def model_dir(self):
return self._estimator.model_dir
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def weights_(self):
hiddenlayer_weights = [
self.get_variable_value("dnn/hiddenlayer_%d/weights" % i)
for i, _ in enumerate(self._hidden_units)
]
logits_weights = [self.get_variable_value("dnn/logits/weights")]
return hiddenlayer_weights + logits_weights
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def bias_(self):
hiddenlayer_bias = [
self.get_variable_value("dnn/hiddenlayer_%d/biases" % i)
for i, _ in enumerate(self._hidden_units)
]
logits_bias = [self.get_variable_value("dnn/logits/biases")]
if self._enable_centered_bias:
centered_bias = [self.get_variable_value(_CENTERED_BIAS_WEIGHT)]
else:
centered_bias = []
return hiddenlayer_bias + logits_bias + centered_bias
@property
def config(self):
return self._estimator.config
class DNNRegressor(evaluable.Evaluable, trainable.Trainable):
"""A regressor for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNRegressor(
feature_columns=[sparse_feature_a, sparse_feature_b],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=[sparse_feature_a, sparse_feature_b],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
def input_fn_train():  # returns x, y
  pass
estimator.fit(input_fn=input_fn_train)
def input_fn_eval():  # returns x, y
  pass
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
weight_column_name=None,
optimizer=None,
activation_fn=nn.relu,
dropout=None,
gradient_clip_norm=None,
enable_centered_bias=False,
config=None,
feature_engineering_fn=None,
label_dimension=1,
embedding_lr_multipliers=None):
"""Initializes a `DNNRegressor` instance.
Args:
hidden_units: List of hidden units per layer. All layers are fully
connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
label_dimension: Dimension of the label for multilabels. Defaults to 1.
embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
a `float` multiplier. Multiplier will be used to multiply with
learning rate for the embedding variables.
Returns:
A `DNNRegressor` estimator.
"""
self._feature_columns = tuple(feature_columns or [])
self._estimator = estimator.Estimator(
model_fn=_dnn_model_fn,
model_dir=model_dir,
config=config,
params={
"head":
head_lib._regression_head( # pylint: disable=protected-access
label_dimension=label_dimension,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias),
"hidden_units":
hidden_units,
"feature_columns":
self._feature_columns,
"optimizer":
optimizer,
"activation_fn":
activation_fn,
"dropout":
dropout,
"gradient_clip_norm":
gradient_clip_norm,
"embedding_lr_multipliers":
embedding_lr_multipliers,
},
feature_engineering_fn=feature_engineering_fn)
def fit(self,
x=None,
y=None,
input_fn=None,
steps=None,
batch_size=None,
monitors=None,
max_steps=None):
"""See trainable.Trainable."""
# TODO(roumposg): Remove when deprecated monitors are removed.
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
self._estimator.fit(x=x,
y=y,
input_fn=input_fn,
steps=steps,
batch_size=batch_size,
monitors=hooks,
max_steps=max_steps)
return self
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None):
"""See evaluable.Evaluable."""
# TODO(zakaria): remove once deprecation is finished (b/31229024)
custom_metrics = {}
if metrics:
for key, metric in six.iteritems(metrics):
if (not isinstance(metric, metric_spec.MetricSpec) and
not isinstance(key, tuple)):
custom_metrics[(key, prediction_key.PredictionKey.SCORES)] = metric
else:
custom_metrics[key] = metric
return self._estimator.evaluate(
x=x,
y=y,
input_fn=input_fn,
feed_fn=feed_fn,
batch_size=batch_size,
steps=steps,
metrics=custom_metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
"""Returns predicted scores for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
"""
key = prediction_key.PredictionKey.SCORES
preds = self._estimator.predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return (pred[key] for pred in preds)
return preds[key]
def _get_predict_ops(self, features):
"""See `Estimator` class."""
# This method exists to support some models that use the legacy interface.
# pylint: disable=protected-access
return self._estimator._get_predict_ops(features)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return self._estimator.get_variable_names()
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
`Tensor` object.
"""
return self._estimator.get_variable_value(name)
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(examples,
self._feature_columns)
return self._estimator.export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=signature_fn or export.regression_signature_fn,
prediction_key=prediction_key.PredictionKey.SCORES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
@property
def model_dir(self):
return self._estimator.model_dir
@property
def config(self):
return self._estimator.config | returns features and labels which will be fed
into the model.
embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
a `float` multiplier. Multiplier will be used to multiply with |
atomicfiles.go | package atomicfiles
import (
"io/ioutil"
"os"
"path"
)
func | (name string, body []byte) error {
dir, _ := path.Split(name)
tmpFile, err := ioutil.TempFile(dir, ".tmp")
if err != nil {
return err
}
tmpName := tmpFile.Name()
_, err = tmpFile.Write(body)
if err != nil {
return err
}
err = tmpFile.Sync()
if err != nil {
return err
}
err = tmpFile.Close()
if err != nil {
return err
}
err = os.Rename(tmpName, name)
if err != nil {
return err
}
return nil
}
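// ExampleWriteFile sketches typical usage of WriteFile; the path and payload
// are illustrative. The final os.Rename is what makes the write atomic on
// POSIX filesystems: readers observe either the old contents or the complete
// new contents, never a partially written file.
func ExampleWriteFile() {
if err := WriteFile("/tmp/example-config.json", []byte(`{"ok":true}`)); err != nil {
// Note: on failure the temporary file may be left behind in the target dir.
panic(err)
}
}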
| WriteFile |
views.py | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class HelloViewSet(viewsets.ViewSet):
"""Test ViewSet"""
serializer_class = serializers.HelloSerializer
def list(self,request):
"""teste"""
a_viewset = [
'Use esta metodo para (list,recuperar,atualizar,atualizar um campo',
'Automaticamente mapeia as urls usando Roters',
'Proporciona mais funcionalidades com menos codigo',
]
return(Response({'message':'Hello','a_viewset':a_viewset}))
def create(self,request):
"""Cria uma nova menssagem"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
messagem=f'Hello {name}'
return Response({'messagem':messagem})
else:
return Response(
serializer.errors,
status = status.HTTP_400_BAD_REQUEST
)
def retrieve(self,request, pk = None):
"""Retorna um objeto pela ID"""
return Response({'http_method':'PUT'})
def update(self, request, pk=None):
"""Update an object."""
return Response({'http_method': 'PUT'})
def partial_update(self, request, pk=None):
"""Partially update an object."""
return Response({'http_method': 'PATCH'})
def destroy(self, request, pk=None):
"""Delete an object."""
return Response({'http_method': 'DELETE'})
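# Sketch: a ViewSet like the one above is normally wired to URLs with a router
# in the app's urls module (module path and route name are assumptions, not
# shown in this file):
#
# from rest_framework.routers import DefaultRouter
# router = DefaultRouter()
# router.register('hello-viewset', HelloViewSet, basename='hello-viewset')
# urlpatterns = router.urls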
class HelloApiView(APIView):
"""Test API View"""
serializer_class = serializers.HelloSerializer
def get(self,request,format=None):
"""Returna uma lista de funções da APIView"""
an_apiview = [
'Usando Http metodos (get,post,put,delete,patch)',
'É similar a uma tradiciona view do django',
'te da o controle da logica da aplicação',
'e mapea manualmente as urls',
]
return Response({'message':'hello','an_apiview':an_apiview})
def post(self,request):
"""cria uma messagem de vem vindo com o nome"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
message = f'Hello {name}'
return Response({'message':message})
else:
return Response(
serializer.errors,
status = status.HTTP_400_BAD_REQUEST
)
def put(self,request,pk = None):
"""Atualizando um objeto""" | def patch(self,request, pk = None):
"""Atualizando um campo de um objeto"""
return Response({'metodo':'Patch'})
def delete(self, request, pk = None):
"""Deletando um objeto"""
return Response({'methodo':'Delete'})
class UserProfileViewSet(viewsets.ModelViewSet):
"""Cria e atualiza um usuario """
serializer_class = serializers.UserProfileSerizalizer
queryset = models.UserProfile.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'email',)
class UserLoginApiView(ObtainAuthToken):
"""Cria um token autenticado para o usuario"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Registra e atualiza feed de usuario autenticado"""
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (permissions.UpdateOwnStatus, IsAuthenticated)
def perform_create(self, serializer):
"""seta o usuario do perfil para o usuario logado"""
serializer.save(user_profile=self.request.user) | return Response({'metodo':'put'})
|
mod.rs | // Built-in deps
use std::{collections::VecDeque, convert::TryFrom, str::FromStr};
// External imports
use diesel::dsl::{insert_into, update};
use diesel::prelude::*;
use num::BigUint;
use web3::types::{H256, U256};
// Workspace imports
use models::{
ethereum::{ETHOperation, InsertedOperationResponse, OperationType},
Operation,
};
// Local imports
use self::records::{
ETHBinding, ETHParams, ETHStats, ETHTxHash, NewETHBinding, NewETHOperation, NewETHTxHash,
StorageETHOperation,
};
use crate::chain::operations::records::StoredOperation;
use crate::schema::*;
use crate::utils::StoredBigUint;
use crate::StorageProcessor;
pub mod records;
/// Ethereum schema is capable of storing the information about the
/// interaction with the Ethereum blockchain (mainly the list of sent
/// Ethereum transactions).
#[derive(Debug)]
pub struct EthereumSchema<'a>(pub &'a StorageProcessor);
impl<'a> EthereumSchema<'a> {
/// Loads the list of operations that were not confirmed on Ethereum,
/// each operation has a list of sent Ethereum transactions.
pub fn load_unconfirmed_operations(&self) -> QueryResult<VecDeque<ETHOperation>> {
// Load the operations with the associated Ethereum transactions
// from the database.
// Here we obtain a sequence of one-to-one mappings (ETH tx) -> (operation ID).
// Each Ethereum transaction can have no more than one associated operation, and each
// operation is associated with exactly one Ethereum transaction. Note that there may
// be ETH transactions without an operation (e.g. `completeWithdrawals` call), but for
// every operation always there is an ETH transaction.
self.0.conn().transaction(|| {
let raw_ops: Vec<(
StorageETHOperation,
Option<ETHBinding>,
Option<StoredOperation>,
)> = eth_operations::table
.left_join(
eth_ops_binding::table.on(eth_operations::id.eq(eth_ops_binding::eth_op_id)),
)
.left_join(operations::table.on(operations::id.eq(eth_ops_binding::op_id)))
.filter(eth_operations::confirmed.eq(false))
.order(eth_operations::id.asc())
.load(self.0.conn())?;
// Create a vector for the expected output.
let mut ops: VecDeque<ETHOperation> = VecDeque::with_capacity(raw_ops.len());
// Transform the `StoredOperation` to `Operation` and `StoredETHOperation` to `ETHOperation`.
for (eth_op, _, raw_op) in raw_ops {
// Load the stored txs hashes ordered by their ID,
// so the latest added hash will be the last one in the list.
let eth_tx_hashes: Vec<ETHTxHash> = eth_tx_hashes::table
.filter(eth_tx_hashes::eth_op_id.eq(eth_op.id))
.order_by(eth_tx_hashes::id.asc())
.load(self.0.conn())?;
assert!(
!eth_tx_hashes.is_empty(),
"No hashes stored for the Ethereum operation"
);
// If there is an operation, convert it to the `Operation` type.
let op = if let Some(raw_op) = raw_op {
Some(raw_op.into_op(self.0)?)
} else {
None
};
// Convert the fields into expected format.
let op_type = OperationType::from_str(eth_op.op_type.as_ref())
.expect("Stored operation type must have a valid value");
let last_used_gas_price =
U256::from_str(ð_op.last_used_gas_price.0.to_string()).unwrap();
let used_tx_hashes = eth_tx_hashes
.iter()
.map(|entry| H256::from_slice(&entry.tx_hash))
.collect();
let final_hash = eth_op.final_hash.map(|hash| H256::from_slice(&hash));
let eth_op = ETHOperation {
id: eth_op.id,
op_type,
op,
nonce: eth_op.nonce.into(),
last_deadline_block: eth_op.last_deadline_block as u64,
last_used_gas_price,
used_tx_hashes,
encoded_tx_data: eth_op.raw_tx,
confirmed: eth_op.confirmed,
final_hash,
};
ops.push_back(eth_op);
}
Ok(ops)
})
}
/// Loads the operations which were stored in `operations` table, but not
/// in the `eth_operations`. This method is intended to be used after relaunch
/// to synchronize `eth_sender` state, as operations are sent to the `eth_sender`
/// only once.
pub fn load_unprocessed_operations(&self) -> QueryResult<Vec<Operation>> {
let raw_ops: Vec<(StoredOperation, Option<ETHBinding>)> =
self.0.conn().transaction(|| {
operations::table
.left_join(eth_ops_binding::table.on(operations::id.eq(eth_ops_binding::op_id)))
.filter(operations::confirmed.eq(false))
.order(operations::id.asc())
.load(self.0.conn())
})?;
let operations: Vec<Operation> = raw_ops
.into_iter()
.filter_map(|(raw_op, maybe_binding)| {
// We are only interested in operations unknown to `eth_operations` table.
if maybe_binding.is_some() {
None
} else {
Some(raw_op.into_op(self.0).expect("Can't convert the operation"))
}
})
.collect();
Ok(operations)
}
/// Stores the sent (but not confirmed yet) Ethereum transaction in the database.
/// Returns the `ETHOperation` object containing the assigned nonce and operation ID.
pub fn save_new_eth_tx(
&self,
op_type: OperationType,
op_id: Option<i64>,
last_deadline_block: i64,
last_used_gas_price: BigUint,
raw_tx: Vec<u8>,
) -> QueryResult<InsertedOperationResponse> {
self.0.conn().transaction(|| {
// It's important to assign nonce within the same db transaction
// as saving the operation to avoid the state divergence.
let nonce = self.get_next_nonce()?;
// Create and insert the operation.
let operation = NewETHOperation {
op_type: op_type.to_string(),
nonce,
last_deadline_block,
last_used_gas_price: last_used_gas_price.into(),
raw_tx,
};
let inserted_tx = insert_into(eth_operations::table)
.values(&operation)
.returning(eth_operations::id)
.get_results(self.0.conn())?;
assert_eq!(
inserted_tx.len(),
1,
"Wrong amount of updated rows (eth_operations)"
);
// Obtain the operation ID for the follow-up queries.
let eth_op_id = inserted_tx[0];
// // Add a hash entry.
// let hash_entry = NewETHTxHash {
// eth_op_id,
// tx_hash: hash.as_bytes().to_vec(),
// };
// let inserted_hashes_rows = insert_into(eth_tx_hashes::table)
// .values(&hash_entry)
// .execute(self.0.conn())?;
// assert_eq!(
// inserted_hashes_rows, 1,
// "Wrong amount of updated rows (eth_tx_hashes)"
// );
// If the operation ID was provided, we should also insert a binding entry.
if let Some(op_id) = op_id {
let binding = NewETHBinding { op_id, eth_op_id };
insert_into(eth_ops_binding::table)
.values(&binding)
.execute(self.0.conn())?;
}
// Update the stored stats.
self.report_created_operation(op_type)?;
// Return the assigned ID and nonce.
let response = InsertedOperationResponse {
id: eth_op_id,
nonce: nonce.into(),
};
Ok(response)
})
}
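// Call-shape sketch (the `ethereum_schema()` accessor and the argument values
// are assumptions, not defined in this file): the returned nonce is the one
// assigned inside the same DB transaction above, which keeps nonce assignment
// consistent with the stored operation even across restarts.
//
// let resp = storage.ethereum_schema().save_new_eth_tx(
//     OperationType::Commit, Some(op_id), deadline_block, gas_price, raw_tx)?;
// let (eth_op_id, assigned_nonce) = (resp.id, resp.nonce);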
/// Retrieves the Ethereum operation ID given the tx hash.
fn get_eth_op_id(&self, hash: &H256) -> QueryResult<i64> {
let hash_entry = eth_tx_hashes::table
.filter(eth_tx_hashes::tx_hash.eq(hash.as_bytes()))
.first::<ETHTxHash>(self.0.conn())?;
Ok(hash_entry.eth_op_id)
}
/// Adds a tx hash entry associated with some Ethereum operation to the database.
pub fn add_hash_entry(&self, eth_op_id: i64, hash: &H256) -> QueryResult<()> {
self.0.conn().transaction(|| {
// Insert the new hash entry.
let hash_entry = NewETHTxHash {
eth_op_id,
tx_hash: hash.as_bytes().to_vec(),
};
let inserted_hashes_rows = insert_into(eth_tx_hashes::table)
.values(&hash_entry)
.execute(self.0.conn())?;
assert_eq!(
inserted_hashes_rows, 1,
"Wrong amount of updated rows (eth_tx_hashes)"
);
Ok(())
})
}
/// Updates the Ethereum operation by adding a new tx data.
/// The new deadline block / gas value are placed instead of old values to the main entry.
pub fn update_eth_tx(
&self,
eth_op_id: i64,
new_deadline_block: i64,
new_gas_value: BigUint,
) -> QueryResult<()> {
self.0.conn().transaction(|| {
// Update the stored tx.
update(eth_operations::table.filter(eth_operations::id.eq(eth_op_id)))
.set((
eth_operations::last_used_gas_price.eq(StoredBigUint(new_gas_value)),
eth_operations::last_deadline_block.eq(new_deadline_block),
))
.execute(self.0.conn())?;
Ok(())
})
}
/// Updates the stats counter with the new operation reported.
/// This method should be called once **per operation**. It means that if transaction
/// for some operation was stuck, and another transaction was created for it, this method
/// **should not** be invoked.
///
/// This method expects the database to be initially prepared with inserting the actual
/// stats values. Currently the script `db-insert-eth-data.sh` is responsible for that
/// and it's invoked within `db-reset` subcommand.
fn report_created_operation(&self, operation_type: OperationType) -> QueryResult<()> {
self.0.conn().transaction(|| {
let mut current_stats: ETHParams = eth_parameters::table.first(self.0.conn())?;
// Increase the only one type of operations.
match operation_type {
OperationType::Commit => {
current_stats.commit_ops += 1;
}
OperationType::Verify => {
current_stats.verify_ops += 1;
}
OperationType::Withdraw => {
current_stats.withdraw_ops += 1;
}
};
// Update the stored stats.
update(eth_parameters::table.filter(eth_parameters::id.eq(true)))
.set((
eth_parameters::commit_ops.eq(current_stats.commit_ops),
eth_parameters::verify_ops.eq(current_stats.verify_ops),
eth_parameters::withdraw_ops.eq(current_stats.withdraw_ops),
))
.execute(self.0.conn())?;
Ok(())
})
}
/// Updates the stored gas price limit used by GasAdjuster.
///
/// This method expects the database to be initially prepared with inserting the actual
/// gas limit value. Currently the script `db-insert-eth-data.sh` is responsible for that
/// and it's invoked within `db-reset` subcommand.
pub fn update_gas_price_limit(&self, gas_price_limit: U256) -> QueryResult<()> {
self.0.conn().transaction(|| {
let gas_price_limit: i64 =
i64::try_from(gas_price_limit).expect("Can't convert U256 to i64");
// Update the stored gas price limit.
update(eth_parameters::table.filter(eth_parameters::id.eq(true)))
.set(eth_parameters::gas_price_limit.eq(gas_price_limit))
.execute(self.0.conn())?;
Ok(())
})
}
pub fn load_gas_price_limit(&self) -> QueryResult<U256> {
let params: ETHParams = eth_parameters::table.first::<ETHParams>(self.0.conn())?;
let gas_price_limit =
U256::try_from(params.gas_price_limit).expect("Negative gas limit value stored in DB");
Ok(gas_price_limit)
}
/// Loads the stored Ethereum operations stats.
pub fn load_stats(&self) -> QueryResult<ETHStats> {
eth_parameters::table
.first::<ETHParams>(self.0.conn())
.map(ETHStats::from)
}
/// Marks the stored Ethereum transaction as confirmed (and thus the associated `Operation`
/// is marked as confirmed as well).
pub fn confirm_eth_tx(&self, hash: &H256) -> QueryResult<()> {
self.0.conn().transaction(|| {
let eth_op_id = self.get_eth_op_id(hash)?;
// Set the `confirmed` and `final_hash` field of the entry.
let updated: Vec<i64> =
update(eth_operations::table.filter(eth_operations::id.eq(eth_op_id)))
.set((
eth_operations::confirmed.eq(true),
eth_operations::final_hash.eq(Some(hash.as_bytes().to_vec())),
))
.returning(eth_operations::id)
.get_results(self.0.conn())?;
assert_eq!(
updated.len(),
1,
"Unexpected amount of operations were confirmed"
);
let eth_op_id = updated[0];
let binding: Option<ETHBinding> = eth_ops_binding::table
.filter(eth_ops_binding::eth_op_id.eq(eth_op_id))
.first::<ETHBinding>(self.0.conn())
.optional()?;
// If there is a ZKSync operation, mark it as confirmed as well.
if let Some(binding) = binding {
let op = operations::table
.filter(operations::id.eq(binding.op_id))
.first::<StoredOperation>(self.0.conn())?;
update(operations::table.filter(operations::id.eq(op.id)))
.set(operations::confirmed.eq(true))
.execute(self.0.conn())
.map(drop)?;
}
Ok(())
})
}
/// Obtains the next nonce to use and updates the corresponding entry in the database
/// for the next invocation.
///
/// This method expects the database to be initially prepared with inserting the actual
/// nonce value. Currently the script `db-insert-eth-data.sh` is responsible for that
/// and it's invoked within `db-reset` subcommand.
pub(crate) fn | (&self) -> QueryResult<i64> {
let old_nonce: ETHParams = eth_parameters::table.first(self.0.conn())?;
let new_nonce_value = old_nonce.nonce + 1;
update(eth_parameters::table.filter(eth_parameters::id.eq(true)))
.set(eth_parameters::nonce.eq(new_nonce_value))
.execute(self.0.conn())?;
let old_nonce_value = old_nonce.nonce;
Ok(old_nonce_value)
}
/// Method that internally initializes the `eth_parameters` table.
/// Since in db tests the database is empty, we must provide a possibility
/// to initialize required db fields.
#[cfg(test)]
pub fn initialize_eth_data(&self) -> QueryResult<()> {
#[derive(Debug, Insertable)]
#[table_name = "eth_parameters"]
pub struct NewETHParams {
pub nonce: i64,
pub gas_price_limit: i64,
pub commit_ops: i64,
pub verify_ops: i64,
pub withdraw_ops: i64,
}
let old_params: Option<ETHParams> =
eth_parameters::table.first(self.0.conn()).optional()?;
if old_params.is_none() {
let params = NewETHParams {
nonce: 0,
gas_price_limit: 400 * 10e9 as i64,
commit_ops: 0,
verify_ops: 0,
withdraw_ops: 0,
};
insert_into(eth_parameters::table)
.values(¶ms)
.execute(self.0.conn())?;
}
Ok(())
}
}
| get_next_nonce |
http_test.go | package http
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"sync"
"testing"
"time"
)
func TestNewClient(t *testing.T) {
c := NewClient(time.Duration(60)*time.Second, 5)
if c.maxRetries != 5 || c.hc.Timeout != time.Duration(60)*time.Second {
t.Error("could not create new client")
}
}
func TestGetHTTPStatus(t *testing.T) {
mockHTTP := NewTestClient(func(req *http.Request) *http.Response {
reqStatusCode := strings.Split(req.URL.String(), "status/")
resStatusCode, _ := strconv.Atoi(reqStatusCode[1])
return &http.Response{
StatusCode: resStatusCode,
Body: ioutil.NopCloser(bytes.NewBufferString("Some Payload")),
Header: make(http.Header),
}
})
c := Client{hc: *mockHTTP, maxRetries: 1}
statusCodes := []int{200, 201, 202, 203, 204, 205, 206, 304, 307, 308, 400, 401, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 421, 426, 428, 429, 430, 431, 451, 500, 501, 502, 503, 504, 505} // 100, 101, 301, 302, 303 not tested
var wg sync.WaitGroup
for _, statusCode := range statusCodes {
wg.Add(1)
go func(actualStatusCode int) {
defer wg.Done()
url := fmt.Sprintf("https://mock/status/%v", actualStatusCode)
statusCode := c.GetHTTPStatus(url)
if statusCode != actualStatusCode {
t.Errorf("Expected status code %v, got %v\n", actualStatusCode, statusCode)
}
}(statusCode)
}
wg.Wait()
}
func TestPayload(t *testing.T) {
payload := "Some Payload"
mockHTTP := NewTestClient(func(req *http.Request) *http.Response {
return &http.Response{
StatusCode: 200,
Body: ioutil.NopCloser(bytes.NewBufferString(payload)),
Header: make(http.Header),
}
})
c := Client{hc: *mockHTTP, maxRetries: 1}
req, err := NewGetRequest("https://mock/anything/123")
if err != nil {
t.Error(err)
}
response, err := c.Do(req)
if err != nil {
t.Error(err)
}
if !bytes.Equal(response.payload, []byte(payload)) {
t.Errorf("Expected payload %v, got %v\n", payload, response.payload)
}
if response.TimeToCrawl() == time.Duration(0) {
t.Errorf("Expected longer time to crawl than 0 ns\n")
}
}
func TestGet(t *testing.T) {
mockHTTP := NewTestClient(func(req *http.Request) *http.Response {
return &http.Response{
StatusCode: 200,
Body: ioutil.NopCloser(bytes.NewBufferString("0")),
Header: make(http.Header),
}
})
c := Client{hc: *mockHTTP, maxRetries: 1}
url := "https://mock/status/200"
resp1, err := c.Get(url)
if err != nil {
t.Error(err)
}
req, err := NewGetRequest(url)
if err != nil {
t.Error(err)
}
resp2, err := c.Do(req)
if err != nil {
t.Error(err)
}
if resp1.StatusCode() != resp2.StatusCode() {
t.Errorf("Expected status code %v, got %v\n", resp1.statusCode, resp2.statusCode)
}
if resp1.Retries() != resp2.Retries() {
t.Errorf("Expected retries %v, got %v\n", resp1.retries, resp2.retries)
}
if !bytes.Equal(resp1.Payload(), resp2.Payload()) {
t.Errorf("Expected payload %v, got %v\n", resp1.payload, resp2.payload)
}
_, err = c.Get("")
if err == nil {
t.Error("Expected error due to wrong URL, got no error")
}
}
func | (t *testing.T) {
mockHTTP := NewTestClient(func(req *http.Request) *http.Response {
return &http.Response{
StatusCode: 501,
Body: ioutil.NopCloser(bytes.NewBufferString(`Server Error`)),
Header: http.Header{"Retry-After": []string{"1"}},
}
})
retries := uint(1)
c := Client{hc: *mockHTTP, maxRetries: 1}
req, err := NewGetRequest("https://mock/status/501")
if err != nil {
t.Error(err)
}
response, _ := c.Do(req) // 501 and error is expected
if response.StatusCode() != 501 {
t.Errorf("Expected status code %v , got %v\n", 501, response.StatusCode())
}
if response.Retries() != retries {
t.Errorf("Expected %v retrys, got %v\n", retries, response.Retries())
}
}
type RoundTripFunc func(req *http.Request) *http.Response
func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
return f(req), nil
}
func NewTestClient(fn RoundTripFunc) *http.Client {
return &http.Client{
Transport: RoundTripFunc(fn),
}
}
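// TestMockPatternExample sketches the RoundTripFunc pattern used throughout
// this file: http.Client hands every request to its Transport, so a fake
// RoundTripper fabricates responses without a network listener. The URL and
// status code here are illustrative.
func TestMockPatternExample(t *testing.T) {
mockHTTP := NewTestClient(func(req *http.Request) *http.Response {
return &http.Response{
StatusCode: 418,
Body: ioutil.NopCloser(bytes.NewBufferString("teapot")),
Header: make(http.Header),
}
})
c := Client{hc: *mockHTTP, maxRetries: 1}
if got := c.GetHTTPStatus("https://mock/teapot"); got != 418 {
t.Errorf("Expected status code 418, got %v", got)
}
}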
| TestRetry |
cargo.rs | use crate::config::*;
use crate::errors::RunError;
use crate::path_utils::get_source_walker;
use cargo_metadata::{diagnostic::DiagnosticLevel, CargoOpt, Message, Metadata, MetadataCommand};
use lazy_static::lazy_static;
use regex::Regex;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::env;
use std::fs::{read_dir, read_to_string, remove_dir_all, File};
use std::io;
use std::io::{BufRead, BufReader};
use std::path::{Component, Path, PathBuf};
use std::process::{Command, Stdio};
use toml::Value;
use tracing::{error, info, trace, warn};
use walkdir::{DirEntry, WalkDir};
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)]
enum Channel {
Stable,
Beta,
Nightly,
}
#[derive(Clone, Debug, Eq, PartialEq, Hash, Ord, PartialOrd)]
struct CargoVersionInfo {
major: usize,
minor: usize,
channel: Channel,
year: usize,
month: usize,
day: usize,
}
impl CargoVersionInfo {
fn supports_llvm_cov(&self) -> bool {
self.minor >= 50 && self.channel == Channel::Nightly
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize)]
pub struct TestBinary {
path: PathBuf,
ty: Option<RunType>,
cargo_dir: Option<PathBuf>,
pkg_name: Option<String>,
pkg_version: Option<String>,
pkg_authors: Option<Vec<String>>,
should_panic: bool,
}
#[derive(Clone, Debug)]
struct DocTestBinaryMeta {
prefix: String,
line: usize,
}
impl TestBinary {
pub fn new(path: PathBuf, ty: Option<RunType>) -> Self {
Self {
path,
ty,
pkg_name: None,
pkg_version: None,
pkg_authors: None,
cargo_dir: None,
should_panic: false,
}
}
pub fn path(&self) -> &Path {
&self.path
}
pub fn run_type(&self) -> Option<RunType> {
self.ty
}
pub fn manifest_dir(&self) -> &Option<PathBuf> {
&self.cargo_dir
}
pub fn pkg_name(&self) -> &Option<String> {
&self.pkg_name
}
pub fn pkg_version(&self) -> &Option<String> {
&self.pkg_version
}
pub fn pkg_authors(&self) -> &Option<Vec<String>> |
/// Should be `false` for normal tests and for doctests either `true` or
/// `false` depending on the test attribute
pub fn should_panic(&self) -> bool {
self.should_panic
}
}
impl DocTestBinaryMeta {
fn new<P: AsRef<Path>>(test: P) -> Option<Self> {
if let Some(Component::Normal(folder)) = test.as_ref().components().nth_back(1) {
let temp = folder.to_string_lossy();
let file_end = temp.rfind("rs").map(|i| i + 2)?;
let end = temp.rfind('_')?;
if end > file_end + 1 {
let line = temp[(file_end + 1)..end].parse::<usize>().ok()?;
Some(Self {
prefix: temp[..file_end].to_string(),
line,
})
} else {
None
}
} else {
None
}
}
}
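// Folder-name convention assumed by `DocTestBinaryMeta::new` above: a
// persisted doctest binary lives in a directory named like `src_lib_rs_42_0`,
// i.e. the flattened source path, then the line number, then an index.
// (The concrete path below is illustrative, not taken from this repo.)
//
// let meta = DocTestBinaryMeta::new("target/doctests/src_lib_rs_42_0/rust_out");
// assert_eq!(meta.unwrap().line, 42); // prefix would be "src_lib_rs"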
lazy_static! {
static ref CARGO_VERSION_INFO: Option<CargoVersionInfo> = {
let version_info = Regex::new(
r"cargo (\d)\.(\d+)\.\d+([\-betanightly]*) \([[:alnum:]]+ (\d{4})-(\d{2})-(\d{2})\)",
)
.unwrap();
Command::new("cargo")
.arg("--version")
.output()
.map(|x| {
let s = String::from_utf8_lossy(&x.stdout);
if let Some(cap) = version_info.captures(&s) {
let major = cap[1].parse().unwrap();
let minor = cap[2].parse().unwrap();
// We expect a string like `cargo 1.50.0-nightly (a0f433460 2020-02-01)`;
// the version ends with `-nightly`, `-beta`, or nothing for stable.
let channel = match &cap[3] {
"-nightly" => Channel::Nightly,
"-beta" => Channel::Beta,
_ => Channel::Stable,
};
let year = cap[4].parse().unwrap();
let month = cap[5].parse().unwrap();
let day = cap[6].parse().unwrap();
Some(CargoVersionInfo {
major,
minor,
channel,
year,
month,
day,
})
} else {
None
}
})
.unwrap_or(None)
};
}
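// Version strings the regex above is meant to accept (illustrative):
// "cargo 1.50.0-nightly (a0f433460 2020-02-01)" -> minor 50, Channel::Nightly
// "cargo 1.48.0 (65cbdd2dc 2020-10-14)" -> minor 48, Channel::Stable
// Note that `([\-betanightly]*)` is a character class, so it also matches the
// empty string, which is what maps to Channel::Stable.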
pub fn get_tests(config: &Config) -> Result<Vec<TestBinary>, RunError> {
let mut result = vec![];
if config.force_clean {
let cleanup_dir = if config.release {
config.target_dir().join("release")
} else {
config.target_dir().join("debug")
};
info!("Cleaning project");
if cleanup_dir.exists() {
if let Err(e) = remove_dir_all(cleanup_dir) {
error!("Cargo clean failed: {}", e);
}
}
}
let manifest = match config.manifest.as_path().to_str() {
Some(s) => s,
None => "Cargo.toml",
};
let metadata = MetadataCommand::new()
.manifest_path(manifest)
.features(CargoOpt::AllFeatures)
.exec()
.map_err(|e| RunError::Cargo(e.to_string()))?;
for ty in &config.run_types {
run_cargo(&metadata, manifest, config, Some(*ty), &mut result)?;
}
if config.has_named_tests() {
run_cargo(&metadata, manifest, config, None, &mut result)?
} else if config.run_types.is_empty() {
let ty = if config.command == Mode::Test {
Some(RunType::Tests)
} else {
None
};
run_cargo(&metadata, manifest, config, ty, &mut result)?;
}
Ok(result)
}
fn run_cargo(
metadata: &Metadata,
manifest: &str,
config: &Config,
ty: Option<RunType>,
result: &mut Vec<TestBinary>,
) -> Result<(), RunError> {
let mut cmd = create_command(manifest, config, ty);
if ty != Some(RunType::Doctests) {
cmd.stdout(Stdio::piped());
} else {
clean_doctest_folder(&config.doctest_dir());
cmd.stdout(Stdio::null());
}
trace!("Running command {:?}", cmd);
let mut child = cmd.spawn().map_err(|e| RunError::Cargo(e.to_string()))?;
if ty != Some(RunType::Doctests) {
let mut package_ids = vec![];
let reader = std::io::BufReader::new(child.stdout.take().unwrap());
let mut error = None;
for msg in Message::parse_stream(reader) {
match msg {
Ok(Message::CompilerArtifact(art)) => {
if let Some(path) = art.executable {
if !art.profile.test && config.command == Mode::Test {
continue;
}
result.push(TestBinary::new(PathBuf::from(path), ty));
package_ids.push(art.package_id.clone());
}
}
Ok(Message::CompilerMessage(m)) => match m.message.level {
DiagnosticLevel::Error | DiagnosticLevel::Ice => {
let msg = format!("{}: {}", m.target.name, m.message.message);
error = Some(RunError::TestCompile(msg));
break;
}
_ => {}
},
Err(e) => {
error!("Error parsing cargo messages {}", e);
}
_ => {}
}
}
let status = child.wait().map_err(|e| RunError::Cargo(e.to_string()))?;
if let Some(error) = error {
return Err(error);
}
if !status.success() {
return Err(RunError::Cargo("cargo run failed".to_string()));
};
// `result` may already hold binaries from earlier runs, so only pair the
// entries pushed during this invocation with their package ids.
let already_present = result.len() - package_ids.len();
for (res, package) in result.iter_mut().skip(already_present).zip(package_ids.iter()) {
let package = &metadata[package];
res.cargo_dir = package
.manifest_path
.parent()
.map(|x| PathBuf::from(x.to_path_buf()));
res.pkg_name = Some(package.name.clone());
res.pkg_version = Some(package.version.to_string());
res.pkg_authors = Some(package.authors.clone());
}
} else {
// need to wait for compiling to finish before getting doctests
// also need to wait with output to ensure the stdout buffer doesn't fill up
let out = child
.wait_with_output()
.map_err(|e| RunError::Cargo(e.to_string()))?;
if !out.status.success() {
error!("Building doctests failed");
return Err(RunError::Cargo("Building doctest failed".to_string()));
}
let walker = WalkDir::new(&config.doctest_dir()).into_iter();
let dir_entries = walker
.filter_map(|e| e.ok())
.filter(|e| matches!(e.metadata(), Ok(ref m) if m.is_file() && m.len() != 0))
.collect::<Vec<_>>();
let should_panics = get_panic_candidates(&dir_entries, config);
for dt in &dir_entries {
let mut tb = TestBinary::new(dt.path().to_path_buf(), ty);
let mut current_dir = dt.path();
loop {
if current_dir.is_dir() && current_dir.join("Cargo.toml").exists() {
tb.cargo_dir = Some(current_dir.to_path_buf());
break;
}
match current_dir.parent() {
Some(s) => {
current_dir = s;
}
None => break,
}
}
// Match the doctest binary back to any should_panic lines via its name prefix.
if let Some(meta) = DocTestBinaryMeta::new(dt.path()) {
if let Some(lines) = should_panics.get(&meta.prefix) {
tb.should_panic |= lines.contains(&meta.line);
}
}
result.push(tb);
}
}
Ok(())
}
fn convert_to_prefix(p: &Path) -> Option<String> {
// Build the prefix from the path components below the last directory containing a Cargo.toml
let convert_name = |p: &Path| {
if let Some(s) = p.file_name() {
s.to_str().map(|x| x.replace('.', "_")).unwrap_or_default()
} else {
String::new()
}
};
let mut buffer = vec![convert_name(p)];
let mut parent = p.parent();
while let Some(path_temp) = parent {
if !path_temp.join("Cargo.toml").exists() {
buffer.insert(0, convert_name(path_temp));
} else {
break;
}
parent = path_temp.parent();
}
if buffer.is_empty() {
None
} else {
Some(buffer.join("_"))
}
}
fn is_prefix_match(prefix: &str, entry: &Path) -> bool {
convert_to_prefix(entry)
.map(|s| s.contains(prefix))
.unwrap_or(false)
}
/// Returns a map from each doctest binary's file-name prefix to the list of lines in matching
/// source files which contain the string `should_panic`. It makes no guarantee that all of
/// these lines are doctest attributes showing panic behaviour (but some of them will be).
///
/// Currently all doctest files take the pattern `{name}_{line}_{number}`, where `name` is the
/// path to the file with directory separators and dots replaced by underscores. Each name
/// could therefore map to many files, as `src_some_folder_foo_rs_0_1` could refer to
/// `src/some/folder_foo.rs` or `src/some/folder/foo.rs`; here we work on the heuristic that
/// any matching file is good, because we can't do any better.
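///
/// For example (illustrative): a doctest binary in a folder named `src_lib_rs_12_0` yields
/// the prefix `src_lib_rs` and line `12`, which then matches source files such as
/// `src/lib.rs`.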
fn get_panic_candidates(tests: &[DirEntry], config: &Config) -> HashMap<String, Vec<usize>> {
let mut result = HashMap::new();
let mut checked_files = HashSet::new();
let root = config.root();
for test in tests {
if let Some(test_binary) = DocTestBinaryMeta::new(test.path()) {
for dir_entry in get_source_walker(config) {
let path = dir_entry.path();
if path.is_file() {
if let Some(p) = path_relative_from(path, &root) {
if is_prefix_match(&test_binary.prefix, &p) && !checked_files.contains(path)
{
checked_files.insert(path.to_path_buf());
let lines = find_panics_in_file(path).unwrap_or_default();
if !result.contains_key(&test_binary.prefix) {
result.insert(test_binary.prefix.clone(), lines);
} else if let Some(current_lines) = result.get_mut(&test_binary.prefix)
{
current_lines.extend_from_slice(&lines);
}
}
}
}
}
} else {
warn!(
"Invalid characters in name of doctest {}",
test.path().display()
);
}
}
result
}
fn find_panics_in_file(file: &Path) -> io::Result<Vec<usize>> {
let f = File::open(file)?;
let reader = BufReader::new(f);
let lines = reader
.lines()
.enumerate()
.filter(|(_, l)| {
l.as_ref()
.map(|x| x.contains("should_panic"))
.unwrap_or(false)
})
.map(|(i, _)| i + 1) // Move from line index to line number
.collect();
Ok(lines)
}
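/// Constructs the `cargo` invocation for building the requested test targets,
/// routing doctests through a nightly toolchain and requesting JSON messages.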
fn create_command(manifest_path: &str, config: &Config, ty: Option<RunType>) -> Command {
let mut test_cmd = Command::new("cargo");
if ty == Some(RunType::Doctests) {
if let Some(toolchain) = env::var("RUSTUP_TOOLCHAIN")
.ok()
.filter(|t| t.starts_with("nightly"))
{
test_cmd.args(&[format!("+{}", toolchain).as_str(), "test"]);
} else {
test_cmd.args(&["+nightly", "test"]);
}
} else {
if let Ok(toolchain) = env::var("RUSTUP_TOOLCHAIN") {
test_cmd.arg(format!("+{}", toolchain));
}
if config.command == Mode::Test {
test_cmd.args(&["test", "--no-run"]);
} else {
test_cmd.arg("build");
}
}
test_cmd.args(&["--message-format", "json", "--manifest-path", manifest_path]);
if let Some(ty) = ty {
match ty {
RunType::Tests => test_cmd.arg("--tests"),
RunType::Doctests => test_cmd.arg("--doc"),
RunType::Benchmarks => test_cmd.arg("--benches"),
RunType::Examples => test_cmd.arg("--examples"),
RunType::AllTargets => test_cmd.arg("--all-targets"),
RunType::Lib => test_cmd.arg("--lib"),
RunType::Bins => test_cmd.arg("--bins"),
};
} else {
for test in &config.test_names {
test_cmd.arg("--test");
test_cmd.arg(test);
}
for test in &config.bin_names {
test_cmd.arg("--bin");
test_cmd.arg(test);
}
for test in &config.example_names {
test_cmd.arg("--example");
test_cmd.arg(test);
}
for test in &config.bench_names {
test_cmd.arg("--bench");
test_cmd.arg(test);
}
}
init_args(&mut test_cmd, config);
setup_environment(&mut test_cmd, config);
test_cmd
}
fn init_args(test_cmd: &mut Command, config: &Config) {
if config.debug {
test_cmd.arg("-vvv");
}
if config.locked {
test_cmd.arg("--locked");
}
if config.frozen {
test_cmd.arg("--frozen");
}
if config.no_fail_fast {
test_cmd.arg("--no-fail-fast");
}
if let Some(profile) = config.profile.as_ref() {
test_cmd.arg("--profile");
test_cmd.arg(profile);
}
if let Some(jobs) = config.jobs {
test_cmd.arg("--jobs");
test_cmd.arg(jobs.to_string());
}
if let Some(features) = config.features.as_ref() {
test_cmd.arg("--features");
test_cmd.arg(features);
}
if config.all_targets {
test_cmd.arg("--all-targets");
}
if config.all_features {
test_cmd.arg("--all-features");
}
if config.no_default_features {
test_cmd.arg("--no-default-features");
}
if config.all {
test_cmd.arg("--workspace");
}
if config.release {
test_cmd.arg("--release");
}
config.packages.iter().for_each(|package| {
test_cmd.arg("--package");
test_cmd.arg(package);
});
config.exclude.iter().for_each(|package| {
test_cmd.arg("--exclude");
test_cmd.arg(package);
});
test_cmd.arg("--color");
test_cmd.arg(config.color.to_string().to_ascii_lowercase());
if let Some(target) = config.target.as_ref() {
test_cmd.args(&["--target", target]);
}
let args = vec![
"--target-dir".to_string(),
format!("{}", config.target_dir().display()),
];
test_cmd.args(args);
if config.offline {
test_cmd.arg("--offline");
}
for feat in &config.unstable_features {
test_cmd.arg(format!("-Z{}", feat));
}
if config.command == Mode::Test && !config.varargs.is_empty() {
let mut args = vec!["--".to_string()];
args.extend_from_slice(&config.varargs);
test_cmd.args(args);
}
}
/// Old doctests that no longer exist, or whose line numbers have changed, can persist;
/// delete them to avoid confusing the results.
fn clean_doctest_folder<P: AsRef<Path>>(doctest_dir: P) {
if let Ok(rd) = read_dir(doctest_dir.as_ref()) {
rd.flat_map(|e| e.ok())
.filter(|e| {
e.path()
.components()
.next_back()
.map(|e| e.as_os_str().to_string_lossy().contains("rs"))
.unwrap_or(false)
})
.for_each(|e| {
if let Err(err) = remove_dir_all(e.path()) {
warn!("Failed to delete {}: {}", e.path().display(), err);
}
});
}
}
fn handle_llvm_flags(value: &mut String, config: &Config) {
if (config.engine == TraceEngine::Auto || config.engine == TraceEngine::Llvm)
&& supports_llvm_coverage()
{
value.push_str("-Z instrument-coverage ");
} else if config.engine == TraceEngine::Llvm {
error!("unable to utilise llvm coverage, due to compiler support. Falling back to Ptrace");
}
}
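/// Builds the `RUSTDOCFLAGS` value used when compiling doctests, persisting the
/// doctest binaries so they can be executed for coverage afterwards.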
pub fn rustdoc_flags(config: &Config) -> String {
const RUSTDOC: &str = "RUSTDOCFLAGS";
let common_opts = " -C link-dead-code -C debuginfo=2 --cfg=tarpaulin ";
let mut value = format!(
"{} --persist-doctests {} -Z unstable-options ",
common_opts,
config.doctest_dir().display()
);
if let Ok(vtemp) = env::var(RUSTDOC) {
if !vtemp.contains("--persist-doctests") {
value.push_str(vtemp.as_ref());
}
}
handle_llvm_flags(&mut value, config);
value
}
fn look_for_rustflags_in_table(value: &Value) -> String {
// Avoid panicking on malformed config files: treat anything unexpected as "no flags".
value
.as_table()
.and_then(|table| table.get("rustflags"))
.and_then(|rustflags| rustflags.as_array())
.map(|flags| {
flags
.iter()
.filter_map(|x| x.as_str())
.collect::<Vec<_>>()
.join(" ")
})
.unwrap_or_default()
}
fn look_for_rustflags_in_file(path: &Path) -> Option<String> {
if let Ok(contents) = read_to_string(path) {
let value = contents.parse::<Value>().ok()?;
let rustflags_in_file: Vec<String> = value
.as_table()?
.into_iter()
.map(|(s, v)| {
if s.as_str() == "build" {
look_for_rustflags_in_table(v)
} else {
String::new()
}
})
.collect();
Some(rustflags_in_file.join(" "))
} else {
None
}
}
fn look_for_rustflags_in(path: &Path) -> Option<String> {
let mut config_path = path.join("config");
let rustflags = look_for_rustflags_in_file(&config_path);
if rustflags.is_some() {
return rustflags;
}
config_path.pop();
config_path.push("config.toml");
let rustflags = look_for_rustflags_in_file(&config_path);
if rustflags.is_some() {
return rustflags;
}
None
}
fn build_config_path(base: impl AsRef<Path>) -> PathBuf {
let mut config_path = PathBuf::from(base.as_ref());
config_path.push(".cargo");
config_path
}
fn gather_config_rust_flags(config: &Config) -> String {
if let Some(rustflags) = look_for_rustflags_in(&build_config_path(&config.root())) {
return rustflags;
}
if let Ok(cargo_home_config) = env::var("CARGO_HOME") {
if let Some(rustflags) = look_for_rustflags_in(&PathBuf::from(cargo_home_config)) {
return rustflags;
}
}
String::new()
}
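/// Builds the `RUSTFLAGS` value for test compilation, merging tarpaulin's required
/// flags with user-supplied or cargo-config flags (stripping any `debuginfo` overrides).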
pub fn rust_flags(config: &Config) -> String {
const RUSTFLAGS: &str = "RUSTFLAGS";
let mut value = config.rustflags.clone().unwrap_or_default();
value.push_str(" -C link-dead-code -C debuginfo=2 ");
if !config.avoid_cfg_tarpaulin {
value.push_str("--cfg=tarpaulin ");
}
if config.release {
value.push_str("-C debug-assertions=off ");
}
handle_llvm_flags(&mut value, config);
lazy_static! {
static ref DEBUG_INFO: Regex = Regex::new(r#"\-C\s*debuginfo=\d"#).unwrap();
}
if let Ok(vtemp) = env::var(RUSTFLAGS) {
value.push_str(&DEBUG_INFO.replace_all(&vtemp, " "));
} else {
let vtemp = gather_config_rust_flags(config);
value.push_str(&DEBUG_INFO.replace_all(&vtemp, " "));
}
value
}
fn setup_environment(cmd: &mut Command, config: &Config) {
cmd.env("TARPAULIN", "1");
let rustflags = "RUSTFLAGS";
let value = rust_flags(config);
cmd.env(rustflags, value);
// Always set RUSTDOCFLAGS; it is harmless if doctests are not run.
let rustdoc = "RUSTDOCFLAGS";
let value = rustdoc_flags(config);
trace!("Setting RUSTDOCFLAGS='{}'", value);
cmd.env(rustdoc, value);
}
fn supports_llvm_coverage() -> bool {
if let Some(version) = CARGO_VERSION_INFO.as_ref() {
version.supports_llvm_cov()
} else {
false
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn llvm_cov_compatible_version() {
let version = CargoVersionInfo {
major: 1,
minor: 50,
channel: Channel::Nightly,
year: 2020,
month: 12,
day: 22,
};
assert!(version.supports_llvm_cov());
}
#[test]
fn llvm_cov_incompatible_version() {
let mut version = CargoVersionInfo {
major: 1,
minor: 48,
channel: Channel::Stable,
year: 2020,
month: 10,
day: 14,
};
assert!(!version.supports_llvm_cov());
version.channel = Channel::Beta;
assert!(!version.supports_llvm_cov());
version.minor = 50;
assert!(!version.supports_llvm_cov());
version.minor = 58;
version.channel = Channel::Stable;
assert!(!version.supports_llvm_cov());
}
}
| {
&self.pkg_authors
} |
tokenizer_whitespace.rs | use tokenizer::{ParseResult, TomlFragment};
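/// Splits the leading run of whitespace off `s`, returning it as a
/// `TomlFragment::Whitespace` plus the remaining input, or `None` when `s`
/// is empty or does not begin with whitespace.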
pub fn tokenize_whitespace(s: &str) -> Option<ParseResult> {
if s.is_empty() {
return None;
}
let mut index: usize = 0;
for c in s.chars() {
if !c.is_whitespace() {
break;
}
// Advance by the character's UTF-8 width so `index` remains a valid byte
// offset even for multi-byte whitespace such as U+00A0.
index += c.len_utf8();
}
match index {
0 => None,
_ => {
let fragment = TomlFragment::Whitespace(&s[0..index]);
let remainder = &s[index..];
Some(ParseResult { fragment, remainder })
}
}
}
#[test]
fn tokenize_whitespace_empty_string() {
assert_eq!(None, tokenize_whitespace(""));
}
#[test] | assert_eq!(None, tokenize_whitespace("abc"));
}
#[test]
fn tokenize_whitespace_one_space() {
let fragment = TomlFragment::Whitespace(" ");
let remainder = "";
assert_eq!(Some(ParseResult { fragment: fragment, remainder: remainder }), tokenize_whitespace(" "));
}
#[test]
fn tokenize_whitespace_two_spaces() {
let fragment = TomlFragment::Whitespace(" ");
let remainder = "";
assert_eq!(Some(ParseResult { fragment: fragment, remainder: remainder }), tokenize_whitespace(" "));
}
#[test]
fn tokenize_whitespace_one_space_one_letter() {
let fragment = TomlFragment::Whitespace(" ");
let remainder = "w";
assert_eq!(Some(ParseResult { fragment: fragment, remainder: remainder }), tokenize_whitespace(" w"));
}
#[test]
fn tokenize_whitespace_unix_newline() {
let fragment = TomlFragment::Whitespace("\n");
let remainder = "";
assert_eq!(Some(ParseResult { fragment: fragment, remainder: remainder }), tokenize_whitespace("\n"));
}
#[test]
fn tokenize_whitespace_windows_newline() {
let fragment = TomlFragment::Whitespace("\r\n");
let remainder = "";
assert_eq!(Some(ParseResult { fragment: fragment, remainder: remainder }), tokenize_whitespace("\r\n"));
} | fn tokenize_whitespace_no_whitespace_str() { |
3dg.ts | /**
* Copyright (c) 2019 mol* contributors, licensed under MIT, See LICENSE file for more info.
*
* @author Alexander Rose <[email protected]>
*/
import { Model } from '../../mol-model/structure/model';
import { Task } from '../../mol-task';
import { ModelFormat } from './format';
import { _parse_mmCif } from './mmcif/parser';
import { CifCategory, CifField } from '../../mol-io/reader/cif';
import { Column } from '../../mol-data/db';
import { mmCIF_Schema } from '../../mol-io/reader/cif/schema/mmcif';
import { EntityBuilder } from './common/entity';
import { File3DG } from '../../mol-io/reader/3dg/parser';
import { fillSerial } from '../../mol-util/array';
import { MoleculeType } from '../../mol-model/structure/model/types';
function | (table: File3DG['table']) {
const entityIds = new Array<string>(table._rowCount)
const entityBuilder = new EntityBuilder()
const seqIdStarts = table.position.toArray({ array: Uint32Array })
const seqIdEnds = new Uint32Array(table._rowCount)
const stride = seqIdStarts[1] - seqIdStarts[0]
const objectRadius = stride / 3500
for (let i = 0, il = table._rowCount; i < il; ++i) {
const chr = table.chromosome.value(i)
const entityId = entityBuilder.getEntityId(chr, MoleculeType.DNA, chr)
entityIds[i] = entityId
seqIdEnds[i] = seqIdStarts[i] + stride - 1
}
const ihm_sphere_obj_site: CifCategory.SomeFields<mmCIF_Schema['ihm_sphere_obj_site']> = {
id: CifField.ofNumbers(fillSerial(new Uint32Array(table._rowCount))),
entity_id: CifField.ofStrings(entityIds),
seq_id_begin: CifField.ofNumbers(seqIdStarts),
seq_id_end: CifField.ofNumbers(seqIdEnds),
asym_id: CifField.ofColumn(table.chromosome),
Cartn_x: CifField.ofNumbers(Column.mapToArray(table.x, x => x * 10, Float32Array)),
Cartn_y: CifField.ofNumbers(Column.mapToArray(table.y, y => y * 10, Float32Array)),
Cartn_z: CifField.ofNumbers(Column.mapToArray(table.z, z => z * 10, Float32Array)),
object_radius: CifField.ofColumn(Column.ofConst(objectRadius, table._rowCount, Column.Schema.float)),
rmsf: CifField.ofColumn(Column.ofConst(0, table._rowCount, Column.Schema.float)),
model_id: CifField.ofColumn(Column.ofConst(1, table._rowCount, Column.Schema.int)),
}
return {
entity: entityBuilder.getEntityCategory(),
ihm_model_list: CifCategory.ofFields('ihm_model_list', {
model_id: CifField.ofNumbers([1]),
model_name: CifField.ofStrings(['3DG Model']),
}),
ihm_sphere_obj_site: CifCategory.ofFields('ihm_sphere_obj_site', ihm_sphere_obj_site)
}
}
async function mmCifFrom3dg(file3dg: File3DG) {
const categories = getCategories(file3dg.table)
return {
header: '3DG',
categoryNames: Object.keys(categories),
categories
};
}
export function trajectoryFrom3DG(file3dg: File3DG): Task<Model.Trajectory> {
return Task.create('Parse 3DG', async ctx => {
await ctx.update('Converting to mmCIF');
const cif = await mmCifFrom3dg(file3dg);
const format = ModelFormat.mmCIF(cif);
return _parse_mmCif(format, ctx);
})
}
| getCategories |
file_classification.go | "github.com/anchore/syft/syft/source"
"github.com/spf13/viper"
)
type fileClassification struct {
Cataloger catalogerOptions `yaml:"cataloger" json:"cataloger" mapstructure:"cataloger"`
}
func (cfg fileClassification) loadDefaultValues(v *viper.Viper) {
v.SetDefault("file-classification.cataloger.enabled", true)
v.SetDefault("file-classification.cataloger.scope", source.SquashedScope)
}
func (cfg *fileClassification) parseConfigValues() error {
return cfg.Cataloger.parseConfigValues()
} | package config
import ( |
|
error.d.ts | import { AxiosResponse } from "axios";
export declare const enum ArweaveErrorType {
TX_PENDING = "TX_PENDING",
TX_NOT_FOUND = "TX_NOT_FOUND",
TX_FAILED = "TX_FAILED",
TX_INVALID = "TX_INVALID",
BLOCK_NOT_FOUND = "BLOCK_NOT_FOUND"
}
export default class | extends Error {
readonly type: ArweaveErrorType;
readonly response?: AxiosResponse;
constructor(type: ArweaveErrorType, optional?: {
message?: string;
response?: AxiosResponse;
});
getType(): ArweaveErrorType;
}
declare type AxiosResponseLite = {
status: number;
statusText?: string;
data: {
error: string;
} | any;
};
export declare function getError(resp: AxiosResponseLite): any;
export {};
| ArweaveError |
nup_test.go | /*
Copyright 2020 The pdfcpu Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"path/filepath"
"testing"
"github.com/micaelAlastor/pdfcpu/pkg/cli"
"github.com/micaelAlastor/pdfcpu/pkg/pdfcpu"
)
func testNUp(t *testing.T, msg string, inFiles []string, outFile string, selectedPages []string, desc string, n int, isImg bool) |
func TestNUpCommand(t *testing.T) {
for _, tt := range []struct {
msg string
inFiles []string
outFile string
selectedPages []string
desc string
n int
isImg bool
}{
{"TestNUpFromPDF",
[]string{filepath.Join(inDir, "Acroforms2.pdf")},
filepath.Join(outDir, "Acroforms2.pdf"),
nil,
"",
4,
false},
{"TestNUpFromSingleImage",
[]string{filepath.Join(resDir, "pdfchip3.png")},
filepath.Join(outDir, "out.pdf"),
nil,
"f:A3L",
9,
true},
{"TestNUpFromImages",
[]string{
filepath.Join(resDir, "pdfchip3.png"),
filepath.Join(resDir, "demo.png"),
filepath.Join(resDir, "snow.jpg"),
},
filepath.Join(outDir, "out1.pdf"),
nil,
"f:Tabloid, b:off, m:0",
6,
true},
} {
testNUp(t, tt.msg, tt.inFiles, tt.outFile, tt.selectedPages, tt.desc, tt.n, tt.isImg)
}
}
| {
t.Helper()
var (
nup *pdfcpu.NUp
err error
)
if isImg {
if nup, err = pdfcpu.ImageNUpConfig(n, desc); err != nil {
t.Fatalf("%s %s: %v\n", msg, outFile, err)
}
} else {
if nup, err = pdfcpu.PDFNUpConfig(n, desc); err != nil {
t.Fatalf("%s %s: %v\n", msg, outFile, err)
}
}
cmd := cli.NUpCommand(inFiles, outFile, selectedPages, nup, nil)
if _, err := cli.Process(cmd); err != nil {
t.Fatalf("%s %s: %v\n", msg, outFile, err)
}
if err := validateFile(t, outFile, nil); err != nil {
t.Fatalf("%s: %v\n", msg, err)
}
} |
workspaceContext.d.ts | import { Connection } from '@salesforce/core';
import * as vscode from 'vscode';
export declare class | {
protected static instance?: WorkspaceContext;
initialize(context: vscode.ExtensionContext): Promise<void>;
static getInstance(forceNew?: boolean): WorkspaceContext;
getConnection(): Promise<Connection>;
}
| WorkspaceContext |
enr.go | // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package enr implements Ethereum Node Records as defined in EIP-778. A node record holds
// arbitrary information about a node on the peer-to-peer network. Node information is
// stored in key/value pairs. To store and retrieve key/values in a record, use the Entry
// interface.
//
// Signature Handling
//
// Records must be signed before transmitting them to another node.
//
// Decoding a record doesn't check its signature. Code working with records from an
// untrusted source must always verify two things: that the record uses an identity scheme
// deemed secure, and that the signature is valid according to the declared scheme.
//
// When creating a record, set the entries you want and use a signing function provided by
// the identity scheme to add the signature. Modifying a record invalidates the signature.
//
// Package enr supports the "secp256k1-keccak" identity scheme.
package enr
import (
"bytes"
"errors"
"fmt"
"io"
"sort"
"github.com/tenderly/optimism/l2geth/rlp"
)
const SizeLimit = 300 // maximum encoded size of a node record in bytes
var (
ErrInvalidSig = errors.New("invalid signature on node record")
errNotSorted = errors.New("record key/value pairs are not sorted by key")
errDuplicateKey = errors.New("record contains duplicate key")
errIncompletePair = errors.New("record contains incomplete k/v pair")
errTooBig = fmt.Errorf("record bigger than %d bytes", SizeLimit)
errEncodeUnsigned = errors.New("can't encode unsigned record")
errNotFound = errors.New("no such key in record")
)
// An IdentityScheme is capable of verifying record signatures and
// deriving node addresses.
type IdentityScheme interface {
Verify(r *Record, sig []byte) error
NodeAddr(r *Record) []byte
}
// SchemeMap is a registry of named identity schemes.
type SchemeMap map[string]IdentityScheme
func (m SchemeMap) Verify(r *Record, sig []byte) error {
s := m[r.IdentityScheme()]
if s == nil {
return ErrInvalidSig
}
return s.Verify(r, sig)
}
func (m SchemeMap) NodeAddr(r *Record) []byte {
s := m[r.IdentityScheme()]
if s == nil {
return nil
}
return s.NodeAddr(r)
}
// Record represents a node record. The zero value is an empty record.
type Record struct {
seq uint64 // sequence number
signature []byte // the signature
raw []byte // RLP encoded record
pairs []pair // sorted list of all key/value pairs
}
// pair is a key/value pair in a record.
type pair struct {
k string
v rlp.RawValue
}
// Seq returns the sequence number.
func (r *Record) Seq() uint64 {
return r.seq
}
// SetSeq updates the record sequence number. This invalidates any signature on the record.
// Calling SetSeq is usually not required because setting any key in a signed record
// increments the sequence number.
func (r *Record) SetSeq(s uint64) {
r.signature = nil
r.raw = nil
r.seq = s
}
// Load retrieves the value of a key/value pair. The given Entry must be a pointer and will
// be set to the value of the entry in the record.
//
// Errors returned by Load are wrapped in KeyError. You can distinguish decoding errors
// from missing keys using the IsNotFound function.
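//
// A minimal sketch (using the ID entry defined in this package):
//
//	var id ID
//	if err := r.Load(&id); err != nil && !IsNotFound(err) {
//		// a genuine decoding error rather than a missing key
//	}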
func (r *Record) Load(e Entry) error {
i := sort.Search(len(r.pairs), func(i int) bool { return r.pairs[i].k >= e.ENRKey() })
if i < len(r.pairs) && r.pairs[i].k == e.ENRKey() {
if err := rlp.DecodeBytes(r.pairs[i].v, e); err != nil {
return &KeyError{Key: e.ENRKey(), Err: err}
}
return nil
}
return &KeyError{Key: e.ENRKey(), Err: errNotFound}
}
// Set adds or updates the given entry in the record. It panics if the value can't be
// encoded. If the record is signed, Set increments the sequence number and invalidates
// the signature.
func (r *Record) Set(e Entry) {
blob, err := rlp.EncodeToBytes(e)
if err != nil {
panic(fmt.Errorf("enr: can't encode %s: %v", e.ENRKey(), err))
}
r.invalidate()
pairs := make([]pair, len(r.pairs))
copy(pairs, r.pairs)
i := sort.Search(len(pairs), func(i int) bool { return pairs[i].k >= e.ENRKey() })
switch {
case i < len(pairs) && pairs[i].k == e.ENRKey():
// element is present at r.pairs[i]
pairs[i].v = blob
case i < len(r.pairs):
// insert pair before i-th elem
el := pair{e.ENRKey(), blob}
pairs = append(pairs, pair{})
copy(pairs[i+1:], pairs[i:])
pairs[i] = el
default:
// element should be placed at the end of r.pairs
pairs = append(pairs, pair{e.ENRKey(), blob})
}
r.pairs = pairs
}
func (r *Record) invalidate() {
if r.signature != nil {
r.seq++
}
r.signature = nil
r.raw = nil
}
// Signature returns the signature of the record.
func (r *Record) Signature() []byte {
if r.signature == nil {
return nil
}
cpy := make([]byte, len(r.signature))
copy(cpy, r.signature)
return cpy
}
// EncodeRLP implements rlp.Encoder. Encoding fails if
// the record is unsigned.
func (r Record) EncodeRLP(w io.Writer) error {
if r.signature == nil {
return errEncodeUnsigned
}
_, err := w.Write(r.raw)
return err
}
// DecodeRLP implements rlp.Decoder. Decoding doesn't verify the signature.
func (r *Record) DecodeRLP(s *rlp.Stream) error {
dec, raw, err := decodeRecord(s)
if err != nil {
return err
}
*r = dec
r.raw = raw
return nil
}
func decodeRecord(s *rlp.Stream) (dec Record, raw []byte, err error) {
raw, err = s.Raw()
if err != nil {
return dec, raw, err
}
if len(raw) > SizeLimit {
return dec, raw, errTooBig
}
// Decode the RLP container.
s = rlp.NewStream(bytes.NewReader(raw), 0)
if _, err := s.List(); err != nil {
return dec, raw, err
}
if err = s.Decode(&dec.signature); err != nil {
return dec, raw, err
}
if err = s.Decode(&dec.seq); err != nil {
return dec, raw, err
}
// The rest of the record contains sorted k/v pairs.
var prevkey string
for i := 0; ; i++ {
var kv pair
if err := s.Decode(&kv.k); err != nil {
if err == rlp.EOL {
break
}
return dec, raw, err
}
if err := s.Decode(&kv.v); err != nil {
if err == rlp.EOL {
return dec, raw, errIncompletePair
}
return dec, raw, err
}
if i > 0 {
if kv.k == prevkey {
return dec, raw, errDuplicateKey
}
if kv.k < prevkey {
return dec, raw, errNotSorted
}
}
dec.pairs = append(dec.pairs, kv)
prevkey = kv.k
}
return dec, raw, s.ListEnd()
}
// IdentityScheme returns the name of the identity scheme in the record.
func (r *Record) IdentityScheme() string {
var id ID
r.Load(&id)
return string(id)
}
// VerifySignature checks whether the record is signed using the given identity scheme.
func (r *Record) VerifySignature(s IdentityScheme) error {
return s.Verify(r, r.signature)
}
// SetSig sets the record signature. It returns an error if the encoded record is larger
// than the size limit or if the signature is invalid according to the passed scheme.
//
// You can also use SetSig to remove the signature explicitly by passing a nil scheme | func (r *Record) SetSig(s IdentityScheme, sig []byte) error {
switch {
// Prevent storing invalid data.
case s == nil && sig != nil:
panic("enr: invalid call to SetSig with non-nil signature but nil scheme")
case s != nil && sig == nil:
panic("enr: invalid call to SetSig with nil signature but non-nil scheme")
// Verify if we have a scheme.
case s != nil:
if err := s.Verify(r, sig); err != nil {
return err
}
raw, err := r.encode(sig)
if err != nil {
return err
}
r.signature, r.raw = sig, raw
// Reset otherwise.
default:
r.signature, r.raw = nil, nil
}
return nil
}
// AppendElements appends the sequence number and entries to the given slice.
func (r *Record) AppendElements(list []interface{}) []interface{} {
list = append(list, r.seq)
for _, p := range r.pairs {
list = append(list, p.k, p.v)
}
return list
}
func (r *Record) encode(sig []byte) (raw []byte, err error) {
list := make([]interface{}, 1, 2*len(r.pairs)+1)
list[0] = sig
list = r.AppendElements(list)
if raw, err = rlp.EncodeToBytes(list); err != nil {
return nil, err
}
if len(raw) > SizeLimit {
return nil, errTooBig
}
return raw, nil
} | // and signature.
//
// SetSig panics when either the scheme or the signature (but not both) are nil. |
patterns.rs | //! Parsing of patterns: path, tuple, slice, range, record, reference and binding patterns.
use super::*;
pub(super) const PATTERN_FIRST: TokenSet =
expressions::LITERAL_FIRST.union(paths::PATH_FIRST).union(TokenSet::new(&[
T![box],
T![ref],
T![mut],
T!['('],
T!['['],
T![&],
T![_],
T![-],
T![.],
]));
pub(crate) fn pattern(p: &mut Parser) {
pattern_r(p, PAT_RECOVERY_SET);
}
/// Parses a pattern list separated by pipes `|`
pub(super) fn pattern_top(p: &mut Parser) {
pattern_top_r(p, PAT_RECOVERY_SET)
}
pub(crate) fn pattern_single(p: &mut Parser) {
pattern_single_r(p, PAT_RECOVERY_SET);
}
/// Parses a pattern list separated by pipes `|`
/// using the given `recovery_set`
pub(super) fn pattern_top_r(p: &mut Parser, recovery_set: TokenSet) {
p.eat(T![|]);
pattern_r(p, recovery_set);
}
/// Parses a pattern list separated by pipes `|`, with no leading `|`,using the
/// given `recovery_set`
// test or_pattern
// fn main() {
// match () {
// (_ | _) => (),
// &(_ | _) => (),
// (_ | _,) => (),
// [_ | _,] => (),
// }
// }
fn pattern_r(p: &mut Parser, recovery_set: TokenSet) {
let m = p.start();
pattern_single_r(p, recovery_set);
if !p.at(T![|]) {
m.abandon(p);
return;
}
while p.eat(T![|]) {
pattern_single_r(p, recovery_set);
}
m.complete(p, OR_PAT);
}
fn pattern_single_r(p: &mut Parser, recovery_set: TokenSet) {
if let Some(lhs) = atom_pat(p, recovery_set) {
// test range_pat
// fn main() {
// match 92 {
// 0 ... 100 => (),
// 101 ..= 200 => (),
// 200 .. 301=> (),
// }
// }
for &range_op in [T![...], T![..=], T![..]].iter() {
if p.at(range_op) {
let m = lhs.precede(p);
p.bump(range_op);
atom_pat(p, recovery_set);
m.complete(p, RANGE_PAT);
return;
}
}
}
}
const PAT_RECOVERY_SET: TokenSet =
TokenSet::new(&[LET_KW, IF_KW, WHILE_KW, LOOP_KW, MATCH_KW, R_PAREN, COMMA]);
fn atom_pat(p: &mut Parser, recovery_set: TokenSet) -> Option<CompletedMarker> {
let m = match p.nth(0) {
T![box] => box_pat(p),
T![ref] | T![mut] => ident_pat(p, true),
IDENT => match p.nth(1) {
// Checks the token after an IDENT to see if a pattern is a path (Struct { .. }) or macro
// (T![x]).
T!['('] | T!['{'] | T![!] => path_or_macro_pat(p),
T![:] if p.nth_at(1, T![::]) => path_or_macro_pat(p),
_ => ident_pat(p, true),
},
// test type_path_in_pattern
// fn main() { let <_>::Foo = (); }
_ if paths::is_path_start(p) => path_or_macro_pat(p),
_ if is_literal_pat_start(p) => literal_pat(p),
T![.] if p.at(T![..]) => rest_pat(p),
T![_] => wildcard_pat(p),
T![&] => ref_pat(p),
T!['('] => tuple_pat(p),
T!['['] => slice_pat(p),
_ => {
p.err_recover("expected pattern", recovery_set);
return None;
}
};
Some(m)
}
fn is_literal_pat_start(p: &Parser) -> bool {
p.at(T![-]) && (p.nth(1) == INT_NUMBER || p.nth(1) == FLOAT_NUMBER)
|| p.at_ts(expressions::LITERAL_FIRST)
}
// test literal_pattern
// fn main() {
// match () {
// -1 => (),
// 92 => (),
// 'c' => (),
// "hello" => (),
// }
// }
fn literal_pat(p: &mut Parser) -> CompletedMarker {
assert!(is_literal_pat_start(p));
let m = p.start();
if p.at(T![-]) {
p.bump(T![-]);
}
expressions::literal(p);
m.complete(p, LITERAL_PAT)
}
// test path_part
// fn foo() {
// let foo::Bar = ();
// let ::Bar = ();
// let Bar { .. } = ();
// let Bar(..) = ();
// }
fn path_or_macro_pat(p: &mut Parser) -> CompletedMarker {
assert!(paths::is_path_start(p));
let m = p.start();
paths::expr_path(p);
let kind = match p.current() {
T!['('] => {
tuple_pat_fields(p);
TUPLE_STRUCT_PAT
}
T!['{'] => {
record_pat_field_list(p);
RECORD_PAT
}
// test marco_pat
// fn main() {
// let m!(x) = 0;
// }
T![!] => {
items::macro_call_after_excl(p);
return m.complete(p, MACRO_CALL).precede(p).complete(p, MACRO_PAT);
}
_ => PATH_PAT,
};
m.complete(p, kind)
}
// test tuple_pat_fields
// fn foo() {
// let S() = ();
// let S(_) = ();
// let S(_,) = ();
// let S(_, .. , x) = ();
// }
fn tuple_pat_fields(p: &mut Parser) {
assert!(p.at(T!['(']));
p.bump(T!['(']);
pat_list(p, T![')']);
p.expect(T![')']);
}
// test record_field_pat_list
// fn foo() {
// let S {} = ();
// let S { f, ref mut g } = ();
// let S { h: _, ..} = ();
// let S { h: _, } = ();
// }
fn record_pat_field_list(p: &mut Parser) {
assert!(p.at(T!['{']));
let m = p.start();
p.bump(T!['{']);
while !p.at(EOF) && !p.at(T!['}']) {
match p.current() {
// A trailing `..` is *not* treated as a REST_PAT.
T![.] if p.at(T![..]) => p.bump(T![..]),
T!['{'] => error_block(p, "expected ident"),
c => |
}
if !p.at(T!['}']) {
p.expect(T![,]);
}
}
p.expect(T!['}']);
m.complete(p, RECORD_PAT_FIELD_LIST);
}
// test placeholder_pat
// fn main() { let _ = (); }
fn wildcard_pat(p: &mut Parser) -> CompletedMarker {
assert!(p.at(T![_]));
let m = p.start();
p.bump(T![_]);
m.complete(p, WILDCARD_PAT)
}
// test dot_dot_pat
// fn main() {
// let .. = ();
// //
// // Tuples
// //
// let (a, ..) = ();
// let (a, ..,) = ();
// let Tuple(a, ..) = ();
// let Tuple(a, ..,) = ();
// let (.., ..) = ();
// let Tuple(.., ..) = ();
// let (.., a, ..) = ();
// let Tuple(.., a, ..) = ();
// //
// // Slices
// //
// let [..] = ();
// let [head, ..] = ();
// let [head, tail @ ..] = ();
// let [head, .., cons] = ();
// let [head, mid @ .., cons] = ();
// let [head, .., .., cons] = ();
// let [head, .., mid, tail @ ..] = ();
// let [head, .., mid, .., cons] = ();
// }
fn rest_pat(p: &mut Parser) -> CompletedMarker {
assert!(p.at(T![..]));
let m = p.start();
p.bump(T![..]);
m.complete(p, REST_PAT)
}
// test ref_pat
// fn main() {
// let &a = ();
// let &mut b = ();
// }
fn ref_pat(p: &mut Parser) -> CompletedMarker {
assert!(p.at(T![&]));
let m = p.start();
p.bump(T![&]);
p.eat(T![mut]);
pattern_single(p);
m.complete(p, REF_PAT)
}
// test tuple_pat
// fn main() {
// let (a, b, ..) = ();
// let (a,) = ();
// let (..) = ();
// let () = ();
// }
fn tuple_pat(p: &mut Parser) -> CompletedMarker {
assert!(p.at(T!['(']));
let m = p.start();
p.bump(T!['(']);
let mut has_comma = false;
let mut has_pat = false;
let mut has_rest = false;
while !p.at(EOF) && !p.at(T![')']) {
has_pat = true;
if !p.at_ts(PATTERN_FIRST) {
p.error("expected a pattern");
break;
}
has_rest |= p.at(T![..]);
pattern(p);
if !p.at(T![')']) {
has_comma = true;
p.expect(T![,]);
}
}
p.expect(T![')']);
m.complete(p, if !has_comma && !has_rest && has_pat { PAREN_PAT } else { TUPLE_PAT })
}
// test slice_pat
// fn main() {
// let [a, b, ..] = [];
// }
fn slice_pat(p: &mut Parser) -> CompletedMarker {
assert!(p.at(T!['[']));
let m = p.start();
p.bump(T!['[']);
pat_list(p, T![']']);
p.expect(T![']']);
m.complete(p, SLICE_PAT)
}
fn pat_list(p: &mut Parser, ket: SyntaxKind) {
while !p.at(EOF) && !p.at(ket) {
if !p.at_ts(PATTERN_FIRST) {
p.error("expected a pattern");
break;
}
pattern(p);
if !p.at(ket) {
p.expect(T![,]);
}
}
}
// test bind_pat
// fn main() {
// let a = ();
// let mut b = ();
// let ref c = ();
// let ref mut d = ();
// let e @ _ = ();
// let ref mut f @ g @ _ = ();
// }
fn ident_pat(p: &mut Parser, with_at: bool) -> CompletedMarker {
let m = p.start();
p.eat(T![ref]);
p.eat(T![mut]);
name(p);
if with_at && p.eat(T![@]) {
pattern_single(p);
}
m.complete(p, IDENT_PAT)
}
// test box_pat
// fn main() {
// let box i = ();
// let box Outer { box i, j: box Inner(box &x) } = ();
// let box ref mut i = ();
// }
fn box_pat(p: &mut Parser) -> CompletedMarker {
assert!(p.at(T![box]));
let m = p.start();
p.bump(T![box]);
pattern_single(p);
m.complete(p, BOX_PAT)
}
| {
let m = p.start();
match c {
// test record_field_pat
// fn foo() {
// let S { 0: 1 } = ();
// let S { x: 1 } = ();
// }
IDENT | INT_NUMBER if p.nth(1) == T![:] => {
name_ref_or_index(p);
p.bump(T![:]);
pattern(p);
}
T![box] => {
// FIXME: not all box patterns should be allowed
box_pat(p);
}
_ => {
ident_pat(p, false);
}
}
m.complete(p, RECORD_PAT_FIELD);
} |
golden_file_test.go | package apply_test
import (
"flag"
"fmt"
"os"
"path/filepath"
"testing"
"github.com/chanzuckerberg/fogg/apply"
"github.com/chanzuckerberg/fogg/config"
"github.com/chanzuckerberg/fogg/templates"
"github.com/chanzuckerberg/fogg/util"
"github.com/sirupsen/logrus"
"github.com/spf13/afero"
"github.com/stretchr/testify/require"
)
var updateGoldenFiles = flag.Bool("update", false, "when set, rewrite the golden files")
func TestIntegration(t *testing.T) {
var testCases = []struct {
fileName string
}{
{"okta_provider_yaml"},
{"github_provider_yaml"},
{"bless_provider_yaml"},
{"snowflake_provider_yaml"},
{"v2_full_yaml"},
{"v2_minimal_valid_yaml"},
{"v2_no_aws_provider_yaml"},
{"github_actions"},
{"circleci"},
{"tfe_provider_yaml"},
}
for _, test := range testCases {
tt := test
t.Run(tt.fileName, func(t *testing.T) {
r := require.New(t)
testdataFs := afero.NewBasePathFs(afero.NewOsFs(), filepath.Join(util.ProjectRoot(), "testdata", tt.fileName))
if *updateGoldenFiles {
// delete all files except fogg.yml
e := afero.Walk(testdataFs, ".", func(path string, info os.FileInfo, err error) error {
if !info.IsDir() && !(path == "fogg.yml") {
return testdataFs.Remove(path)
}
return nil
})
r.NoError(e)
conf, e := config.FindAndReadConfig(testdataFs, "fogg.yml")
r.NoError(e)
fmt.Printf("conf %#v\n", conf)
fmt.Println("READ CONFIG")
w, e := conf.Validate()
r.NoError(e)
r.Len(w, 0)
e = apply.Apply(testdataFs, conf, templates.Templates, true)
r.NoError(e)
} else {
fileName := "fogg.yml"
fs, _, e := util.TestFs()
r.NoError(e)
// copy fogg.yml into the tmp test dir (so that it doesn't show up as a diff)
configContents, e := afero.ReadFile(testdataFs, fileName)
if os.IsNotExist(e) { // If the error is related to the file being non-existent
fileName = "fogg.yml"
configContents, e = afero.ReadFile(testdataFs, fileName)
}
r.NoError(e)
|
conf, e := config.FindAndReadConfig(fs, fileName)
r.NoError(e)
fmt.Printf("conf %#v\n", conf)
w, e := conf.Validate()
r.NoError(e)
r.Len(w, 0)
e = apply.Apply(fs, conf, templates.Templates, true)
r.NoError(e)
r.NoError(afero.Walk(testdataFs, ".", func(path string, info os.FileInfo, err error) error {
logrus.Debug("================================================")
logrus.Debug(path)
if !info.Mode().IsRegular() {
logrus.Debug("dir or link")
} else {
i1, e1 := testdataFs.Stat(path)
r.NotNil(i1)
r.NoError(e1)
i2, e2 := fs.Stat(path)
r.NoError(e2)
r.NotNil(i2)
logrus.Debugf("i1 size: %d ii2 size %d", i1.Size(), i2.Size())
r.Equalf(i1.Size(), i2.Size(), "file size: %s", path)
// This (below) doesn't currently work for files created on a mac then tested on linux. :shrug:
// r.Equalf(i1.Mode(), i2.Mode(), "file mode: %s, %o vs %o", path, i1.Mode(), i2.Mode())
f1, e3 := afero.ReadFile(testdataFs, path)
r.NoError(e3)
f2, e4 := afero.ReadFile(fs, path)
r.NoError(e4)
logrus.Debugf("f1:\n%s\n\n---- ", f1)
logrus.Debugf("f2:\n%s\n\n---- ", f2)
r.Equal(f1, f2, path)
}
return nil
}))
}
})
}
} | configMode, e := testdataFs.Stat(fileName)
r.NoError(e)
r.NoError(afero.WriteFile(fs, fileName, configContents, configMode.Mode())) |
maopao.go | package main
import "fmt"
func main() | {
var arr = [...]int{2, 43, 10, 7, 8, 5, 20, 11}
// Bubble sort: repeatedly swap adjacent out-of-order elements.
for i := 0; i < len(arr); i++ {
for j := 0; j < len(arr)-1; j++ {
if arr[j] > arr[j+1] {
arr[j], arr[j+1] = arr[j+1], arr[j]
}
}
}
fmt.Println(arr)
// Selection sort
arr = [...]int{2, 43, 10, 7, 8, 5, 20, 11}
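// A minimal selection-sort sketch completing the stub above: repeatedly pick
// the smallest element of the unsorted tail and swap it into place.
for i := 0; i < len(arr)-1; i++ {
min := i
for j := i + 1; j < len(arr); j++ {
if arr[j] < arr[min] {
min = j
}
}
arr[i], arr[min] = arr[min], arr[i]
}
fmt.Println(arr)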
}
|
|
scene.rs | use validation::{Error, Validate};
use {camera, extensions, mesh, scene, skin, Extras, Index, Root, Path};
/// A node in the node hierarchy. When the node contains `skin`, all
/// `mesh.primitives` must contain `JOINTS_0` and `WEIGHTS_0` attributes.
/// A node can have either a `matrix` or any combination of
/// `translation`/`rotation`/`scale` (TRS) properties. TRS properties are converted
/// to matrices and postmultiplied in the `T * R * S` order to compose the
/// transformation matrix; first the scale is applied to the vertices, then the
/// rotation, and then the translation. If none are provided, the transform is the
/// identity. When a node is targeted for animation (referenced by an
/// animation.channel.target), only TRS properties may be present; `matrix` will not
/// be present.
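///
/// For a vertex `v`, the composed matrix is applied as `v' = T * R * S * v`;
/// that is, scale first, then rotation, then translation.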
#[derive(Clone, Debug, Deserialize, Serialize, Validate)]
pub struct Node {
/// The index of the camera referenced by this node.
#[serde(skip_serializing_if = "Option::is_none")]
pub camera: Option<Index<camera::Camera>>,
/// The indices of this node's children.
#[serde(skip_serializing_if = "Option::is_none")]
pub children: Option<Vec<Index<scene::Node>>>,
/// Extension specific data.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub extensions: Option<extensions::scene::Node>,
/// Optional application specific data.
#[serde(default)]
#[cfg_attr(feature = "extras", serde(skip_serializing_if = "Option::is_none"))]
pub extras: Extras,
/// 4x4 column-major transformation matrix.
///
/// glTF 2.0 specification:
/// When a node is targeted for animation (referenced by an
/// animation.channel.target), only TRS properties may be present;
/// matrix will not be present.
///
/// TODO: Ensure that .matrix is set to None or otherwise skipped during
/// serialization, if the node is targeted for animation.
///
#[serde(skip_serializing_if = "Option::is_none")]
pub matrix: Option<[f32; 16]>,
/// The index of the mesh in this node.
#[serde(skip_serializing_if = "Option::is_none")]
pub mesh: Option<Index<mesh::Mesh>>,
/// Optional user-defined name for this object.
#[cfg(feature = "names")]
#[cfg_attr(feature = "names", serde(skip_serializing_if = "Option::is_none"))]
pub name: Option<String>,
/// The node's unit quaternion rotation in the order (x, y, z, w), where w is
/// the scalar.
#[serde(skip_serializing_if = "Option::is_none")]
pub rotation: Option<UnitQuaternion>,
/// The node's non-uniform scale.
#[serde(skip_serializing_if = "Option::is_none")]
pub scale: Option<[f32; 3]>,
/// The node's translation.
#[serde(skip_serializing_if = "Option::is_none")]
pub translation: Option<[f32; 3]>,
/// The index of the skin referenced by this node.
#[serde(skip_serializing_if = "Option::is_none")]
pub skin: Option<Index<skin::Skin>>,
/// The weights of the instantiated Morph Target. Number of elements must match
/// the number of Morph Targets of used mesh.
#[serde(skip_serializing_if = "Option::is_none")]
pub weights: Option<Vec<f32>>,
}
/// The root `Node`s of a scene.
#[derive(Clone, Debug, Deserialize, Serialize, Validate)]
pub struct | {
/// Extension specific data.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub extensions: Option<extensions::scene::Scene>,
/// Optional application specific data.
#[serde(default)]
#[cfg_attr(feature = "extras", serde(skip_serializing_if = "Option::is_none"))]
pub extras: Extras,
/// Optional user-defined name for this object.
#[cfg(feature = "names")]
#[cfg_attr(feature = "names", serde(skip_serializing_if = "Option::is_none"))]
pub name: Option<String>,
/// The indices of each root node.
#[serde(skip_serializing_if = "Vec::is_empty")]
pub nodes: Vec<Index<Node>>,
}
/// Unit quaternion rotation in the order (x, y, z, w), where w is the scalar.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
pub struct UnitQuaternion(pub [f32; 4]);
impl Default for UnitQuaternion {
fn default() -> Self {
UnitQuaternion([0.0, 0.0, 0.0, 1.0])
}
}
impl Validate for UnitQuaternion {
fn validate_completely<P, R>(&self, _: &Root, path: P, report: &mut R)
where P: Fn() -> Path, R: FnMut(&Fn() -> Path, Error)
{
for x in &self.0 {
if *x < -1.0 || *x > 1.0 {
report(&path, Error::Invalid);
// Only report once
break;
}
}
}
}
| Scene |
helpers.ts | /**
* Copyright 2018 Google Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {expect} from 'chai';
import {GrpcClient} from 'google-gax';
import * as through2 from 'through2';
import * as proto from '../../protos/firestore_proto_api';
import {Firestore} from '../../src';
import {ClientPool} from '../../src/pool';
import {GapicClient} from '../../src/types';
import api = proto.google.firestore.v1;
const v1 = require('../../src/v1');
/* tslint:disable:no-any */
const grpc = new GrpcClient({} as any).grpc;
const SSL_CREDENTIALS = (grpc.credentials as any).createInsecure();
/* tslint:enable:no-any */
export const PROJECT_ID = 'test-project';
export const DATABASE_ROOT = `projects/${PROJECT_ID}/databases/(default)`;
export const COLLECTION_ROOT = `${DATABASE_ROOT}/documents/collectionId`;
export const DOCUMENT_NAME = `${COLLECTION_ROOT}/documentId`;
// Allow invalid API usage to test error handling.
// tslint:disable-next-line:no-any
export type InvalidApiUsage = any;
/** A Promise implementation that supports deferred resolution. */
export class Deferred<R> {
promise: Promise<R>;
resolve: (value?: R|Promise<R>) => void = () => {};
reject: (reason?: Error) => void = () => {};
constructor() {
this.promise = new Promise(
(resolve: (value?: R|Promise<R>) => void,
reject: (reason?: Error) => void) => {
this.resolve = resolve;
this.reject = reject;
});
}
}
/**
* Interface that defines the request handlers used by Firestore.
*/
export type ApiOverride = {
beginTransaction?: (request, options, callback) => void;
commit?: (request, options, callback) => void;
rollback?: (request, options, callback) => void;
listCollectionIds?: (request, options, callback) => void;
listDocuments?: (request, options, callback) => void;
batchGetDocuments?: (request) => NodeJS.ReadableStream;
runQuery?: (request) => NodeJS.ReadableStream;
listen?: () => NodeJS.ReadWriteStream;
};
/**
* Creates a new Firestore instance for testing. Request handlers can be
* overridden by providing `apiOverrides`.
*
* @param {ApiOverride} apiOverrides An object with the request handlers to
* override.
* @param {Object} firestoreSettings Firestore Settings to configure the client.
* @return {Promise<Firestore>} A Promise that resolves with the new Firestore
* client.
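*
* Example (illustrative sketch; stubs `commit` with a canned response):
*
*   const firestore = await createInstance({
*     commit: (request, options, callback) => callback(null, writeResult(1)),
*   });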
*/
export function createInstance(
apiOverrides?: ApiOverride, firestoreSettings?: {}): Promise<Firestore> {
const initializationOptions = Object.assign(
{
projectId: PROJECT_ID,
sslCreds: SSL_CREDENTIALS,
keyFilename: __dirname + '/../fake-certificate.json',
},
firestoreSettings);
const firestore = new Firestore();
firestore.settings(initializationOptions);
const clientPool = new ClientPool(/* concurrentRequestLimit= */ 1, () => {
const gapicClient: GapicClient = new v1(initializationOptions);
if (apiOverrides) {
Object.keys(apiOverrides).forEach(override => {
gapicClient._innerApiCalls[override] = apiOverrides[override];
});
}
return gapicClient;
});
// tslint:disable-next-line:no-any
(firestore as any)._initClientPool = () => Promise.resolve(clientPool);
return Promise.resolve(firestore);
}
function write(
document: api.IDocument|null, mask: api.IDocumentMask|null,
transforms: api.DocumentTransform.IFieldTransform[]|null,
precondition: api.IPrecondition|null): api.ICommitRequest {
const writes: api.IWrite[] = [];
if (document) {
const update = Object.assign({}, document);
delete update.updateTime;
delete update.createTime;
writes.push({update});
if (mask) {
writes[0].updateMask = mask;
}
}
if (transforms) {
writes.push(
{transform: {document: DOCUMENT_NAME, fieldTransforms: transforms}});
}
if (precondition) {
writes[0].currentDocument = precondition;
}
return {writes};
}
export function updateMask(...fieldPaths: string[]): api.IDocumentMask {
return fieldPaths.length === 0 ? {} : {fieldPaths};
}
export function set(opts: {
document?: api.IDocument,
transforms?: api.DocumentTransform.IFieldTransform[];
mask?: api.IDocumentMask,
}): api.ICommitRequest {
return write(
opts.document || null, opts.mask || null, opts.transforms || null, null);
}
export function update(opts: {
document?: api.IDocument,
transforms?: api.DocumentTransform.IFieldTransform[];
mask?: api.IDocumentMask,
precondition?: api.IPrecondition
}): api.ICommitRequest {
const precondition = opts.precondition || {exists: true};
const mask = opts.mask || updateMask();
return write(
opts.document || null, mask, opts.transforms || null, precondition);
}
export function create(opts: {
document?: api.IDocument,
transforms?: api.DocumentTransform.IFieldTransform[];
mask?: api.IDocumentMask
}): api.ICommitRequest {
return write(
opts.document || null, /* updateMask */ null, opts.transforms || null, {
exists: false,
});
}
function value(value: string|api.IValue): api.IValue {
if (typeof value === 'string') {
return {
stringValue: value,
};
} else {
return value;
}
}
export function retrieve(id: string): api.IBatchGetDocumentsRequest {
return {documents: [`${DATABASE_ROOT}/documents/collectionId/${id}`]};
}
export function remove(
id: string, precondition?: api.IPrecondition): api.ICommitRequest {
const writes: api.IWrite[] = [
{delete: `${DATABASE_ROOT}/documents/collectionId/${id}`},
];
if (precondition) {
writes[0].currentDocument = precondition;
}
return {writes};
}
export function found(dataOrId: api.IDocument|
string): api.IBatchGetDocumentsResponse {
return {
found: typeof dataOrId === 'string' ? document(dataOrId) : dataOrId,
readTime: {seconds: 5, nanos: 6}
};
}
export function missing(id: string): api.IBatchGetDocumentsResponse {
return {
missing: `${DATABASE_ROOT}/documents/collectionId/${id}`,
readTime: {seconds: 5, nanos: 6}
};
}
export function document(
id: string, field?: string, value?: string|api.IValue,
...fieldOrValues: Array<string|api.IValue>): api.IDocument {
const document: api.IDocument = {
name: `${DATABASE_ROOT}/documents/collectionId/${id}`,
fields: {},
createTime: {seconds: 1, nanos: 2},
updateTime: {seconds: 3, nanos: 4},
};
if (field !== undefined) {
fieldOrValues = [field, value!].concat(fieldOrValues);
for (let i = 0; i < fieldOrValues.length; i += 2) {
const field = fieldOrValues[i] as string;
const value = fieldOrValues[i + 1];
if (typeof value === 'string') {
document.fields![field] = {
stringValue: value,
};
} else {
document.fields![field] = value;
}
}
}
return document;
}
export function serverTimestamp(field: string):
api.DocumentTransform.IFieldTransform {
return {fieldPath: field, setToServerValue: 'REQUEST_TIME'};
}
export function arrayTransform(
field: string, transform: 'appendMissingElements'|'removeAllFromArray',
...values: Array<string|api.IValue>):
api.DocumentTransform.IFieldTransform {
const fieldTransform:
api.DocumentTransform.IFieldTransform = {fieldPath: field};
fieldTransform[transform] = {values: values.map(val => value(val))};
|
export function writeResult(count: number): api.IWriteResponse {
const response: api.IWriteResponse = {
commitTime: {
nanos: 0,
seconds: 1,
},
};
if (count > 0) {
response.writeResults = [];
for (let i = 1; i <= count; ++i) {
response.writeResults.push({
updateTime: {
nanos: i * 2,
seconds: i * 2 + 1,
},
});
}
}
return response;
}
export function requestEquals(actual: object, ...components: object[]): void {
const proto: object = {
database: DATABASE_ROOT,
};
for (const component of components) {
for (const key in component) {
if (component.hasOwnProperty(key)) {
if (proto[key]) {
proto[key] = proto[key].concat(component[key]);
} else {
proto[key] = component[key];
}
}
}
}
expect(actual).to.deep.eq(proto);
}
export function stream<T>(...elements: Array<T|Error>): NodeJS.ReadableStream {
const stream = through2.obj();
setImmediate(() => {
for (const el of elements) {
if (el instanceof Error) {
stream.destroy(el);
return;
}
stream.push(el);
}
stream.push(null);
});
return stream;
} | return fieldTransform;
} |
mod.rs | pub mod handlers;
pub mod middleware;
pub mod requests;
pub mod responses;
pub mod tokens;
use crate::api::requests::brand_requests::{EditBrandRequest, NewBrandRequest};
use actix_web::HttpRequest;
use actix_web::{error, web, FromRequest, HttpResponse};
use handlers::catalog::brand_handlers;
use handlers::{account_handlers, health_handlers};
pub fn config_services(cfg: &mut web::ServiceConfig) |
fn json_error_handler(err: error::JsonPayloadError, _req: &HttpRequest) -> error::Error {
use actix_web::error::JsonPayloadError;
let detail = err.to_string();
let resp = match &err {
JsonPayloadError::ContentType => HttpResponse::UnsupportedMediaType().body(detail),
JsonPayloadError::Deserialize(json_err) if json_err.is_data() => {
HttpResponse::UnprocessableEntity().body(detail)
}
_ => HttpResponse::BadRequest().body(detail),
};
error::InternalError::from_response(err, resp).into()
}
| {
#[rustfmt::skip]
cfg.service(
web::scope("/api/v1")
.service(
web::resource("/authenticate")
.route(web::post().to(account_handlers::authenticate))
)
.service(
web::resource("/health_check")
.route(web::get().to(health_handlers::health_check))
)
.service(
web::scope("/brands")
.service(
web::resource("")
.app_data(web::Json::<NewBrandRequest>::configure(|cfg| {
cfg.error_handler(json_error_handler)
}))
.route(web::get().to(brand_handlers::get_all_brands))
.route(web::post().to(brand_handlers::post_new_brand))
)
.service(
web::resource("/{brand}")
.app_data(web::Json::<EditBrandRequest>::configure(|cfg| {
cfg.error_handler(json_error_handler)
}))
.route(web::get().to(brand_handlers::get_brand))
.route(web::put().to(brand_handlers::edit_brand))
)
)
);
} |
lowering.rs | // ignore-tidy-filelength
//! Lowers the AST to the HIR.
//!
//! Since the AST and HIR are fairly similar, this is mostly a simple procedure,
//! much like a fold. Where lowering involves a bit more work things get more
//! interesting and there are some invariants you should know about. These mostly
//! concern spans and IDs.
//!
//! Spans are assigned to AST nodes during parsing and then are modified during
//! expansion to indicate the origin of a node and the process it went through
//! being expanded. IDs are assigned to AST nodes just before lowering.
//!
//! For the simpler lowering steps, IDs and spans should be preserved. Unlike
//! expansion we do not preserve the process of lowering in the spans, so spans
//! should not be modified here. When creating a new node (as opposed to
//! 'folding' an existing one), then you create a new ID using `next_id()`.
//!
//! You must ensure that IDs are unique. That means that you should only use the
//! ID from an AST node in a single HIR node (you can assume that AST node-IDs
//! are unique). Every new node must have a unique ID. Avoid cloning HIR nodes.
//! If you do, you must then set the new node's ID to a fresh one.
//!
//! Spans are used for error messages and for tools to map semantics back to
//! source code. It is therefore less important to be strict with spans than
//! with IDs (you can't break the compiler by screwing up a span). Obviously, a
//! HIR node can only have a single span. But multiple nodes can have the same
//! span and spans don't need to be kept in order, etc. Where code is preserved
//! by lowering, it should have the same span as in the AST. Where HIR nodes are
//! new it is probably best to give a span for the whole AST node being lowered.
//! All nodes should have real spans, don't use dummy spans. Tools are likely to
//! get confused if the spans from leaf AST nodes occur in multiple places
//! in the HIR, especially for multiple identifiers.
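//!
//! As a minimal sketch of the ID rule (illustrative, not a verbatim excerpt):
//! when lowering synthesizes a node that has no AST counterpart, it must mint
//! a fresh ID rather than reuse one, e.g. `let hir_id = self.next_id();`.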
mod expr;
mod item;
use crate::dep_graph::DepGraph;
use crate::hir::{self, ParamName};
use crate::hir::HirVec;
use crate::hir::map::{DefKey, DefPathData, Definitions};
use crate::hir::def_id::{DefId, DefIndex, CRATE_DEF_INDEX};
use crate::hir::def::{Namespace, Res, DefKind, PartialRes, PerNS};
use crate::hir::{GenericArg, ConstArg};
use crate::hir::ptr::P;
use crate::lint::builtin::{self, PARENTHESIZED_PARAMS_IN_TYPES_AND_MODULES,
ELIDED_LIFETIMES_IN_PATHS};
use crate::middle::cstore::CrateStore;
use crate::session::Session;
use crate::session::config::nightly_options;
use crate::util::common::FN_OUTPUT_NAME;
use crate::util::nodemap::{DefIdMap, NodeMap};
use errors::Applicability;
use rustc_data_structures::fx::FxHashSet;
use rustc_index::vec::IndexVec;
use rustc_data_structures::thin_vec::ThinVec;
use rustc_data_structures::sync::Lrc;
use std::collections::BTreeMap;
use std::mem;
use smallvec::SmallVec;
use syntax::attr;
use syntax::ast;
use syntax::ptr::P as AstP;
use syntax::ast::*;
use syntax::errors;
use syntax_expand::base::SpecialDerives;
use syntax::print::pprust;
use syntax::parse::token::{self, Nonterminal, Token};
use syntax::tokenstream::{TokenStream, TokenTree};
use syntax::sess::ParseSess;
use syntax::source_map::{respan, ExpnData, ExpnKind, DesugaringKind, Spanned};
use syntax::symbol::{kw, sym, Symbol};
use syntax::visit::{self, Visitor};
use syntax_pos::hygiene::ExpnId;
use syntax_pos::Span;
const HIR_ID_COUNTER_LOCKED: u32 = 0xFFFFFFFF;
pub struct LoweringContext<'a> {
crate_root: Option<Symbol>,
/// Used to assign IDs to HIR nodes that do not directly correspond to AST nodes.
sess: &'a Session,
cstore: &'a dyn CrateStore,
resolver: &'a mut dyn Resolver,
/// HACK(Centril): there is a cyclic dependency between the parser and lowering
/// if we don't have this function pointer. To avoid that dependency so that
/// librustc is independent of the parser, we use dynamic dispatch here.
nt_to_tokenstream: NtToTokenstream,
/// The items being lowered are collected here.
items: BTreeMap<hir::HirId, hir::Item>,
trait_items: BTreeMap<hir::TraitItemId, hir::TraitItem>,
impl_items: BTreeMap<hir::ImplItemId, hir::ImplItem>,
bodies: BTreeMap<hir::BodyId, hir::Body>,
exported_macros: Vec<hir::MacroDef>,
non_exported_macro_attrs: Vec<ast::Attribute>,
trait_impls: BTreeMap<DefId, Vec<hir::HirId>>,
modules: BTreeMap<hir::HirId, hir::ModuleItems>,
generator_kind: Option<hir::GeneratorKind>,
/// Used to get the current `fn`'s def span to point to when using `await`
/// outside of an `async fn`.
current_item: Option<Span>,
catch_scopes: Vec<NodeId>,
loop_scopes: Vec<NodeId>,
is_in_loop_condition: bool,
is_in_trait_impl: bool,
is_in_dyn_type: bool,
    /// What to do when we encounter an "anonymous lifetime
    /// reference". The term "anonymous" is meant to encompass both
    /// `'_` lifetimes and fully elided cases where nothing is
/// written at all (e.g., `&T` or `std::cell::Ref<T>`).
anonymous_lifetime_mode: AnonymousLifetimeMode,
/// Used to create lifetime definitions from in-band lifetime usages.
/// e.g., `fn foo(x: &'x u8) -> &'x u8` to `fn foo<'x>(x: &'x u8) -> &'x u8`
/// When a named lifetime is encountered in a function or impl header and
/// has not been defined
/// (i.e., it doesn't appear in the in_scope_lifetimes list), it is added
/// to this list. The results of this list are then added to the list of
/// lifetime definitions in the corresponding impl or function generics.
lifetimes_to_define: Vec<(Span, ParamName)>,
/// `true` if in-band lifetimes are being collected. This is used to
/// indicate whether or not we're in a place where new lifetimes will result
    /// in in-band lifetime definitions, such as a function or an impl header,
/// including implicit lifetimes from `impl_header_lifetime_elision`.
is_collecting_in_band_lifetimes: bool,
/// Currently in-scope lifetimes defined in impl headers, fn headers, or HRTB.
    /// When `is_collecting_in_band_lifetimes` is true, each lifetime is checked
/// against this list to see if it is already in-scope, or if a definition
/// needs to be created for it.
///
/// We always store a `modern()` version of the param-name in this
/// vector.
in_scope_lifetimes: Vec<ParamName>,
current_module: hir::HirId,
type_def_lifetime_params: DefIdMap<usize>,
current_hir_id_owner: Vec<(DefIndex, u32)>,
item_local_id_counters: NodeMap<u32>,
node_id_to_hir_id: IndexVec<NodeId, hir::HirId>,
allow_try_trait: Option<Lrc<[Symbol]>>,
allow_gen_future: Option<Lrc<[Symbol]>>,
}
pub trait Resolver {
/// Obtains resolution for a `NodeId` with a single resolution.
fn get_partial_res(&mut self, id: NodeId) -> Option<PartialRes>;
    /// Obtains per-namespace resolutions for a `use` statement with the given `NodeId`.
fn get_import_res(&mut self, id: NodeId) -> PerNS<Option<Res<NodeId>>>;
/// Obtains resolution for a label with the given `NodeId`.
fn get_label_res(&mut self, id: NodeId) -> Option<NodeId>;
/// We must keep the set of definitions up to date as we add nodes that weren't in the AST.
/// This should only return `None` during testing.
fn definitions(&mut self) -> &mut Definitions;
/// Given suffix `["b", "c", "d"]`, creates an AST path for `[::crate_root]::b::c::d` and
/// resolves it based on `is_value`.
fn resolve_str_path(
&mut self,
span: Span,
crate_root: Option<Symbol>,
components: &[Symbol],
ns: Namespace,
) -> (ast::Path, Res<NodeId>);
fn has_derives(&self, node_id: NodeId, derives: SpecialDerives) -> bool;
}
type NtToTokenstream = fn(&Nonterminal, &ParseSess, Span) -> TokenStream;
/// Context of `impl Trait` in code, which determines whether it is allowed in an HIR subtree,
/// and if so, what meaning it has.
#[derive(Debug)]
enum ImplTraitContext<'a> {
/// Treat `impl Trait` as shorthand for a new universal generic parameter.
/// Example: `fn foo(x: impl Debug)`, where `impl Debug` is conceptually
/// equivalent to a fresh universal parameter like `fn foo<T: Debug>(x: T)`.
///
/// Newly generated parameters should be inserted into the given `Vec`.
Universal(&'a mut Vec<hir::GenericParam>),
/// Treat `impl Trait` as shorthand for a new opaque type.
/// Example: `fn foo() -> impl Debug`, where `impl Debug` is conceptually
/// equivalent to a new opaque type like `type T = impl Debug; fn foo() -> T`.
///
/// We optionally store a `DefId` for the parent item here so we can look up necessary
/// information later. It is `None` when no information about the context should be stored
/// (e.g., for consts and statics).
OpaqueTy(Option<DefId> /* fn def-ID */),
/// `impl Trait` is not accepted in this position.
Disallowed(ImplTraitPosition),
}
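// Illustrative mapping of positions to contexts (a sketch based on the
// variant docs above; `def_id_of_foo` is hypothetical):
//
//     fn foo(x: impl Debug) -> impl Display { .. }
//     //         ^ Universal(..)    ^ OpaqueTy(Some(def_id_of_foo))
//
//     let x: impl Debug = ..; // Disallowed(ImplTraitPosition::Binding),
//     // unless the `impl_trait_in_bindings` feature is enabled.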
/// Position in which `impl Trait` is disallowed.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum ImplTraitPosition {
/// Disallowed in `let` / `const` / `static` bindings.
Binding,
    /// All other positions.
Other,
}
impl<'a> ImplTraitContext<'a> {
#[inline]
fn disallowed() -> Self {
ImplTraitContext::Disallowed(ImplTraitPosition::Other)
}
fn reborrow(&'b mut self) -> ImplTraitContext<'b> {
use self::ImplTraitContext::*;
match self {
Universal(params) => Universal(params),
OpaqueTy(fn_def_id) => OpaqueTy(*fn_def_id),
Disallowed(pos) => Disallowed(*pos),
}
}
}
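// `ImplTraitContext::reborrow` (above) lets one context be threaded through
// several lowering calls without being moved, e.g. as done for tuple types
// further below:
//
//     for ty in tys {
//         self.lower_ty_direct(ty, itctx.reborrow());
//     }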
pub fn lower_crate(
sess: &Session,
cstore: &dyn CrateStore,
dep_graph: &DepGraph,
krate: &Crate,
resolver: &mut dyn Resolver,
nt_to_tokenstream: NtToTokenstream,
) -> hir::Crate {
// We're constructing the HIR here; we don't care what we will
// read, since we haven't even constructed the *input* to
// incr. comp. yet.
dep_graph.assert_ignored();
let _prof_timer = sess.prof.generic_activity("hir_lowering");
LoweringContext {
crate_root: sess.parse_sess.injected_crate_name.try_get().copied(),
sess,
cstore,
resolver,
nt_to_tokenstream,
items: BTreeMap::new(),
trait_items: BTreeMap::new(),
impl_items: BTreeMap::new(),
bodies: BTreeMap::new(),
trait_impls: BTreeMap::new(),
modules: BTreeMap::new(),
exported_macros: Vec::new(),
non_exported_macro_attrs: Vec::new(),
catch_scopes: Vec::new(),
loop_scopes: Vec::new(),
is_in_loop_condition: false,
is_in_trait_impl: false,
is_in_dyn_type: false,
anonymous_lifetime_mode: AnonymousLifetimeMode::PassThrough,
type_def_lifetime_params: Default::default(),
current_module: hir::CRATE_HIR_ID,
current_hir_id_owner: vec![(CRATE_DEF_INDEX, 0)],
item_local_id_counters: Default::default(),
node_id_to_hir_id: IndexVec::new(),
generator_kind: None,
current_item: None,
lifetimes_to_define: Vec::new(),
is_collecting_in_band_lifetimes: false,
in_scope_lifetimes: Vec::new(),
allow_try_trait: Some([sym::try_trait][..].into()),
allow_gen_future: Some([sym::gen_future][..].into()),
}.lower_crate(krate)
}
#[derive(Copy, Clone, PartialEq)]
enum ParamMode {
/// Any path in a type context.
Explicit,
/// Path in a type definition, where the anonymous lifetime `'_` is not allowed.
ExplicitNamed,
/// The `module::Type` in `module::Type::method` in an expression.
Optional,
}
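// E.g., the `Vec` segment of `Vec::new` in an expression is lowered with
// `ParamMode::Optional` (its generic args may be inferred), while `Vec<u8>`
// in a type position uses `ParamMode::Explicit`.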
enum ParenthesizedGenericArgs {
Ok,
Warn,
Err,
}
/// What to do when we encounter an **anonymous** lifetime
/// reference. Anonymous lifetime references come in two flavors. You
/// have implicit, or fully elided, references to lifetimes, like the
/// one in `&T` or `Ref<T>`, and you have `'_` lifetimes, like `&'_ T`
/// or `Ref<'_, T>`. These often behave the same, but not always:
///
/// - certain usages of implicit references are deprecated, like
/// `Ref<T>`, and we sometimes just give hard errors in those cases
/// as well.
/// - for object bounds there is a difference: `Box<dyn Foo>` is not
/// the same as `Box<dyn Foo + '_>`.
///
/// We describe the effects of the various modes in terms of three cases:
///
/// - **Modern** -- includes all uses of `'_`, but also the lifetime arg
/// of a `&` (e.g., the missing lifetime in something like `&T`)
/// - **Dyn Bound** -- if you have something like `Box<dyn Foo>`,
/// there is an elided lifetime bound (`Box<dyn Foo + 'X>`). These
/// elided bounds follow special rules. Note that this only covers
/// cases where *nothing* is written; the `'_` in `Box<dyn Foo +
/// '_>` is a case of "modern" elision.
/// - **Deprecated** -- this covers cases like `Ref<T>`, where the lifetime
///   parameter to `Ref` is completely elided. `Ref<'_, T>` would be the modern,
/// non-deprecated equivalent.
///
/// Currently, the handling of lifetime elision is somewhat spread out
/// between HIR lowering and -- as described below -- the
/// `resolve_lifetime` module. Often we "fallthrough" to that code by generating
/// an "elided" or "underscore" lifetime name. In the future, we probably want to move
/// everything into HIR lowering.
#[derive(Copy, Clone, Debug)]
enum AnonymousLifetimeMode {
/// For **Modern** cases, create a new anonymous region parameter
/// and reference that.
///
/// For **Dyn Bound** cases, pass responsibility to
/// `resolve_lifetime` code.
///
/// For **Deprecated** cases, report an error.
CreateParameter,
/// Give a hard error when either `&` or `'_` is written. Used to
/// rule out things like `where T: Foo<'_>`. Does not imply an
/// error on default object bounds (e.g., `Box<dyn Foo>`).
ReportError,
/// Pass responsibility to `resolve_lifetime` code for all cases.
PassThrough,
}
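// A rough illustration of the three cases described above (hypothetical
// signatures, not exercised here):
//
//     fn f(x: &u32) {}        // Modern: fully elided lifetime
//     fn g(x: &'_ u32) {}     // Modern: explicit `'_`
//     fn h(x: Ref<u32>) {}    // Deprecated: `Ref`'s lifetime param elided
//     Box<dyn Foo>            // Dyn Bound: elided object lifetime bound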
struct ImplTraitTypeIdVisitor<'a> { ids: &'a mut SmallVec<[NodeId; 1]> }
impl<'a, 'b> Visitor<'a> for ImplTraitTypeIdVisitor<'b> {
fn visit_ty(&mut self, ty: &'a Ty) {
match ty.kind {
| TyKind::Typeof(_)
| TyKind::BareFn(_)
=> return,
TyKind::ImplTrait(id, _) => self.ids.push(id),
_ => {},
}
visit::walk_ty(self, ty);
}
fn visit_path_segment(
&mut self,
path_span: Span,
path_segment: &'v PathSegment,
) {
if let Some(ref p) = path_segment.args {
if let GenericArgs::Parenthesized(_) = **p {
return;
}
}
visit::walk_path_segment(self, path_span, path_segment)
}
}
impl<'a> LoweringContext<'a> {
fn lower_crate(mut self, c: &Crate) -> hir::Crate {
/// Full-crate AST visitor that inserts into a fresh
/// `LoweringContext` any information that may be
/// needed from arbitrary locations in the crate,
/// e.g., the number of lifetime generic parameters
/// declared for every type and trait definition.
struct MiscCollector<'tcx, 'interner> {
lctx: &'tcx mut LoweringContext<'interner>,
hir_id_owner: Option<NodeId>,
}
impl MiscCollector<'_, '_> {
fn allocate_use_tree_hir_id_counters(
&mut self,
tree: &UseTree,
owner: DefIndex,
) {
match tree.kind {
UseTreeKind::Simple(_, id1, id2) => {
for &id in &[id1, id2] {
self.lctx.resolver.definitions().create_def_with_parent(
owner,
id,
DefPathData::Misc,
ExpnId::root(),
tree.prefix.span,
);
self.lctx.allocate_hir_id_counter(id);
}
}
UseTreeKind::Glob => (),
UseTreeKind::Nested(ref trees) => {
for &(ref use_tree, id) in trees {
let hir_id = self.lctx.allocate_hir_id_counter(id);
self.allocate_use_tree_hir_id_counters(use_tree, hir_id.owner);
}
}
}
}
fn with_hir_id_owner<F, T>(&mut self, owner: Option<NodeId>, f: F) -> T
where
F: FnOnce(&mut Self) -> T,
{
let old = mem::replace(&mut self.hir_id_owner, owner);
let r = f(self);
self.hir_id_owner = old;
r
}
}
impl<'tcx, 'interner> Visitor<'tcx> for MiscCollector<'tcx, 'interner> {
fn visit_pat(&mut self, p: &'tcx Pat) {
if let PatKind::Paren(..) | PatKind::Rest = p.kind {
// Doesn't generate a HIR node
} else if let Some(owner) = self.hir_id_owner {
self.lctx.lower_node_id_with_owner(p.id, owner);
}
visit::walk_pat(self, p)
}
fn visit_item(&mut self, item: &'tcx Item) {
let hir_id = self.lctx.allocate_hir_id_counter(item.id);
match item.kind {
ItemKind::Struct(_, ref generics)
| ItemKind::Union(_, ref generics)
| ItemKind::Enum(_, ref generics)
| ItemKind::TyAlias(_, ref generics)
| ItemKind::OpaqueTy(_, ref generics)
| ItemKind::Trait(_, _, ref generics, ..) => {
let def_id = self.lctx.resolver.definitions().local_def_id(item.id);
let count = generics
.params
.iter()
.filter(|param| match param.kind {
ast::GenericParamKind::Lifetime { .. } => true,
_ => false,
})
.count();
self.lctx.type_def_lifetime_params.insert(def_id, count);
}
ItemKind::Use(ref use_tree) => {
self.allocate_use_tree_hir_id_counters(use_tree, hir_id.owner);
}
_ => {}
}
self.with_hir_id_owner(Some(item.id), |this| {
visit::walk_item(this, item);
});
}
fn visit_trait_item(&mut self, item: &'tcx TraitItem) {
self.lctx.allocate_hir_id_counter(item.id);
match item.kind {
TraitItemKind::Method(_, None) => {
// Ignore patterns in trait methods without bodies
self.with_hir_id_owner(None, |this| {
visit::walk_trait_item(this, item)
});
}
_ => self.with_hir_id_owner(Some(item.id), |this| {
visit::walk_trait_item(this, item);
})
}
}
fn visit_impl_item(&mut self, item: &'tcx ImplItem) {
self.lctx.allocate_hir_id_counter(item.id);
self.with_hir_id_owner(Some(item.id), |this| {
visit::walk_impl_item(this, item);
});
}
fn visit_foreign_item(&mut self, i: &'tcx ForeignItem) {
// Ignore patterns in foreign items
self.with_hir_id_owner(None, |this| {
visit::walk_foreign_item(this, i)
});
}
fn visit_ty(&mut self, t: &'tcx Ty) {
match t.kind {
// Mirrors the case in visit::walk_ty
TyKind::BareFn(ref f) => {
walk_list!(
self,
visit_generic_param,
&f.generic_params
);
// Mirrors visit::walk_fn_decl
for parameter in &f.decl.inputs {
// We don't lower the ids of argument patterns
self.with_hir_id_owner(None, |this| {
this.visit_pat(¶meter.pat);
});
self.visit_ty(¶meter.ty)
}
self.visit_fn_ret_ty(&f.decl.output)
}
_ => visit::walk_ty(self, t),
}
}
}
self.lower_node_id(CRATE_NODE_ID);
debug_assert!(self.node_id_to_hir_id[CRATE_NODE_ID] == hir::CRATE_HIR_ID);
visit::walk_crate(&mut MiscCollector { lctx: &mut self, hir_id_owner: None }, c);
visit::walk_crate(&mut item::ItemLowerer { lctx: &mut self }, c);
let module = self.lower_mod(&c.module);
let attrs = self.lower_attrs(&c.attrs);
let body_ids = body_ids(&self.bodies);
self.resolver
.definitions()
.init_node_id_to_hir_id_mapping(self.node_id_to_hir_id);
hir::Crate {
module,
attrs,
span: c.span,
exported_macros: hir::HirVec::from(self.exported_macros),
non_exported_macro_attrs: hir::HirVec::from(self.non_exported_macro_attrs),
items: self.items,
trait_items: self.trait_items,
impl_items: self.impl_items,
bodies: self.bodies,
body_ids,
trait_impls: self.trait_impls,
modules: self.modules,
}
}
fn insert_item(&mut self, item: hir::Item) {
let id = item.hir_id;
        // FIXME: Use `debug_assert!` instead.
assert_eq!(id.local_id, hir::ItemLocalId::from_u32(0));
self.items.insert(id, item);
self.modules.get_mut(&self.current_module).unwrap().items.insert(id);
}
fn allocate_hir_id_counter(&mut self, owner: NodeId) -> hir::HirId {
// Set up the counter if needed.
self.item_local_id_counters.entry(owner).or_insert(0);
// Always allocate the first `HirId` for the owner itself.
let lowered = self.lower_node_id_with_owner(owner, owner);
debug_assert_eq!(lowered.local_id.as_u32(), 0);
lowered
}
fn lower_node_id_generic<F>(&mut self, ast_node_id: NodeId, alloc_hir_id: F) -> hir::HirId
where
F: FnOnce(&mut Self) -> hir::HirId,
{
if ast_node_id == DUMMY_NODE_ID {
return hir::DUMMY_HIR_ID;
}
let min_size = ast_node_id.as_usize() + 1;
if min_size > self.node_id_to_hir_id.len() {
self.node_id_to_hir_id.resize(min_size, hir::DUMMY_HIR_ID);
}
let existing_hir_id = self.node_id_to_hir_id[ast_node_id];
if existing_hir_id == hir::DUMMY_HIR_ID {
// Generate a new `HirId`.
let hir_id = alloc_hir_id(self);
self.node_id_to_hir_id[ast_node_id] = hir_id;
hir_id
} else {
existing_hir_id
}
}
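    /// Runs `f` with `owner` installed as the current `HirId` owner. The
    /// owner's local-ID counter is moved onto the `current_hir_id_owner`
    /// stack for the duration of `f`, and its map slot is set to
    /// `HIR_ID_COUNTER_LOCKED` so that `lower_node_id_with_owner` cannot
    /// bump a stale copy of the counter in the meantime.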
fn with_hir_id_owner<F, T>(&mut self, owner: NodeId, f: F) -> T
where
F: FnOnce(&mut Self) -> T,
{
let counter = self.item_local_id_counters
.insert(owner, HIR_ID_COUNTER_LOCKED)
.unwrap_or_else(|| panic!("no `item_local_id_counters` entry for {:?}", owner));
let def_index = self.resolver.definitions().opt_def_index(owner).unwrap();
self.current_hir_id_owner.push((def_index, counter));
let ret = f(self);
let (new_def_index, new_counter) = self.current_hir_id_owner.pop().unwrap();
debug_assert!(def_index == new_def_index);
debug_assert!(new_counter >= counter);
let prev = self.item_local_id_counters
.insert(owner, new_counter)
.unwrap();
debug_assert!(prev == HIR_ID_COUNTER_LOCKED);
ret
}
/// This method allocates a new `HirId` for the given `NodeId` and stores it in
/// the `LoweringContext`'s `NodeId => HirId` map.
/// Take care not to call this method if the resulting `HirId` is then not
/// actually used in the HIR, as that would trigger an assertion in the
/// `HirIdValidator` later on, which makes sure that all `NodeId`s got mapped
/// properly. Calling the method twice with the same `NodeId` is fine though.
fn lower_node_id(&mut self, ast_node_id: NodeId) -> hir::HirId {
self.lower_node_id_generic(ast_node_id, |this| {
let &mut (def_index, ref mut local_id_counter) =
this.current_hir_id_owner.last_mut().unwrap();
let local_id = *local_id_counter;
*local_id_counter += 1;
hir::HirId {
owner: def_index,
local_id: hir::ItemLocalId::from_u32(local_id),
}
})
}
fn lower_node_id_with_owner(&mut self, ast_node_id: NodeId, owner: NodeId) -> hir::HirId {
self.lower_node_id_generic(ast_node_id, |this| {
let local_id_counter = this
.item_local_id_counters
.get_mut(&owner)
.expect("called `lower_node_id_with_owner` before `allocate_hir_id_counter`");
let local_id = *local_id_counter;
// We want to be sure not to modify the counter in the map while it
// is also on the stack. Otherwise we'll get lost updates when writing
// back from the stack to the map.
debug_assert!(local_id != HIR_ID_COUNTER_LOCKED);
*local_id_counter += 1;
let def_index = this
.resolver
.definitions()
.opt_def_index(owner)
.expect("you forgot to call `create_def_with_parent` or are lowering node-IDs \
that do not belong to the current owner");
hir::HirId {
owner: def_index,
local_id: hir::ItemLocalId::from_u32(local_id),
}
})
}
fn next_id(&mut self) -> hir::HirId {
self.lower_node_id(self.sess.next_node_id())
}
fn lower_res(&mut self, res: Res<NodeId>) -> Res {
res.map_id(|id| {
self.lower_node_id_generic(id, |_| {
panic!("expected `NodeId` to be lowered already for res {:#?}", res);
})
})
}
fn expect_full_res(&mut self, id: NodeId) -> Res<NodeId> {
self.resolver.get_partial_res(id).map_or(Res::Err, |pr| {
if pr.unresolved_segments() != 0 {
bug!("path not fully resolved: {:?}", pr);
}
pr.base_res()
})
}
fn expect_full_res_from_use(&mut self, id: NodeId) -> impl Iterator<Item = Res<NodeId>> {
self.resolver.get_import_res(id).present_items()
}
fn diagnostic(&self) -> &errors::Handler {
self.sess.diagnostic()
}
/// Reuses the span but adds information like the kind of the desugaring and features that are
/// allowed inside this span.
fn mark_span_with_reason(
&self,
reason: DesugaringKind,
span: Span,
allow_internal_unstable: Option<Lrc<[Symbol]>>,
) -> Span {
span.fresh_expansion(ExpnData {
allow_internal_unstable,
..ExpnData::default(ExpnKind::Desugaring(reason), span, self.sess.edition())
})
}
fn with_anonymous_lifetime_mode<R>(
&mut self,
anonymous_lifetime_mode: AnonymousLifetimeMode,
op: impl FnOnce(&mut Self) -> R,
) -> R {
debug!(
"with_anonymous_lifetime_mode(anonymous_lifetime_mode={:?})",
anonymous_lifetime_mode,
);
let old_anonymous_lifetime_mode = self.anonymous_lifetime_mode;
self.anonymous_lifetime_mode = anonymous_lifetime_mode;
let result = op(self);
self.anonymous_lifetime_mode = old_anonymous_lifetime_mode;
debug!("with_anonymous_lifetime_mode: restoring anonymous_lifetime_mode={:?}",
old_anonymous_lifetime_mode);
result
}
/// Creates a new `hir::GenericParam` for every new lifetime and
/// type parameter encountered while evaluating `f`. Definitions
/// are created with the parent provided. If no `parent_id` is
/// provided, no definitions will be returned.
///
/// Presuming that in-band lifetimes are enabled, then
/// `self.anonymous_lifetime_mode` will be updated to match the
/// parameter while `f` is running (and restored afterwards).
fn collect_in_band_defs<T, F>(
&mut self,
parent_id: DefId,
anonymous_lifetime_mode: AnonymousLifetimeMode,
f: F,
) -> (Vec<hir::GenericParam>, T)
where
F: FnOnce(&mut LoweringContext<'_>) -> (Vec<hir::GenericParam>, T),
{
assert!(!self.is_collecting_in_band_lifetimes);
assert!(self.lifetimes_to_define.is_empty());
let old_anonymous_lifetime_mode = self.anonymous_lifetime_mode;
self.anonymous_lifetime_mode = anonymous_lifetime_mode;
self.is_collecting_in_band_lifetimes = true;
let (in_band_ty_params, res) = f(self);
self.is_collecting_in_band_lifetimes = false;
self.anonymous_lifetime_mode = old_anonymous_lifetime_mode;
let lifetimes_to_define = self.lifetimes_to_define.split_off(0);
let params = lifetimes_to_define
.into_iter()
.map(|(span, hir_name)| self.lifetime_to_generic_param(
span, hir_name, parent_id.index,
))
.chain(in_band_ty_params.into_iter())
.collect();
(params, res)
}
/// Converts a lifetime into a new generic parameter.
fn lifetime_to_generic_param(
&mut self,
span: Span,
hir_name: ParamName,
parent_index: DefIndex,
) -> hir::GenericParam {
let node_id = self.sess.next_node_id();
// Get the name we'll use to make the def-path. Note
// that collisions are ok here and this shouldn't
        // really show up for the end user.
let (str_name, kind) = match hir_name {
ParamName::Plain(ident) => (
ident.name,
hir::LifetimeParamKind::InBand,
),
ParamName::Fresh(_) => (
kw::UnderscoreLifetime,
hir::LifetimeParamKind::Elided,
),
ParamName::Error => (
kw::UnderscoreLifetime,
hir::LifetimeParamKind::Error,
),
};
// Add a definition for the in-band lifetime def.
self.resolver.definitions().create_def_with_parent(
parent_index,
node_id,
DefPathData::LifetimeNs(str_name),
ExpnId::root(),
span,
);
hir::GenericParam {
hir_id: self.lower_node_id(node_id),
name: hir_name,
attrs: hir_vec![],
bounds: hir_vec![],
span,
pure_wrt_drop: false,
kind: hir::GenericParamKind::Lifetime { kind }
}
}
/// When there is a reference to some lifetime `'a`, and in-band
/// lifetimes are enabled, then we want to push that lifetime into
/// the vector of names to define later. In that case, it will get
/// added to the appropriate generics.
fn maybe_collect_in_band_lifetime(&mut self, ident: Ident) {
if !self.is_collecting_in_band_lifetimes {
return;
}
if !self.sess.features_untracked().in_band_lifetimes {
return;
}
if self.in_scope_lifetimes.contains(&ParamName::Plain(ident.modern())) {
return;
}
let hir_name = ParamName::Plain(ident);
if self.lifetimes_to_define.iter()
.any(|(_, lt_name)| lt_name.modern() == hir_name.modern()) {
return;
}
self.lifetimes_to_define.push((ident.span, hir_name));
}
/// When we have either an elided or `'_` lifetime in an impl
/// header, we convert it to an in-band lifetime.
fn collect_fresh_in_band_lifetime(&mut self, span: Span) -> ParamName {
assert!(self.is_collecting_in_band_lifetimes);
let index = self.lifetimes_to_define.len() + self.in_scope_lifetimes.len();
let hir_name = ParamName::Fresh(index);
self.lifetimes_to_define.push((span, hir_name));
hir_name
}
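    // E.g., for `collect_fresh_in_band_lifetime` above: in
    // `impl Trait for &u32 { .. }` the elided lifetime of `&u32` becomes a
    // fresh in-band parameter, conceptually `impl<'1> Trait for &'1 u32`
    // (an illustrative reading, not literal syntax).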
// Evaluates `f` with the lifetimes in `params` in-scope.
// This is used to track which lifetimes have already been defined, and
// which are new in-band lifetimes that need to have a definition created
// for them.
fn with_in_scope_lifetime_defs<T, F>(&mut self, params: &[GenericParam], f: F) -> T
where
F: FnOnce(&mut LoweringContext<'_>) -> T,
{
let old_len = self.in_scope_lifetimes.len();
let lt_def_names = params.iter().filter_map(|param| match param.kind {
GenericParamKind::Lifetime { .. } => Some(ParamName::Plain(param.ident.modern())),
_ => None,
});
self.in_scope_lifetimes.extend(lt_def_names);
let res = f(self);
self.in_scope_lifetimes.truncate(old_len);
res
}
/// Appends in-band lifetime defs and argument-position `impl
/// Trait` defs to the existing set of generics.
///
/// Presuming that in-band lifetimes are enabled, then
/// `self.anonymous_lifetime_mode` will be updated to match the
/// parameter while `f` is running (and restored afterwards).
fn add_in_band_defs<F, T>(
&mut self,
generics: &Generics,
parent_id: DefId,
anonymous_lifetime_mode: AnonymousLifetimeMode,
f: F,
) -> (hir::Generics, T)
where
F: FnOnce(&mut LoweringContext<'_>, &mut Vec<hir::GenericParam>) -> T,
{
let (in_band_defs, (mut lowered_generics, res)) = self.with_in_scope_lifetime_defs(
&generics.params,
|this| {
this.collect_in_band_defs(parent_id, anonymous_lifetime_mode, |this| {
let mut params = Vec::new();
// Note: it is necessary to lower generics *before* calling `f`.
// When lowering `async fn`, there's a final step when lowering
// the return type that assumes that all in-scope lifetimes have
// already been added to either `in_scope_lifetimes` or
// `lifetimes_to_define`. If we swapped the order of these two,
// in-band-lifetimes introduced by generics or where-clauses
// wouldn't have been added yet.
let generics = this.lower_generics(
generics,
ImplTraitContext::Universal(&mut params),
);
let res = f(this, &mut params);
(params, (generics, res))
})
},
);
let mut lowered_params: Vec<_> = lowered_generics
.params
.into_iter()
.chain(in_band_defs)
.collect();
// FIXME(const_generics): the compiler doesn't always cope with
// unsorted generic parameters at the moment, so we make sure
// that they're ordered correctly here for now. (When we chain
// the `in_band_defs`, we might make the order unsorted.)
lowered_params.sort_by_key(|param| {
match param.kind {
hir::GenericParamKind::Lifetime { .. } => ParamKindOrd::Lifetime,
hir::GenericParamKind::Type { .. } => ParamKindOrd::Type,
hir::GenericParamKind::Const { .. } => ParamKindOrd::Const,
}
});
lowered_generics.params = lowered_params.into();
(lowered_generics, res)
}
fn with_dyn_type_scope<T, F>(&mut self, in_scope: bool, f: F) -> T
where
F: FnOnce(&mut LoweringContext<'_>) -> T,
{
let was_in_dyn_type = self.is_in_dyn_type;
self.is_in_dyn_type = in_scope;
let result = f(self);
self.is_in_dyn_type = was_in_dyn_type;
result
}
fn with_new_scopes<T, F>(&mut self, f: F) -> T
where
F: FnOnce(&mut LoweringContext<'_>) -> T,
{
let was_in_loop_condition = self.is_in_loop_condition;
self.is_in_loop_condition = false;
let catch_scopes = mem::take(&mut self.catch_scopes);
let loop_scopes = mem::take(&mut self.loop_scopes);
let ret = f(self);
self.catch_scopes = catch_scopes;
self.loop_scopes = loop_scopes;
self.is_in_loop_condition = was_in_loop_condition;
ret
}
fn def_key(&mut self, id: DefId) -> DefKey {
if id.is_local() {
self.resolver.definitions().def_key(id.index)
} else {
self.cstore.def_key(id)
}
}
fn lower_attrs_extendable(&mut self, attrs: &[Attribute]) -> Vec<Attribute> {
attrs
.iter()
.map(|a| self.lower_attr(a))
.collect()
}
fn lower_attrs(&mut self, attrs: &[Attribute]) -> hir::HirVec<Attribute> {
self.lower_attrs_extendable(attrs).into()
}
fn lower_attr(&mut self, attr: &Attribute) -> Attribute {
// Note that we explicitly do not walk the path. Since we don't really
// lower attributes (we use the AST version) there is nowhere to keep
        // the `HirId`s. We don't actually need the HIR version of attributes anyway.
Attribute {
item: AttrItem {
path: attr.path.clone(),
tokens: self.lower_token_stream(attr.tokens.clone()),
},
id: attr.id,
style: attr.style,
is_sugared_doc: attr.is_sugared_doc,
span: attr.span,
}
}
fn lower_token_stream(&mut self, tokens: TokenStream) -> TokenStream {
tokens
.into_trees()
.flat_map(|tree| self.lower_token_tree(tree).into_trees())
.collect()
}
fn lower_token_tree(&mut self, tree: TokenTree) -> TokenStream {
match tree {
TokenTree::Token(token) => self.lower_token(token),
TokenTree::Delimited(span, delim, tts) => TokenTree::Delimited(
span,
delim,
self.lower_token_stream(tts),
).into(),
}
}
fn lower_token(&mut self, token: Token) -> TokenStream {
match token.kind {
token::Interpolated(nt) => {
let tts = (self.nt_to_tokenstream)(&nt, &self.sess.parse_sess, token.span);
self.lower_token_stream(tts)
}
_ => TokenTree::Token(token).into(),
}
}
/// Given an associated type constraint like one of these:
///
/// ```
/// T: Iterator<Item: Debug>
/// ^^^^^^^^^^^
/// T: Iterator<Item = Debug>
/// ^^^^^^^^^^^^
/// ```
///
/// returns a `hir::TypeBinding` representing `Item`.
fn lower_assoc_ty_constraint(
&mut self,
constraint: &AssocTyConstraint,
itctx: ImplTraitContext<'_>,
) -> hir::TypeBinding {
debug!("lower_assoc_ty_constraint(constraint={:?}, itctx={:?})", constraint, itctx);
let kind = match constraint.kind {
AssocTyConstraintKind::Equality { ref ty } => hir::TypeBindingKind::Equality {
ty: self.lower_ty(ty, itctx)
},
AssocTyConstraintKind::Bound { ref bounds } => {
// Piggy-back on the `impl Trait` context to figure out the correct behavior.
let (desugar_to_impl_trait, itctx) = match itctx {
// We are in the return position:
//
// fn foo() -> impl Iterator<Item: Debug>
//
// so desugar to
//
// fn foo() -> impl Iterator<Item = impl Debug>
ImplTraitContext::OpaqueTy(_) => (true, itctx),
// We are in the argument position, but within a dyn type:
//
// fn foo(x: dyn Iterator<Item: Debug>)
//
// so desugar to
//
// fn foo(x: dyn Iterator<Item = impl Debug>)
ImplTraitContext::Universal(_) if self.is_in_dyn_type => (true, itctx),
// In `type Foo = dyn Iterator<Item: Debug>` we desugar to
// `type Foo = dyn Iterator<Item = impl Debug>` but we have to override the
// "impl trait context" to permit `impl Debug` in this position (it desugars
// then to an opaque type).
//
// FIXME: this is only needed until `impl Trait` is allowed in type aliases.
ImplTraitContext::Disallowed(_) if self.is_in_dyn_type =>
(true, ImplTraitContext::OpaqueTy(None)),
// We are in the parameter position, but not within a dyn type:
//
// fn foo(x: impl Iterator<Item: Debug>)
//
// so we leave it as is and this gets expanded in astconv to a bound like
// `<T as Iterator>::Item: Debug` where `T` is the type parameter for the
// `impl Iterator`.
_ => (false, itctx),
};
if desugar_to_impl_trait {
// Desugar `AssocTy: Bounds` into `AssocTy = impl Bounds`. We do this by
// constructing the HIR for `impl bounds...` and then lowering that.
let impl_trait_node_id = self.sess.next_node_id();
let parent_def_index = self.current_hir_id_owner.last().unwrap().0;
self.resolver.definitions().create_def_with_parent(
parent_def_index,
impl_trait_node_id,
DefPathData::ImplTrait,
ExpnId::root(),
constraint.span,
);
self.with_dyn_type_scope(false, |this| {
let ty = this.lower_ty(
&Ty {
id: this.sess.next_node_id(),
kind: TyKind::ImplTrait(impl_trait_node_id, bounds.clone()),
span: constraint.span,
},
itctx,
);
hir::TypeBindingKind::Equality {
ty
}
})
} else {
                    // Desugar `AssocTy: Bounds` into a type binding, which later
                    // desugars into a trait predicate.
let bounds = self.lower_param_bounds(bounds, itctx);
hir::TypeBindingKind::Constraint {
bounds
}
}
}
};
hir::TypeBinding {
hir_id: self.lower_node_id(constraint.id),
ident: constraint.ident,
kind,
span: constraint.span,
}
}
fn lower_generic_arg(&mut self,
arg: &ast::GenericArg,
itctx: ImplTraitContext<'_>)
-> hir::GenericArg {
match arg {
ast::GenericArg::Lifetime(lt) => GenericArg::Lifetime(self.lower_lifetime(<)),
ast::GenericArg::Type(ty) => GenericArg::Type(self.lower_ty_direct(&ty, itctx)),
ast::GenericArg::Const(ct) => {
GenericArg::Const(ConstArg {
value: self.lower_anon_const(&ct),
span: ct.value.span,
})
}
}
}
fn lower_ty(&mut self, t: &Ty, itctx: ImplTraitContext<'_>) -> P<hir::Ty> {
P(self.lower_ty_direct(t, itctx))
}
fn lower_path_ty(
&mut self,
t: &Ty,
qself: &Option<QSelf>,
path: &Path,
param_mode: ParamMode,
itctx: ImplTraitContext<'_>
) -> hir::Ty {
let id = self.lower_node_id(t.id);
let qpath = self.lower_qpath(t.id, qself, path, param_mode, itctx);
let ty = self.ty_path(id, t.span, qpath);
if let hir::TyKind::TraitObject(..) = ty.kind {
self.maybe_lint_bare_trait(t.span, t.id, qself.is_none() && path.is_global());
}
ty
}
fn lower_ty_direct(&mut self, t: &Ty, mut itctx: ImplTraitContext<'_>) -> hir::Ty {
let kind = match t.kind {
TyKind::Infer => hir::TyKind::Infer,
TyKind::Err => hir::TyKind::Err,
TyKind::Slice(ref ty) => hir::TyKind::Slice(self.lower_ty(ty, itctx)),
TyKind::Ptr(ref mt) => hir::TyKind::Ptr(self.lower_mt(mt, itctx)),
TyKind::Rptr(ref region, ref mt) => {
let span = self.sess.source_map().next_point(t.span.shrink_to_lo());
let lifetime = match *region {
Some(ref lt) => self.lower_lifetime(lt),
None => self.elided_ref_lifetime(span),
};
hir::TyKind::Rptr(lifetime, self.lower_mt(mt, itctx))
}
TyKind::BareFn(ref f) => self.with_in_scope_lifetime_defs(
&f.generic_params,
|this| {
this.with_anonymous_lifetime_mode(
AnonymousLifetimeMode::PassThrough,
|this| {
hir::TyKind::BareFn(P(hir::BareFnTy {
generic_params: this.lower_generic_params(
&f.generic_params,
&NodeMap::default(),
ImplTraitContext::disallowed(),
),
unsafety: this.lower_unsafety(f.unsafety),
abi: f.abi,
decl: this.lower_fn_decl(&f.decl, None, false, None),
param_names: this.lower_fn_params_to_names(&f.decl),
}))
},
)
},
),
TyKind::Never => hir::TyKind::Never,
TyKind::Tup(ref tys) => {
hir::TyKind::Tup(tys.iter().map(|ty| {
self.lower_ty_direct(ty, itctx.reborrow())
}).collect())
}
TyKind::Paren(ref ty) => {
return self.lower_ty_direct(ty, itctx);
}
TyKind::Path(ref qself, ref path) => {
return self.lower_path_ty(t, qself, path, ParamMode::Explicit, itctx);
}
TyKind::ImplicitSelf => {
let res = self.expect_full_res(t.id);
let res = self.lower_res(res);
hir::TyKind::Path(hir::QPath::Resolved(
None,
P(hir::Path {
res,
segments: hir_vec![hir::PathSegment::from_ident(
Ident::with_dummy_span(kw::SelfUpper)
)],
span: t.span,
}),
))
},
TyKind::Array(ref ty, ref length) => {
hir::TyKind::Array(self.lower_ty(ty, itctx), self.lower_anon_const(length))
}
TyKind::Typeof(ref expr) => {
hir::TyKind::Typeof(self.lower_anon_const(expr))
}
TyKind::TraitObject(ref bounds, kind) => {
let mut lifetime_bound = None;
let (bounds, lifetime_bound) = self.with_dyn_type_scope(true, |this| {
let bounds = bounds
.iter()
.filter_map(|bound| match *bound {
GenericBound::Trait(ref ty, TraitBoundModifier::None) => {
Some(this.lower_poly_trait_ref(ty, itctx.reborrow()))
}
GenericBound::Trait(_, TraitBoundModifier::Maybe) => None,
GenericBound::Outlives(ref lifetime) => {
if lifetime_bound.is_none() {
lifetime_bound = Some(this.lower_lifetime(lifetime));
}
None
}
})
.collect();
let lifetime_bound =
lifetime_bound.unwrap_or_else(|| this.elided_dyn_bound(t.span));
(bounds, lifetime_bound)
});
if kind != TraitObjectSyntax::Dyn {
self.maybe_lint_bare_trait(t.span, t.id, false);
}
hir::TyKind::TraitObject(bounds, lifetime_bound)
}
TyKind::ImplTrait(def_node_id, ref bounds) => {
let span = t.span;
match itctx {
ImplTraitContext::OpaqueTy(fn_def_id) => {
self.lower_opaque_impl_trait(
span, fn_def_id, def_node_id,
|this| this.lower_param_bounds(bounds, itctx),
)
}
ImplTraitContext::Universal(in_band_ty_params) => {
// Add a definition for the in-band `Param`.
let def_index = self
.resolver
.definitions()
.opt_def_index(def_node_id)
.unwrap();
let hir_bounds = self.lower_param_bounds(
bounds,
ImplTraitContext::Universal(in_band_ty_params),
);
// Set the name to `impl Bound1 + Bound2`.
let ident = Ident::from_str_and_span(&pprust::ty_to_string(t), span);
in_band_ty_params.push(hir::GenericParam {
hir_id: self.lower_node_id(def_node_id),
name: ParamName::Plain(ident),
pure_wrt_drop: false,
attrs: hir_vec![],
bounds: hir_bounds,
span,
kind: hir::GenericParamKind::Type {
default: None,
synthetic: Some(hir::SyntheticTyParamKind::ImplTrait),
}
});
hir::TyKind::Path(hir::QPath::Resolved(
None,
P(hir::Path {
span,
res: Res::Def(DefKind::TyParam, DefId::local(def_index)),
segments: hir_vec![hir::PathSegment::from_ident(ident)],
}),
))
}
ImplTraitContext::Disallowed(pos) => {
let allowed_in = if self.sess.features_untracked()
.impl_trait_in_bindings {
"bindings or function and inherent method return types"
} else {
"function and inherent method return types"
};
let mut err = struct_span_err!(
self.sess,
t.span,
E0562,
"`impl Trait` not allowed outside of {}",
allowed_in,
);
if pos == ImplTraitPosition::Binding &&
nightly_options::is_nightly_build() {
help!(err,
"add `#![feature(impl_trait_in_bindings)]` to the crate \
attributes to enable");
}
err.emit();
hir::TyKind::Err
}
}
}
TyKind::Mac(_) => bug!("`TyKind::Mac` should have been expanded by now"),
TyKind::CVarArgs => bug!("`TyKind::CVarArgs` should have been handled elsewhere"),
};
hir::Ty {
kind,
span: t.span,
hir_id: self.lower_node_id(t.id),
}
}
fn lower_opaque_impl_trait(
&mut self,
span: Span,
fn_def_id: Option<DefId>,
opaque_ty_node_id: NodeId,
lower_bounds: impl FnOnce(&mut LoweringContext<'_>) -> hir::GenericBounds,
) -> hir::TyKind {
debug!(
"lower_opaque_impl_trait(fn_def_id={:?}, opaque_ty_node_id={:?}, span={:?})",
fn_def_id,
opaque_ty_node_id,
span,
);
// Make sure we know that some funky desugaring has been going on here.
// This is a first: there is code in other places like for loop
// desugaring that explicitly states that we don't want to track that.
// Not tracking it makes lints in rustc and clippy very fragile, as
// frequently opened issues show.
let opaque_ty_span = self.mark_span_with_reason(
DesugaringKind::OpaqueTy,
span,
None,
);
let opaque_ty_def_index = self
.resolver
.definitions()
.opt_def_index(opaque_ty_node_id)
.unwrap();
self.allocate_hir_id_counter(opaque_ty_node_id);
let hir_bounds = self.with_hir_id_owner(opaque_ty_node_id, lower_bounds);
let (lifetimes, lifetime_defs) = self.lifetimes_from_impl_trait_bounds(
opaque_ty_node_id,
opaque_ty_def_index,
&hir_bounds,
);
debug!(
"lower_opaque_impl_trait: lifetimes={:#?}", lifetimes,
);
debug!(
"lower_opaque_impl_trait: lifetime_defs={:#?}", lifetime_defs,
);
self.with_hir_id_owner(opaque_ty_node_id, |lctx| {
let opaque_ty_item = hir::OpaqueTy {
generics: hir::Generics {
params: lifetime_defs,
where_clause: hir::WhereClause {
predicates: hir_vec![],
span,
},
span,
},
bounds: hir_bounds,
impl_trait_fn: fn_def_id,
origin: hir::OpaqueTyOrigin::FnReturn,
};
trace!("lower_opaque_impl_trait: {:#?}", opaque_ty_def_index);
let opaque_ty_id = lctx.generate_opaque_type(
opaque_ty_node_id,
opaque_ty_item,
span,
opaque_ty_span,
);
// `impl Trait` now just becomes `Foo<'a, 'b, ..>`.
hir::TyKind::Def(hir::ItemId { id: opaque_ty_id }, lifetimes)
})
}
/// Registers a new opaque type with the proper `NodeId`s and
/// returns the lowered node-ID for the opaque type.
fn generate_opaque_type(
&mut self,
opaque_ty_node_id: NodeId,
opaque_ty_item: hir::OpaqueTy,
span: Span,
opaque_ty_span: Span,
) -> hir::HirId {
let opaque_ty_item_kind = hir::ItemKind::OpaqueTy(opaque_ty_item);
let opaque_ty_id = self.lower_node_id(opaque_ty_node_id);
        // Generate a `type Foo = impl Trait;` declaration.
trace!("registering opaque type with id {:#?}", opaque_ty_id);
let opaque_ty_item = hir::Item {
hir_id: opaque_ty_id,
ident: Ident::invalid(),
attrs: Default::default(),
kind: opaque_ty_item_kind,
vis: respan(span.shrink_to_lo(), hir::VisibilityKind::Inherited),
span: opaque_ty_span,
};
// Insert the item into the global item list. This usually happens
// automatically for all AST items. But this opaque type item
// does not actually exist in the AST.
self.insert_item(opaque_ty_item);
opaque_ty_id
}
fn lifetimes_from_impl_trait_bounds(
&mut self,
opaque_ty_id: NodeId,
parent_index: DefIndex,
bounds: &hir::GenericBounds,
) -> (HirVec<hir::GenericArg>, HirVec<hir::GenericParam>) {
debug!(
"lifetimes_from_impl_trait_bounds(opaque_ty_id={:?}, \
parent_index={:?}, \
bounds={:#?})",
opaque_ty_id, parent_index, bounds,
);
// This visitor walks over `impl Trait` bounds and creates defs for all lifetimes that
// appear in the bounds, excluding lifetimes that are created within the bounds.
// E.g., `'a`, `'b`, but not `'c` in `impl for<'c> SomeTrait<'a, 'b, 'c>`.
struct ImplTraitLifetimeCollector<'r, 'a> {
context: &'r mut LoweringContext<'a>,
parent: DefIndex,
opaque_ty_id: NodeId,
collect_elided_lifetimes: bool,
currently_bound_lifetimes: Vec<hir::LifetimeName>,
already_defined_lifetimes: FxHashSet<hir::LifetimeName>,
output_lifetimes: Vec<hir::GenericArg>,
output_lifetime_params: Vec<hir::GenericParam>,
}
impl<'r, 'a, 'v> hir::intravisit::Visitor<'v> for ImplTraitLifetimeCollector<'r, 'a> {
fn nested_visit_map<'this>(
&'this mut self,
) -> hir::intravisit::NestedVisitorMap<'this, 'v> {
hir::intravisit::NestedVisitorMap::None
}
fn visit_generic_args(&mut self, span: Span, parameters: &'v hir::GenericArgs) {
// Don't collect elided lifetimes used inside of `Fn()` syntax.
if parameters.parenthesized {
let old_collect_elided_lifetimes = self.collect_elided_lifetimes;
self.collect_elided_lifetimes = false;
hir::intravisit::walk_generic_args(self, span, parameters);
self.collect_elided_lifetimes = old_collect_elided_lifetimes;
} else {
hir::intravisit::walk_generic_args(self, span, parameters);
}
}
fn visit_ty(&mut self, t: &'v hir::Ty) {
// Don't collect elided lifetimes used inside of `fn()` syntax.
if let hir::TyKind::BareFn(_) = t.kind {
let old_collect_elided_lifetimes = self.collect_elided_lifetimes;
self.collect_elided_lifetimes = false;
// Record the "stack height" of `for<'a>` lifetime bindings
// to be able to later fully undo their introduction.
let old_len = self.currently_bound_lifetimes.len();
hir::intravisit::walk_ty(self, t);
self.currently_bound_lifetimes.truncate(old_len);
self.collect_elided_lifetimes = old_collect_elided_lifetimes;
} else {
hir::intravisit::walk_ty(self, t)
}
}
fn visit_poly_trait_ref(
&mut self,
trait_ref: &'v hir::PolyTraitRef,
modifier: hir::TraitBoundModifier,
) {
// Record the "stack height" of `for<'a>` lifetime bindings
// to be able to later fully undo their introduction.
let old_len = self.currently_bound_lifetimes.len();
hir::intravisit::walk_poly_trait_ref(self, trait_ref, modifier);
self.currently_bound_lifetimes.truncate(old_len);
}
fn visit_generic_param(&mut self, param: &'v hir::GenericParam) {
// Record the introduction of 'a in `for<'a> ...`.
if let hir::GenericParamKind::Lifetime { .. } = param.kind {
// Introduce lifetimes one at a time so that we can handle
// cases like `fn foo<'d>() -> impl for<'a, 'b: 'a, 'c: 'b + 'd>`.
let lt_name = hir::LifetimeName::Param(param.name);
self.currently_bound_lifetimes.push(lt_name);
}
hir::intravisit::walk_generic_param(self, param);
}
fn visit_lifetime(&mut self, lifetime: &'v hir::Lifetime) {
let name = match lifetime.name {
hir::LifetimeName::Implicit | hir::LifetimeName::Underscore => {
if self.collect_elided_lifetimes {
// Use `'_` for both implicit and underscore lifetimes in
// `type Foo<'_> = impl SomeTrait<'_>;`.
hir::LifetimeName::Underscore
} else {
return;
}
}
hir::LifetimeName::Param(_) => lifetime.name,
// Refers to some other lifetime that is "in
// scope" within the type.
hir::LifetimeName::ImplicitObjectLifetimeDefault => return,
hir::LifetimeName::Error | hir::LifetimeName::Static => return,
};
if !self.currently_bound_lifetimes.contains(&name)
&& !self.already_defined_lifetimes.contains(&name) {
self.already_defined_lifetimes.insert(name);
self.output_lifetimes.push(hir::GenericArg::Lifetime(hir::Lifetime {
hir_id: self.context.next_id(),
span: lifetime.span,
name,
}));
let def_node_id = self.context.sess.next_node_id();
let hir_id =
self.context.lower_node_id_with_owner(def_node_id, self.opaque_ty_id);
self.context.resolver.definitions().create_def_with_parent(
self.parent,
def_node_id,
DefPathData::LifetimeNs(name.ident().name),
ExpnId::root(),
lifetime.span);
let (name, kind) = match name {
hir::LifetimeName::Underscore => (
hir::ParamName::Plain(Ident::with_dummy_span(kw::UnderscoreLifetime)),
hir::LifetimeParamKind::Elided,
),
hir::LifetimeName::Param(param_name) => (
param_name,
hir::LifetimeParamKind::Explicit,
),
_ => bug!("expected `LifetimeName::Param` or `ParamName::Plain`"),
};
self.output_lifetime_params.push(hir::GenericParam {
hir_id,
name,
span: lifetime.span,
pure_wrt_drop: false,
attrs: hir_vec![],
bounds: hir_vec![],
kind: hir::GenericParamKind::Lifetime { kind }
});
}
}
}
let mut lifetime_collector = ImplTraitLifetimeCollector {
context: self,
parent: parent_index,
opaque_ty_id,
collect_elided_lifetimes: true,
currently_bound_lifetimes: Vec::new(),
already_defined_lifetimes: FxHashSet::default(),
output_lifetimes: Vec::new(),
output_lifetime_params: Vec::new(),
};
for bound in bounds {
hir::intravisit::walk_param_bound(&mut lifetime_collector, &bound);
}
(
lifetime_collector.output_lifetimes.into(),
lifetime_collector.output_lifetime_params.into(),
)
}
fn lower_qpath(
&mut self,
id: NodeId,
qself: &Option<QSelf>,
p: &Path,
param_mode: ParamMode,
mut itctx: ImplTraitContext<'_>,
) -> hir::QPath {
let qself_position = qself.as_ref().map(|q| q.position);
let qself = qself.as_ref().map(|q| self.lower_ty(&q.ty, itctx.reborrow()));
let partial_res = self.resolver
.get_partial_res(id)
.unwrap_or_else(|| PartialRes::new(Res::Err));
let proj_start = p.segments.len() - partial_res.unresolved_segments();
let path = P(hir::Path {
res: self.lower_res(partial_res.base_res()),
segments: p.segments[..proj_start]
.iter()
.enumerate()
.map(|(i, segment)| {
let param_mode = match (qself_position, param_mode) {
(Some(j), ParamMode::Optional) if i < j => {
// This segment is part of the trait path in a
// qualified path - one of `a`, `b` or `Trait`
// in `<X as a::b::Trait>::T::U::method`.
ParamMode::Explicit
}
_ => param_mode,
};
// Figure out if this is a type/trait segment,
// which may need lifetime elision performed.
let parent_def_id = |this: &mut Self, def_id: DefId| DefId {
krate: def_id.krate,
index: this.def_key(def_id).parent.expect("missing parent"),
};
let type_def_id = match partial_res.base_res() {
Res::Def(DefKind::AssocTy, def_id) if i + 2 == proj_start => {
Some(parent_def_id(self, def_id))
}
Res::Def(DefKind::Variant, def_id) if i + 1 == proj_start => {
Some(parent_def_id(self, def_id))
}
Res::Def(DefKind::Struct, def_id)
| Res::Def(DefKind::Union, def_id)
| Res::Def(DefKind::Enum, def_id)
| Res::Def(DefKind::TyAlias, def_id)
| Res::Def(DefKind::Trait, def_id) if i + 1 == proj_start =>
{
Some(def_id)
}
_ => None,
};
let parenthesized_generic_args = match partial_res.base_res() {
// `a::b::Trait(Args)`
Res::Def(DefKind::Trait, _)
if i + 1 == proj_start => ParenthesizedGenericArgs::Ok,
// `a::b::Trait(Args)::TraitItem`
Res::Def(DefKind::Method, _)
| Res::Def(DefKind::AssocConst, _)
| Res::Def(DefKind::AssocTy, _)
if i + 2 == proj_start =>
{
ParenthesizedGenericArgs::Ok
}
// Avoid duplicated errors.
Res::Err => ParenthesizedGenericArgs::Ok,
// An error
Res::Def(DefKind::Struct, _)
| Res::Def(DefKind::Enum, _)
| Res::Def(DefKind::Union, _)
| Res::Def(DefKind::TyAlias, _)
| Res::Def(DefKind::Variant, _) if i + 1 == proj_start =>
{
ParenthesizedGenericArgs::Err
}
// A warning for now, for compatibility reasons.
_ => ParenthesizedGenericArgs::Warn,
};
let num_lifetimes = type_def_id.map_or(0, |def_id| {
if let Some(&n) = self.type_def_lifetime_params.get(&def_id) {
return n;
}
assert!(!def_id.is_local());
let item_generics =
self.cstore.item_generics_cloned_untracked(def_id, self.sess);
let n = item_generics.own_counts().lifetimes;
self.type_def_lifetime_params.insert(def_id, n);
n
});
self.lower_path_segment(
p.span,
segment,
param_mode,
num_lifetimes,
parenthesized_generic_args,
itctx.reborrow(),
None,
)
})
.collect(),
span: p.span,
});
// Simple case, either no projections, or only fully-qualified.
// E.g., `std::mem::size_of` or `<I as Iterator>::Item`.
if partial_res.unresolved_segments() == 0 {
return hir::QPath::Resolved(qself, path);
}
// Create the innermost type that we're projecting from.
let mut ty = if path.segments.is_empty() {
// If the base path is empty that means there exists a
// syntactical `Self`, e.g., `&i32` in `<&i32>::clone`.
qself.expect("missing QSelf for <T>::...")
} else {
// Otherwise, the base path is an implicit `Self` type path,
// e.g., `Vec` in `Vec::new` or `<I as Iterator>::Item` in
// `<I as Iterator>::Item::default`.
let new_id = self.next_id();
P(self.ty_path(new_id, p.span, hir::QPath::Resolved(qself, path)))
};
// Anything after the base path are associated "extensions",
// out of which all but the last one are associated types,
// e.g., for `std::vec::Vec::<T>::IntoIter::Item::clone`:
// * base path is `std::vec::Vec<T>`
// * "extensions" are `IntoIter`, `Item` and `clone`
// * type nodes are:
// 1. `std::vec::Vec<T>` (created above)
// 2. `<std::vec::Vec<T>>::IntoIter`
// 3. `<<std::vec::Vec<T>>::IntoIter>::Item`
// * final path is `<<<std::vec::Vec<T>>::IntoIter>::Item>::clone`
for (i, segment) in p.segments.iter().enumerate().skip(proj_start) {
let segment = P(self.lower_path_segment(
p.span,
segment,
param_mode,
0,
ParenthesizedGenericArgs::Warn,
itctx.reborrow(),
None,
));
let qpath = hir::QPath::TypeRelative(ty, segment);
// It's finished, return the extension of the right node type.
if i == p.segments.len() - 1 {
return qpath;
}
// Wrap the associated extension in another type node.
let new_id = self.next_id();
ty = P(self.ty_path(new_id, p.span, qpath));
}
// We should've returned in the for loop above.
span_bug!(
p.span,
"lower_qpath: no final extension segment in {}..{}",
proj_start,
p.segments.len()
)
}
fn lower_path_extra(
&mut self,
res: Res,
p: &Path,
param_mode: ParamMode,
explicit_owner: Option<NodeId>,
) -> hir::Path {
hir::Path {
res,
segments: p.segments
.iter()
.map(|segment| {
self.lower_path_segment(
p.span,
segment,
param_mode,
0,
ParenthesizedGenericArgs::Err,
ImplTraitContext::disallowed(),
explicit_owner,
)
})
.collect(),
span: p.span,
}
}
fn lower_path(&mut self, id: NodeId, p: &Path, param_mode: ParamMode) -> hir::Path {
let res = self.expect_full_res(id);
let res = self.lower_res(res);
self.lower_path_extra(res, p, param_mode, None)
}
fn lower_path_segment(
&mut self,
path_span: Span,
segment: &PathSegment,
param_mode: ParamMode,
expected_lifetimes: usize,
parenthesized_generic_args: ParenthesizedGenericArgs,
itctx: ImplTraitContext<'_>,
explicit_owner: Option<NodeId>,
) -> hir::PathSegment {
let (mut generic_args, infer_args) = if let Some(ref generic_args) = segment.args {
let msg = "parenthesized type parameters may only be used with a `Fn` trait";
match **generic_args {
GenericArgs::AngleBracketed(ref data) => {
self.lower_angle_bracketed_parameter_data(data, param_mode, itctx)
}
GenericArgs::Parenthesized(ref data) => match parenthesized_generic_args {
ParenthesizedGenericArgs::Ok => self.lower_parenthesized_parameter_data(data),
ParenthesizedGenericArgs::Warn => {
self.sess.buffer_lint(
PARENTHESIZED_PARAMS_IN_TYPES_AND_MODULES,
CRATE_NODE_ID,
data.span,
msg.into(),
);
(hir::GenericArgs::none(), true)
}
ParenthesizedGenericArgs::Err => {
let mut err = struct_span_err!(self.sess, data.span, E0214, "{}", msg);
err.span_label(data.span, "only `Fn` traits may use parentheses");
if let Ok(snippet) = self.sess.source_map().span_to_snippet(data.span) {
// Do not suggest going from `Trait()` to `Trait<>`
if data.inputs.len() > 0 {
let split = snippet.find('(').unwrap();
let trait_name = &snippet[0..split];
let args = &snippet[split + 1 .. snippet.len() - 1];
err.span_suggestion(
data.span,
"use angle brackets instead",
format!("{}<{}>", trait_name, args),
Applicability::MaybeIncorrect,
);
}
};
err.emit();
(
self.lower_angle_bracketed_parameter_data(
&data.as_angle_bracketed_args(),
param_mode,
itctx
).0,
false,
)
}
},
}
} else {
self.lower_angle_bracketed_parameter_data(&Default::default(), param_mode, itctx)
};
let has_lifetimes = generic_args.args.iter().any(|arg| match arg {
GenericArg::Lifetime(_) => true,
_ => false,
});
let first_generic_span = generic_args.args.iter().map(|a| a.span())
.chain(generic_args.bindings.iter().map(|b| b.span)).next();
if !generic_args.parenthesized && !has_lifetimes {
generic_args.args =
self.elided_path_lifetimes(path_span, expected_lifetimes)
.into_iter()
.map(|lt| GenericArg::Lifetime(lt))
.chain(generic_args.args.into_iter())
.collect();
if expected_lifetimes > 0 && param_mode == ParamMode::Explicit {
let anon_lt_suggestion = vec!["'_"; expected_lifetimes].join(", ");
let no_non_lt_args = generic_args.args.len() == expected_lifetimes;
let no_bindings = generic_args.bindings.is_empty();
let (incl_angl_brckt, insertion_sp, suggestion) = if no_non_lt_args && no_bindings {
// If there are no (non-implicit) generic args or associated type
// bindings, our suggestion includes the angle brackets.
(true, path_span.shrink_to_hi(), format!("<{}>", anon_lt_suggestion))
} else {
// Otherwise (sorry, this is kind of gross) we need to infer the
// place to splice in the `'_, ` from the generics that do exist.
let first_generic_span = first_generic_span
.expect("already checked that non-lifetime args or bindings exist");
(false, first_generic_span.shrink_to_lo(), format!("{}, ", anon_lt_suggestion))
};
match self.anonymous_lifetime_mode {
// In create-parameter mode we error here because we don't want to support
// deprecated impl elision in new features like impl elision and `async fn`,
// both of which work using the `CreateParameter` mode:
//
// impl Foo for std::cell::Ref<u32> // note lack of '_
// async fn foo(_: std::cell::Ref<u32>) { ... }
AnonymousLifetimeMode::CreateParameter => {
let mut err = struct_span_err!(
self.sess,
path_span,
E0726,
"implicit elided lifetime not allowed here"
);
crate::lint::builtin::add_elided_lifetime_in_path_suggestion(
&self.sess,
&mut err,
expected_lifetimes,
path_span,
incl_angl_brckt,
insertion_sp,
suggestion,
);
err.emit();
}
AnonymousLifetimeMode::PassThrough |
AnonymousLifetimeMode::ReportError => {
self.sess.buffer_lint_with_diagnostic(
ELIDED_LIFETIMES_IN_PATHS,
CRATE_NODE_ID,
path_span,
"hidden lifetime parameters in types are deprecated",
builtin::BuiltinLintDiagnostics::ElidedLifetimesInPaths(
expected_lifetimes,
path_span,
incl_angl_brckt,
insertion_sp,
suggestion,
)
);
}
}
}
}
let res = self.expect_full_res(segment.id);
let id = if let Some(owner) = explicit_owner {
self.lower_node_id_with_owner(segment.id, owner)
} else {
self.lower_node_id(segment.id)
};
debug!(
"lower_path_segment: ident={:?} original-id={:?} new-id={:?}",
segment.ident, segment.id, id,
);
hir::PathSegment::new(
segment.ident,
Some(id),
Some(self.lower_res(res)),
generic_args,
infer_args,
)
}
fn lower_angle_bracketed_parameter_data(
&mut self,
data: &AngleBracketedArgs,
param_mode: ParamMode,
mut itctx: ImplTraitContext<'_>,
) -> (hir::GenericArgs, bool) {
let &AngleBracketedArgs { ref args, ref constraints, .. } = data;
let has_non_lt_args = args.iter().any(|arg| match arg {
ast::GenericArg::Lifetime(_) => false,
ast::GenericArg::Type(_) => true,
ast::GenericArg::Const(_) => true,
});
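        // The `bool` below is the segment's `infer_args` flag: with optional
        // parameters and no explicit type or const arguments (e.g. `Vec`
        // rather than `Vec<u8>`), type-checking is asked to infer the
        // arguments.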
(
hir::GenericArgs {
args: args.iter().map(|a| self.lower_generic_arg(a, itctx.reborrow())).collect(),
bindings: constraints.iter()
.map(|b| self.lower_assoc_ty_constraint(b, itctx.reborrow()))
.collect(),
parenthesized: false,
},
!has_non_lt_args && param_mode == ParamMode::Optional
)
}
fn lower_parenthesized_parameter_data(
&mut self,
data: &ParenthesizedArgs,
) -> (hir::GenericArgs, bool) {
// Switch to `PassThrough` mode for anonymous lifetimes; this
// means that we permit things like `&Ref<T>`, where `Ref` has
// a hidden lifetime parameter. This is needed for backwards
// compatibility, even in contexts like an impl header where
// we generally don't permit such things (see #51008).
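        // For example, `Fn(u32, &str) -> bool` is lowered as if it were
        // written `Fn<(u32, &str), Output = bool>`: the inputs collapse into
        // a single tuple type argument and the return type becomes an
        // `Output` binding (defaulting to `()` when no `-> T` is present).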
self.with_anonymous_lifetime_mode(
AnonymousLifetimeMode::PassThrough,
|this| {
let &ParenthesizedArgs { ref inputs, ref output, span } = data;
let inputs = inputs
.iter()
.map(|ty| this.lower_ty_direct(ty, ImplTraitContext::disallowed()))
.collect();
let mk_tup = |this: &mut Self, tys, span| {
hir::Ty { kind: hir::TyKind::Tup(tys), hir_id: this.next_id(), span }
};
(
hir::GenericArgs {
args: hir_vec![GenericArg::Type(mk_tup(this, inputs, span))],
bindings: hir_vec![
hir::TypeBinding {
hir_id: this.next_id(),
ident: Ident::with_dummy_span(FN_OUTPUT_NAME),
kind: hir::TypeBindingKind::Equality {
ty: output
.as_ref()
.map(|ty| this.lower_ty(
&ty,
ImplTraitContext::disallowed()
))
.unwrap_or_else(||
P(mk_tup(this, hir::HirVec::new(), span))
),
},
span: output.as_ref().map_or(span, |ty| ty.span),
}
],
parenthesized: true,
},
false,
)
}
)
}
fn lower_local(&mut self, l: &Local) -> (hir::Local, SmallVec<[NodeId; 1]>) {
let mut ids = SmallVec::<[NodeId; 1]>::new();
if self.sess.features_untracked().impl_trait_in_bindings {
if let Some(ref ty) = l.ty {
let mut visitor = ImplTraitTypeIdVisitor { ids: &mut ids };
visitor.visit_ty(ty);
}
}
let parent_def_id = DefId::local(self.current_hir_id_owner.last().unwrap().0);
(hir::Local {
hir_id: self.lower_node_id(l.id),
ty: l.ty
.as_ref()
.map(|t| self.lower_ty(t,
if self.sess.features_untracked().impl_trait_in_bindings {
ImplTraitContext::OpaqueTy(Some(parent_def_id))
} else {
ImplTraitContext::Disallowed(ImplTraitPosition::Binding)
}
)),
pat: self.lower_pat(&l.pat),
init: l.init.as_ref().map(|e| P(self.lower_expr(e))),
span: l.span,
attrs: l.attrs.clone(),
source: hir::LocalSource::Normal,
}, ids)
}
fn lower_mutability(&mut self, m: Mutability) -> hir::Mutability {
match m {
Mutability::Mutable => hir::MutMutable,
Mutability::Immutable => hir::MutImmutable,
}
}
fn lower_fn_params_to_names(&mut self, decl: &FnDecl) -> hir::HirVec<Ident> {
// Skip the `...` (`CVarArgs`) trailing arguments from the AST,
// as they are not explicit in HIR/Ty function signatures.
// (instead, the `c_variadic` flag is set to `true`)
let mut inputs = &decl.inputs[..];
if decl.c_variadic() {
inputs = &inputs[..inputs.len() - 1];
}
inputs
.iter()
.map(|param| match param.pat.kind {
PatKind::Ident(_, ident, _) => ident,
_ => Ident::new(kw::Invalid, param.pat.span),
})
.collect()
}
// Lowers a function declaration.
//
// `decl`: the unlowered (AST) function declaration.
    // `in_band_ty_params`: if `Some((def_id, _))`, `impl Trait` arguments are lowered
    //      into generic parameters on the given `def_id`; otherwise `impl Trait` is
    //      disallowed. Must be `Some` if `make_ret_async` is also `Some`.
// `impl_trait_return_allow`: determines whether `impl Trait` can be used in return position.
// This guards against trait declarations and implementations where `impl Trait` is
// disallowed.
// `make_ret_async`: if `Some`, converts `-> T` into `-> impl Future<Output = T>` in the
// return type. This is used for `async fn` declarations. The `NodeId` is the ID of the
// return type `impl Trait` item.
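    //
    // For example, `async fn f(x: &u32) -> &u32` comes through here with
    // `make_ret_async = Some(..)`; the elided argument lifetime becomes a
    // fresh generic parameter, yielding roughly
    // `fn f<'0>(x: &'0 u32) -> impl Future<Output = &u32>`
    // (see `lower_async_fn_ret_ty` below for the return-type half).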
fn lower_fn_decl(
&mut self,
decl: &FnDecl,
mut in_band_ty_params: Option<(DefId, &mut Vec<hir::GenericParam>)>,
impl_trait_return_allow: bool,
make_ret_async: Option<NodeId>,
) -> P<hir::FnDecl> {
let lt_mode = if make_ret_async.is_some() {
// In `async fn`, argument-position elided lifetimes
// must be transformed into fresh generic parameters so that
// they can be applied to the opaque `impl Trait` return type.
AnonymousLifetimeMode::CreateParameter
} else {
self.anonymous_lifetime_mode
};
let c_variadic = decl.c_variadic();
// Remember how many lifetimes were already around so that we can
// only look at the lifetime parameters introduced by the arguments.
let inputs = self.with_anonymous_lifetime_mode(lt_mode, |this| {
// Skip the `...` (`CVarArgs`) trailing arguments from the AST,
// as they are not explicit in HIR/Ty function signatures.
// (instead, the `c_variadic` flag is set to `true`)
let mut inputs = &decl.inputs[..];
if c_variadic {
inputs = &inputs[..inputs.len() - 1];
}
inputs
.iter()
.map(|param| {
if let Some((_, ibty)) = &mut in_band_ty_params {
this.lower_ty_direct(¶m.ty, ImplTraitContext::Universal(ibty))
} else {
this.lower_ty_direct(¶m.ty, ImplTraitContext::disallowed())
}
})
.collect::<HirVec<_>>()
});
let output = if let Some(ret_id) = make_ret_async {
self.lower_async_fn_ret_ty(
&decl.output,
in_band_ty_params.expect("`make_ret_async` but no `fn_def_id`").0,
ret_id,
)
} else {
match decl.output {
FunctionRetTy::Ty(ref ty) => match in_band_ty_params {
Some((def_id, _)) if impl_trait_return_allow => {
hir::Return(self.lower_ty(ty, ImplTraitContext::OpaqueTy(Some(def_id))))
}
_ => {
hir::Return(self.lower_ty(ty, ImplTraitContext::disallowed()))
}
},
FunctionRetTy::Default(span) => hir::DefaultReturn(span),
}
};
P(hir::FnDecl {
inputs,
output,
c_variadic,
implicit_self: decl.inputs.get(0).map_or(
hir::ImplicitSelfKind::None,
|arg| {
let is_mutable_pat = match arg.pat.kind {
PatKind::Ident(BindingMode::ByValue(mt), _, _) |
PatKind::Ident(BindingMode::ByRef(mt), _, _) =>
mt == Mutability::Mutable,
_ => false,
};
match arg.ty.kind {
TyKind::ImplicitSelf if is_mutable_pat => hir::ImplicitSelfKind::Mut,
TyKind::ImplicitSelf => hir::ImplicitSelfKind::Imm,
// Given we are only considering `ImplicitSelf` types, we needn't consider
// the case where we have a mutable pattern to a reference as that would
// no longer be an `ImplicitSelf`.
TyKind::Rptr(_, ref mt) if mt.ty.kind.is_implicit_self() &&
mt.mutbl == ast::Mutability::Mutable =>
hir::ImplicitSelfKind::MutRef,
TyKind::Rptr(_, ref mt) if mt.ty.kind.is_implicit_self() =>
hir::ImplicitSelfKind::ImmRef,
_ => hir::ImplicitSelfKind::None,
}
},
),
})
}
// Transforms `-> T` for `async fn` into `-> OpaqueTy { .. }`
// combined with the following definition of `OpaqueTy`:
//
// type OpaqueTy<generics_from_parent_fn> = impl Future<Output = T>;
//
// `output`: unlowered output type (`T` in `-> T`)
// `fn_def_id`: `DefId` of the parent function (used to create child impl trait definition)
// `opaque_ty_node_id`: `NodeId` of the opaque `impl Trait` type that should be created
fn lower_async_fn_ret_ty(
&mut self,
output: &FunctionRetTy,
fn_def_id: DefId,
opaque_ty_node_id: NodeId,
) -> hir::FunctionRetTy {
debug!(
"lower_async_fn_ret_ty(\
output={:?}, \
fn_def_id={:?}, \
opaque_ty_node_id={:?})",
output, fn_def_id, opaque_ty_node_id,
);
let span = output.span();
let opaque_ty_span = self.mark_span_with_reason(
DesugaringKind::Async,
span,
None,
);
let opaque_ty_def_index = self
.resolver
.definitions()
.opt_def_index(opaque_ty_node_id)
.unwrap();
self.allocate_hir_id_counter(opaque_ty_node_id);
// When we create the opaque type for this async fn, it is going to have
// to capture all the lifetimes involved in the signature (including in the
// return type). This is done by introducing lifetime parameters for:
//
// - all the explicitly declared lifetimes from the impl and function itself;
// - all the elided lifetimes in the fn arguments;
// - all the elided lifetimes in the return type.
//
// So for example in this snippet:
//
// ```rust
// impl<'a> Foo<'a> {
// async fn bar<'b>(&self, x: &'b Vec<f64>, y: &str) -> &u32 {
    //         // elided lifetimes: `&self` is '0, `y: &str` is '1,
    //         // and the returned `&u32` is '2 (used below)
// }
// }
// ```
//
// we would create an opaque type like:
//
// ```
// type Bar<'a, 'b, '0, '1, '2> = impl Future<Output = &'2 u32>;
// ```
//
// and we would then desugar `bar` to the equivalent of:
//
// ```rust
// impl<'a> Foo<'a> {
// fn bar<'b, '0, '1>(&'0 self, x: &'b Vec<f64>, y: &'1 str) -> Bar<'a, 'b, '0, '1, '_>
// }
// ```
//
// Note that the final parameter to `Bar` is `'_`, not `'2` --
// this is because the elided lifetimes from the return type
// should be figured out using the ordinary elision rules, and
// this desugaring achieves that.
//
// The variable `input_lifetimes_count` tracks the number of
// lifetime parameters to the opaque type *not counting* those
// lifetimes elided in the return type. This includes those
// that are explicitly declared (`in_scope_lifetimes`) and
// those elided lifetimes we found in the arguments (current
// content of `lifetimes_to_define`). Next, we will process
// the return type, which will cause `lifetimes_to_define` to
// grow.
let input_lifetimes_count = self.in_scope_lifetimes.len() + self.lifetimes_to_define.len();
let (opaque_ty_id, lifetime_params) = self.with_hir_id_owner(opaque_ty_node_id, |this| {
// We have to be careful to get elision right here. The
// idea is that we create a lifetime parameter for each
// lifetime in the return type. So, given a return type
// like `async fn foo(..) -> &[&u32]`, we lower to `impl
// Future<Output = &'1 [ &'2 u32 ]>`.
//
// Then, we will create `fn foo(..) -> Foo<'_, '_>`, and
// hence the elision takes place at the fn site.
let future_bound = this.with_anonymous_lifetime_mode(
AnonymousLifetimeMode::CreateParameter,
|this| this.lower_async_fn_output_type_to_future_bound(
output,
fn_def_id,
span,
),
);
debug!("lower_async_fn_ret_ty: future_bound={:#?}", future_bound);
// Calculate all the lifetimes that should be captured
// by the opaque type. This should include all in-scope
// lifetime parameters, including those defined in-band. | // as the output type may introduce new in-band lifetimes.
let lifetime_params: Vec<(Span, ParamName)> =
this.in_scope_lifetimes
.iter().cloned()
.map(|name| (name.ident().span, name))
.chain(this.lifetimes_to_define.iter().cloned())
.collect();
debug!("lower_async_fn_ret_ty: in_scope_lifetimes={:#?}", this.in_scope_lifetimes);
debug!("lower_async_fn_ret_ty: lifetimes_to_define={:#?}", this.lifetimes_to_define);
debug!("lower_async_fn_ret_ty: lifetime_params={:#?}", lifetime_params);
let generic_params =
lifetime_params
.iter().cloned()
.map(|(span, hir_name)| {
this.lifetime_to_generic_param(span, hir_name, opaque_ty_def_index)
})
.collect();
let opaque_ty_item = hir::OpaqueTy {
generics: hir::Generics {
params: generic_params,
where_clause: hir::WhereClause {
predicates: hir_vec![],
span,
},
span,
},
bounds: hir_vec![future_bound],
impl_trait_fn: Some(fn_def_id),
origin: hir::OpaqueTyOrigin::AsyncFn,
};
trace!("exist ty from async fn def index: {:#?}", opaque_ty_def_index);
let opaque_ty_id = this.generate_opaque_type(
opaque_ty_node_id,
opaque_ty_item,
span,
opaque_ty_span,
);
(opaque_ty_id, lifetime_params)
});
// As documented above on the variable
// `input_lifetimes_count`, we need to create the lifetime
// arguments to our opaque type. Continuing with our example,
// we're creating the type arguments for the return type:
//
// ```
// Bar<'a, 'b, '0, '1, '_>
// ```
//
// For the "input" lifetime parameters, we wish to create
// references to the parameters themselves, including the
// "implicit" ones created from parameter types (`'a`, `'b`,
        // `'0`, `'1`).
//
// For the "output" lifetime parameters, we just want to
// generate `'_`.
let mut generic_args: Vec<_> =
lifetime_params[..input_lifetimes_count]
.iter()
.map(|&(span, hir_name)| {
// Input lifetime like `'a` or `'1`:
GenericArg::Lifetime(hir::Lifetime {
hir_id: self.next_id(),
span,
name: hir::LifetimeName::Param(hir_name),
})
})
.collect();
generic_args.extend(
lifetime_params[input_lifetimes_count..]
.iter()
.map(|&(span, _)| {
// Output lifetime like `'_`.
GenericArg::Lifetime(hir::Lifetime {
hir_id: self.next_id(),
span,
name: hir::LifetimeName::Implicit,
})
})
);
        // Create the `Foo<...>` reference itself. Note that the `type
// Foo = impl Trait` is, internally, created as a child of the
// async fn, so the *type parameters* are inherited. It's
// only the lifetime parameters that we must supply.
let opaque_ty_ref = hir::TyKind::Def(hir::ItemId { id: opaque_ty_id }, generic_args.into());
hir::FunctionRetTy::Return(P(hir::Ty {
kind: opaque_ty_ref,
span,
hir_id: self.next_id(),
}))
}
/// Transforms `-> T` into `Future<Output = T>`
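    /// For example, `-> u32` becomes the bound `Future<Output = u32>`, and a
    /// default return type (no `-> T` written) becomes `Future<Output = ()>`.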
fn lower_async_fn_output_type_to_future_bound(
&mut self,
output: &FunctionRetTy,
fn_def_id: DefId,
span: Span,
) -> hir::GenericBound {
// Compute the `T` in `Future<Output = T>` from the return type.
let output_ty = match output {
FunctionRetTy::Ty(ty) => {
self.lower_ty(ty, ImplTraitContext::OpaqueTy(Some(fn_def_id)))
}
FunctionRetTy::Default(ret_ty_span) => {
P(hir::Ty {
hir_id: self.next_id(),
kind: hir::TyKind::Tup(hir_vec![]),
span: *ret_ty_span,
})
}
};
// "<Output = T>"
let future_params = P(hir::GenericArgs {
args: hir_vec![],
bindings: hir_vec![hir::TypeBinding {
ident: Ident::with_dummy_span(FN_OUTPUT_NAME),
kind: hir::TypeBindingKind::Equality {
ty: output_ty,
},
hir_id: self.next_id(),
span,
}],
parenthesized: false,
});
// ::std::future::Future<future_params>
let future_path =
P(self.std_path(span, &[sym::future, sym::Future], Some(future_params), false));
hir::GenericBound::Trait(
hir::PolyTraitRef {
trait_ref: hir::TraitRef {
path: future_path,
hir_ref_id: self.next_id(),
},
bound_generic_params: hir_vec![],
span,
},
hir::TraitBoundModifier::None,
)
}
fn lower_param_bound(
&mut self,
tpb: &GenericBound,
itctx: ImplTraitContext<'_>,
) -> hir::GenericBound {
match *tpb {
GenericBound::Trait(ref ty, modifier) => {
hir::GenericBound::Trait(
self.lower_poly_trait_ref(ty, itctx),
self.lower_trait_bound_modifier(modifier),
)
}
GenericBound::Outlives(ref lifetime) => {
hir::GenericBound::Outlives(self.lower_lifetime(lifetime))
}
}
}
fn lower_lifetime(&mut self, l: &Lifetime) -> hir::Lifetime {
let span = l.ident.span;
match l.ident {
ident if ident.name == kw::StaticLifetime =>
self.new_named_lifetime(l.id, span, hir::LifetimeName::Static),
ident if ident.name == kw::UnderscoreLifetime =>
match self.anonymous_lifetime_mode {
AnonymousLifetimeMode::CreateParameter => {
let fresh_name = self.collect_fresh_in_band_lifetime(span);
self.new_named_lifetime(l.id, span, hir::LifetimeName::Param(fresh_name))
}
AnonymousLifetimeMode::PassThrough => {
self.new_named_lifetime(l.id, span, hir::LifetimeName::Underscore)
}
AnonymousLifetimeMode::ReportError => self.new_error_lifetime(Some(l.id), span),
},
ident => {
self.maybe_collect_in_band_lifetime(ident);
let param_name = ParamName::Plain(ident);
self.new_named_lifetime(l.id, span, hir::LifetimeName::Param(param_name))
}
}
}
fn new_named_lifetime(
&mut self,
id: NodeId,
span: Span,
name: hir::LifetimeName,
) -> hir::Lifetime {
hir::Lifetime {
hir_id: self.lower_node_id(id),
span,
            name,
}
}
fn lower_generic_params(
&mut self,
params: &[GenericParam],
add_bounds: &NodeMap<Vec<GenericBound>>,
mut itctx: ImplTraitContext<'_>,
) -> hir::HirVec<hir::GenericParam> {
params.iter().map(|param| {
self.lower_generic_param(param, add_bounds, itctx.reborrow())
}).collect()
}
fn lower_generic_param(&mut self,
param: &GenericParam,
add_bounds: &NodeMap<Vec<GenericBound>>,
mut itctx: ImplTraitContext<'_>)
-> hir::GenericParam {
let mut bounds = self.with_anonymous_lifetime_mode(
AnonymousLifetimeMode::ReportError,
|this| this.lower_param_bounds(¶m.bounds, itctx.reborrow()),
);
let (name, kind) = match param.kind {
GenericParamKind::Lifetime => {
let was_collecting_in_band = self.is_collecting_in_band_lifetimes;
self.is_collecting_in_band_lifetimes = false;
let lt = self.with_anonymous_lifetime_mode(
AnonymousLifetimeMode::ReportError,
|this| this.lower_lifetime(&Lifetime { id: param.id, ident: param.ident }),
);
let param_name = match lt.name {
hir::LifetimeName::Param(param_name) => param_name,
hir::LifetimeName::Implicit
| hir::LifetimeName::Underscore
| hir::LifetimeName::Static => hir::ParamName::Plain(lt.name.ident()),
hir::LifetimeName::ImplicitObjectLifetimeDefault => {
span_bug!(
param.ident.span,
"object-lifetime-default should not occur here",
);
}
hir::LifetimeName::Error => ParamName::Error,
};
let kind = hir::GenericParamKind::Lifetime {
kind: hir::LifetimeParamKind::Explicit
};
self.is_collecting_in_band_lifetimes = was_collecting_in_band;
(param_name, kind)
}
GenericParamKind::Type { ref default, .. } => {
let add_bounds = add_bounds.get(¶m.id).map_or(&[][..], |x| &x);
if !add_bounds.is_empty() {
let params = self.lower_param_bounds(add_bounds, itctx.reborrow()).into_iter();
bounds = bounds.into_iter()
.chain(params)
.collect();
}
let kind = hir::GenericParamKind::Type {
default: default.as_ref().map(|x| {
self.lower_ty(x, ImplTraitContext::OpaqueTy(None))
}),
synthetic: param.attrs.iter()
.filter(|attr| attr.check_name(sym::rustc_synthetic))
.map(|_| hir::SyntheticTyParamKind::ImplTrait)
.next(),
};
(hir::ParamName::Plain(param.ident), kind)
}
GenericParamKind::Const { ref ty } => {
(hir::ParamName::Plain(param.ident), hir::GenericParamKind::Const {
ty: self.lower_ty(&ty, ImplTraitContext::disallowed()),
})
}
};
hir::GenericParam {
hir_id: self.lower_node_id(param.id),
name,
span: param.ident.span,
pure_wrt_drop: attr::contains_name(¶m.attrs, sym::may_dangle),
attrs: self.lower_attrs(¶m.attrs),
bounds,
kind,
}
}
fn lower_trait_ref(&mut self, p: &TraitRef, itctx: ImplTraitContext<'_>) -> hir::TraitRef {
let path = match self.lower_qpath(p.ref_id, &None, &p.path, ParamMode::Explicit, itctx) {
hir::QPath::Resolved(None, path) => path,
qpath => bug!("lower_trait_ref: unexpected QPath `{:?}`", qpath),
};
hir::TraitRef {
path,
hir_ref_id: self.lower_node_id(p.ref_id),
}
}
fn lower_poly_trait_ref(
&mut self,
p: &PolyTraitRef,
mut itctx: ImplTraitContext<'_>,
) -> hir::PolyTraitRef {
let bound_generic_params = self.lower_generic_params(
&p.bound_generic_params,
&NodeMap::default(),
itctx.reborrow(),
);
let trait_ref = self.with_in_scope_lifetime_defs(
&p.bound_generic_params,
|this| this.lower_trait_ref(&p.trait_ref, itctx),
);
hir::PolyTraitRef {
bound_generic_params,
trait_ref,
span: p.span,
}
}
fn lower_mt(&mut self, mt: &MutTy, itctx: ImplTraitContext<'_>) -> hir::MutTy {
hir::MutTy {
ty: self.lower_ty(&mt.ty, itctx),
mutbl: self.lower_mutability(mt.mutbl),
}
}
fn lower_param_bounds(&mut self, bounds: &[GenericBound], mut itctx: ImplTraitContext<'_>)
-> hir::GenericBounds {
bounds.iter().map(|bound| self.lower_param_bound(bound, itctx.reborrow())).collect()
}
fn lower_block(&mut self, b: &Block, targeted_by_break: bool) -> P<hir::Block> {
let mut stmts = vec![];
let mut expr = None;
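        // The last statement, if it is an expression statement, becomes the
        // block's trailing expression: `{ foo(); bar() }` lowers with `bar()`
        // as `expr` rather than as one of `stmts`.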
for (index, stmt) in b.stmts.iter().enumerate() {
if index == b.stmts.len() - 1 {
if let StmtKind::Expr(ref e) = stmt.kind {
expr = Some(P(self.lower_expr(e)));
} else {
stmts.extend(self.lower_stmt(stmt));
}
} else {
stmts.extend(self.lower_stmt(stmt));
}
}
P(hir::Block {
hir_id: self.lower_node_id(b.id),
stmts: stmts.into(),
expr,
rules: self.lower_block_check_mode(&b.rules),
span: b.span,
targeted_by_break,
})
}
/// Lowers a block directly to an expression, presuming that it
/// has no attributes and is not targeted by a `break`.
fn lower_block_expr(&mut self, b: &Block) -> hir::Expr {
let block = self.lower_block(b, false);
self.expr_block(block, ThinVec::new())
}
fn lower_pat(&mut self, p: &Pat) -> P<hir::Pat> {
let node = match p.kind {
PatKind::Wild => hir::PatKind::Wild,
PatKind::Ident(ref binding_mode, ident, ref sub) => {
let lower_sub = |this: &mut Self| sub.as_ref().map(|x| this.lower_pat(x));
self.lower_pat_ident(p, binding_mode, ident, lower_sub)
}
PatKind::Lit(ref e) => hir::PatKind::Lit(P(self.lower_expr(e))),
PatKind::TupleStruct(ref path, ref pats) => {
let qpath = self.lower_qpath(
p.id,
&None,
path,
ParamMode::Optional,
ImplTraitContext::disallowed(),
);
let (pats, ddpos) = self.lower_pat_tuple(pats, "tuple struct");
hir::PatKind::TupleStruct(qpath, pats, ddpos)
}
PatKind::Or(ref pats) => {
hir::PatKind::Or(pats.iter().map(|x| self.lower_pat(x)).collect())
}
PatKind::Path(ref qself, ref path) => {
let qpath = self.lower_qpath(
p.id,
qself,
path,
ParamMode::Optional,
ImplTraitContext::disallowed(),
);
hir::PatKind::Path(qpath)
}
PatKind::Struct(ref path, ref fields, etc) => {
let qpath = self.lower_qpath(
p.id,
&None,
path,
ParamMode::Optional,
ImplTraitContext::disallowed(),
);
let fs = fields
.iter()
.map(|f| hir::FieldPat {
hir_id: self.next_id(),
ident: f.ident,
pat: self.lower_pat(&f.pat),
is_shorthand: f.is_shorthand,
span: f.span,
})
.collect();
hir::PatKind::Struct(qpath, fs, etc)
}
PatKind::Tuple(ref pats) => {
let (pats, ddpos) = self.lower_pat_tuple(pats, "tuple");
hir::PatKind::Tuple(pats, ddpos)
}
PatKind::Box(ref inner) => hir::PatKind::Box(self.lower_pat(inner)),
PatKind::Ref(ref inner, mutbl) => {
hir::PatKind::Ref(self.lower_pat(inner), self.lower_mutability(mutbl))
}
PatKind::Range(ref e1, ref e2, Spanned { node: ref end, .. }) => hir::PatKind::Range(
P(self.lower_expr(e1)),
P(self.lower_expr(e2)),
self.lower_range_end(end),
),
PatKind::Slice(ref pats) => self.lower_pat_slice(pats),
PatKind::Rest => {
// If we reach here the `..` pattern is not semantically allowed.
self.ban_illegal_rest_pat(p.span)
}
PatKind::Paren(ref inner) => return self.lower_pat(inner),
PatKind::Mac(_) => panic!("Shouldn't exist here"),
};
self.pat_with_node_id_of(p, node)
}
fn lower_pat_tuple(
&mut self,
pats: &[AstP<Pat>],
ctx: &str,
) -> (HirVec<P<hir::Pat>>, Option<usize>) {
let mut elems = Vec::with_capacity(pats.len());
let mut rest = None;
let mut iter = pats.iter().enumerate();
while let Some((idx, pat)) = iter.next() {
// Interpret the first `..` pattern as a subtuple pattern.
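            // E.g. in `(a, .., z)` the `..` sits at index 1, so the lowered
            // tuple pattern carries `ddpos = Some(1)`.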
if pat.is_rest() {
rest = Some((idx, pat.span));
break;
}
// It was not a subslice pattern so lower it normally.
elems.push(self.lower_pat(pat));
}
while let Some((_, pat)) = iter.next() {
// There was a previous subtuple pattern; make sure we don't allow more.
if pat.is_rest() {
self.ban_extra_rest_pat(pat.span, rest.unwrap().1, ctx);
} else {
elems.push(self.lower_pat(pat));
}
}
(elems.into(), rest.map(|(ddpos, _)| ddpos))
}
fn lower_pat_slice(&mut self, pats: &[AstP<Pat>]) -> hir::PatKind {
let mut before = Vec::new();
let mut after = Vec::new();
let mut slice = None;
let mut prev_rest_span = None;
let mut iter = pats.iter();
while let Some(pat) = iter.next() {
// Interpret the first `((ref mut?)? x @)? ..` pattern as a subslice pattern.
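            // E.g. `[a, b, mid @ .., z]` lowers with `before = [a, b]`,
            // `slice = Some(mid)` (its `..` subpattern replaced by a
            // wildcard), and `after = [z]`.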
match pat.kind {
PatKind::Rest => {
prev_rest_span = Some(pat.span);
slice = Some(self.pat_wild_with_node_id_of(pat));
break;
},
PatKind::Ident(ref bm, ident, Some(ref sub)) if sub.is_rest() => {
prev_rest_span = Some(sub.span);
let lower_sub = |this: &mut Self| Some(this.pat_wild_with_node_id_of(sub));
let node = self.lower_pat_ident(pat, bm, ident, lower_sub);
slice = Some(self.pat_with_node_id_of(pat, node));
break;
},
_ => {}
}
// It was not a subslice pattern so lower it normally.
before.push(self.lower_pat(pat));
}
while let Some(pat) = iter.next() {
// There was a previous subslice pattern; make sure we don't allow more.
let rest_span = match pat.kind {
PatKind::Rest => Some(pat.span),
PatKind::Ident(.., Some(ref sub)) if sub.is_rest() => {
// The `HirValidator` is merciless; add a `_` pattern to avoid ICEs.
after.push(self.pat_wild_with_node_id_of(pat));
Some(sub.span)
},
_ => None,
};
if let Some(rest_span) = rest_span {
self.ban_extra_rest_pat(rest_span, prev_rest_span.unwrap(), "slice");
} else {
after.push(self.lower_pat(pat));
}
}
hir::PatKind::Slice(before.into(), slice, after.into())
}
fn lower_pat_ident(
&mut self,
p: &Pat,
binding_mode: &BindingMode,
ident: Ident,
lower_sub: impl FnOnce(&mut Self) -> Option<P<hir::Pat>>,
) -> hir::PatKind {
match self.resolver.get_partial_res(p.id).map(|d| d.base_res()) {
// `None` can occur in body-less function signatures
res @ None | res @ Some(Res::Local(_)) => {
let canonical_id = match res {
Some(Res::Local(id)) => id,
_ => p.id,
};
hir::PatKind::Binding(
self.lower_binding_mode(binding_mode),
self.lower_node_id(canonical_id),
ident,
lower_sub(self),
)
}
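            // An identifier that resolves to something other than a local,
            // e.g. the `None` variant in `match opt { None => .. }` or a unit
            // struct, is lowered as a path pattern rather than a fresh
            // binding.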
Some(res) => hir::PatKind::Path(hir::QPath::Resolved(
None,
P(hir::Path {
span: ident.span,
res: self.lower_res(res),
segments: hir_vec![hir::PathSegment::from_ident(ident)],
}),
)),
}
}
fn pat_wild_with_node_id_of(&mut self, p: &Pat) -> P<hir::Pat> {
self.pat_with_node_id_of(p, hir::PatKind::Wild)
}
/// Construct a `Pat` with the `HirId` of `p.id` lowered.
fn pat_with_node_id_of(&mut self, p: &Pat, kind: hir::PatKind) -> P<hir::Pat> {
P(hir::Pat {
hir_id: self.lower_node_id(p.id),
kind,
span: p.span,
})
}
/// Emit a friendly error for extra `..` patterns in a tuple/tuple struct/slice pattern.
fn ban_extra_rest_pat(&self, sp: Span, prev_sp: Span, ctx: &str) {
self.diagnostic()
.struct_span_err(sp, &format!("`..` can only be used once per {} pattern", ctx))
.span_label(sp, &format!("can only be used once per {} pattern", ctx))
.span_label(prev_sp, "previously used here")
.emit();
}
/// Used to ban the `..` pattern in places it shouldn't be semantically.
fn ban_illegal_rest_pat(&self, sp: Span) -> hir::PatKind {
self.diagnostic()
.struct_span_err(sp, "`..` patterns are not allowed here")
.note("only allowed in tuple, tuple struct, and slice patterns")
.emit();
        // We're not in a list context, so `..` can reasonably be treated as
        // `_`: a wildcard is always valid there and roughly matches the
        // intent of `..` (the "rest" of a single slot is that slot itself).
hir::PatKind::Wild
}
fn lower_range_end(&mut self, e: &RangeEnd) -> hir::RangeEnd {
match *e {
RangeEnd::Included(_) => hir::RangeEnd::Included,
RangeEnd::Excluded => hir::RangeEnd::Excluded,
}
}
fn lower_anon_const(&mut self, c: &AnonConst) -> hir::AnonConst {
self.with_new_scopes(|this| {
hir::AnonConst {
hir_id: this.lower_node_id(c.id),
body: this.lower_const_body(&c.value),
}
})
}
fn lower_stmt(&mut self, s: &Stmt) -> SmallVec<[hir::Stmt; 1]> {
let kind = match s.kind {
StmtKind::Local(ref l) => {
let (l, item_ids) = self.lower_local(l);
let mut ids: SmallVec<[hir::Stmt; 1]> = item_ids
.into_iter()
.map(|item_id| {
let item_id = hir::ItemId { id: self.lower_node_id(item_id) };
self.stmt(s.span, hir::StmtKind::Item(item_id))
})
.collect();
ids.push({
hir::Stmt {
hir_id: self.lower_node_id(s.id),
kind: hir::StmtKind::Local(P(l)),
span: s.span,
}
});
return ids;
},
StmtKind::Item(ref it) => {
// Can only use the ID once.
let mut id = Some(s.id);
return self.lower_item_id(it)
.into_iter()
.map(|item_id| {
let hir_id = id.take()
.map(|id| self.lower_node_id(id))
.unwrap_or_else(|| self.next_id());
hir::Stmt {
hir_id,
kind: hir::StmtKind::Item(item_id),
span: s.span,
}
})
.collect();
}
StmtKind::Expr(ref e) => hir::StmtKind::Expr(P(self.lower_expr(e))),
StmtKind::Semi(ref e) => hir::StmtKind::Semi(P(self.lower_expr(e))),
StmtKind::Mac(..) => panic!("shouldn't exist here"),
};
smallvec![hir::Stmt {
hir_id: self.lower_node_id(s.id),
kind,
span: s.span,
}]
}
fn lower_block_check_mode(&mut self, b: &BlockCheckMode) -> hir::BlockCheckMode {
match *b {
BlockCheckMode::Default => hir::DefaultBlock,
BlockCheckMode::Unsafe(u) => hir::UnsafeBlock(self.lower_unsafe_source(u)),
}
}
fn lower_binding_mode(&mut self, b: &BindingMode) -> hir::BindingAnnotation {
match *b {
BindingMode::ByValue(Mutability::Immutable) => hir::BindingAnnotation::Unannotated,
BindingMode::ByRef(Mutability::Immutable) => hir::BindingAnnotation::Ref,
BindingMode::ByValue(Mutability::Mutable) => hir::BindingAnnotation::Mutable,
BindingMode::ByRef(Mutability::Mutable) => hir::BindingAnnotation::RefMut,
}
}
fn lower_unsafe_source(&mut self, u: UnsafeSource) -> hir::UnsafeSource {
match u {
CompilerGenerated => hir::CompilerGenerated,
UserProvided => hir::UserProvided,
}
}
fn lower_trait_bound_modifier(&mut self, f: TraitBoundModifier) -> hir::TraitBoundModifier {
match f {
TraitBoundModifier::None => hir::TraitBoundModifier::None,
TraitBoundModifier::Maybe => hir::TraitBoundModifier::Maybe,
}
}
// Helper methods for building HIR.
fn stmt(&mut self, span: Span, kind: hir::StmtKind) -> hir::Stmt {
hir::Stmt { span, kind, hir_id: self.next_id() }
}
fn stmt_expr(&mut self, span: Span, expr: hir::Expr) -> hir::Stmt {
self.stmt(span, hir::StmtKind::Expr(P(expr)))
}
fn stmt_let_pat(
&mut self,
attrs: ThinVec<Attribute>,
span: Span,
init: Option<P<hir::Expr>>,
pat: P<hir::Pat>,
source: hir::LocalSource,
) -> hir::Stmt {
let local = hir::Local {
attrs,
hir_id: self.next_id(),
init,
pat,
source,
span,
ty: None,
};
self.stmt(span, hir::StmtKind::Local(P(local)))
}
fn block_expr(&mut self, expr: P<hir::Expr>) -> hir::Block {
self.block_all(expr.span, hir::HirVec::new(), Some(expr))
}
fn block_all(
&mut self,
span: Span,
stmts: hir::HirVec<hir::Stmt>,
expr: Option<P<hir::Expr>>,
) -> hir::Block {
hir::Block {
stmts,
expr,
hir_id: self.next_id(),
rules: hir::DefaultBlock,
span,
targeted_by_break: false,
}
}
/// Constructs a `true` or `false` literal pattern.
fn pat_bool(&mut self, span: Span, val: bool) -> P<hir::Pat> {
let expr = self.expr_bool(span, val);
self.pat(span, hir::PatKind::Lit(P(expr)))
}
fn pat_ok(&mut self, span: Span, pat: P<hir::Pat>) -> P<hir::Pat> {
self.pat_std_enum(span, &[sym::result, sym::Result, sym::Ok], hir_vec![pat])
}
fn pat_err(&mut self, span: Span, pat: P<hir::Pat>) -> P<hir::Pat> {
self.pat_std_enum(span, &[sym::result, sym::Result, sym::Err], hir_vec![pat])
}
fn pat_some(&mut self, span: Span, pat: P<hir::Pat>) -> P<hir::Pat> {
self.pat_std_enum(span, &[sym::option, sym::Option, sym::Some], hir_vec![pat])
}
fn pat_none(&mut self, span: Span) -> P<hir::Pat> {
self.pat_std_enum(span, &[sym::option, sym::Option, sym::None], hir_vec![])
}
fn pat_std_enum(
&mut self,
span: Span,
components: &[Symbol],
subpats: hir::HirVec<P<hir::Pat>>,
) -> P<hir::Pat> {
let path = self.std_path(span, components, None, true);
let qpath = hir::QPath::Resolved(None, P(path));
let pt = if subpats.is_empty() {
hir::PatKind::Path(qpath)
} else {
hir::PatKind::TupleStruct(qpath, subpats, None)
};
self.pat(span, pt)
}
fn pat_ident(&mut self, span: Span, ident: Ident) -> (P<hir::Pat>, hir::HirId) {
self.pat_ident_binding_mode(span, ident, hir::BindingAnnotation::Unannotated)
}
fn pat_ident_binding_mode(
&mut self,
span: Span,
ident: Ident,
bm: hir::BindingAnnotation,
) -> (P<hir::Pat>, hir::HirId) {
let hir_id = self.next_id();
(
P(hir::Pat {
hir_id,
kind: hir::PatKind::Binding(bm, hir_id, ident.with_span_pos(span), None),
span,
}),
hir_id
)
}
fn pat_wild(&mut self, span: Span) -> P<hir::Pat> {
self.pat(span, hir::PatKind::Wild)
}
fn pat(&mut self, span: Span, kind: hir::PatKind) -> P<hir::Pat> {
P(hir::Pat {
hir_id: self.next_id(),
kind,
span,
})
}
    /// Given a suffix `["b", "c", "d"]`, returns path `::std::b::c::d` when
    /// the crate links against `std`, and `::core::b::c::d` otherwise.
/// The path is also resolved according to `is_value`.
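    /// For example, `&[sym::option, sym::Option, sym::Some]` with
    /// `is_value = true` resolves to the value path
    /// `::std::option::Option::Some` (or its `::core` equivalent), as used
    /// by `pat_some` above.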
fn std_path(
&mut self,
span: Span,
components: &[Symbol],
params: Option<P<hir::GenericArgs>>,
is_value: bool,
) -> hir::Path {
let ns = if is_value { Namespace::ValueNS } else { Namespace::TypeNS };
let (path, res) = self.resolver.resolve_str_path(span, self.crate_root, components, ns);
let mut segments: Vec<_> = path.segments.iter().map(|segment| {
let res = self.expect_full_res(segment.id);
hir::PathSegment {
ident: segment.ident,
hir_id: Some(self.lower_node_id(segment.id)),
res: Some(self.lower_res(res)),
infer_args: true,
args: None,
}
}).collect();
segments.last_mut().unwrap().args = params;
hir::Path {
span,
res: res.map_id(|_| panic!("unexpected `NodeId`")),
segments: segments.into(),
}
}
fn ty_path(&mut self, mut hir_id: hir::HirId, span: Span, qpath: hir::QPath) -> hir::Ty {
let kind = match qpath {
hir::QPath::Resolved(None, path) => {
// Turn trait object paths into `TyKind::TraitObject` instead.
match path.res {
Res::Def(DefKind::Trait, _) | Res::Def(DefKind::TraitAlias, _) => {
let principal = hir::PolyTraitRef {
bound_generic_params: hir::HirVec::new(),
trait_ref: hir::TraitRef {
path,
hir_ref_id: hir_id,
},
span,
};
// The original ID is taken by the `PolyTraitRef`,
// so the `Ty` itself needs a different one.
hir_id = self.next_id();
hir::TyKind::TraitObject(hir_vec![principal], self.elided_dyn_bound(span))
}
_ => hir::TyKind::Path(hir::QPath::Resolved(None, path)),
}
}
_ => hir::TyKind::Path(qpath),
};
hir::Ty {
hir_id,
kind,
span,
}
}
/// Invoked to create the lifetime argument for a type `&T`
/// with no explicit lifetime.
fn elided_ref_lifetime(&mut self, span: Span) -> hir::Lifetime {
match self.anonymous_lifetime_mode {
// Intercept when we are in an impl header or async fn and introduce an in-band
// lifetime.
// Hence `impl Foo for &u32` becomes `impl<'f> Foo for &'f u32` for some fresh
// `'f`.
AnonymousLifetimeMode::CreateParameter => {
let fresh_name = self.collect_fresh_in_band_lifetime(span);
hir::Lifetime {
hir_id: self.next_id(),
span,
name: hir::LifetimeName::Param(fresh_name),
}
}
AnonymousLifetimeMode::ReportError => self.new_error_lifetime(None, span),
AnonymousLifetimeMode::PassThrough => self.new_implicit_lifetime(span),
}
}
/// Report an error on illegal use of `'_` or a `&T` with no explicit lifetime;
/// return a "error lifetime".
fn new_error_lifetime(&mut self, id: Option<NodeId>, span: Span) -> hir::Lifetime {
let (id, msg, label) = match id {
Some(id) => (id, "`'_` cannot be used here", "`'_` is a reserved lifetime name"),
None => (
self.sess.next_node_id(),
"`&` without an explicit lifetime name cannot be used here",
"explicit lifetime name needed here",
),
};
let mut err = struct_span_err!(
self.sess,
span,
E0637,
"{}",
msg,
);
err.span_label(span, label);
err.emit();
self.new_named_lifetime(id, span, hir::LifetimeName::Error)
}
/// Invoked to create the lifetime argument(s) for a path like
/// `std::cell::Ref<T>`; note that implicit lifetimes in these
/// sorts of cases are deprecated. This may therefore report a warning or an
/// error, depending on the mode.
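    /// For example, `Ref<u32>` with one expected lifetime receives a single
    /// elided lifetime argument, roughly as if `Ref<'_, u32>` had been
    /// written; how that lifetime is materialized depends on
    /// `anonymous_lifetime_mode` (see `elided_path_lifetime` below).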
fn elided_path_lifetimes(&mut self, span: Span, count: usize) -> P<[hir::Lifetime]> {
(0..count)
.map(|_| self.elided_path_lifetime(span))
.collect()
}
fn elided_path_lifetime(&mut self, span: Span) -> hir::Lifetime {
match self.anonymous_lifetime_mode {
AnonymousLifetimeMode::CreateParameter => {
// We should have emitted E0726 when processing this path above
self.sess.delay_span_bug(
span,
"expected 'implicit elided lifetime not allowed' error",
);
let id = self.sess.next_node_id();
self.new_named_lifetime(id, span, hir::LifetimeName::Error)
}
// `PassThrough` is the normal case.
// `new_error_lifetime`, which would usually be used in the case of `ReportError`,
// is unsuitable here, as these can occur from missing lifetime parameters in a
// `PathSegment`, for which there is no associated `'_` or `&T` with no explicit
// lifetime. Instead, we simply create an implicit lifetime, which will be checked
// later, at which point a suitable error will be emitted.
| AnonymousLifetimeMode::PassThrough
| AnonymousLifetimeMode::ReportError => self.new_implicit_lifetime(span),
}
}
/// Invoked to create the lifetime argument(s) for an elided trait object
/// bound, like the bound in `Box<dyn Debug>`. This method is not invoked
/// when the bound is written, even if it is written with `'_` like in
/// `Box<dyn Debug + '_>`. In those cases, `lower_lifetime` is invoked.
fn elided_dyn_bound(&mut self, span: Span) -> hir::Lifetime {
match self.anonymous_lifetime_mode {
// NB. We intentionally ignore the create-parameter mode here.
// and instead "pass through" to resolve-lifetimes, which will apply
// the object-lifetime-defaulting rules. Elided object lifetime defaults
// do not act like other elided lifetimes. In other words, given this:
//
// impl Foo for Box<dyn Debug>
//
// we do not introduce a fresh `'_` to serve as the bound, but instead
// ultimately translate to the equivalent of:
//
// impl Foo for Box<dyn Debug + 'static>
//
// `resolve_lifetime` has the code to make that happen.
AnonymousLifetimeMode::CreateParameter => {}
AnonymousLifetimeMode::ReportError => {
// ReportError applies to explicit use of `'_`.
}
// This is the normal case.
AnonymousLifetimeMode::PassThrough => {}
}
let r = hir::Lifetime {
hir_id: self.next_id(),
span,
name: hir::LifetimeName::ImplicitObjectLifetimeDefault,
};
debug!("elided_dyn_bound: r={:?}", r);
r
}
fn new_implicit_lifetime(&mut self, span: Span) -> hir::Lifetime {
hir::Lifetime {
hir_id: self.next_id(),
span,
name: hir::LifetimeName::Implicit,
}
}
fn maybe_lint_bare_trait(&self, span: Span, id: NodeId, is_global: bool) {
// FIXME(davidtwco): This is a hack to detect macros which produce spans of the
// call site which do not have a macro backtrace. See #61963.
let is_macro_callsite = self.sess.source_map()
.span_to_snippet(span)
.map(|snippet| snippet.starts_with("#["))
.unwrap_or(true);
if !is_macro_callsite {
self.sess.buffer_lint_with_diagnostic(
builtin::BARE_TRAIT_OBJECTS,
id,
span,
"trait objects without an explicit `dyn` are deprecated",
builtin::BuiltinLintDiagnostics::BareTraitObject(span, is_global),
)
}
}
}
fn body_ids(bodies: &BTreeMap<hir::BodyId, hir::Body>) -> Vec<hir::BodyId> {
// Sorting by span ensures that we get things in order within a
// file, and also puts the files in a sensible order.
let mut body_ids: Vec<_> = bodies.keys().cloned().collect();
body_ids.sort_by_key(|b| bodies[b].value.span);
body_ids
}
/// Checks if the specified expression is a built-in range literal.
/// (See: `LoweringContext::lower_expr()`).
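/// For example, `0..5`, `x..=y`, and `..` are built-in range literals, while
/// the explicit forms `Range { start: 0, end: 5 }` and
/// `RangeInclusive::new(x, y)` are not.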
pub fn is_range_literal(sess: &Session, expr: &hir::Expr) -> bool {
use hir::{Path, QPath, ExprKind, TyKind};
// Returns whether the given path represents a (desugared) range,
// either in std or core, i.e. has either a `::std::ops::Range` or
// `::core::ops::Range` prefix.
fn is_range_path(path: &Path) -> bool {
let segs: Vec<_> = path.segments.iter().map(|seg| seg.ident.as_str().to_string()).collect();
let segs: Vec<_> = segs.iter().map(|seg| &**seg).collect();
// "{{root}}" is the equivalent of `::` prefix in `Path`.
if let ["{{root}}", std_core, "ops", range] = segs.as_slice() {
(*std_core == "std" || *std_core == "core") && range.starts_with("Range")
} else {
false
}
    }
// Check whether a span corresponding to a range expression is a
// range literal, rather than an explicit struct or `new()` call.
fn is_lit(sess: &Session, span: &Span) -> bool {
let source_map = sess.source_map();
let end_point = source_map.end_point(*span);
if let Ok(end_string) = source_map.span_to_snippet(end_point) {
!(end_string.ends_with("}") || end_string.ends_with(")"))
} else {
false
}
    }
match expr.kind {
// All built-in range literals but `..=` and `..` desugar to `Struct`s.
ExprKind::Struct(ref qpath, _, _) => {
if let QPath::Resolved(None, ref path) = **qpath {
return is_range_path(&path) && is_lit(sess, &expr.span);
}
}
// `..` desugars to its struct path.
ExprKind::Path(QPath::Resolved(None, ref path)) => {
return is_range_path(&path) && is_lit(sess, &expr.span);
}
// `..=` desugars into `::std::ops::RangeInclusive::new(...)`.
ExprKind::Call(ref func, _) => {
if let ExprKind::Path(QPath::TypeRelative(ref ty, ref segment)) = func.kind {
if let TyKind::Path(QPath::Resolved(None, ref path)) = ty.kind {
let new_call = segment.ident.as_str() == "new";
return is_range_path(&path) && is_lit(sess, &expr.span) && new_call;
}
}
}
_ => {}
}
false
} | //
// Note: this must be done after lowering the output type, |
WildcardTransition.js | /*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __param = (this && this.__param) || function (paramIndex, decorator) {
return function (target, key) { decorator(target, key, paramIndex); }
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.WildcardTransition = void 0;
const Decorators_1 = require("../Decorators");
const Transition_1 = require("./Transition");
let WildcardTransition = class WildcardTransition extends Transition_1.Transition {
constructor(target) {
super(target);
}
get serializationType() {
return 9 /* WILDCARD */;
}
matches(symbol, minVocabSymbol, maxVocabSymbol) {
return symbol >= minVocabSymbol && symbol <= maxVocabSymbol;
}
toString() {
return ".";
}
};
__decorate([
Decorators_1.Override
], WildcardTransition.prototype, "serializationType", null);
__decorate([
Decorators_1.Override
], WildcardTransition.prototype, "matches", null);
__decorate([
Decorators_1.Override,
Decorators_1.NotNull
], WildcardTransition.prototype, "toString", null);
WildcardTransition = __decorate([
__param(0, Decorators_1.NotNull)
], WildcardTransition);
exports.WildcardTransition = WildcardTransition;
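// Illustrative usage (the target state here is hypothetical):
//
//   const t = new WildcardTransition(someTargetState);
//   t.matches(0x41, 0, 0xFFFF); // true: `.` matches any in-range symbol
//   t.toString();               // "."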
//# sourceMappingURL=WildcardTransition.js.map | "use strict"; |