file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k)
---|---|---|---|
volume_resize_map.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"encoding/json"
"fmt"
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
commontypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/controller/volume/expand/util"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume/util/types"
)
// VolumeResizeMap defines an interface that serves as a cache for holding pending resizing requests
type VolumeResizeMap interface {
// AddPVCUpdate adds pvc for resizing
AddPVCUpdate(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume)
// DeletePVC deletes pvc that is scheduled for resizing
DeletePVC(pvc *v1.PersistentVolumeClaim)
// GetPVCsWithResizeRequest returns all pending pvc resize requests
GetPVCsWithResizeRequest() []*PVCWithResizeRequest
// MarkAsResized marks a pvc as fully resized
MarkAsResized(*PVCWithResizeRequest, resource.Quantity) error
// UpdatePVSize updates just pv size after cloudprovider resizing is successful
UpdatePVSize(*PVCWithResizeRequest, resource.Quantity) error
}
type volumeResizeMap struct {
// map of unique pvc name and resize requests that are pending or inflight
pvcrs map[types.UniquePVCName]*PVCWithResizeRequest
// kube client for making API calls
kubeClient clientset.Interface
// for guarding access to pvcrs map
sync.RWMutex
}
// PVCWithResizeRequest struct defines data structure that stores state needed for
// performing file system resize
type PVCWithResizeRequest struct {
// PVC that needs to be resized
PVC *v1.PersistentVolumeClaim
// persistentvolume
PersistentVolume *v1.PersistentVolume
// Current volume size
CurrentSize resource.Quantity
// Expected volume size
ExpectedSize resource.Quantity
}
// UniquePVCKey returns unique key of the PVC based on its UID
func (pvcr *PVCWithResizeRequest) UniquePVCKey() types.UniquePVCName {
return types.UniquePVCName(pvcr.PVC.UID)
}
// QualifiedName returns namespace and name combination of the PVC
func (pvcr *PVCWithResizeRequest) QualifiedName() string {
return strings.JoinQualifiedName(pvcr.PVC.Namespace, pvcr.PVC.Name)
}
// NewVolumeResizeMap returns new VolumeResizeMap which acts as a cache
// for holding pending resize requests.
func NewVolumeResizeMap(kubeClient clientset.Interface) VolumeResizeMap {
resizeMap := &volumeResizeMap{}
resizeMap.pvcrs = make(map[types.UniquePVCName]*PVCWithResizeRequest)
resizeMap.kubeClient = kubeClient
return resizeMap
}
// AddPVCUpdate adds pvc for resizing
// This function intentionally allows addition of PVCs for which pv.Spec.Size >= pvc.Spec.Size.
// The reason is that the lack of transactions in k8s means that after a successful resize we can't guarantee
// that the PVC update will succeed once the PV has been updated, and after a resize we always update the PV first.
// If for some reason we weren't able to update the PVC after a successful resize, then we are going to reprocess
// the PVC and, hopefully, after a no-op resize in the volume plugin, the PVC will be updated with the right values as well.
func (resizeMap *volumeResizeMap) AddPVCUpdate(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
if pv.Spec.ClaimRef == nil || pvc.Namespace != pv.Spec.ClaimRef.Namespace || pvc.Name != pv.Spec.ClaimRef.Name {
glog.V(4).Infof("Persistent Volume is not bound to PVC being updated : %s", util.ClaimToClaimKey(pvc))
return
}
if pvc.Status.Phase != v1.ClaimBound {
return
}
resizeMap.Lock()
defer resizeMap.Unlock()
pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
pvcStatusSize := pvc.Status.Capacity[v1.ResourceStorage]
if pvcStatusSize.Cmp(pvcSize) >= 0 {
return
}
glog.V(4).Infof("Adding pvc %s with Size %s/%s for resizing", util.ClaimToClaimKey(pvc), pvcSize.String(), pvcStatusSize.String())
pvcRequest := &PVCWithResizeRequest{
PVC: pvc,
CurrentSize: pvcStatusSize,
ExpectedSize: pvcSize,
PersistentVolume: pv,
}
resizeMap.pvcrs[types.UniquePVCName(pvc.UID)] = pvcRequest
}
// GetPVCsWithResizeRequest returns all pending pvc resize requests
func (resizeMap *volumeResizeMap) GetPVCsWithResizeRequest() []*PVCWithResizeRequest {
resizeMap.Lock()
defer resizeMap.Unlock()
pvcrs := []*PVCWithResizeRequest{}
for _, pvcr := range resizeMap.pvcrs {
pvcrs = append(pvcrs, pvcr)
}
// Empty out the pvcrs map; we will add back failed resize requests later
resizeMap.pvcrs = map[types.UniquePVCName]*PVCWithResizeRequest{}
return pvcrs
}
// DeletePVC removes the given pvc object from the list of pvcs that need resizing.
// Deleting a pvc from this map doesn't affect operations that are already inflight.
func (resizeMap *volumeResizeMap) DeletePVC(pvc *v1.PersistentVolumeClaim) {
resizeMap.Lock()
defer resizeMap.Unlock()
pvcUniqueName := types.UniquePVCName(pvc.UID)
glog.V(5).Infof("Removing PVC %v from resize map", pvcUniqueName)
delete(resizeMap.pvcrs, pvcUniqueName)
}
// MarkAsResized marks a pvc as fully resized
func (resizeMap *volumeResizeMap) MarkAsResized(pvcr *PVCWithResizeRequest, newSize resource.Quantity) error {
resizeMap.Lock()
defer resizeMap.Unlock()
emptyCondition := []v1.PersistentVolumeClaimCondition{}
err := resizeMap.updatePVCCapacityAndConditions(pvcr, newSize, emptyCondition)
if err != nil {
glog.V(4).Infof("Error updating PV spec capacity for volume %q with : %v", pvcr.QualifiedName(), err)
return err
}
return nil
}
// UpdatePVSize updates just pv size after cloudprovider resizing is successful
func (resizeMap *volumeResizeMap) UpdatePVSize(pvcr *PVCWithResizeRequest, newSize resource.Quantity) error {
resizeMap.Lock()
defer resizeMap.Unlock()
oldPv := pvcr.PersistentVolume
pvClone := oldPv.DeepCopy()
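// Build a two-way strategic-merge patch that contains only the capacity change, so the PATCH does not clobber concurrent updates to the PV.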
oldData, err := json.Marshal(pvClone)
if err != nil {
return fmt.Errorf("Unexpected error marshaling PV : %q with error %v", pvClone.Name, err)
}
pvClone.Spec.Capacity[v1.ResourceStorage] = newSize
newData, err := json.Marshal(pvClone)
if err != nil {
return fmt.Errorf("Unexpected error marshaling PV : %q with error %v", pvClone.Name, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, pvClone)
if err != nil {
return fmt.Errorf("Error Creating two way merge patch for PV : %q with error %v", pvClone.Name, err)
}
_, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumes().Patch(pvClone.Name, commontypes.StrategicMergePatchType, patchBytes)
if updateErr != nil {
glog.V(4).Infof("Error updating pv %q with error : %v", pvClone.Name, updateErr)
return updateErr
}
return nil
}
| claimClone := pvcr.PVC.DeepCopy()
claimClone.Status.Capacity[v1.ResourceStorage] = newSize
claimClone.Status.Conditions = pvcConditions
_, updateErr := resizeMap.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(claimClone)
if updateErr != nil {
glog.V(4).Infof("updating PersistentVolumeClaim[%s] status: failed: %v", pvcr.QualifiedName(), updateErr)
return updateErr
}
return nil
} |
func (resizeMap *volumeResizeMap) updatePVCCapacityAndConditions(pvcr *PVCWithResizeRequest, newSize resource.Quantity, pvcConditions []v1.PersistentVolumeClaimCondition) error {
|
C15425929_Undo_Redo.py | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# Test case ID : C15425929
# Test Case Title : Verify that undo - redo operations do not create any error
# fmt: off
class Tests:
entity_found = ("Entity was initially found", "Entity COULD NOT be found initially")
entity_deleted = ("Entity was deleted", "Entity WAS NOT deleted")
deletion_undone = ("Undo worked", "Undo DID NOT work")
deletion_redone = ("Redo worked", "Redo DID NOT work")
no_error_occurred = ("Undo/redo completed without errors", "An error occurred during undo/redo")
# fmt: on
def C15425929_Undo_Redo():
"""
Summary:
Tests that no error messages arise when using the undo and redo functions in the editor.
Level Description:
DeleteMe - an entity that just exists above the terrain with a sphere shape component on it.
Steps:
1) Load level
2) Initially find the entity
3) Delete the entity
4) Undo the deletion
5) Redo the deletion
6) Look for errors | Expected Behavior:
The entity should be deleted, un-deleted, and re-deleted.
:return: None
"""
import os
import sys
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import TestHelper as helper
from editor_python_test_tools.utils import Tracer
import azlmbr.legacy.general as general
helper.init_idle()
# 1) Load the level
helper.open_level("Physics", "C15425929_Undo_Redo")
with Tracer() as error_tracer:
# Entity to delete and undo and re-delete
entity_name = "DeleteMe"
# 2) Find entity initially
entity_id = general.find_editor_entity(entity_name)
Report.critical_result(Tests.entity_found, entity_id.IsValid())
# 3) Delete entity
general.select_objects([entity_name])
general.delete_selected()
entity_id = general.find_editor_entity(entity_name)
Report.result(Tests.entity_deleted, not entity_id.IsValid())
# 4) Undo deletion
general.undo()
entity_id = general.find_editor_entity(entity_name)
Report.result(Tests.deletion_undone, entity_id.IsValid())
# 5) Redo deletion
general.redo()
entity_id = general.find_editor_entity(entity_name)
Report.result(Tests.deletion_redone, not entity_id.IsValid())
# 6) Look for errors
helper.wait_for_condition(lambda: error_tracer.has_errors, 1.0)
Report.result(Tests.no_error_occurred, not error_tracer.has_errors)
if __name__ == "__main__":
from editor_python_test_tools.utils import Report
Report.start_test(C15425929_Undo_Redo) | 7) Close the editor
|
lucky-tickets.go | package hole
import (
"math/rand"
"strconv"
)
type ticket struct {
digits, base int
result int64
}
var data = [...]ticket{
{8, 2, 70},
{14, 5, 454805755},
{6, 6, 4332},
{12, 7, 786588243},
{4, 8, 344},
{4, 9, 489},
{8, 9, 2306025},
{2, 10, 10},
{4, 10, 670},
{6, 10, 55252},
{8, 10, 4816030},
{12, 9, 12434998005},
{12, 10, 39581170420},
{12, 11, 112835748609},
{6, 13, 204763},
{4, 15, 2255},
{6, 15, 418503},
{8, 15, 82073295},
{10, 15, 16581420835},
}
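// iPow computes a**b for b >= 0 by binary (square-and-multiply) exponentiation,
// using only O(log b) multiplications.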
func iPow(a, b int64) int64 {
result := int64(1)
for b != 0 {
if b&1 != 0 {
result *= a
}
b >>= 1
a *= a
}
return result
}
func sumDigits(number, base int64) (result int64) {
for number > 0 {
result += number % base
number /= base
}
return result
}
func luckyTickets() ([]string, string) {
var tickets [20]ticket
// Add 4 distinct, randomly chosen cases from the fixed data set.
for i, j := range rand.Perm(len(data))[:4] {
tickets[i] = data[j]
}
// Always add the (digits, base) cases (14, 12) and (14, 7).
tickets[4] = ticket{14, 12, 39222848622984}
tickets[5] = ticket{14, 7, 35751527189}
// Randomly generate additional test cases.
for i := 6; i < 20; i++ {
digits := 2 + 2*rand.Intn(5)
base := 2 + rand.Intn(15)
halfValue := iPow(int64(base), int64(digits/2))
maxSum := (base - 1) * digits / 2
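// A ticket is "lucky" when the digit sums of its two halves are equal: count how many
// half-tickets produce each possible digit sum, then sum the squares of those counts.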
counts := make([]int64, maxSum+1)
for j := int64(0); j < halfValue; j++ {
counts[sumDigits(j, int64(base))]++
}
var result int64
for _, count := range counts {
result += count * count
}
tickets[i] = ticket{digits, base, result}
}
tests := make([]test, len(tickets))
| strconv.Itoa(item.digits) + " " + strconv.Itoa(item.base),
strconv.FormatInt(item.result, 10),
}
}
return outputTests(shuffle(tests))
} | for i, item := range tickets {
tests[i] = test{ |
seqfetcher.py | # -*- coding: utf-8 -*-
"""provides sequencing fetching from NCBI and Ensembl
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import re
import bioutils.seqfetcher
from ..exceptions import HGVSDataNotAvailableError
_logger = logging.getLogger(__name__)
class SeqFetcher(object):
"""This class is intended primarily as a mixin for HGVS data providers
that don't otherwise have access to sequence data. It uses the
fetch_seq() function in this module to fetch sequences from
several sources; see that function for details.
>> sf = SeqFetcher()
>> sf.fetch_seq('NP_056374.2',0,10)
'MESRETLSSS'
"""
def __init__(self):
# If HGVS_SEQREPO_DIR is defined, we use seqrepo for *all* sequences
# Otherwise, we fall back to remote sequence fetching
seqrepo_dir = os.environ.get("HGVS_SEQREPO_DIR")
if seqrepo_dir:
from biocommons.seqrepo import SeqRepo
sr = SeqRepo(seqrepo_dir, check_same_thread=False)
def _fetch_seq_seqrepo(ac, start_i=None, end_i=None):
return sr.fetch(ac, start_i, end_i)
self.fetcher = _fetch_seq_seqrepo
self.source = "SeqRepo ({})".format(seqrepo_dir)
else:
quit("""
V.V. usage can be quite heavy; Variant Validator's "test_configuration.py" asserts that
we should at least explicitly choose the location. Therefore, for vvhgvs, the silent
public fallback is disabled; explicitly set an external seqrepo location if remote data is needed.
""")
self.fetcher = bioutils.seqfetcher.fetch_seq
self.source = "bioutils.seqfetcher"
_logger.info("Fetching sequences with " + self.source)
def fetch_seq(self, ac, start_i=None, end_i=None):
|
# <LICENSE>
# Copyright 2018 HGVS Contributors (https://github.com/biocommons/hgvs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
| try:
return self.fetcher(ac, start_i, end_i)
except Exception as ex:
raise HGVSDataNotAvailableError("Failed to fetch {ac} from {self.source} ({ex})".format(
ac=ac, ex=ex, self=self)) |
analyzer_executor.py | from __future__ import annotations
import base64
import hashlib
import inspect
import json
import os
import sys
import traceback
from collections import defaultdict
from logging import Logger
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
cast,
)
import boto3
import grapl_analyzerlib.counters # noqa: F401
from analyzer_executor_lib.redis_cache import EitherCache, construct_redis_client
from grapl_analyzerlib.analyzer import Analyzer
from grapl_analyzerlib.execution import ExecutionComplete, ExecutionFailed, ExecutionHit
from grapl_analyzerlib.grapl_client import GraphClient
from grapl_analyzerlib.nodes.base import BaseView
from grapl_analyzerlib.plugin_retriever import load_plugins
from grapl_analyzerlib.queryable import Queryable
from grapl_analyzerlib.subgraph_view import SubgraphView
from grapl_common.env_helpers import S3ResourceFactory
from grapl_common.grapl_logger import get_module_grapl_logger
from grapl_common.metrics.metric_reporter import MetricReporter, TagPair
from grapl_common.sqs.sqs_types import S3PutRecordDict, SQSMessageBody
from python_proto.pipeline import Metadata, OldEnvelope
if TYPE_CHECKING:
from mypy_boto3_s3 import S3ServiceResource
# Set up logger (this is for the whole file, including static methods)
LOGGER = get_module_grapl_logger()
# Set up plugins dir for models
MODEL_PLUGINS_DIR = os.getenv("MODEL_PLUGINS_DIR", "/tmp")
sys.path.insert(0, MODEL_PLUGINS_DIR)
# Ensure plugins dir exists
try:
directory = Path(MODEL_PLUGINS_DIR + "/model_plugins/")
directory.mkdir(parents=True, exist_ok=True)
except Exception as e:
LOGGER.error("Failed to create model plugins directory", e)
def verbose_cast_to_int(input: Optional[str]) -> Optional[int]:
if not input:
return None
try:
return int(input)
except (TypeError, ValueError):
raise ValueError(f"Couldn't cast this env variable into an int: {input}")
class AnalyzerExecutor:
# constants
CHUNK_SIZE_RETRY: int = 10
CHUNK_SIZE_DEFAULT: int = 100
def __init__(
self,
model_plugins_bucket: str,
analyzers_bucket: str,
analyzer_matched_subgraphs_bucket: str,
message_cache: EitherCache,
hit_cache: EitherCache,
chunk_size: int,
logger: Logger,
metric_reporter: MetricReporter,
) -> None:
self.model_plugins_bucket = model_plugins_bucket
self.analyzers_bucket = analyzers_bucket
self.analyzer_matched_subgraphs_bucket = analyzer_matched_subgraphs_bucket
self.message_cache = message_cache
self.hit_cache = hit_cache
self.chunk_size = chunk_size
self.logger = logger
self.metric_reporter = metric_reporter
@classmethod
def from_env(cls, env: Optional[Mapping[str, str]] = None) -> AnalyzerExecutor:
env = env or os.environ
# If we're retrying, change the chunk size
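# Note: the raw env string is passed to bool(), so any non-empty value (even "false") enables retry mode.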
is_retry = bool(env.get("IS_RETRY", False))
if is_retry:
chunk_size = cls.CHUNK_SIZE_RETRY
else:
chunk_size = cls.CHUNK_SIZE_DEFAULT
# Set up message cache
messagecache_addr = env.get("MESSAGECACHE_ADDR")
messagecache_port: Optional[int] = verbose_cast_to_int(
env.get("MESSAGECACHE_PORT")
)
message_cache = construct_redis_client(messagecache_addr, messagecache_port)
# Set up hit cache
hitcache_addr = env.get("HITCACHE_ADDR")
hitcache_port: Optional[int] = verbose_cast_to_int(env.get("HITCACHE_PORT"))
hit_cache = construct_redis_client(hitcache_addr, hitcache_port)
metric_reporter = MetricReporter.create("analyzer-executor")
model_plugins_bucket = env["GRAPL_MODEL_PLUGINS_BUCKET"]
analyzers_bucket = env["GRAPL_ANALYZERS_BUCKET"]
analyzer_matched_subgraphs_bucket = env[
"GRAPL_ANALYZER_MATCHED_SUBGRAPHS_BUCKET"
]
return AnalyzerExecutor(
model_plugins_bucket=model_plugins_bucket,
analyzers_bucket=analyzers_bucket,
analyzer_matched_subgraphs_bucket=analyzer_matched_subgraphs_bucket,
message_cache=message_cache,
hit_cache=hit_cache,
chunk_size=chunk_size,
logger=LOGGER,
metric_reporter=metric_reporter,
)
def check_caches(
self, file_hash: str, msg_id: str, node_key: str, analyzer_name: str
) -> bool:
with self.metric_reporter.histogram_ctx("analyzer-executor.check_caches"):
if self.check_msg_cache(file_hash, node_key, msg_id):
self.logger.debug("cache hit - already processed")
return True
if self.check_hit_cache(analyzer_name, node_key):
self.logger.debug("cache hit - already matched")
return True
return False
def to_event_hash(self, components: Iterable[str]) -> str:
|
def check_msg_cache(self, file: str, node_key: str, msg_id: str) -> bool:
event_hash = self.to_event_hash((file, node_key, msg_id))
return bool(self.message_cache.get(event_hash))
def update_msg_cache(self, file: str, node_key: str, msg_id: str) -> None:
event_hash = self.to_event_hash((file, node_key, msg_id))
self.message_cache.set(event_hash, "1")
def check_hit_cache(self, file: str, node_key: str) -> bool:
event_hash = self.to_event_hash((file, node_key))
return bool(self.hit_cache.get(event_hash))
def update_hit_cache(self, file: str, node_key: str) -> None:
event_hash = self.to_event_hash((file, node_key))
self.hit_cache.set(event_hash, "1")
async def handle_events(self, events: SQSMessageBody, context: Any) -> None:
# Parse sns message
self.logger.debug(f"handling events: {events} context: {context}")
client = GraphClient()
s3 = S3ResourceFactory(boto3).from_env()
load_plugins(
self.model_plugins_bucket,
s3.meta.client,
os.path.abspath(MODEL_PLUGINS_DIR),
)
for event in events["Records"]:
data = parse_s3_event(s3, event)
# FIXME: this code assumes inner_message is json
envelope = OldEnvelope.deserialize(data)
message = json.loads(envelope.inner_message)
LOGGER.info(f'Executing Analyzer: {message["key"]}')
with self.metric_reporter.histogram_ctx(
"analyzer-executor.download_s3_file"
):
analyzer = download_s3_file(
s3,
self.analyzers_bucket,
message["key"],
).decode("utf8")
analyzer_name = message["key"].split("/")[-2]
subgraph = SubgraphView.from_proto(client, bytes(message["subgraph"]))
# TODO: Validate signature of S3 file
LOGGER.info(f"event {event} {envelope.metadata}")
rx: Connection
tx: Connection
rx, tx = Pipe(duplex=False)
p = Process(
target=self.execute_file,
args=(analyzer_name, analyzer, subgraph, tx, "", self.chunk_size),
)
p.start()
for exec_hit in self.poll_process(rx=rx, analyzer_name=analyzer_name):
with self.metric_reporter.histogram_ctx(
"analyzer-executor.emit_event.ms",
(TagPair("analyzer_name", exec_hit.analyzer_name),),
):
emit_event(
self.analyzer_matched_subgraphs_bucket,
s3,
exec_hit,
envelope.metadata,
)
self.update_msg_cache(analyzer, exec_hit.root_node_key, message["key"])
self.update_hit_cache(analyzer_name, exec_hit.root_node_key)
p.join()
def poll_process(
self,
rx: Connection,
analyzer_name: str,
) -> Iterator[ExecutionHit]:
"""
Keep polling the spawned Process, and yield any ExecutionHits.
(This will probably disappear if Analyzers move to Docker images.)
"""
t = 0
while True:
p_res = rx.poll(timeout=5)
if not p_res:
t += 1
LOGGER.info(
f"Analyzer {analyzer_name} polled for for {t * 5} seconds without result"
)
continue
result: Optional[Any] = rx.recv()
if isinstance(result, ExecutionComplete):
self.logger.info(f"Analyzer {analyzer_name} execution complete")
return
# emit any hits to an S3 bucket
if isinstance(result, ExecutionHit):
self.logger.info(
f"Analyzer {analyzer_name} emitting event for:"
f"{result.analyzer_name} {result.root_node_key}"
)
yield result
assert not isinstance(
result, ExecutionFailed
), f"Analyzer {analyzer_name} failed."
def exec_analyzers(
self,
dg_client: GraphClient,
file: str,
msg_id: str,
nodes: List[BaseView],
analyzers: Dict[str, Analyzer],
sender: Any,
) -> None:
if not analyzers:
self.logger.warning("Received empty dict of analyzers")
return
if not nodes:
self.logger.warning("Received empty array of nodes")
for node in nodes:
querymap: Dict[str, List[Queryable]] = defaultdict(list)
for an_name, analyzer in analyzers.items():
if self.check_caches(file, msg_id, node.node_key, an_name):
continue
queries = analyzer.get_queries()
if isinstance(queries, list) or isinstance(queries, tuple):
querymap[an_name].extend(queries)
else:
querymap[an_name].append(queries)
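# Run every collected query against this node; a non-empty result counts as a hit and is handed to the analyzer's on_response().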
for an_name, queries in querymap.items():
analyzer = analyzers[an_name]
for query in queries:
# TODO: Whether it was a hit or not is a good Tag
tags = (TagPair("analyzer_name", an_name),)
with self.metric_reporter.histogram_ctx(
"analyzer-executor.query_first.ms", tags
):
response = query.query_first(
dg_client, contains_node_key=node.node_key
)
if response:
self.logger.debug(
f"Analyzer '{an_name}' received a hit, executing on_response()"
)
with self.metric_reporter.histogram_ctx(
"analyzer-executor.on_response.ms", tags
):
analyzer.on_response(response, sender)
def execute_file(
self,
name: str,
file: str,
graph: SubgraphView,
sender: Connection,
msg_id: str,
chunk_size: int,
) -> None:
try:
pool = ThreadPool(processes=4)
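# exec the analyzer source into this module's globals so that get_analyzer_objects() below can discover the Analyzer classes it defines.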
exec(file, globals())
client = GraphClient()
analyzers = get_analyzer_objects(client)
if not analyzers:
self.logger.warning(f"Got no analyzers for file: {name}")
self.logger.info(f"Executing analyzers: {[an for an in analyzers.keys()]}")
for nodes in chunker([n for n in graph.node_iter()], chunk_size):
self.logger.info(f"Querying {len(nodes)} nodes")
def exec_analyzer(
nodes: List[BaseView], sender: Connection
) -> List[BaseView]:
try:
self.exec_analyzers(
client, file, msg_id, nodes, analyzers, sender
)
return nodes
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error(
f"Execution of {name} failed with {e} {e.args}"
)
sender.send(ExecutionFailed())
raise
pool.apply_async(exec_analyzer, args=(nodes, sender))
pool.close()
pool.join()
sender.send(ExecutionComplete())
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error(f"Execution of {name} failed with {e} {e.args}")
sender.send(ExecutionFailed())
raise
def parse_s3_event(s3: S3ServiceResource, event: S3PutRecordDict) -> bytes:
try:
bucket = event["s3"]["bucket"]["name"]
key = event["s3"]["object"]["key"]
except KeyError:
LOGGER.error("Could not parse s3 event: {}", exc_info=True)
raise
return download_s3_file(s3, bucket, key)
def download_s3_file(s3: S3ServiceResource, bucket: str, key: str) -> bytes:
obj = s3.Object(bucket, key)
return cast(bytes, obj.get()["Body"].read())
def is_analyzer(analyzer_name: str, analyzer_cls: type) -> bool:
if analyzer_name == "Analyzer": # This is the base class
return False
return (
hasattr(analyzer_cls, "get_queries")
and hasattr(analyzer_cls, "build")
and hasattr(analyzer_cls, "on_response")
)
def get_analyzer_objects(dgraph_client: GraphClient) -> Dict[str, Analyzer]:
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
return {
an[0]: an[1].build(dgraph_client)
for an in clsmembers
if is_analyzer(an[0], an[1])
}
def chunker(seq: List[BaseView], size: int) -> List[List[BaseView]]:
return [seq[pos : pos + size] for pos in range(0, len(seq), size)]
def emit_event(
analyzer_matched_subgraphs_bucket: str,
s3: S3ServiceResource,
event: ExecutionHit,
metadata: Metadata,
) -> None:
LOGGER.info(f"emitting event for: {event.analyzer_name, event.nodes}")
meta_dict = {
"trace_id": str(metadata.trace_id),
}
event_s = json.dumps(
{
"nodes": json.loads(event.nodes),
"edges": json.loads(event.edges),
"analyzer_name": event.analyzer_name,
"risk_score": event.risk_score,
"lenses": event.lenses,
"risky_node_keys": event.risky_node_keys,
"metadata": meta_dict,
}
)
event_hash = hashlib.sha256(event_s.encode())
key = base64.urlsafe_b64encode(event_hash.digest()).decode("utf-8")
obj = s3.Object(analyzer_matched_subgraphs_bucket, key)
obj.put(Body=event_s.encode("utf-8"))
| joined = ",".join(components)
event_hash = hashlib.sha256(joined.encode()).hexdigest()
return event_hash |
gatsby-config.js | module.exports = {
siteMetadata: {
title: `Gatsby Default Starter`,
description: `Kick off your next, great Gatsby project with this default starter. This barebones starter ships with the main Gatsby configuration files you might need.`,
author: `@gatsbyjs`,
},
plugins: [
`gatsby-plugin-react-helmet`,
{
resolve: `gatsby-source-filesystem`,
options: {
name: `images`,
path: `${__dirname}/src/images`,
},
},
{
resolve: `gatsby-source-filesystem`,
options: {
name: `posts`,
path: `${__dirname}/src/posts`,
},
},
`gatsby-transformer-sharp`,
`gatsby-plugin-sharp`,
`gatsby-plugin-styled-components`,
{
resolve: `gatsby-plugin-manifest`,
options: {
name: `gatsby-starter-default`,
short_name: `starter`,
start_url: `/`,
background_color: `#663399`,
theme_color: `#663399`,
display: `minimal-ui`,
icon: `src/images/gatsby-icon.png`, // This path is relative to the root of the site.
},
},
{
resolve: `gatsby-plugin-mdx`,
options: {
extensions: [`.md`, `.mdx`],
gatsbyRemarkPlugins: [
{
resolve: `gatsby-remark-images`,
options: {
maxWidth: 1200,
},
},
],
},
},
{
resolve: `gatsby-plugin-google-fonts`, | options: {
fonts: [`Lato`, `muli\:400,400i,700,700i`],
display: "swap",
},
},
{
resolve: `gatsby-plugin-material-ui`,
options: {
stylesProvider: {
injectFirst: true,
},
disableAutoprefixing: false,
disableMinification: false,
},
},
],
} | |
contract.rs | use crate::error::ContractError;
use crate::msg::{
HandleMsg, InitMsg, QueryMsg, ClientBalanceResponse, RentResponse, RentCarResponse
};
use crate::state::{
Config, TimePeriod, Car, Client, Rent, config, config_read, cars, cars_read, clients, clients_read, rents, rents_read
};
use cosmwasm_std::{
attr, coin, to_binary, BankMsg, Binary, CanonicalAddr, Coin, CosmosMsg, Deps, DepsMut, Env,
HandleResponse, HumanAddr, InitResponse, MessageInfo, StdError, StdResult, Storage,
};
const RENT_PERIOD: u64 = 60;
pub fn init(
deps: DepsMut,
_env: Env,
info: MessageInfo,
msg: InitMsg,
) -> Result<InitResponse, ContractError> {
let config_state = Config {
denom: msg.denom,
kyc_verificator: deps.api.canonical_address(&msg.kyc_verificator)?,
manager: deps.api.canonical_address(&msg.manager)?,
rent_count: 0
};
config(deps.storage).save(&config_state)?;
Ok(InitResponse::default())
}
pub fn handle(
deps: DepsMut,
env: Env,
info: MessageInfo,
msg: HandleMsg,
) -> Result<HandleResponse, ContractError> {
match msg {
HandleMsg::RegisterCar {
id,
name,
rent_price,
deposit_price
} => register_car(deps, env, info, id, name, rent_price, deposit_price),
HandleMsg::RegisterClient {
name
} => register_client(deps, env, info, name),
HandleMsg::VerifyClient {
address,
} => verify_client(deps, env, info, address),
HandleMsg::RentCar {
car_id,
start,
end
} => rent_car(deps, env, info, car_id, start, end),
HandleMsg::StartRent {
rent_id,
date,
} => start_rent(deps, env, info, rent_id, date),
HandleMsg::EndRent {
rent_id,
date,
} => end_rent(deps, env, info, rent_id, date),
}
}
pub fn register_car(
deps: DepsMut,
_env: Env,
info: MessageInfo,
id: HumanAddr,
name: String,
rent_price: u128,
deposit_price: u128
) -> Result<HandleResponse, ContractError> {
let sender_address_raw = deps.api.canonical_address(&info.sender)?;
let config_state = config(deps.storage).load()?;
if sender_address_raw != config_state.manager {
return Err(ContractError::Unauthorized {});
}
let car_address_raw = deps.api.canonical_address(&id)?;
let key = car_address_raw.as_slice();
let stored_car = cars_read(deps.storage).may_load(key)?;
if stored_car.is_some() {
return Err(ContractError::CarExist {});
}
let car = Car {
id: deps.api.canonical_address(&id)?,
name: name,
rent_price: rent_price,
deposit_price: deposit_price,
usage_periods: vec![],
balance: 0
};
cars(deps.storage).save(key, &car)?;
Ok(HandleResponse::default())
}
pub fn register_client(
deps: DepsMut,
_env: Env,
info: MessageInfo,
name: String
) -> Result<HandleResponse, ContractError> {
let sender_address_raw = deps.api.canonical_address(&info.sender)?;
let key = &sender_address_raw.as_slice();
let stored_client = clients_read(deps.storage).may_load(key)?;
if stored_client.is_some() {
return Err(ContractError::ClientExist {});
}
let config_state = config(deps.storage).load()?;
let sent_funds = info
.sent_funds
.iter()
.find(|coin| coin.denom.eq(&config_state.denom))
.unwrap();
let client = Client {
id: deps.api.canonical_address(&info.sender)?,
name: name,
verified: false,
balance: sent_funds.amount.u128(),
locked_balance: 0
};
clients(deps.storage).save(key, &client)?;
Ok(HandleResponse::default())
}
pub fn verify_client(
deps: DepsMut,
_env: Env,
info: MessageInfo,
address: HumanAddr,
) -> Result<HandleResponse, ContractError> {
let sender_address_raw = deps.api.canonical_address(&info.sender)?;
let config_state = config(deps.storage).load()?;
if sender_address_raw != config_state.kyc_verificator {
return Err(ContractError::Unauthorized {});
}
let client_address_raw = deps.api.canonical_address(&address)?;
let key = &client_address_raw.as_slice();
clients(deps.storage).update(key, |record| {
if let Some(mut record) = record {
record.verified = true;
Ok(record)
} else {
return Err(ContractError::ClientNotExist {});
}
})?;
Ok(HandleResponse::default())
}
pub fn | (
deps: DepsMut,
_env: Env,
info: MessageInfo,
car_id: HumanAddr,
start: u64,
end: u64
) -> Result<HandleResponse, ContractError> {
let car_address_raw = deps.api.canonical_address(&car_id)?;
let car = match cars_read(deps.storage).may_load(&car_address_raw.as_slice())? {
Some(car) => Some(car),
None => return Err(ContractError::CarNotExist {})
}
.unwrap();
let sender_address_raw = deps.api.canonical_address(&info.sender)?;
let client_key = &sender_address_raw.as_slice();
let mut client = match clients_read(deps.storage).may_load(client_key)? {
Some(client) => Some(client),
None => return Err(ContractError::ClientNotExist {})
}
.unwrap();
if !client.verified {
return Err(ContractError::ClientNotVerified {});
}
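// Up-front cost: the refundable deposit plus rent for every whole RENT_PERIOD in the requested window.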
let rent_cost = car.deposit_price + car.rent_price * u128::from((end - start) / RENT_PERIOD);
if client.balance < rent_cost {
return Err(ContractError::InsufficientFunds {});
}
client.balance -= rent_cost;
client.locked_balance += rent_cost;
let rent = Rent {
client_id: deps.api.canonical_address(&info.sender)?,
car_id: car_address_raw,
balance: rent_cost,
usage: TimePeriod{start, end},
actual_start: 0
};
let mut config_state = config(deps.storage).load()?;
let rent_id = config_state.rent_count + 1;
config_state.rent_count = rent_id;
let rent_key = &rent_id.to_be_bytes();
config(deps.storage).save(&config_state)?;
clients(deps.storage).save(client_key, &client)?;
rents(deps.storage).save(rent_key, &rent)?;
let r = HandleResponse {
messages: vec![],
attributes: vec![
attr("action", "rent_car"),
attr("rent_id", &rent_id),
],
data: Some(to_binary(&RentCarResponse { rent_id })?),
};
Ok(r)
}
pub fn start_rent(
deps: DepsMut,
_env: Env,
info: MessageInfo,
rent_id: u64,
date: u64
) -> Result<HandleResponse, ContractError> {
let key = &rent_id.to_be_bytes();
let sender = deps.api.canonical_address(&info.sender)?;
rents(deps.storage).update(key, |record| {
if let Some(mut record) = record {
if sender != record.car_id {
return Err(ContractError::Unauthorized {});
}
record.actual_start = date;
Ok(record)
} else {
return Err(ContractError::RentNotExist {});
}
})?;
Ok(HandleResponse::default())
}
pub fn end_rent(
deps: DepsMut,
_env: Env,
info: MessageInfo,
rent_id: u64,
date: u64
) -> Result<HandleResponse, ContractError> {
let rent_key = &rent_id.to_be_bytes();
let mut rent = match rents_read(deps.storage).may_load(rent_key)? {
Some(rent) => Some(rent),
None => return Err(ContractError::RentNotExist {}),
}
.unwrap();
if rent.balance == 0 {
return Err(ContractError::RentClosed {});
}
let car_key = &rent.car_id.as_slice();
let mut car = match cars_read(deps.storage).may_load(car_key)? {
Some(car) => Some(car),
None => return Err(ContractError::CarNotExist {}),
}
.unwrap();
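// The payment kept for the car is the prepaid rent (the locked balance minus the refundable deposit); a late return adds rent for every whole RENT_PERIOD past the agreed end.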
let mut payment = rent.balance - car.deposit_price;
if date > rent.usage.end {
payment += u128::from((date - rent.usage.end) / RENT_PERIOD) * car.rent_price;
}
car.balance += payment;
let client_key = &rent.client_id.as_slice();
clients(deps.storage).update(client_key, |record| {
if let Some(mut record) = record {
record.locked_balance -= rent.balance;
record.balance += rent.balance - payment;
Ok(record)
} else {
return Err(ContractError::ClientNotExist {});
}
})?;
rent.balance = 0;
rents(deps.storage).save(rent_key, &rent)?;
cars(deps.storage).save(car_key, &car)?;
Ok(HandleResponse::default())
}
pub fn query(deps: Deps, _env: Env, msg: QueryMsg) -> StdResult<Binary> {
match msg {
QueryMsg::Balance { address } => client_balance(deps, address),
QueryMsg::Rent { rent_id } => rent_by_id(deps, rent_id),
}
}
fn client_balance(deps: Deps, address: HumanAddr) -> StdResult<Binary> {
let sender_address_raw = deps.api.canonical_address(&address)?;
let key = &sender_address_raw.as_slice();
let client = match clients_read(deps.storage).may_load(key)? {
Some(client) => Some(client),
None => return Err(StdError::generic_err("Client does not exist"))
}
.unwrap();
let resp = ClientBalanceResponse {
balance: client.balance,
locked_balance: client.locked_balance,
};
to_binary(&resp)
}
fn rent_by_id(deps: Deps, rent_id: u64) -> StdResult<Binary> {
let key = &rent_id.to_be_bytes();
let rent = match rents_read(deps.storage).may_load(key)? {
Some(rent) => Some(rent),
None => return Err(StdError::generic_err("Rent does not exist"))
}
.unwrap();
let resp = RentResponse {
client: deps.api.human_address(&rent.client_id)?,
car: deps.api.human_address(&rent.car_id)?,
balance: rent.balance,
usage_start: rent.usage.start,
usage_end: rent.usage.end,
actual_start: rent.actual_start,
};
to_binary(&resp)
} | rent_car |
stack_test.go | package v7action_test
import (
"errors"
"code.cloudfoundry.org/cli/actor/actionerror"
. "code.cloudfoundry.org/cli/actor/v7action"
"code.cloudfoundry.org/cli/actor/v7action/v7actionfakes"
"code.cloudfoundry.org/cli/api/cloudcontroller/ccv3"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Stack", func() {
var (
actor *Actor
fakeCloudControllerClient *v7actionfakes.FakeCloudControllerClient
)
BeforeEach(func() {
fakeCloudControllerClient = new(v7actionfakes.FakeCloudControllerClient)
fakeConfig := new(v7actionfakes.FakeConfig)
actor = NewActor(fakeCloudControllerClient, fakeConfig, nil, nil, nil)
})
Describe("Get stack by name", func() {
var expectedErr error
var err error
var warnings Warnings
var stack Stack
JustBeforeEach(func() {
stack, warnings, err = actor.GetStackByName("some-stack-name")
})
Describe("When there are errors", func() {
When("The client errors", func() {
BeforeEach(func() {
expectedErr = errors.New("CC Error")
fakeCloudControllerClient.GetStacksReturns(
[]ccv3.Stack{},
ccv3.Warnings{"warning-1", "warning-2"},
expectedErr,
)
})
It("Returns the same error", func() {
Expect(err).To(MatchError(expectedErr))
Expect(warnings).To(ConsistOf("warning-1", "warning-2"))
})
})
When("The stack does not exist", func() {
BeforeEach(func() {
fakeCloudControllerClient.GetStacksReturns(
[]ccv3.Stack{},
ccv3.Warnings{"warning-1", "warning-2"},
actionerror.StackNotFoundError{Name: "some-stack-name"},
)
})
It("Returns a StackNotFound error", func() {
Expect(err).To(MatchError(actionerror.StackNotFoundError{Name: "some-stack-name"}))
Expect(warnings).To(ConsistOf("warning-1", "warning-2"))
})
})
})
Context("When there are no errors", func() {
When("The stack exists", func() {
expectedStack := ccv3.Stack{
GUID: "some-stack-guid",
Name: "some-stack-name",
Description: "Some stack desc",
}
expectedParams := []ccv3.Query{{Key: ccv3.NameFilter, Values: []string{"some-stack-name"}}}
BeforeEach(func() {
fakeCloudControllerClient.GetStacksReturns(
[]ccv3.Stack{expectedStack},
ccv3.Warnings{"warning-1", "warning-2"},
nil,
)
})
It("Returns the desired stack", func() {
actualParams := fakeCloudControllerClient.GetStacksArgsForCall(0)
Expect(actualParams).To(Equal(expectedParams))
Expect(fakeCloudControllerClient.GetStacksCallCount()).To(Equal(1))
Expect(stack.GUID).To(Equal(expectedStack.GUID))
Expect(stack.Name).To(Equal(expectedStack.Name))
Expect(stack.Description).To(Equal(expectedStack.Description))
Expect(err).To(BeNil())
Expect(warnings).To(ConsistOf("warning-1", "warning-2"))
})
})
})
})
Describe("GetStacks", func() {
var (
ccv3Stacks []ccv3.Stack
stacks []Stack
stack1Name string
stack1Description string
stack2Name string
stack2Description string
warnings Warnings
executeErr error
)
BeforeEach(func() {
ccv3Stacks = []ccv3.Stack{
{Name: stack1Name, Description: stack1Description},
{Name: stack2Name, Description: stack2Description},
}
})
JustBeforeEach(func() {
stacks, warnings, executeErr = actor.GetStacks()
})
When("getting stacks returns an error", func() {
var expectedErr error
BeforeEach(func() {
expectedErr = errors.New("some error")
fakeCloudControllerClient.GetStacksReturns(
[]ccv3.Stack{},
ccv3.Warnings{"warning-1", "warning-2"}, expectedErr)
})
It("returns warnings and the error", func() {
Expect(warnings).To(ConsistOf("warning-1", "warning-2"))
Expect(executeErr).To(MatchError(expectedErr))
})
})
When("the GetStacks call is successful", func() {
When("the cloud controller returns back stacks", func() {
BeforeEach(func() {
fakeCloudControllerClient.GetStacksReturns(
ccv3Stacks,
ccv3.Warnings{"some-stack-warning"}, nil)
})
It("returns back the stacks and warnings", func() {
Expect(executeErr).ToNot(HaveOccurred())
Expect(stacks).To(ConsistOf(Stack{Name: stack1Name, Description: stack1Description}, Stack{Name: stack2Name, Description: stack2Description})) | })
})
When("the GetStacks call is unsuccessful", func() {
BeforeEach(func() {
fakeCloudControllerClient.GetStacksReturns(
nil,
ccv3.Warnings{"some-stack-warning"},
errors.New("some-error"))
})
It("returns an error and warnings", func() {
Expect(executeErr).To(MatchError("some-error"))
Expect(warnings).To(ConsistOf("some-stack-warning"))
})
})
})
})
}) | Expect(warnings).To(ConsistOf("some-stack-warning"))
Expect(fakeCloudControllerClient.GetStacksCallCount()).To(Equal(1)) |
user.go | package helpers
import ( | jwt "github.com/golang-jwt/jwt/v4"
corev2 "github.com/sensu/sensu-go/api/core/v2"
"github.com/sensu/sensu-go/cli/client/config"
)
// GetCurrentUsername retrieves the username from the active JWT
func GetCurrentUsername(cfg config.Config) string {
tokens := cfg.Tokens()
if tokens == nil {
return ""
}
accessToken := tokens.Access
token, _ := jwt.ParseWithClaims(accessToken, &corev2.Claims{}, nil)
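// A nil key function means the claims are only decoded here, not cryptographically verified.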
if token == nil {
return ""
}
claims := token.Claims.(*corev2.Claims)
return claims.StandardClaims.Subject
} | |
run.go | package commands
import (
"context"
"fmt"
"io"
"log"
"os"
"runtime"
"strings"
"time"
"github.com/fatih/color"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/exitcodes"
"github.com/golangci/golangci-lint/pkg/lint"
"github.com/golangci/golangci-lint/pkg/lint/lintersdb"
"github.com/golangci/golangci-lint/pkg/logutils"
"github.com/golangci/golangci-lint/pkg/packages"
"github.com/golangci/golangci-lint/pkg/printers"
"github.com/golangci/golangci-lint/pkg/result"
"github.com/golangci/golangci-lint/pkg/result/processors"
)
const defaultFileMode = 0644
func getDefaultIssueExcludeHelp() string {
parts := []string{"Use or not use default excludes:"}
for _, ep := range config.DefaultExcludePatterns {
parts = append(parts,
fmt.Sprintf(" # %s %s: %s", ep.ID, ep.Linter, ep.Why),
fmt.Sprintf(" - %s", color.YellowString(ep.Pattern)),
"",
)
}
return strings.Join(parts, "\n")
}
func getDefaultDirectoryExcludeHelp() string {
parts := []string{"Use or not use default excluded directories:"}
for _, dir := range packages.StdExcludeDirRegexps {
parts = append(parts, fmt.Sprintf(" - %s", color.YellowString(dir)))
}
parts = append(parts, "")
return strings.Join(parts, "\n")
}
func wh(text string) string {
return color.GreenString(text)
}
const defaultTimeout = time.Minute
//nolint:funlen,gomnd
func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, isFinalInit bool) {
hideFlag := func(name string) {
if err := fs.MarkHidden(name); err != nil {
panic(err)
}
// we run initFlagSet multiple times, but we don't want to see the deprecation message multiple times
if isFinalInit {
const deprecateMessage = "flag will be removed soon, please, use .golangci.yml config"
if err := fs.MarkDeprecated(name, deprecateMessage); err != nil {
panic(err)
}
}
}
// Output config
oc := &cfg.Output
fs.StringVar(&oc.Format, "out-format",
config.OutFormatColoredLineNumber,
wh(fmt.Sprintf("Format of output: %s", strings.Join(config.OutFormats, "|"))))
fs.BoolVar(&oc.PrintIssuedLine, "print-issued-lines", true, wh("Print lines of code with issue"))
fs.BoolVar(&oc.PrintLinterName, "print-linter-name", true, wh("Print linter name in issue line"))
fs.BoolVar(&oc.UniqByLine, "uniq-by-line", true, wh("Make issues output unique by line"))
fs.BoolVar(&oc.SortResults, "sort-results", false, wh("Sort linter results"))
fs.BoolVar(&oc.PrintWelcomeMessage, "print-welcome", false, wh("Print welcome message"))
fs.StringVar(&oc.PathPrefix, "path-prefix", "", wh("Path prefix to add to output"))
hideFlag("print-welcome") // no longer used
fs.BoolVar(&cfg.InternalCmdTest, "internal-cmd-test", false, wh("Option is used only for testing golangci-lint command, don't use it"))
if err := fs.MarkHidden("internal-cmd-test"); err != nil {
panic(err)
}
// Run config
rc := &cfg.Run
fs.StringVar(&rc.ModulesDownloadMode, "modules-download-mode", "",
"Modules download mode. If not empty, passed as -mod=<mode> to go tools")
fs.IntVar(&rc.ExitCodeIfIssuesFound, "issues-exit-code",
exitcodes.IssuesFound, wh("Exit code when issues were found"))
fs.StringSliceVar(&rc.BuildTags, "build-tags", nil, wh("Build tags"))
fs.DurationVar(&rc.Timeout, "deadline", defaultTimeout, wh("Deadline for total work"))
if err := fs.MarkHidden("deadline"); err != nil {
panic(err)
}
fs.DurationVar(&rc.Timeout, "timeout", defaultTimeout, wh("Timeout for total work"))
fs.BoolVar(&rc.AnalyzeTests, "tests", true, wh("Analyze tests (*_test.go)"))
fs.BoolVar(&rc.PrintResourcesUsage, "print-resources-usage", false,
wh("Print avg and max memory usage of golangci-lint and total time"))
fs.StringVarP(&rc.Config, "config", "c", "", wh("Read config from file path `PATH`"))
fs.BoolVar(&rc.NoConfig, "no-config", false, wh("Don't read config"))
fs.StringSliceVar(&rc.SkipDirs, "skip-dirs", nil, wh("Regexps of directories to skip"))
fs.BoolVar(&rc.UseDefaultSkipDirs, "skip-dirs-use-default", true, getDefaultDirectoryExcludeHelp())
fs.StringSliceVar(&rc.SkipFiles, "skip-files", nil, wh("Regexps of files to skip"))
const allowParallelDesc = "Allow multiple parallel golangci-lint instances running. " +
"If false (default) - golangci-lint acquires file lock on start."
fs.BoolVar(&rc.AllowParallelRunners, "allow-parallel-runners", false, wh(allowParallelDesc))
const allowSerialDesc = "Allow multiple golangci-lint instances running, but serialize them around a lock. " +
"If false (default) - golangci-lint exits with an error if it fails to acquire file lock on start."
fs.BoolVar(&rc.AllowSerialRunners, "allow-serial-runners", false, wh(allowSerialDesc))
// Linters settings config
lsc := &cfg.LintersSettings
// Hide all linters settings flags: they were initially visible,
// but as the number of linters grew it became obvious that
// we can't have 90% of the flags be linter settings: common flags became hard to find.
// New linters settings should be done only through config file.
fs.BoolVar(&lsc.Errcheck.CheckTypeAssertions, "errcheck.check-type-assertions",
false, "Errcheck: check for ignored type assertion results")
hideFlag("errcheck.check-type-assertions")
fs.BoolVar(&lsc.Errcheck.CheckAssignToBlank, "errcheck.check-blank", false,
"Errcheck: check for errors assigned to blank identifier: _ = errFunc()")
hideFlag("errcheck.check-blank")
fs.StringVar(&lsc.Errcheck.Exclude, "errcheck.exclude", "",
"Path to a file containing a list of functions to exclude from checking")
hideFlag("errcheck.exclude")
fs.StringVar(&lsc.Errcheck.Ignore, "errcheck.ignore", "fmt:.*",
`Comma-separated list of pairs of the form pkg:regex. The regex is used to ignore names within pkg`)
hideFlag("errcheck.ignore")
fs.BoolVar(&lsc.Govet.CheckShadowing, "govet.check-shadowing", false,
"Govet: check for shadowed variables")
hideFlag("govet.check-shadowing")
fs.Float64Var(&lsc.Golint.MinConfidence, "golint.min-confidence", 0.8,
"Golint: minimum confidence of a problem to print it")
hideFlag("golint.min-confidence")
fs.BoolVar(&lsc.Gofmt.Simplify, "gofmt.simplify", true, "Gofmt: simplify code")
hideFlag("gofmt.simplify")
fs.IntVar(&lsc.Gocyclo.MinComplexity, "gocyclo.min-complexity",
30, "Minimal complexity of function to report it")
hideFlag("gocyclo.min-complexity")
fs.BoolVar(&lsc.Maligned.SuggestNewOrder, "maligned.suggest-new", false,
"Maligned: print suggested more optimal struct fields ordering")
hideFlag("maligned.suggest-new")
fs.IntVar(&lsc.Dupl.Threshold, "dupl.threshold",
150, "Dupl: Minimal threshold to detect copy-paste")
hideFlag("dupl.threshold")
fs.BoolVar(&lsc.Goconst.MatchWithConstants, "goconst.match-constant",
true, "Goconst: look for existing constants matching the values")
hideFlag("goconst.match-constant")
fs.IntVar(&lsc.Goconst.MinStringLen, "goconst.min-len",
3, "Goconst: minimum constant string length")
hideFlag("goconst.min-len")
fs.IntVar(&lsc.Goconst.MinOccurrencesCount, "goconst.min-occurrences",
3, "Goconst: minimum occurrences of constant string count to trigger issue")
hideFlag("goconst.min-occurrences")
fs.BoolVar(&lsc.Goconst.ParseNumbers, "goconst.numbers",
false, "Goconst: search also for duplicated numbers")
hideFlag("goconst.numbers")
fs.IntVar(&lsc.Goconst.NumberMin, "goconst.min",
3, "minimum value, only works with goconst.numbers")
hideFlag("goconst.min")
fs.IntVar(&lsc.Goconst.NumberMax, "goconst.max",
3, "maximum value, only works with goconst.numbers")
hideFlag("goconst.max")
fs.BoolVar(&lsc.Goconst.IgnoreCalls, "goconst.ignore-calls",
true, "Goconst: ignore when constant is not used as function argument")
hideFlag("goconst.ignore-calls")
// (@dixonwille) These flags are only used for testing purposes.
fs.StringSliceVar(&lsc.Depguard.Packages, "depguard.packages", nil,
"Depguard: packages to add to the list")
hideFlag("depguard.packages")
fs.BoolVar(&lsc.Depguard.IncludeGoRoot, "depguard.include-go-root", false,
"Depguard: check list against standard lib")
hideFlag("depguard.include-go-root")
fs.IntVar(&lsc.Lll.TabWidth, "lll.tab-width", 1,
"Lll: tab width in spaces")
hideFlag("lll.tab-width")
// Linters config
lc := &cfg.Linters
fs.StringSliceVarP(&lc.Enable, "enable", "E", nil, wh("Enable specific linter"))
fs.StringSliceVarP(&lc.Disable, "disable", "D", nil, wh("Disable specific linter"))
fs.BoolVar(&lc.EnableAll, "enable-all", false, wh("Enable all linters"))
fs.BoolVar(&lc.DisableAll, "disable-all", false, wh("Disable all linters"))
fs.StringSliceVarP(&lc.Presets, "presets", "p", nil,
wh(fmt.Sprintf("Enable presets (%s) of linters. Run 'golangci-lint linters' to see "+
"them. This option implies option --disable-all", strings.Join(m.AllPresets(), "|"))))
fs.BoolVar(&lc.Fast, "fast", false, wh("Run only fast linters from enabled linters set (first run won't be fast)"))
// Issues config
ic := &cfg.Issues
fs.StringSliceVarP(&ic.ExcludePatterns, "exclude", "e", nil, wh("Exclude issue by regexp"))
fs.BoolVar(&ic.UseDefaultExcludes, "exclude-use-default", true, getDefaultIssueExcludeHelp())
fs.BoolVar(&ic.ExcludeCaseSensitive, "exclude-case-sensitive", false, wh("If set to true exclude "+
"and exclude rules regular expressions are case sensitive"))
fs.IntVar(&ic.MaxIssuesPerLinter, "max-issues-per-linter", 50,
wh("Maximum issues count per one linter. Set to 0 to disable"))
fs.IntVar(&ic.MaxSameIssues, "max-same-issues", 3,
wh("Maximum count of issues with the same text. Set to 0 to disable"))
fs.BoolVarP(&ic.Diff, "new", "n", false,
wh("Show only new issues: if there are unstaged changes or untracked files, only those changes "+
"are analyzed, else only changes in HEAD~ are analyzed.\nIt's a super-useful option for integration "+
"of golangci-lint into existing large codebase.\nIt's not practical to fix all existing issues at "+
"the moment of integration: much better to not allow issues in new code.\nFor CI setups, prefer "+
"--new-from-rev=HEAD~, as --new can skip linting the current patch if any scripts generate "+
"unstaged files before golangci-lint runs."))
fs.StringVar(&ic.DiffFromRevision, "new-from-rev", "",
wh("Show only new issues created after git revision `REV`"))
fs.StringVar(&ic.DiffPatchFilePath, "new-from-patch", "",
wh("Show only new issues created in git patch with file path `PATH`"))
fs.BoolVar(&ic.WholeFiles, "whole-files", false,
wh("Show issues in any part of update files (requires new-from-rev or new-from-patch)"))
fs.BoolVar(&ic.NeedFix, "fix", false, "Fix found issues (if it's supported by the linter)")
}
func (e *Executor) initRunConfiguration(cmd *cobra.Command) {
fs := cmd.Flags()
fs.SortFlags = false // sort them as they are defined here
initFlagSet(fs, e.cfg, e.DBManager, true)
}
func (e *Executor) getConfigForCommandLine() (*config.Config, error) {
// We use another pflag.FlagSet here to not set `changed` flag
// on cmd.Flags() options. Otherwise, string slice options will be duplicated.
fs := pflag.NewFlagSet("config flag set", pflag.ContinueOnError)
var cfg config.Config
// Don't do `fs.AddFlagSet(cmd.Flags())` because it shares flags representations:
// `changed` variable inside string slice vars will be shared.
// Use another config variable here, not e.cfg, to not
// affect main parsing by this parsing of only config option.
initFlagSet(fs, &cfg, e.DBManager, false)
initVersionFlagSet(fs, &cfg)
// Parse max options, even force version option: don't want
// to get access to Executor here: it's error-prone to use
// cfg vs e.cfg.
initRootFlagSet(fs, &cfg, true)
fs.Usage = func() {} // otherwise, help text will be printed twice
if err := fs.Parse(os.Args); err != nil {
if err == pflag.ErrHelp {
return nil, err
}
return nil, fmt.Errorf("can't parse args: %s", err)
}
return &cfg, nil
}
func (e *Executor) initRun() {
e.runCmd = &cobra.Command{
Use: "run",
Short: "Run the linters",
Run: e.executeRun,
PreRun: func(_ *cobra.Command, _ []string) {
if ok := e.acquireFileLock(); !ok {
e.log.Fatalf("Parallel golangci-lint is running")
}
},
PostRun: func(_ *cobra.Command, _ []string) {
e.releaseFileLock()
},
}
e.rootCmd.AddCommand(e.runCmd)
e.runCmd.SetOut(logutils.StdOut) // use custom output to properly color it in Windows terminals
e.runCmd.SetErr(logutils.StdErr)
e.initRunConfiguration(e.runCmd)
}
func fixSlicesFlags(fs *pflag.FlagSet) {
// It's a dirty hack to set flag.Changed to true for every string slice flag.
// It's necessary to merge config and command-line slices: otherwise command-line
// flags will always overwrite ones from the config.
fs.VisitAll(func(f *pflag.Flag) {
if f.Value.Type() != "stringSlice" {
return
}
s, err := fs.GetStringSlice(f.Name)
if err != nil {
return
}
if s == nil { // assume that every string slice flag has nil as the default
return
}
var safe []string
for _, v := range s {
// add quotes to escape comma because spf13/pflag use a CSV parser:
// https://github.com/spf13/pflag/blob/85dd5c8bc61cfa382fecd072378089d4e856579d/string_slice.go#L43
safe = append(safe, `"`+v+`"`)
}
// calling Set sets Changed to true: next Set calls will append, not overwrite
_ = f.Value.Set(strings.Join(safe, ","))
})
}
// runAnalysis executes the linters that have been enabled in the configuration.
func (e *Executor) runAnalysis(ctx context.Context, args []string) ([]result.Issue, error) {
e.cfg.Run.Args = args
lintersToRun, err := e.EnabledLintersSet.GetOptimizedLinters()
if err != nil {
return nil, err
}
enabledLintersMap, err := e.EnabledLintersSet.GetEnabledLintersMap()
if err != nil {
return nil, err
}
for _, lc := range e.DBManager.GetAllSupportedLinterConfigs() {
isEnabled := enabledLintersMap[lc.Name()] != nil
e.reportData.AddLinter(lc.Name(), isEnabled, lc.EnabledByDefault)
}
lintCtx, err := e.contextLoader.Load(ctx, lintersToRun)
if err != nil {
return nil, errors.Wrap(err, "context loading failed")
}
lintCtx.Log = e.log.Child("linters context")
runner, err := lint.NewRunner(e.cfg, e.log.Child("runner"),
e.goenv, e.EnabledLintersSet, e.lineCache, e.DBManager, lintCtx.Packages)
if err != nil {
return nil, err
}
issues, err := runner.Run(ctx, lintersToRun, lintCtx)
if err != nil {
return nil, err
}
fixer := processors.NewFixer(e.cfg, e.log, e.fileCache)
return fixer.Process(issues), nil
}
func (e *Executor) setOutputToDevNull() (savedStdout, savedStderr *os.File) {
savedStdout, savedStderr = os.Stdout, os.Stderr
devNull, err := os.Open(os.DevNull)
if err != nil {
e.log.Warnf("Can't open null device %q: %s", os.DevNull, err)
return
}
os.Stdout, os.Stderr = devNull, devNull
return
}
func (e *Executor) setExitCodeIfIssuesFound(issues []result.Issue) {
if len(issues) != 0 {
e.exitCode = e.cfg.Run.ExitCodeIfIssuesFound
}
}
func (e *Executor) runAndPrint(ctx context.Context, args []string) error {
if err := e.goenv.Discover(ctx); err != nil {
e.log.Warnf("Failed to discover go env: %s", err)
}
if !logutils.HaveDebugTag("linters_output") {
// Don't allow linters and loader to print anything
log.SetOutput(io.Discard)
savedStdout, savedStderr := e.setOutputToDevNull()
defer func() {
os.Stdout, os.Stderr = savedStdout, savedStderr
}()
}
issues, err := e.runAnalysis(ctx, args)
if err != nil {
return err // XXX: don't lose the error type
}
formats := strings.Split(e.cfg.Output.Format, ",")
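// Each comma-separated entry has the form "format[:path]"; an empty or missing path writes to stdout.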
for _, format := range formats {
out := strings.SplitN(format, ":", 2)
if len(out) < 2 {
out = append(out, "")
}
err := e.printReports(ctx, issues, out[1], out[0])
if err != nil {
return err
}
}
e.setExitCodeIfIssuesFound(issues)
e.fileCache.PrintStats(e.log)
return nil
}
func (e *Executor) printReports(ctx context.Context, issues []result.Issue, path, format string) error {
w, shouldClose, err := e.createWriter(path)
if err != nil {
return fmt.Errorf("can't create output for %s: %w", path, err)
}
p, err := e.createPrinter(format, w)
if err != nil {
if file, ok := w.(io.Closer); shouldClose && ok {
_ = file.Close()
}
return err
}
if err = p.Print(ctx, issues); err != nil {
if file, ok := w.(io.Closer); shouldClose && ok {
_ = file.Close()
}
return fmt.Errorf("can't print %d issues: %s", len(issues), err)
}
if file, ok := w.(io.Closer); shouldClose && ok {
_ = file.Close()
}
return nil
}
func (e *Executor) createWriter(path string) (io.Writer, bool, error) {
if path == "" || path == "stdout" {
return logutils.StdOut, false, nil
}
if path == "stderr" {
return logutils.StdErr, false, nil
}
f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, defaultFileMode)
if err != nil {
return nil, false, err
}
return f, true, nil
}
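// createPrinter builds the printer that matches the requested output format.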
func (e *Executor) createPrinter(format string, w io.Writer) (printers.Printer, error) {
var p printers.Printer
switch format {
case config.OutFormatJSON:
p = printers.NewJSON(&e.reportData, w)
case config.OutFormatColoredLineNumber, config.OutFormatLineNumber:
p = printers.NewText(e.cfg.Output.PrintIssuedLine,
format == config.OutFormatColoredLineNumber, e.cfg.Output.PrintLinterName,
e.log.Child("text_printer"), w)
case config.OutFormatTab:
p = printers.NewTab(e.cfg.Output.PrintLinterName, e.log.Child("tab_printer"), w)
case config.OutFormatCheckstyle:
p = printers.NewCheckstyle(w)
case config.OutFormatCodeClimate:
p = printers.NewCodeClimate(w)
case config.OutFormatHTML:
p = printers.NewHTML(w)
case config.OutFormatJunitXML:
p = printers.NewJunitXML(w)
case config.OutFormatGithubActions:
p = printers.NewGithub(w)
default:
return nil, fmt.Errorf("unknown output format %s", format)
}
return p, nil
}
// executeRun executes the 'run' CLI command, which runs the linters.
func (e *Executor) executeRun(_ *cobra.Command, args []string) {
needTrackResources := e.cfg.Run.IsVerbose || e.cfg.Run.PrintResourcesUsage
trackResourcesEndCh := make(chan struct{})
defer func() { // XXX: this defer must be before ctx.cancel defer
if needTrackResources { // wait until resource tracking has finished to print properly
<-trackResourcesEndCh
}
}()
e.setTimeoutToDeadlineIfOnlyDeadlineIsSet()
ctx, cancel := context.WithTimeout(context.Background(), e.cfg.Run.Timeout)
defer cancel()
if needTrackResources {
go watchResources(ctx, trackResourcesEndCh, e.log, e.debugf)
}
if err := e.runAndPrint(ctx, args); err != nil {
e.log.Errorf("Running error: %s", err)
if e.exitCode == exitcodes.Success {
if exitErr, ok := errors.Cause(err).(*exitcodes.ExitError); ok {
e.exitCode = exitErr.Code
} else {
e.exitCode = exitcodes.Failure
}
}
}
e.setupExitCode(ctx)
}
// to be removed when deadline is finally decommissioned
func (e *Executor) setTimeoutToDeadlineIfOnlyDeadlineIsSet() {
deadlineValue := e.cfg.Run.Deadline
if deadlineValue != 0 && e.cfg.Run.Timeout == defaultTimeout {
e.cfg.Run.Timeout = deadlineValue
}
}
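// setupExitCode maps the run outcome (timeout, warnings during tests, logged errors) to the final process exit code.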
func (e *Executor) setupExitCode(ctx context.Context) {
if ctx.Err() != nil {
e.exitCode = exitcodes.Timeout
e.log.Errorf("Timeout exceeded: try increasing it by passing --timeout option")
return
}
if e.exitCode != exitcodes.Success {
return
}
needFailOnWarnings := os.Getenv("GL_TEST_RUN") == "1" || os.Getenv("FAIL_ON_WARNINGS") == "1"
if needFailOnWarnings && len(e.reportData.Warnings) != 0 {
e.exitCode = exitcodes.WarningInTest
return
}
if e.reportData.Error != "" {
// e.g. the case when the typecheck linter couldn't parse a file and only logged the error instead of returning it
e.exitCode = exitcodes.ErrorWasLogged
return
}
}
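// watchResources samples memory usage via runtime.MemStats every 100ms until the context is cancelled, then logs the average and peak values and the total execution time.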
func watchResources(ctx context.Context, done chan struct{}, logger logutils.Log, debugf logutils.DebugFunc) {
startedAt := time.Now()
debugf("Started tracking time")
var maxRSSMB, totalRSSMB float64
var iterationsCount int
const intervalMS = 100
ticker := time.NewTicker(intervalMS * time.Millisecond)
defer ticker.Stop()
logEveryRecord := os.Getenv("GL_MEM_LOG_EVERY") == "1"
const MB = 1024 * 1024
track := func() {
var m runtime.MemStats
runtime.ReadMemStats(&m)
if logEveryRecord {
debugf("Stopping memory tracing iteration, printing ...")
printMemStats(&m, logger)
}
rssMB := float64(m.Sys) / MB
if rssMB > maxRSSMB {
maxRSSMB = rssMB
}
totalRSSMB += rssMB
iterationsCount++
}
for {
track()
stop := false
select {
case <-ctx.Done():
stop = true
debugf("Stopped resources tracking")
case <-ticker.C:
}
if stop {
break
}
}
track()
avgRSSMB := totalRSSMB / float64(iterationsCount)
logger.Infof("Memory: %d samples, avg is %.1fMB, max is %.1fMB",
iterationsCount, avgRSSMB, maxRSSMB)
logger.Infof("Execution took %s", time.Since(startedAt))
close(done)
}
|
cache.ts
import dotProp from "dot-prop-immutable";
import {
GET_FORM_REVISIONS,
GetFormRevisionsQueryResponse,
GetFormRevisionsQueryVariables,
LIST_FORMS,
ListFormsQueryResponse
} from "../graphql";
import { DataProxy } from "apollo-cache";
import { FbRevisionModel } from "~/types";
// Replace existing "latest" revision with the new revision
export const updateLatestRevisionInListCache = (
cache: DataProxy,
revision: FbRevisionModel
): void => {
const gqlParams = { query: LIST_FORMS };
const [uniqueId] = revision.id.split("#");
const response = cache.readQuery<ListFormsQueryResponse>(gqlParams);
if (!response || !response.formBuilder) {
return;
}
const { formBuilder } = response;
const index = formBuilder.listForms.data.findIndex(item => item.id.startsWith(uniqueId));
cache.writeQuery({
...gqlParams,
data: {
formBuilder: dotProp.set(formBuilder, `listForms.data.${index}`, revision)
}
});
};
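// Add the newly created form to the beginning of the cached list of forms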
export const addFormToListCache = (cache: DataProxy, revision: FbRevisionModel): void => {
const gqlParams = { query: LIST_FORMS };
const response = cache.readQuery<ListFormsQueryResponse>(gqlParams);
if (!response || !response.formBuilder) {
return;
}
const { formBuilder } = response;
cache.writeQuery({
...gqlParams,
data: {
formBuilder: dotProp.set(formBuilder, `listForms.data`, [
revision,
...formBuilder.listForms.data
])
}
});
};
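// Add the new revision to the beginning of the cached list of revisions of its parent form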
export const addRevisionToRevisionsCache = (
cache: DataProxy,
newRevision: FbRevisionModel
): void => {
const gqlParams = {
query: GET_FORM_REVISIONS,
variables: { id: newRevision.id.split("#")[0] }
};
const response = cache.readQuery<GetFormRevisionsQueryResponse, GetFormRevisionsQueryVariables>(
gqlParams
);
if (!response || !response.formBuilder) {
return;
}
const { formBuilder } = response;
cache.writeQuery({
...gqlParams,
data: {
formBuilder: dotProp.set(formBuilder, `revisions.data`, [
newRevision,
...formBuilder.revisions.data
])
}
});
};
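// Remove the deleted form from the cached list of forms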
export const removeFormFromListCache = (cache: DataProxy, form: FbRevisionModel): void => {
// Delete the form from list cache
const gqlParams = { query: LIST_FORMS };
const response = cache.readQuery<ListFormsQueryResponse>(gqlParams);
if (!response || !response.formBuilder) {
return;
}
const { formBuilder } = response;
const index = formBuilder.listForms.data.findIndex(item => item.id === form.id);
cache.writeQuery({
...gqlParams,
data: {
formBuilder: dotProp.delete(formBuilder, `listForms.data.${index}`)
}
});
};
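// Remove the given revision from the cached revisions of the form and return the updated list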
export const removeRevisionFromFormCache = (
cache: DataProxy,
form: FbRevisionModel,
revision: FbRevisionModel
): FbRevisionModel[] => {
const gqlParams = {
query: GET_FORM_REVISIONS,
variables: { id: form.id.split("#")[0] }
};
const response = cache.readQuery<GetFormRevisionsQueryResponse, GetFormRevisionsQueryVariables>(
gqlParams
);
if (!response || !response.formBuilder) {
return [];
}
let { formBuilder } = response;
const index = formBuilder.revisions.data.findIndex(item => item.id === revision.id);
formBuilder = dotProp.delete(
formBuilder,
`revisions.data.${index}`
) as GetFormRevisionsQueryResponse["formBuilder"];
cache.writeQuery({
...gqlParams,
data: { formBuilder }
});
// Return new revisions
return formBuilder.revisions.data;
};
coupon.py
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Coupon(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'affiliate_oid': 'int',
'allow_multiple_one_time_codes': 'bool',
'amount_off_items': 'CouponAmountOffItems',
'amount_off_shipping': 'CouponAmountOffShipping',
'amount_off_shipping_with_items_purchase': 'CouponAmountOffShippingWithItemsPurchase',
'amount_off_subtotal': 'CouponAmountOffSubtotal',
'amount_off_subtotal_and_free_shipping': 'CouponAmountOffSubtotalFreeShippingWithPurchase',
'amount_off_subtotal_and_shipping': 'CouponAmountOffSubtotalAndShipping',
'amount_off_subtotal_with_block_purchase': 'CouponAmountOffSubtotalWithBlockPurchase',
'amount_off_subtotal_with_items_purchase': 'CouponAmountOffSubtotalWithItemsPurchase',
'amount_off_subtotal_with_purchase': 'CouponAmountOffSubtotalWithPurchase',
'automatically_apply_coupon_codes': 'CouponAutomaticallyApplyCouponCodes',
'buy_one_get_one': 'CouponBuyOneGetOneLimit',
'calculated_description': 'str',
'can_be_used_with_other_coupons': 'bool',
'coupon_oid': 'int',
'coupon_type': 'str',
'description': 'str',
'discount_item_with_item_purchase': 'CouponDiscountItemWithItemPurchase',
'discount_items': 'CouponDiscountItems',
'expiration_dts': 'str',
'free_item_and_shipping_with_subtotal': 'CouponFreeItemAndShippingWithSubtotal',
'free_item_with_item_purchase': 'CouponFreeItemWithItemPurchase',
'free_item_with_subtotal': 'CouponFreeItemWithSubtotal',
'free_items_with_item_purchase': 'CouponFreeItemsWithItemPurchase',
'free_items_with_mixmatch_purchase': 'CouponFreeItemsWithMixMatchPurchase',
'free_shipping': 'CouponFreeShipping',
'free_shipping_specific_items': 'CouponFreeShippingSpecificItems',
'free_shipping_with_items_purchase': 'CouponFreeShippingWithItemsPurchase',
'free_shipping_with_subtotal': 'CouponFreeShippingWithSubtotal',
'hide_from_customer': 'bool',
'merchant_code': 'str',
'merchant_notes': 'str',
'multiple_amounts_off_items': 'CouponMultipleAmountsOffItems',
'no_discount': 'CouponNoDiscount',
'percent_off_item_with_items_quantity_purchase': 'CouponPercentOffItemWithItemsQuantityPurchase',
'percent_off_items': 'CouponPercentOffItems',
'percent_off_items_and_free_shipping': 'CouponPercentOffItemsAndFreeShipping',
'percent_off_items_with_items_purchase': 'CouponPercentOffItemsWithItemsPurchase',
'percent_off_msrp_items': 'CouponPercentOffMsrpItems',
'percent_off_retail_price_items': 'CouponPercentOffRetailPriceItems',
'percent_off_shipping': 'CouponPercentOffShipping',
'percent_off_subtotal': 'CouponPercentOffSubtotal',
'percent_off_subtotal_and_free_shipping': 'CouponPercentOffSubtotalAndFreeShipping',
'percent_off_subtotal_limit': 'CouponPercentOffSubtotalLimit',
'percent_off_subtotal_with_items_purchase': 'CouponPercentOffSubtotalWithItemsPurchase',
'percent_off_subtotal_with_subtotal': 'CouponPercentOffSubtotalWithSubtotal',
'quickbooks_code': 'str',
'restrict_by_postal_codes': 'list[str]',
'restrict_by_screen_branding_theme_codes': 'list[CouponRestriction]',
'restrict_by_storefronts': 'list[CouponRestriction]',
'start_dts': 'str',
'super_coupon': 'bool',
'tiered_amount_off_items': 'CouponTieredAmountOffItems',
'tiered_amount_off_subtotal': 'CouponTieredAmountOffSubtotal',
'tiered_percent_off_items': 'CouponTieredPercentOffItems',
'tiered_percent_off_shipping': 'CouponTieredPercentOffShipping',
'tiered_percent_off_subtotal': 'CouponTieredPercentOffSubtotal',
'usable_by': 'str'
}
attribute_map = {
'affiliate_oid': 'affiliate_oid',
'allow_multiple_one_time_codes': 'allow_multiple_one_time_codes',
'amount_off_items': 'amount_off_items',
'amount_off_shipping': 'amount_off_shipping',
'amount_off_shipping_with_items_purchase': 'amount_off_shipping_with_items_purchase',
'amount_off_subtotal': 'amount_off_subtotal',
'amount_off_subtotal_and_free_shipping': 'amount_off_subtotal_and_free_shipping',
'amount_off_subtotal_and_shipping': 'amount_off_subtotal_and_shipping',
'amount_off_subtotal_with_block_purchase': 'amount_off_subtotal_with_block_purchase',
'amount_off_subtotal_with_items_purchase': 'amount_off_subtotal_with_items_purchase',
'amount_off_subtotal_with_purchase': 'amount_off_subtotal_with_purchase',
'automatically_apply_coupon_codes': 'automatically_apply_coupon_codes',
'buy_one_get_one': 'buy_one_get_one',
'calculated_description': 'calculated_description',
'can_be_used_with_other_coupons': 'can_be_used_with_other_coupons',
'coupon_oid': 'coupon_oid',
'coupon_type': 'coupon_type',
'description': 'description',
'discount_item_with_item_purchase': 'discount_item_with_item_purchase',
'discount_items': 'discount_items',
'expiration_dts': 'expiration_dts',
'free_item_and_shipping_with_subtotal': 'free_item_and_shipping_with_subtotal',
'free_item_with_item_purchase': 'free_item_with_item_purchase',
'free_item_with_subtotal': 'free_item_with_subtotal',
'free_items_with_item_purchase': 'free_items_with_item_purchase',
'free_items_with_mixmatch_purchase': 'free_items_with_mixmatch_purchase',
'free_shipping': 'free_shipping',
'free_shipping_specific_items': 'free_shipping_specific_items',
'free_shipping_with_items_purchase': 'free_shipping_with_items_purchase',
'free_shipping_with_subtotal': 'free_shipping_with_subtotal',
'hide_from_customer': 'hide_from_customer',
'merchant_code': 'merchant_code',
'merchant_notes': 'merchant_notes',
'multiple_amounts_off_items': 'multiple_amounts_off_items',
'no_discount': 'no_discount',
'percent_off_item_with_items_quantity_purchase': 'percent_off_item_with_items_quantity_purchase',
'percent_off_items': 'percent_off_items',
'percent_off_items_and_free_shipping': 'percent_off_items_and_free_shipping',
'percent_off_items_with_items_purchase': 'percent_off_items_with_items_purchase',
'percent_off_msrp_items': 'percent_off_msrp_items',
'percent_off_retail_price_items': 'percent_off_retail_price_items',
'percent_off_shipping': 'percent_off_shipping',
'percent_off_subtotal': 'percent_off_subtotal',
'percent_off_subtotal_and_free_shipping': 'percent_off_subtotal_and_free_shipping',
'percent_off_subtotal_limit': 'percent_off_subtotal_limit',
'percent_off_subtotal_with_items_purchase': 'percent_off_subtotal_with_items_purchase',
'percent_off_subtotal_with_subtotal': 'percent_off_subtotal_with_subtotal',
'quickbooks_code': 'quickbooks_code',
'restrict_by_postal_codes': 'restrict_by_postal_codes',
'restrict_by_screen_branding_theme_codes': 'restrict_by_screen_branding_theme_codes',
'restrict_by_storefronts': 'restrict_by_storefronts',
'start_dts': 'start_dts',
'super_coupon': 'super_coupon',
'tiered_amount_off_items': 'tiered_amount_off_items',
'tiered_amount_off_subtotal': 'tiered_amount_off_subtotal',
'tiered_percent_off_items': 'tiered_percent_off_items',
'tiered_percent_off_shipping': 'tiered_percent_off_shipping',
'tiered_percent_off_subtotal': 'tiered_percent_off_subtotal',
'usable_by': 'usable_by'
}
def __init__(self, affiliate_oid=None, allow_multiple_one_time_codes=None, amount_off_items=None, amount_off_shipping=None, amount_off_shipping_with_items_purchase=None, amount_off_subtotal=None, amount_off_subtotal_and_free_shipping=None, amount_off_subtotal_and_shipping=None, amount_off_subtotal_with_block_purchase=None, amount_off_subtotal_with_items_purchase=None, amount_off_subtotal_with_purchase=None, automatically_apply_coupon_codes=None, buy_one_get_one=None, calculated_description=None, can_be_used_with_other_coupons=None, coupon_oid=None, coupon_type=None, description=None, discount_item_with_item_purchase=None, discount_items=None, expiration_dts=None, free_item_and_shipping_with_subtotal=None, free_item_with_item_purchase=None, free_item_with_subtotal=None, free_items_with_item_purchase=None, free_items_with_mixmatch_purchase=None, free_shipping=None, free_shipping_specific_items=None, free_shipping_with_items_purchase=None, free_shipping_with_subtotal=None, hide_from_customer=None, merchant_code=None, merchant_notes=None, multiple_amounts_off_items=None, no_discount=None, percent_off_item_with_items_quantity_purchase=None, percent_off_items=None, percent_off_items_and_free_shipping=None, percent_off_items_with_items_purchase=None, percent_off_msrp_items=None, percent_off_retail_price_items=None, percent_off_shipping=None, percent_off_subtotal=None, percent_off_subtotal_and_free_shipping=None, percent_off_subtotal_limit=None, percent_off_subtotal_with_items_purchase=None, percent_off_subtotal_with_subtotal=None, quickbooks_code=None, restrict_by_postal_codes=None, restrict_by_screen_branding_theme_codes=None, restrict_by_storefronts=None, start_dts=None, super_coupon=None, tiered_amount_off_items=None, tiered_amount_off_subtotal=None, tiered_percent_off_items=None, tiered_percent_off_shipping=None, tiered_percent_off_subtotal=None, usable_by=None): # noqa: E501
"""Coupon - a model defined in Swagger""" # noqa: E501
self._affiliate_oid = None
self._allow_multiple_one_time_codes = None
self._amount_off_items = None
self._amount_off_shipping = None
self._amount_off_shipping_with_items_purchase = None
self._amount_off_subtotal = None
self._amount_off_subtotal_and_free_shipping = None
self._amount_off_subtotal_and_shipping = None
self._amount_off_subtotal_with_block_purchase = None
self._amount_off_subtotal_with_items_purchase = None
self._amount_off_subtotal_with_purchase = None
self._automatically_apply_coupon_codes = None
self._buy_one_get_one = None
self._calculated_description = None
self._can_be_used_with_other_coupons = None
self._coupon_oid = None
self._coupon_type = None
self._description = None
self._discount_item_with_item_purchase = None
self._discount_items = None
self._expiration_dts = None
self._free_item_and_shipping_with_subtotal = None
self._free_item_with_item_purchase = None
self._free_item_with_subtotal = None
self._free_items_with_item_purchase = None
self._free_items_with_mixmatch_purchase = None
self._free_shipping = None
self._free_shipping_specific_items = None
self._free_shipping_with_items_purchase = None
self._free_shipping_with_subtotal = None
self._hide_from_customer = None
self._merchant_code = None
self._merchant_notes = None
self._multiple_amounts_off_items = None
self._no_discount = None
self._percent_off_item_with_items_quantity_purchase = None
self._percent_off_items = None
self._percent_off_items_and_free_shipping = None
self._percent_off_items_with_items_purchase = None
self._percent_off_msrp_items = None
self._percent_off_retail_price_items = None
self._percent_off_shipping = None
self._percent_off_subtotal = None
self._percent_off_subtotal_and_free_shipping = None
self._percent_off_subtotal_limit = None
self._percent_off_subtotal_with_items_purchase = None
self._percent_off_subtotal_with_subtotal = None
self._quickbooks_code = None
self._restrict_by_postal_codes = None
self._restrict_by_screen_branding_theme_codes = None
self._restrict_by_storefronts = None
self._start_dts = None
self._super_coupon = None
self._tiered_amount_off_items = None
self._tiered_amount_off_subtotal = None
self._tiered_percent_off_items = None
self._tiered_percent_off_shipping = None
self._tiered_percent_off_subtotal = None
self._usable_by = None
self.discriminator = None
if affiliate_oid is not None:
self.affiliate_oid = affiliate_oid
if allow_multiple_one_time_codes is not None:
self.allow_multiple_one_time_codes = allow_multiple_one_time_codes
if amount_off_items is not None:
self.amount_off_items = amount_off_items
if amount_off_shipping is not None:
self.amount_off_shipping = amount_off_shipping
if amount_off_shipping_with_items_purchase is not None:
self.amount_off_shipping_with_items_purchase = amount_off_shipping_with_items_purchase
if amount_off_subtotal is not None:
self.amount_off_subtotal = amount_off_subtotal
if amount_off_subtotal_and_free_shipping is not None:
self.amount_off_subtotal_and_free_shipping = amount_off_subtotal_and_free_shipping
if amount_off_subtotal_and_shipping is not None:
self.amount_off_subtotal_and_shipping = amount_off_subtotal_and_shipping
if amount_off_subtotal_with_block_purchase is not None:
self.amount_off_subtotal_with_block_purchase = amount_off_subtotal_with_block_purchase
if amount_off_subtotal_with_items_purchase is not None:
self.amount_off_subtotal_with_items_purchase = amount_off_subtotal_with_items_purchase
if amount_off_subtotal_with_purchase is not None:
self.amount_off_subtotal_with_purchase = amount_off_subtotal_with_purchase
if automatically_apply_coupon_codes is not None:
self.automatically_apply_coupon_codes = automatically_apply_coupon_codes
if buy_one_get_one is not None:
self.buy_one_get_one = buy_one_get_one
if calculated_description is not None:
self.calculated_description = calculated_description
if can_be_used_with_other_coupons is not None:
self.can_be_used_with_other_coupons = can_be_used_with_other_coupons
if coupon_oid is not None:
self.coupon_oid = coupon_oid
if coupon_type is not None:
self.coupon_type = coupon_type
if description is not None:
self.description = description
if discount_item_with_item_purchase is not None:
self.discount_item_with_item_purchase = discount_item_with_item_purchase
if discount_items is not None:
self.discount_items = discount_items
if expiration_dts is not None:
self.expiration_dts = expiration_dts
if free_item_and_shipping_with_subtotal is not None:
self.free_item_and_shipping_with_subtotal = free_item_and_shipping_with_subtotal
if free_item_with_item_purchase is not None:
self.free_item_with_item_purchase = free_item_with_item_purchase
if free_item_with_subtotal is not None:
self.free_item_with_subtotal = free_item_with_subtotal
if free_items_with_item_purchase is not None:
self.free_items_with_item_purchase = free_items_with_item_purchase
if free_items_with_mixmatch_purchase is not None:
self.free_items_with_mixmatch_purchase = free_items_with_mixmatch_purchase
if free_shipping is not None:
self.free_shipping = free_shipping
if free_shipping_specific_items is not None:
self.free_shipping_specific_items = free_shipping_specific_items
if free_shipping_with_items_purchase is not None:
self.free_shipping_with_items_purchase = free_shipping_with_items_purchase
if free_shipping_with_subtotal is not None:
self.free_shipping_with_subtotal = free_shipping_with_subtotal
if hide_from_customer is not None:
self.hide_from_customer = hide_from_customer
if merchant_code is not None:
self.merchant_code = merchant_code
if merchant_notes is not None:
self.merchant_notes = merchant_notes
if multiple_amounts_off_items is not None:
self.multiple_amounts_off_items = multiple_amounts_off_items
if no_discount is not None:
self.no_discount = no_discount
if percent_off_item_with_items_quantity_purchase is not None:
self.percent_off_item_with_items_quantity_purchase = percent_off_item_with_items_quantity_purchase
if percent_off_items is not None:
self.percent_off_items = percent_off_items
if percent_off_items_and_free_shipping is not None:
self.percent_off_items_and_free_shipping = percent_off_items_and_free_shipping
if percent_off_items_with_items_purchase is not None:
self.percent_off_items_with_items_purchase = percent_off_items_with_items_purchase
if percent_off_msrp_items is not None:
self.percent_off_msrp_items = percent_off_msrp_items
if percent_off_retail_price_items is not None:
self.percent_off_retail_price_items = percent_off_retail_price_items
if percent_off_shipping is not None:
self.percent_off_shipping = percent_off_shipping
if percent_off_subtotal is not None:
self.percent_off_subtotal = percent_off_subtotal
if percent_off_subtotal_and_free_shipping is not None:
self.percent_off_subtotal_and_free_shipping = percent_off_subtotal_and_free_shipping
if percent_off_subtotal_limit is not None:
self.percent_off_subtotal_limit = percent_off_subtotal_limit
if percent_off_subtotal_with_items_purchase is not None:
self.percent_off_subtotal_with_items_purchase = percent_off_subtotal_with_items_purchase
if percent_off_subtotal_with_subtotal is not None:
self.percent_off_subtotal_with_subtotal = percent_off_subtotal_with_subtotal
if quickbooks_code is not None:
self.quickbooks_code = quickbooks_code
if restrict_by_postal_codes is not None:
self.restrict_by_postal_codes = restrict_by_postal_codes
if restrict_by_screen_branding_theme_codes is not None:
self.restrict_by_screen_branding_theme_codes = restrict_by_screen_branding_theme_codes
if restrict_by_storefronts is not None:
self.restrict_by_storefronts = restrict_by_storefronts
if start_dts is not None:
self.start_dts = start_dts
if super_coupon is not None:
self.super_coupon = super_coupon
if tiered_amount_off_items is not None:
self.tiered_amount_off_items = tiered_amount_off_items
if tiered_amount_off_subtotal is not None:
self.tiered_amount_off_subtotal = tiered_amount_off_subtotal
if tiered_percent_off_items is not None:
self.tiered_percent_off_items = tiered_percent_off_items
if tiered_percent_off_shipping is not None:
self.tiered_percent_off_shipping = tiered_percent_off_shipping
if tiered_percent_off_subtotal is not None:
self.tiered_percent_off_subtotal = tiered_percent_off_subtotal
if usable_by is not None:
self.usable_by = usable_by
@property
def affiliate_oid(self):
"""Gets the affiliate_oid of this Coupon. # noqa: E501
Associates an order with an affiliate when this value is set. # noqa: E501
:return: The affiliate_oid of this Coupon. # noqa: E501
:rtype: int
"""
return self._affiliate_oid
@affiliate_oid.setter
def affiliate_oid(self, affiliate_oid):
"""Sets the affiliate_oid of this Coupon.
Associates an order with an affiliate when this value is set. # noqa: E501
:param affiliate_oid: The affiliate_oid of this Coupon. # noqa: E501
:type: int
"""
self._affiliate_oid = affiliate_oid
@property
def allow_multiple_one_time_codes(self):
"""Gets the allow_multiple_one_time_codes of this Coupon. # noqa: E501
True if multiple one time codes for this coupon can be used on a cart at the same time. # noqa: E501
:return: The allow_multiple_one_time_codes of this Coupon. # noqa: E501
:rtype: bool
"""
return self._allow_multiple_one_time_codes
@allow_multiple_one_time_codes.setter
def allow_multiple_one_time_codes(self, allow_multiple_one_time_codes):
"""Sets the allow_multiple_one_time_codes of this Coupon.
True if multiple one time codes for this coupon can be used on a cart at the same time. # noqa: E501
:param allow_multiple_one_time_codes: The allow_multiple_one_time_codes of this Coupon. # noqa: E501
:type: bool
"""
self._allow_multiple_one_time_codes = allow_multiple_one_time_codes
@property
def amount_off_items(self):
"""Gets the amount_off_items of this Coupon. # noqa: E501
:return: The amount_off_items of this Coupon. # noqa: E501
:rtype: CouponAmountOffItems
"""
return self._amount_off_items
@amount_off_items.setter
def amount_off_items(self, amount_off_items):
"""Sets the amount_off_items of this Coupon.
:param amount_off_items: The amount_off_items of this Coupon. # noqa: E501
:type: CouponAmountOffItems
"""
self._amount_off_items = amount_off_items
@property
def amount_off_shipping(self):
"""Gets the amount_off_shipping of this Coupon. # noqa: E501
:return: The amount_off_shipping of this Coupon. # noqa: E501
:rtype: CouponAmountOffShipping
"""
return self._amount_off_shipping
@amount_off_shipping.setter
def amount_off_shipping(self, amount_off_shipping):
"""Sets the amount_off_shipping of this Coupon.
:param amount_off_shipping: The amount_off_shipping of this Coupon. # noqa: E501
:type: CouponAmountOffShipping
"""
self._amount_off_shipping = amount_off_shipping
@property
def amount_off_shipping_with_items_purchase(self):
"""Gets the amount_off_shipping_with_items_purchase of this Coupon. # noqa: E501
:return: The amount_off_shipping_with_items_purchase of this Coupon. # noqa: E501
:rtype: CouponAmountOffShippingWithItemsPurchase
"""
return self._amount_off_shipping_with_items_purchase
@amount_off_shipping_with_items_purchase.setter
def amount_off_shipping_with_items_purchase(self, amount_off_shipping_with_items_purchase):
"""Sets the amount_off_shipping_with_items_purchase of this Coupon.
:param amount_off_shipping_with_items_purchase: The amount_off_shipping_with_items_purchase of this Coupon. # noqa: E501
:type: CouponAmountOffShippingWithItemsPurchase
"""
self._amount_off_shipping_with_items_purchase = amount_off_shipping_with_items_purchase
@property
def amount_off_subtotal(self):
"""Gets the amount_off_subtotal of this Coupon. # noqa: E501
:return: The amount_off_subtotal of this Coupon. # noqa: E501
:rtype: CouponAmountOffSubtotal
"""
return self._amount_off_subtotal
@amount_off_subtotal.setter
def amount_off_subtotal(self, amount_off_subtotal):
"""Sets the amount_off_subtotal of this Coupon.
:param amount_off_subtotal: The amount_off_subtotal of this Coupon. # noqa: E501
:type: CouponAmountOffSubtotal
"""
self._amount_off_subtotal = amount_off_subtotal
@property
def amount_off_subtotal_and_free_shipping(self):
"""Gets the amount_off_subtotal_and_free_shipping of this Coupon. # noqa: E501
:return: The amount_off_subtotal_and_free_shipping of this Coupon. # noqa: E501
:rtype: CouponAmountOffSubtotalFreeShippingWithPurchase
"""
return self._amount_off_subtotal_and_free_shipping
@amount_off_subtotal_and_free_shipping.setter
def amount_off_subtotal_and_free_shipping(self, amount_off_subtotal_and_free_shipping):
"""Sets the amount_off_subtotal_and_free_shipping of this Coupon.
:param amount_off_subtotal_and_free_shipping: The amount_off_subtotal_and_free_shipping of this Coupon. # noqa: E501
:type: CouponAmountOffSubtotalFreeShippingWithPurchase
"""
self._amount_off_subtotal_and_free_shipping = amount_off_subtotal_and_free_shipping
@property
def amount_off_subtotal_and_shipping(self):
"""Gets the amount_off_subtotal_and_shipping of this Coupon. # noqa: E501
:return: The amount_off_subtotal_and_shipping of this Coupon. # noqa: E501
:rtype: CouponAmountOffSubtotalAndShipping
"""
return self._amount_off_subtotal_and_shipping
@amount_off_subtotal_and_shipping.setter
def amount_off_subtotal_and_shipping(self, amount_off_subtotal_and_shipping):
"""Sets the amount_off_subtotal_and_shipping of this Coupon.
:param amount_off_subtotal_and_shipping: The amount_off_subtotal_and_shipping of this Coupon. # noqa: E501
:type: CouponAmountOffSubtotalAndShipping
"""
self._amount_off_subtotal_and_shipping = amount_off_subtotal_and_shipping
@property
def amount_off_subtotal_with_block_purchase(self):
"""Gets the amount_off_subtotal_with_block_purchase of this Coupon. # noqa: E501
:return: The amount_off_subtotal_with_block_purchase of this Coupon. # noqa: E501
:rtype: CouponAmountOffSubtotalWithBlockPurchase
"""
return self._amount_off_subtotal_with_block_purchase
@amount_off_subtotal_with_block_purchase.setter
def amount_off_subtotal_with_block_purchase(self, amount_off_subtotal_with_block_purchase):
"""Sets the amount_off_subtotal_with_block_purchase of this Coupon.
:param amount_off_subtotal_with_block_purchase: The amount_off_subtotal_with_block_purchase of this Coupon. # noqa: E501
:type: CouponAmountOffSubtotalWithBlockPurchase
"""
self._amount_off_subtotal_with_block_purchase = amount_off_subtotal_with_block_purchase
@property
def amount_off_subtotal_with_items_purchase(self):
"""Gets the amount_off_subtotal_with_items_purchase of this Coupon. # noqa: E501
:return: The amount_off_subtotal_with_items_purchase of this Coupon. # noqa: E501
:rtype: CouponAmountOffSubtotalWithItemsPurchase
"""
return self._amount_off_subtotal_with_items_purchase
@amount_off_subtotal_with_items_purchase.setter
def amount_off_subtotal_with_items_purchase(self, amount_off_subtotal_with_items_purchase):
"""Sets the amount_off_subtotal_with_items_purchase of this Coupon.
:param amount_off_subtotal_with_items_purchase: The amount_off_subtotal_with_items_purchase of this Coupon. # noqa: E501
:type: CouponAmountOffSubtotalWithItemsPurchase
"""
self._amount_off_subtotal_with_items_purchase = amount_off_subtotal_with_items_purchase
@property
def amount_off_subtotal_with_purchase(self):
"""Gets the amount_off_subtotal_with_purchase of this Coupon. # noqa: E501
:return: The amount_off_subtotal_with_purchase of this Coupon. # noqa: E501
:rtype: CouponAmountOffSubtotalWithPurchase
"""
return self._amount_off_subtotal_with_purchase
@amount_off_subtotal_with_purchase.setter
def amount_off_subtotal_with_purchase(self, amount_off_subtotal_with_purchase):
"""Sets the amount_off_subtotal_with_purchase of this Coupon.
:param amount_off_subtotal_with_purchase: The amount_off_subtotal_with_purchase of this Coupon. # noqa: E501
:type: CouponAmountOffSubtotalWithPurchase
"""
self._amount_off_subtotal_with_purchase = amount_off_subtotal_with_purchase
@property
def automatically_apply_coupon_codes(self):
"""Gets the automatically_apply_coupon_codes of this Coupon. # noqa: E501
:return: The automatically_apply_coupon_codes of this Coupon. # noqa: E501
:rtype: CouponAutomaticallyApplyCouponCodes
"""
return self._automatically_apply_coupon_codes
@automatically_apply_coupon_codes.setter
def automatically_apply_coupon_codes(self, automatically_apply_coupon_codes):
"""Sets the automatically_apply_coupon_codes of this Coupon.
:param automatically_apply_coupon_codes: The automatically_apply_coupon_codes of this Coupon. # noqa: E501
:type: CouponAutomaticallyApplyCouponCodes
"""
self._automatically_apply_coupon_codes = automatically_apply_coupon_codes
@property
def buy_one_get_one(self):
"""Gets the buy_one_get_one of this Coupon. # noqa: E501
:return: The buy_one_get_one of this Coupon. # noqa: E501
:rtype: CouponBuyOneGetOneLimit
"""
return self._buy_one_get_one
@buy_one_get_one.setter
def buy_one_get_one(self, buy_one_get_one):
"""Sets the buy_one_get_one of this Coupon.
:param buy_one_get_one: The buy_one_get_one of this Coupon. # noqa: E501
:type: CouponBuyOneGetOneLimit
"""
self._buy_one_get_one = buy_one_get_one
@property
def calculated_description(self):
"""Gets the calculated_description of this Coupon. # noqa: E501
Calculated description displayed to the customer if no description is specified. # noqa: E501
:return: The calculated_description of this Coupon. # noqa: E501
:rtype: str
"""
return self._calculated_description
@calculated_description.setter
def calculated_description(self, calculated_description):
"""Sets the calculated_description of this Coupon.
Calculated description displayed to the customer if no description is specified. # noqa: E501
:param calculated_description: The calculated_description of this Coupon. # noqa: E501
:type: str
"""
self._calculated_description = calculated_description
@property
def can_be_used_with_other_coupons(self):
"""Gets the can_be_used_with_other_coupons of this Coupon. # noqa: E501
True if this coupon can be used with other coupons in a single order. # noqa: E501
:return: The can_be_used_with_other_coupons of this Coupon. # noqa: E501
:rtype: bool
"""
return self._can_be_used_with_other_coupons
@can_be_used_with_other_coupons.setter
def can_be_used_with_other_coupons(self, can_be_used_with_other_coupons):
"""Sets the can_be_used_with_other_coupons of this Coupon.
True if this coupon can be used with other coupons in a single order. # noqa: E501
:param can_be_used_with_other_coupons: The can_be_used_with_other_coupons of this Coupon. # noqa: E501
:type: bool
"""
self._can_be_used_with_other_coupons = can_be_used_with_other_coupons
@property
def coupon_oid(self):
"""Gets the coupon_oid of this Coupon. # noqa: E501
Coupon oid. # noqa: E501
:return: The coupon_oid of this Coupon. # noqa: E501
:rtype: int
"""
return self._coupon_oid
@coupon_oid.setter
def coupon_oid(self, coupon_oid):
"""Sets the coupon_oid of this Coupon.
Coupon oid. # noqa: E501
:param coupon_oid: The coupon_oid of this Coupon. # noqa: E501
:type: int
"""
self._coupon_oid = coupon_oid
@property
def coupon_type(self):
"""Gets the coupon_type of this Coupon. # noqa: E501
Coupon type. # noqa: E501
:return: The coupon_type of this Coupon. # noqa: E501
:rtype: str
"""
return self._coupon_type
@coupon_type.setter
def coupon_type(self, coupon_type):
"""Sets the coupon_type of this Coupon.
Coupon type. # noqa: E501
:param coupon_type: The coupon_type of this Coupon. # noqa: E501
:type: str
"""
if coupon_type is not None and len(coupon_type) > 65:
raise ValueError("Invalid value for `coupon_type`, length must be less than or equal to `65`") # noqa: E501
self._coupon_type = coupon_type
@property
def description(self):
"""Gets the description of this Coupon. # noqa: E501
Description of the coupon up to 50 characters. # noqa: E501
:return: The description of this Coupon. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Coupon.
Description of the coupon up to 50 characters. # noqa: E501
:param description: The description of this Coupon. # noqa: E501
:type: str
"""
if description is not None and len(description) > 50:
raise ValueError("Invalid value for `description`, length must be less than or equal to `50`") # noqa: E501
self._description = description
@property
def discount_item_with_item_purchase(self):
"""Gets the discount_item_with_item_purchase of this Coupon. # noqa: E501
:return: The discount_item_with_item_purchase of this Coupon. # noqa: E501
:rtype: CouponDiscountItemWithItemPurchase
"""
return self._discount_item_with_item_purchase
@discount_item_with_item_purchase.setter
def discount_item_with_item_purchase(self, discount_item_with_item_purchase):
"""Sets the discount_item_with_item_purchase of this Coupon.
:param discount_item_with_item_purchase: The discount_item_with_item_purchase of this Coupon. # noqa: E501
:type: CouponDiscountItemWithItemPurchase
"""
self._discount_item_with_item_purchase = discount_item_with_item_purchase
@property
def discount_items(self):
"""Gets the discount_items of this Coupon. # noqa: E501
:return: The discount_items of this Coupon. # noqa: E501
:rtype: CouponDiscountItems
"""
return self._discount_items
@discount_items.setter
def discount_items(self, discount_items):
"""Sets the discount_items of this Coupon.
:param discount_items: The discount_items of this Coupon. # noqa: E501
:type: CouponDiscountItems
"""
self._discount_items = discount_items
@property
def expiration_dts(self):
"""Gets the expiration_dts of this Coupon. # noqa: E501
Date/time when coupon expires # noqa: E501
:return: The expiration_dts of this Coupon. # noqa: E501
:rtype: str
"""
return self._expiration_dts
@expiration_dts.setter
def expiration_dts(self, expiration_dts):
"""Sets the expiration_dts of this Coupon.
Date/time when coupon expires # noqa: E501
:param expiration_dts: The expiration_dts of this Coupon. # noqa: E501
:type: str
"""
self._expiration_dts = expiration_dts
@property
def free_item_and_shipping_with_subtotal(self):
"""Gets the free_item_and_shipping_with_subtotal of this Coupon. # noqa: E501
:return: The free_item_and_shipping_with_subtotal of this Coupon. # noqa: E501
:rtype: CouponFreeItemAndShippingWithSubtotal
"""
return self._free_item_and_shipping_with_subtotal
@free_item_and_shipping_with_subtotal.setter
def free_item_and_shipping_with_subtotal(self, free_item_and_shipping_with_subtotal):
"""Sets the free_item_and_shipping_with_subtotal of this Coupon.
:param free_item_and_shipping_with_subtotal: The free_item_and_shipping_with_subtotal of this Coupon. # noqa: E501
:type: CouponFreeItemAndShippingWithSubtotal
"""
self._free_item_and_shipping_with_subtotal = free_item_and_shipping_with_subtotal
@property
def free_item_with_item_purchase(self):
"""Gets the free_item_with_item_purchase of this Coupon. # noqa: E501
:return: The free_item_with_item_purchase of this Coupon. # noqa: E501
:rtype: CouponFreeItemWithItemPurchase
"""
return self._free_item_with_item_purchase
@free_item_with_item_purchase.setter
def free_item_with_item_purchase(self, free_item_with_item_purchase):
"""Sets the free_item_with_item_purchase of this Coupon.
:param free_item_with_item_purchase: The free_item_with_item_purchase of this Coupon. # noqa: E501
:type: CouponFreeItemWithItemPurchase
"""
self._free_item_with_item_purchase = free_item_with_item_purchase
@property
def free_item_with_subtotal(self):
"""Gets the free_item_with_subtotal of this Coupon. # noqa: E501
:return: The free_item_with_subtotal of this Coupon. # noqa: E501
:rtype: CouponFreeItemWithSubtotal
"""
return self._free_item_with_subtotal
@free_item_with_subtotal.setter
def free_item_with_subtotal(self, free_item_with_subtotal):
"""Sets the free_item_with_subtotal of this Coupon.
:param free_item_with_subtotal: The free_item_with_subtotal of this Coupon. # noqa: E501
:type: CouponFreeItemWithSubtotal
"""
self._free_item_with_subtotal = free_item_with_subtotal
@property
def free_items_with_item_purchase(self):
"""Gets the free_items_with_item_purchase of this Coupon. # noqa: E501
:return: The free_items_with_item_purchase of this Coupon. # noqa: E501
:rtype: CouponFreeItemsWithItemPurchase
"""
return self._free_items_with_item_purchase
@free_items_with_item_purchase.setter
def free_items_with_item_purchase(self, free_items_with_item_purchase):
"""Sets the free_items_with_item_purchase of this Coupon.
:param free_items_with_item_purchase: The free_items_with_item_purchase of this Coupon. # noqa: E501
:type: CouponFreeItemsWithItemPurchase
"""
self._free_items_with_item_purchase = free_items_with_item_purchase
@property
def free_items_with_mixmatch_purchase(self):
"""Gets the free_items_with_mixmatch_purchase of this Coupon. # noqa: E501
:return: The free_items_with_mixmatch_purchase of this Coupon. # noqa: E501
:rtype: CouponFreeItemsWithMixMatchPurchase
"""
return self._free_items_with_mixmatch_purchase
@free_items_with_mixmatch_purchase.setter
def free_items_with_mixmatch_purchase(self, free_items_with_mixmatch_purchase):
"""Sets the free_items_with_mixmatch_purchase of this Coupon.
:param free_items_with_mixmatch_purchase: The free_items_with_mixmatch_purchase of this Coupon. # noqa: E501
:type: CouponFreeItemsWithMixMatchPurchase
"""
self._free_items_with_mixmatch_purchase = free_items_with_mixmatch_purchase
@property
def free_shipping(self):
"""Gets the free_shipping of this Coupon. # noqa: E501
:return: The free_shipping of this Coupon. # noqa: E501
:rtype: CouponFreeShipping
"""
return self._free_shipping
@free_shipping.setter
def free_shipping(self, free_shipping):
"""Sets the free_shipping of this Coupon.
:param free_shipping: The free_shipping of this Coupon. # noqa: E501
:type: CouponFreeShipping
"""
self._free_shipping = free_shipping
@property
def free_shipping_specific_items(self):
"""Gets the free_shipping_specific_items of this Coupon. # noqa: E501
:return: The free_shipping_specific_items of this Coupon. # noqa: E501
:rtype: CouponFreeShippingSpecificItems
"""
return self._free_shipping_specific_items
@free_shipping_specific_items.setter
def free_shipping_specific_items(self, free_shipping_specific_items):
"""Sets the free_shipping_specific_items of this Coupon.
:param free_shipping_specific_items: The free_shipping_specific_items of this Coupon. # noqa: E501
:type: CouponFreeShippingSpecificItems
"""
self._free_shipping_specific_items = free_shipping_specific_items
@property
def free_shipping_with_items_purchase(self):
"""Gets the free_shipping_with_items_purchase of this Coupon. # noqa: E501
:return: The free_shipping_with_items_purchase of this Coupon. # noqa: E501
:rtype: CouponFreeShippingWithItemsPurchase
"""
return self._free_shipping_with_items_purchase
@free_shipping_with_items_purchase.setter
def free_shipping_with_items_purchase(self, free_shipping_with_items_purchase):
"""Sets the free_shipping_with_items_purchase of this Coupon.
:param free_shipping_with_items_purchase: The free_shipping_with_items_purchase of this Coupon. # noqa: E501
:type: CouponFreeShippingWithItemsPurchase
"""
self._free_shipping_with_items_purchase = free_shipping_with_items_purchase
@property
def free_shipping_with_subtotal(self):
"""Gets the free_shipping_with_subtotal of this Coupon. # noqa: E501
:return: The free_shipping_with_subtotal of this Coupon. # noqa: E501
:rtype: CouponFreeShippingWithSubtotal
"""
return self._free_shipping_with_subtotal
@free_shipping_with_subtotal.setter
def free_shipping_with_subtotal(self, free_shipping_with_subtotal):
"""Sets the free_shipping_with_subtotal of this Coupon.
:param free_shipping_with_subtotal: The free_shipping_with_subtotal of this Coupon. # noqa: E501
:type: CouponFreeShippingWithSubtotal
"""
self._free_shipping_with_subtotal = free_shipping_with_subtotal
@property
def hide_from_customer(self):
"""Gets the hide_from_customer of this Coupon. # noqa: E501
Hide coupon from customer during checkout. Often used when coupons are automatic discounting mechanisms. # noqa: E501
:return: The hide_from_customer of this Coupon. # noqa: E501
:rtype: bool
"""
return self._hide_from_customer
@hide_from_customer.setter
def hide_from_customer(self, hide_from_customer):
"""Sets the hide_from_customer of this Coupon.
Hide coupon from customer during checkout. Often used when coupons are automatic discounting mechanisms. # noqa: E501
:param hide_from_customer: The hide_from_customer of this Coupon. # noqa: E501
:type: bool
"""
self._hide_from_customer = hide_from_customer
@property
def merchant_code(self):
"""Gets the merchant_code of this Coupon. # noqa: E501
Merchant code of coupon up to 20 characters. # noqa: E501
:return: The merchant_code of this Coupon. # noqa: E501
:rtype: str
"""
return self._merchant_code
@merchant_code.setter
def merchant_code(self, merchant_code):
"""Sets the merchant_code of this Coupon.
Merchant code of coupon up to 20 characters. # noqa: E501
:param merchant_code: The merchant_code of this Coupon. # noqa: E501
:type: str
"""
if merchant_code is not None and len(merchant_code) > 20:
raise ValueError("Invalid value for `merchant_code`, length must be less than or equal to `20`") # noqa: E501
self._merchant_code = merchant_code
@property
def merchant_notes(self):
"""Gets the merchant_notes of this Coupon. # noqa: E501
Internal notes about this coupon. These are not visible to customer. # noqa: E501
:return: The merchant_notes of this Coupon. # noqa: E501
:rtype: str
"""
return self._merchant_notes
@merchant_notes.setter
def merchant_notes(self, merchant_notes):
"""Sets the merchant_notes of this Coupon.
Internal notes about this coupon. These are not visible to customer. # noqa: E501
:param merchant_notes: The merchant_notes of this Coupon. # noqa: E501
:type: str
"""
if merchant_notes is not None and len(merchant_notes) > 250:
raise ValueError("Invalid value for `merchant_notes`, length must be less than or equal to `250`") # noqa: E501
self._merchant_notes = merchant_notes
@property
def multiple_amounts_off_items(self):
"""Gets the multiple_amounts_off_items of this Coupon. # noqa: E501
:return: The multiple_amounts_off_items of this Coupon. # noqa: E501
:rtype: CouponMultipleAmountsOffItems
"""
return self._multiple_amounts_off_items
@multiple_amounts_off_items.setter
def multiple_amounts_off_items(self, multiple_amounts_off_items):
"""Sets the multiple_amounts_off_items of this Coupon.
:param multiple_amounts_off_items: The multiple_amounts_off_items of this Coupon. # noqa: E501
:type: CouponMultipleAmountsOffItems
"""
self._multiple_amounts_off_items = multiple_amounts_off_items
@property
def no_discount(self):
"""Gets the no_discount of this Coupon. # noqa: E501
:return: The no_discount of this Coupon. # noqa: E501
:rtype: CouponNoDiscount
"""
return self._no_discount
@no_discount.setter
def no_discount(self, no_discount):
"""Sets the no_discount of this Coupon.
:param no_discount: The no_discount of this Coupon. # noqa: E501
:type: CouponNoDiscount
"""
self._no_discount = no_discount
@property
def percent_off_item_with_items_quantity_purchase(self):
"""Gets the percent_off_item_with_items_quantity_purchase of this Coupon. # noqa: E501
:return: The percent_off_item_with_items_quantity_purchase of this Coupon. # noqa: E501
:rtype: CouponPercentOffItemWithItemsQuantityPurchase
"""
return self._percent_off_item_with_items_quantity_purchase
@percent_off_item_with_items_quantity_purchase.setter
def percent_off_item_with_items_quantity_purchase(self, percent_off_item_with_items_quantity_purchase):
"""Sets the percent_off_item_with_items_quantity_purchase of this Coupon.
:param percent_off_item_with_items_quantity_purchase: The percent_off_item_with_items_quantity_purchase of this Coupon. # noqa: E501
:type: CouponPercentOffItemWithItemsQuantityPurchase
"""
self._percent_off_item_with_items_quantity_purchase = percent_off_item_with_items_quantity_purchase
@property
def percent_off_items(self):
"""Gets the percent_off_items of this Coupon. # noqa: E501
:return: The percent_off_items of this Coupon. # noqa: E501
:rtype: CouponPercentOffItems
"""
return self._percent_off_items
@percent_off_items.setter
def percent_off_items(self, percent_off_items):
"""Sets the percent_off_items of this Coupon.
:param percent_off_items: The percent_off_items of this Coupon. # noqa: E501
:type: CouponPercentOffItems
"""
self._percent_off_items = percent_off_items
@property
def percent_off_items_and_free_shipping(self):
"""Gets the percent_off_items_and_free_shipping of this Coupon. # noqa: E501
:return: The percent_off_items_and_free_shipping of this Coupon. # noqa: E501
:rtype: CouponPercentOffItemsAndFreeShipping
"""
return self._percent_off_items_and_free_shipping
@percent_off_items_and_free_shipping.setter
def percent_off_items_and_free_shipping(self, percent_off_items_and_free_shipping):
"""Sets the percent_off_items_and_free_shipping of this Coupon.
:param percent_off_items_and_free_shipping: The percent_off_items_and_free_shipping of this Coupon. # noqa: E501
:type: CouponPercentOffItemsAndFreeShipping
"""
self._percent_off_items_and_free_shipping = percent_off_items_and_free_shipping
@property
def percent_off_items_with_items_purchase(self):
"""Gets the percent_off_items_with_items_purchase of this Coupon. # noqa: E501
:return: The percent_off_items_with_items_purchase of this Coupon. # noqa: E501
:rtype: CouponPercentOffItemsWithItemsPurchase
"""
return self._percent_off_items_with_items_purchase
@percent_off_items_with_items_purchase.setter
def percent_off_items_with_items_purchase(self, percent_off_items_with_items_purchase):
"""Sets the percent_off_items_with_items_purchase of this Coupon.
:param percent_off_items_with_items_purchase: The percent_off_items_with_items_purchase of this Coupon. # noqa: E501
:type: CouponPercentOffItemsWithItemsPurchase
"""
self._percent_off_items_with_items_purchase = percent_off_items_with_items_purchase
@property
def percent_off_msrp_items(self):
"""Gets the percent_off_msrp_items of this Coupon. # noqa: E501
:return: The percent_off_msrp_items of this Coupon. # noqa: E501
:rtype: CouponPercentOffMsrpItems
"""
return self._percent_off_msrp_items
@percent_off_msrp_items.setter
def percent_off_msrp_items(self, percent_off_msrp_items):
"""Sets the percent_off_msrp_items of this Coupon.
:param percent_off_msrp_items: The percent_off_msrp_items of this Coupon. # noqa: E501
:type: CouponPercentOffMsrpItems
"""
self._percent_off_msrp_items = percent_off_msrp_items
@property
def percent_off_retail_price_items(self):
"""Gets the percent_off_retail_price_items of this Coupon. # noqa: E501
:return: The percent_off_retail_price_items of this Coupon. # noqa: E501
:rtype: CouponPercentOffRetailPriceItems
"""
return self._percent_off_retail_price_items
@percent_off_retail_price_items.setter
def percent_off_retail_price_items(self, percent_off_retail_price_items):
"""Sets the percent_off_retail_price_items of this Coupon.
:param percent_off_retail_price_items: The percent_off_retail_price_items of this Coupon. # noqa: E501
:type: CouponPercentOffRetailPriceItems
"""
self._percent_off_retail_price_items = percent_off_retail_price_items
@property
def percent_off_shipping(self):
"""Gets the percent_off_shipping of this Coupon. # noqa: E501
:return: The percent_off_shipping of this Coupon. # noqa: E501
:rtype: CouponPercentOffShipping
"""
return self._percent_off_shipping
@percent_off_shipping.setter
def percent_off_shipping(self, percent_off_shipping):
"""Sets the percent_off_shipping of this Coupon.
:param percent_off_shipping: The percent_off_shipping of this Coupon. # noqa: E501
:type: CouponPercentOffShipping
"""
self._percent_off_shipping = percent_off_shipping
@property
def percent_off_subtotal(self):
"""Gets the percent_off_subtotal of this Coupon. # noqa: E501
:return: The percent_off_subtotal of this Coupon. # noqa: E501
:rtype: CouponPercentOffSubtotal
"""
return self._percent_off_subtotal
@percent_off_subtotal.setter
def percent_off_subtotal(self, percent_off_subtotal):
"""Sets the percent_off_subtotal of this Coupon.
:param percent_off_subtotal: The percent_off_subtotal of this Coupon. # noqa: E501
:type: CouponPercentOffSubtotal
"""
self._percent_off_subtotal = percent_off_subtotal
@property
def percent_off_subtotal_and_free_shipping(self):
"""Gets the percent_off_subtotal_and_free_shipping of this Coupon. # noqa: E501
:return: The percent_off_subtotal_and_free_shipping of this Coupon. # noqa: E501
:rtype: CouponPercentOffSubtotalAndFreeShipping
"""
return self._percent_off_subtotal_and_free_shipping
@percent_off_subtotal_and_free_shipping.setter
def percent_off_subtotal_and_free_shipping(self, percent_off_subtotal_and_free_shipping):
"""Sets the percent_off_subtotal_and_free_shipping of this Coupon.
:param percent_off_subtotal_and_free_shipping: The percent_off_subtotal_and_free_shipping of this Coupon. # noqa: E501
:type: CouponPercentOffSubtotalAndFreeShipping
"""
self._percent_off_subtotal_and_free_shipping = percent_off_subtotal_and_free_shipping
@property
def percent_off_subtotal_limit(self):
"""Gets the percent_off_subtotal_limit of this Coupon. # noqa: E501
:return: The percent_off_subtotal_limit of this Coupon. # noqa: E501
:rtype: CouponPercentOffSubtotalLimit
"""
return self._percent_off_subtotal_limit
@percent_off_subtotal_limit.setter
def percent_off_subtotal_limit(self, percent_off_subtotal_limit):
"""Sets the percent_off_subtotal_limit of this Coupon.
:param percent_off_subtotal_limit: The percent_off_subtotal_limit of this Coupon. # noqa: E501
:type: CouponPercentOffSubtotalLimit
"""
self._percent_off_subtotal_limit = percent_off_subtotal_limit
@property
def percent_off_subtotal_with_items_purchase(self):
"""Gets the percent_off_subtotal_with_items_purchase of this Coupon. # noqa: E501
:return: The percent_off_subtotal_with_items_purchase of this Coupon. # noqa: E501
:rtype: CouponPercentOffSubtotalWithItemsPurchase
"""
return self._percent_off_subtotal_with_items_purchase
@percent_off_subtotal_with_items_purchase.setter
def percent_off_subtotal_with_items_purchase(self, percent_off_subtotal_with_items_purchase):
"""Sets the percent_off_subtotal_with_items_purchase of this Coupon.
:param percent_off_subtotal_with_items_purchase: The percent_off_subtotal_with_items_purchase of this Coupon. # noqa: E501
:type: CouponPercentOffSubtotalWithItemsPurchase
"""
self._percent_off_subtotal_with_items_purchase = percent_off_subtotal_with_items_purchase
@property
def percent_off_subtotal_with_subtotal(self):
"""Gets the percent_off_subtotal_with_subtotal of this Coupon. # noqa: E501
:return: The percent_off_subtotal_with_subtotal of this Coupon. # noqa: E501
:rtype: CouponPercentOffSubtotalWithSubtotal
"""
return self._percent_off_subtotal_with_subtotal
@percent_off_subtotal_with_subtotal.setter
def percent_off_subtotal_with_subtotal(self, percent_off_subtotal_with_subtotal):
"""Sets the percent_off_subtotal_with_subtotal of this Coupon.
:param percent_off_subtotal_with_subtotal: The percent_off_subtotal_with_subtotal of this Coupon. # noqa: E501
:type: CouponPercentOffSubtotalWithSubtotal
"""
self._percent_off_subtotal_with_subtotal = percent_off_subtotal_with_subtotal
@property
def quickbooks_code(self):
"""Gets the quickbooks_code of this Coupon. # noqa: E501
Quickbooks accounting code. # noqa: E501
:return: The quickbooks_code of this Coupon. # noqa: E501
:rtype: str
"""
return self._quickbooks_code
@quickbooks_code.setter
def quickbooks_code(self, quickbooks_code):
"""Sets the quickbooks_code of this Coupon.
Quickbooks accounting code. # noqa: E501
:param quickbooks_code: The quickbooks_code of this Coupon. # noqa: E501
:type: str
"""
if quickbooks_code is not None and len(quickbooks_code) > 20:
raise ValueError("Invalid value for `quickbooks_code`, length must be less than or equal to `20`") # noqa: E501
self._quickbooks_code = quickbooks_code
@property
def restrict_by_postal_codes(self):
"""Gets the restrict_by_postal_codes of this Coupon. # noqa: E501
Optional list of postal codes which restrict a coupon to within these postal codes. # noqa: E501
:return: The restrict_by_postal_codes of this Coupon. # noqa: E501
:rtype: list[str]
"""
return self._restrict_by_postal_codes
@restrict_by_postal_codes.setter
def restrict_by_postal_codes(self, restrict_by_postal_codes):
"""Sets the restrict_by_postal_codes of this Coupon.
Optional list of postal codes which restrict a coupon to within these postal codes. # noqa: E501
:param restrict_by_postal_codes: The restrict_by_postal_codes of this Coupon. # noqa: E501
:type: list[str]
"""
self._restrict_by_postal_codes = restrict_by_postal_codes
@property
def restrict_by_screen_branding_theme_codes(self):
"""Gets the restrict_by_screen_branding_theme_codes of this Coupon. # noqa: E501
Optional list of legacy screen branding theme codes to limit coupon use to only those themes. # noqa: E501
:return: The restrict_by_screen_branding_theme_codes of this Coupon. # noqa: E501
:rtype: list[CouponRestriction]
"""
return self._restrict_by_screen_branding_theme_codes
@restrict_by_screen_branding_theme_codes.setter
def restrict_by_screen_branding_theme_codes(self, restrict_by_screen_branding_theme_codes):
"""Sets the restrict_by_screen_branding_theme_codes of this Coupon.
Optional list of legacy screen branding theme codes to limit coupon use to only those themes. # noqa: E501
:param restrict_by_screen_branding_theme_codes: The restrict_by_screen_branding_theme_codes of this Coupon. # noqa: E501
:type: list[CouponRestriction]
"""
self._restrict_by_screen_branding_theme_codes = restrict_by_screen_branding_theme_codes
@property
def restrict_by_storefronts(self):
"""Gets the restrict_by_storefronts of this Coupon. # noqa: E501
Optional list of storefronts to limit coupon use to only those storefronts. # noqa: E501
:return: The restrict_by_storefronts of this Coupon. # noqa: E501
:rtype: list[CouponRestriction]
"""
return self._restrict_by_storefronts
@restrict_by_storefronts.setter
def restrict_by_storefronts(self, restrict_by_storefronts):
"""Sets the restrict_by_storefronts of this Coupon.
Optional list of storefronts to limit coupon use to only those storefronts. # noqa: E501
:param restrict_by_storefronts: The restrict_by_storefronts of this Coupon. # noqa: E501
:type: list[CouponRestriction]
"""
self._restrict_by_storefronts = restrict_by_storefronts
@property
def start_dts(self):
"""Gets the start_dts of this Coupon. # noqa: E501
Date/time when coupon is valid # noqa: E501
:return: The start_dts of this Coupon. # noqa: E501
:rtype: str
"""
return self._start_dts
@start_dts.setter
def start_dts(self, start_dts):
"""Sets the start_dts of this Coupon.
Date/time when coupon is valid # noqa: E501
:param start_dts: The start_dts of this Coupon. # noqa: E501
:type: str
"""
self._start_dts = start_dts
@property
def super_coupon(self):
"""Gets the super_coupon of this Coupon. # noqa: E501
        If true, this coupon can be used with ANY other coupon regardless of the other coupons' configuration # noqa: E501
:return: The super_coupon of this Coupon. # noqa: E501
:rtype: bool
"""
return self._super_coupon
@super_coupon.setter
def super_coupon(self, super_coupon):
"""Sets the super_coupon of this Coupon.
        If true, this coupon can be used with ANY other coupon regardless of the other coupons' configuration # noqa: E501
:param super_coupon: The super_coupon of this Coupon. # noqa: E501
:type: bool
"""
self._super_coupon = super_coupon
@property
def tiered_amount_off_items(self):
"""Gets the tiered_amount_off_items of this Coupon. # noqa: E501
:return: The tiered_amount_off_items of this Coupon. # noqa: E501
:rtype: CouponTieredAmountOffItems
"""
return self._tiered_amount_off_items
@tiered_amount_off_items.setter
def tiered_amount_off_items(self, tiered_amount_off_items):
"""Sets the tiered_amount_off_items of this Coupon.
:param tiered_amount_off_items: The tiered_amount_off_items of this Coupon. # noqa: E501
:type: CouponTieredAmountOffItems
"""
self._tiered_amount_off_items = tiered_amount_off_items
@property
def tiered_amount_off_subtotal(self):
"""Gets the tiered_amount_off_subtotal of this Coupon. # noqa: E501
:return: The tiered_amount_off_subtotal of this Coupon. # noqa: E501
:rtype: CouponTieredAmountOffSubtotal
"""
return self._tiered_amount_off_subtotal
@tiered_amount_off_subtotal.setter
def tiered_amount_off_subtotal(self, tiered_amount_off_subtotal):
"""Sets the tiered_amount_off_subtotal of this Coupon.
:param tiered_amount_off_subtotal: The tiered_amount_off_subtotal of this Coupon. # noqa: E501
:type: CouponTieredAmountOffSubtotal
"""
self._tiered_amount_off_subtotal = tiered_amount_off_subtotal
@property
def tiered_percent_off_items(self):
"""Gets the tiered_percent_off_items of this Coupon. # noqa: E501
:return: The tiered_percent_off_items of this Coupon. # noqa: E501
:rtype: CouponTieredPercentOffItems
"""
return self._tiered_percent_off_items
@tiered_percent_off_items.setter
def tiered_percent_off_items(self, tiered_percent_off_items):
"""Sets the tiered_percent_off_items of this Coupon.
:param tiered_percent_off_items: The tiered_percent_off_items of this Coupon. # noqa: E501
:type: CouponTieredPercentOffItems
"""
self._tiered_percent_off_items = tiered_percent_off_items
@property
def tiered_percent_off_shipping(self):
"""Gets the tiered_percent_off_shipping of this Coupon. # noqa: E501
:return: The tiered_percent_off_shipping of this Coupon. # noqa: E501
:rtype: CouponTieredPercentOffShipping
"""
return self._tiered_percent_off_shipping
@tiered_percent_off_shipping.setter
def tiered_percent_off_shipping(self, tiered_percent_off_shipping):
"""Sets the tiered_percent_off_shipping of this Coupon.
:param tiered_percent_off_shipping: The tiered_percent_off_shipping of this Coupon. # noqa: E501
:type: CouponTieredPercentOffShipping
"""
self._tiered_percent_off_shipping = tiered_percent_off_shipping
@property
def tiered_percent_off_subtotal(self):
"""Gets the tiered_percent_off_subtotal of this Coupon. # noqa: E501
:return: The tiered_percent_off_subtotal of this Coupon. # noqa: E501
:rtype: CouponTieredPercentOffSubtotal
"""
return self._tiered_percent_off_subtotal
@tiered_percent_off_subtotal.setter
def tiered_percent_off_subtotal(self, tiered_percent_off_subtotal):
"""Sets the tiered_percent_off_subtotal of this Coupon.
:param tiered_percent_off_subtotal: The tiered_percent_off_subtotal of this Coupon. # noqa: E501
:type: CouponTieredPercentOffSubtotal
"""
self._tiered_percent_off_subtotal = tiered_percent_off_subtotal
@property
def usable_by(self):
"""Gets the usable_by of this Coupon. # noqa: E501
Who may use this coupon. # noqa: E501
:return: The usable_by of this Coupon. # noqa: E501
:rtype: str
"""
return self._usable_by
@usable_by.setter
def usable_by(self, usable_by):
"""Sets the usable_by of this Coupon.
Who may use this coupon. # noqa: E501
:param usable_by: The usable_by of this Coupon. # noqa: E501
:type: str
"""
if usable_by is not None and len(usable_by) > 50:
raise ValueError("Invalid value for `usable_by`, length must be less than or equal to `50`") # noqa: E501
self._usable_by = usable_by
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Coupon, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Coupon):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | |
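# --- Added usage sketch (not part of the generated model) ---------------------
# Minimal example of how this generated model is typically used, assuming the
# generated constructor accepts keyword arguments that default to None (the
# usual swagger-codegen convention). Field values below are illustrative only.
if __name__ == "__main__":
    example = Coupon()
    example.quickbooks_code = "10OFF"        # validated above: max length 20
    example.usable_by = "single customer"    # validated above: max length 50
    example.super_coupon = False
    print(example.to_dict())                 # plain dict, nested models expanded
    print(example)                           # __repr__ delegates to to_dict() via to_str()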
timehelpers.py | from datetime import datetime
import datetime
| yesterday = today - datetime.timedelta(days=1)
yesterday_timestamp = int(yesterday.timestamp()) * 1000
return yesterday_timestamp
def extractDate(name, prefix, fileType):
prefixLen = len(prefix)
fileTypeLen = len(fileType)
return name[prefixLen+1:-fileTypeLen] | def yesterday(today=datetime.datetime.now()): |
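# --- Added commentary (not part of the original helpers) ----------------------
# extractDate() skips a known prefix plus one separator character and drops a
# known file extension, e.g. extractDate("sales_20240101.csv", "sales", ".csv")
# returns "20240101" (the example file name is illustrative).
# Note the default argument above: datetime.datetime.now() is evaluated once at
# import time, so a long-running process keeps reusing that first timestamp
# unless the caller passes `today` explicitly.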
cors.rs | //! CORS handling utility functions
use std::{fmt, ops};
use hosts::{Host, Port};
use matcher::{Matcher, Pattern};
/// Origin Protocol
#[derive(Clone, Hash, Debug, PartialEq, Eq)]
pub enum OriginProtocol {
/// Http protocol
Http,
/// Https protocol
Https,
/// Custom protocol
Custom(String),
}
/// Request Origin
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
pub struct Origin {
protocol: OriginProtocol,
host: Host,
as_string: String,
matcher: Matcher,
}
impl<T: AsRef<str>> From<T> for Origin {
fn from(string: T) -> Self {
Origin::parse(string.as_ref())
}
}
impl Origin {
fn with_host(protocol: OriginProtocol, host: Host) -> Self {
let string = Self::to_string(&protocol, &host);
let matcher = Matcher::new(&string);
Origin {
protocol: protocol,
host: host,
as_string: string,
matcher: matcher,
}
}
/// Creates new origin given protocol, hostname and port parts.
/// Pre-processes input data if necessary.
pub fn new<T: Into<Port>>(protocol: OriginProtocol, host: &str, port: T) -> Self {
Self::with_host(protocol, Host::new(host, port))
}
/// Attempts to parse given string as a `Origin`.
/// NOTE: This method always succeeds and falls back to sensible defaults.
pub fn parse(data: &str) -> Self {
let mut it = data.split("://");
let proto = it.next().expect("split always returns non-empty iterator.");
let hostname = it.next();
let (proto, hostname) = match hostname {
None => (None, proto),
Some(hostname) => (Some(proto), hostname),
};
let proto = proto.map(str::to_lowercase);
let hostname = Host::parse(hostname);
let protocol = match proto {
None => OriginProtocol::Http,
Some(ref p) if p == "http" => OriginProtocol::Http,
Some(ref p) if p == "https" => OriginProtocol::Https,
Some(other) => OriginProtocol::Custom(other),
};
Origin::with_host(protocol, hostname)
}
fn to_string(protocol: &OriginProtocol, host: &Host) -> String {
format!(
"{}://{}",
match *protocol {
OriginProtocol::Http => "http",
OriginProtocol::Https => "https",
OriginProtocol::Custom(ref protocol) => protocol,
},
&**host,
)
}
}
impl Pattern for Origin {
fn matches<T: AsRef<str>>(&self, other: T) -> bool {
self.matcher.matches(other)
}
}
impl ops::Deref for Origin {
type Target = str;
fn deref(&self) -> &Self::Target {
&self.as_string
}
}
/// Origins allowed to access
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum AccessControlAllowOrigin {
/// Specific hostname
Value(Origin),
/// null-origin (file:///, sandboxed iframe)
Null,
/// Any non-null origin
Any,
}
impl fmt::Display for AccessControlAllowOrigin {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", match *self {
AccessControlAllowOrigin::Any => "*",
AccessControlAllowOrigin::Null => "null",
AccessControlAllowOrigin::Value(ref val) => val,
})
}
}
impl<T: Into<String>> From<T> for AccessControlAllowOrigin {
fn from(s: T) -> AccessControlAllowOrigin {
match s.into().as_str() {
"all" | "*" | "any" => AccessControlAllowOrigin::Any,
"null" => AccessControlAllowOrigin::Null,
origin => AccessControlAllowOrigin::Value(origin.into()),
}
}
}
/// CORS Header Result.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CorsHeader<T = AccessControlAllowOrigin> {
/// CORS header was not required. Origin is not present in the request.
NotRequired,
/// CORS header is not returned, Origin is not allowed to access the resource.
Invalid,
/// CORS header to include in the response. Origin is allowed to access the resource.
Ok(T),
}
impl<T> CorsHeader<T> {
/// Maps `Ok` variant of `CorsHeader`.
pub fn map<F, O>(self, f: F) -> CorsHeader<O> where
F: FnOnce(T) -> O,
{
use self::CorsHeader::*;
match self {
NotRequired => NotRequired,
Invalid => Invalid,
Ok(val) => Ok(f(val)),
}
}
}
impl<T> Into<Option<T>> for CorsHeader<T> {
fn into(self) -> Option<T> {
use self::CorsHeader::*;
match self {
NotRequired | Invalid => None,
Ok(header) => Some(header),
}
}
}
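// --- Added usage note (not part of the original module) -----------------------
// `CorsHeader::map` rewrites only the `Ok` payload, and the `Into<Option<_>>`
// impl above collapses `NotRequired`/`Invalid` to `None`, so a caller can turn
// a check into an optional header value in one chain (sketch; `origin`, `host`
// and `allowed` are whatever the caller already has):
//
//     let header: Option<String> = get_cors_header(origin, host, &allowed)
//         .map(|value| value.to_string())
//         .into();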
/// Returns correct CORS header (if any) given list of allowed origins and current origin.
pub fn get_cors_header(origin: Option<&str>, host: Option<&str>, allowed: &Option<Vec<AccessControlAllowOrigin>>) -> CorsHeader {
match origin {
None => CorsHeader::NotRequired,
Some(ref origin) => {
if let Some(host) = host {
// Request initiated from the same server.
if origin.ends_with(host) {
// Additional check
let origin = Origin::parse(origin);
if &*origin.host == host {
return CorsHeader::NotRequired;
}
}
}
match allowed.as_ref() {
None => CorsHeader::Ok(AccessControlAllowOrigin::Value(Origin::parse(origin))),
Some(ref allowed) if *origin == "null" => {
allowed.iter().find(|cors| **cors == AccessControlAllowOrigin::Null).cloned()
.map(CorsHeader::Ok)
.unwrap_or(CorsHeader::Invalid)
},
Some(ref allowed) => {
allowed.iter().find(|cors| {
match **cors {
AccessControlAllowOrigin::Any => true,
AccessControlAllowOrigin::Value(ref val) if val.matches(origin) => true,
_ => false
}
})
.map(|_| AccessControlAllowOrigin::Value(Origin::parse(origin)))
.map(CorsHeader::Ok).unwrap_or(CorsHeader::Invalid)
},
}
},
}
}
#[cfg(test)]
mod tests {
use hosts::Host;
use super::{get_cors_header, CorsHeader, AccessControlAllowOrigin, Origin, OriginProtocol};
#[test]
fn should_parse_origin() {
use self::OriginProtocol::*;
assert_eq!(Origin::parse("http://parity.io"), Origin::new(Http, "parity.io", None));
assert_eq!(Origin::parse("https://parity.io:8443"), Origin::new(Https, "parity.io", Some(8443)));
assert_eq!(Origin::parse("chrome-extension://124.0.0.1"), Origin::new(Custom("chrome-extension".into()), "124.0.0.1", None));
assert_eq!(Origin::parse("parity.io/somepath"), Origin::new(Http, "parity.io", None));
assert_eq!(Origin::parse("127.0.0.1:8545/somepath"), Origin::new(Http, "127.0.0.1", Some(8545)));
}
#[test]
fn should_not_allow_partially_matching_origin() {
// given
let origin1 = Origin::parse("http://subdomain.somedomain.io");
let origin2 = Origin::parse("http://somedomain.io:8080");
let host = Host::parse("http://somedomain.io");
let origin1 = Some(&*origin1);
let origin2 = Some(&*origin2);
let host = Some(&*host);
// when
let res1 = get_cors_header(origin1, host, &Some(vec![]));
let res2 = get_cors_header(origin2, host, &Some(vec![]));
// then
assert_eq!(res1, CorsHeader::Invalid);
assert_eq!(res2, CorsHeader::Invalid);
}
#[test]
fn should_allow_origins_that_matches_hosts() {
// given
let origin = Origin::parse("http://127.0.0.1:8080");
let host = Host::parse("http://127.0.0.1:8080");
let origin = Some(&*origin);
let host = Some(&*host);
// when
let res = get_cors_header(origin, host, &None);
// then
assert_eq!(res, CorsHeader::NotRequired);
}
#[test]
fn should_return_none_when_there_are_no_cors_domains_and_no_origin() {
// given
let origin = None;
let host = None;
// when
let res = get_cors_header(origin, host, &None);
// then
assert_eq!(res, CorsHeader::NotRequired);
}
#[test]
fn | () {
// given
let origin = Some("parity.io");
let host = None;
// when
let res = get_cors_header(origin, host, &None);
// then
assert_eq!(res, CorsHeader::Ok("parity.io".into()));
}
#[test]
fn should_return_none_for_empty_origin() {
// given
let origin = None;
let host = None;
// when
let res = get_cors_header(
origin,
host,
&Some(vec![AccessControlAllowOrigin::Value("http://ethereum.org".into())]),
);
// then
assert_eq!(res, CorsHeader::NotRequired);
}
#[test]
fn should_return_none_for_empty_list() {
// given
let origin = None;
let host = None;
// when
let res = get_cors_header(origin, host, &Some(Vec::new()));
// then
assert_eq!(res, CorsHeader::NotRequired);
}
#[test]
fn should_return_none_for_not_matching_origin() {
// given
let origin = Some("http://parity.io".into());
let host = None;
// when
let res = get_cors_header(
origin,
host,
&Some(vec![AccessControlAllowOrigin::Value("http://ethereum.org".into())]),
);
// then
assert_eq!(res, CorsHeader::Invalid);
}
#[test]
fn should_return_specific_origin_if_we_allow_any() {
// given
let origin = Some("http://parity.io".into());
let host = None;
// when
let res = get_cors_header(origin, host, &Some(vec![AccessControlAllowOrigin::Any]));
// then
assert_eq!(res, CorsHeader::Ok(AccessControlAllowOrigin::Value("http://parity.io".into())));
}
#[test]
fn should_return_none_if_origin_is_not_defined() {
// given
let origin = None;
let host = None;
// when
let res = get_cors_header(
origin,
host,
&Some(vec![AccessControlAllowOrigin::Null]),
);
// then
assert_eq!(res, CorsHeader::NotRequired);
}
#[test]
fn should_return_null_if_origin_is_null() {
// given
let origin = Some("null".into());
let host = None;
// when
let res = get_cors_header(
origin,
host,
&Some(vec![AccessControlAllowOrigin::Null]),
);
// then
assert_eq!(res, CorsHeader::Ok(AccessControlAllowOrigin::Null));
}
#[test]
fn should_return_specific_origin_if_there_is_a_match() {
// given
let origin = Some("http://parity.io".into());
let host = None;
// when
let res = get_cors_header(
origin,
host,
&Some(vec![AccessControlAllowOrigin::Value("http://ethereum.org".into()), AccessControlAllowOrigin::Value("http://parity.io".into())]),
);
// then
assert_eq!(res, CorsHeader::Ok(AccessControlAllowOrigin::Value("http://parity.io".into())));
}
#[test]
fn should_support_wildcards() {
// given
let origin1 = Some("http://parity.io".into());
let origin2 = Some("http://parity.iot".into());
let origin3 = Some("chrome-extension://test".into());
let host = None;
let allowed = Some(vec![
AccessControlAllowOrigin::Value("http://*.io".into()),
AccessControlAllowOrigin::Value("chrome-extension://*".into())
]);
// when
let res1 = get_cors_header(origin1, host, &allowed);
let res2 = get_cors_header(origin2, host, &allowed);
let res3 = get_cors_header(origin3, host, &allowed);
// then
assert_eq!(res1, CorsHeader::Ok(AccessControlAllowOrigin::Value("http://parity.io".into())));
assert_eq!(res2, CorsHeader::Invalid);
assert_eq!(res3, CorsHeader::Ok(AccessControlAllowOrigin::Value("chrome-extension://test".into())));
}
}
| should_return_domain_when_all_are_allowed |
horrible_perl_script.rs | use std::fs;
use std::process;
use std::io::Write;
use std::path::Path;
pub fn setup_plugins() -> Vec<String>{
let plugin_files = fs::read_dir("plugins").unwrap();
let plugin_target = Path::new("plugin_bin");
let mut plugin_lib_paths : Vec<String> = Vec::new();
if !plugin_target.exists() {
fs::create_dir(plugin_target).unwrap();
}
for plugin_file in plugin_files {
let file = plugin_file.unwrap();
setup_plugin(&file, &plugin_target, &mut plugin_lib_paths);
}
plugin_lib_paths
}
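// --- Added usage sketch (not part of the original file) -----------------------
// setup_plugins() compiles every `plugins/*.rs` file into its own `dylib` crate
// under `plugin_bin/` and returns the built library paths; actually loading the
// libraries at runtime is left to the caller. Hypothetical wiring:
//
//     for lib in setup_plugins() {
//         println!("compiled plugin library: {}", lib);
//     }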
fn setup_plugin(
file : &fs::DirEntry,
plugin_target : &Path,
plugin_lib_paths : &mut Vec<String>) {
let os_file_name = file.file_name();
let file_name = os_file_name.to_str().unwrap();
if file_name.ends_with(".rs") {
let plugin_name = String::from(&file_name[..file_name.len() - 3]);
println!("Loading {} from {}", plugin_name, file.path().display());
// create dir
let plugin_dir = plugin_target.to_str().unwrap().to_owned() + "/" + &plugin_name + "/";
let plugin_path = Path::new(&plugin_dir);
let src_dir = plugin_dir.clone() + "/src";
// TODO: would it be better to just use rustc?
// TODO: have a way to update the cargo file
// TODO: include custom dependency specification
if !plugin_path.exists() {
create_plugin_crate(&plugin_path, &plugin_dir, &plugin_name);
fs::create_dir(&src_dir).unwrap();
}
update_plugin_file(file, &src_dir);
run_cargo(plugin_path, &plugin_dir, &plugin_name, plugin_lib_paths);
}
}
fn update_plugin_file(
file : &fs::DirEntry,
src_dir : &String) {
// TODO: don't update if the file wasn't changed.
fs::copy(file.path(), src_dir.to_owned() + "/lib.rs").unwrap();
}
fn run_cargo(
plugin_path : &Path,
plugin_dir : &String,
plugin_name : &String,
plugin_lib_paths : &mut Vec<String>) {
let output = process::Command::new("cargo")
.current_dir(plugin_path)
.args(&["build"]) | println!("{}", String::from_utf8_lossy(&output.stderr));
panic!("Could not compile {}", plugin_name);
} else {
let lib_ext = if cfg!(target_os = "windows") { ".dll" } else { ".so" };
let lib_path = plugin_dir.clone() + "target/debug/" + &plugin_name + lib_ext;
plugin_lib_paths.push(lib_path);
}
}
fn create_plugin_crate(plugin_path : &Path, plugin_dir : &String, plugin_name : &String) {
fs::create_dir(plugin_path).unwrap();
// write cargo file
let cargo_path = plugin_dir.clone() + "Cargo.toml";
let mut cargo_file = fs::File::create(cargo_path).unwrap();
write!(cargo_file, "
[package]
name = \"{}\"
version = \"0.1.0\"
authors = [\"Richard Warburton <[email protected]>\"]
[lib]
crate-type = [\"dylib\"]
[dependencies]
irc = {{ git = \"https://github.com/RichardWarburton/irc.git\" }}
regex = \"0.2\"
[dependencies.modules]
path = \"../../libs/modules\"
", plugin_name).unwrap();
cargo_file.flush().unwrap();
} | .output()
.expect("failed to execute process");
if !output.status.success() {
println!("{}", String::from_utf8_lossy(&output.stdout)); |
libbeat.go | package collector
import (
"github.com/prometheus/client_golang/prometheus"
)
//LibBeat json structure
type LibBeat struct {
Config struct {
Module struct {
Running float64 `json:"running"`
Starts float64 `json:"starts"`
Stops float64 `json:"stops"`
} `json:"module"`
Reloads float64 `json:"reloads"`
} `json:"config"`
Output LibBeatOutput `json:"output"`
Pipeline LibBeatPipeline `json:"pipeline"`
}
//LibBeatEvents json structure
type LibBeatEvents struct {
Acked float64 `json:"acked"`
Active float64 `json:"active"`
Batches float64 `json:"batches"`
Dropped float64 `json:"dropped"`
Duplicates float64 `json:"duplicates"`
Failed float64 `json:"failed"`
Filtered float64 `json:"filtered"`
Published float64 `json:"published"`
Retry float64 `json:"retry"`
}
//LibBeatOutputBytesErrors json structure
type LibBeatOutputBytesErrors struct {
Bytes float64 `json:"bytes"`
Errors float64 `json:"errors"`
}
//LibBeatOutput json structure
type LibBeatOutput struct {
Events LibBeatEvents `json:"events"`
Read LibBeatOutputBytesErrors `json:"read"`
Write LibBeatOutputBytesErrors `json:"write"`
Type string `json:"type"`
}
//LibBeatPipeline json structure
type LibBeatPipeline struct {
Clients float64 `json:"clients"`
Events LibBeatEvents `json:"events"`
Queue struct {
Acked float64 `json:"acked"`
} `json:"queue"`
}
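// --- Added decoding note (not part of the original collector) -----------------
// The structs above mirror the `libbeat` section of a beat's stats JSON, so a
// payload can be decoded with the standard library (`encoding/json`); `payload`
// and the error handling below are illustrative only:
//
//	var lb LibBeat
//	if err := json.Unmarshal(payload, &lb); err != nil {
//		log.Printf("decoding libbeat stats: %v", err)
//	}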
type libbeatCollector struct {
beatInfo *BeatInfo
stats *Stats
metrics exportedMetrics
}
var libbeatOutputType *prometheus.Desc
// NewLibBeatCollector constructor
func NewLibBeatCollector(beatInfo *BeatInfo, stats *Stats) prometheus.Collector |
// Describe returns all descriptions of the collector.
func (c *libbeatCollector) Describe(ch chan<- *prometheus.Desc) {
for _, metric := range c.metrics {
ch <- metric.desc
}
libbeatOutputType = prometheus.NewDesc(
prometheus.BuildFQName(c.beatInfo.Beat, "libbeat", "output"),
"libbeat.output.type",
[]string{"type"}, nil,
)
ch <- libbeatOutputType
}
// Collect returns the current state of all metrics of the collector.
func (c *libbeatCollector) Collect(ch chan<- prometheus.Metric) {
for _, i := range c.metrics {
ch <- prometheus.MustNewConstMetric(i.desc, i.valType, i.eval(c.stats))
}
// output.type with dynamic label
ch <- prometheus.MustNewConstMetric(libbeatOutputType, prometheus.CounterValue, float64(1), c.stats.LibBeat.Output.Type)
}
| {
return &libbeatCollector{
beatInfo: beatInfo,
stats: stats,
metrics: exportedMetrics{
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat_config", "reloads"),
"libbeat.config.reloads",
nil, nil,
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Config.Reloads
},
valType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "config"),
"libbeat.config.module",
nil, prometheus.Labels{"module": "running"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Config.Module.Running
},
valType: prometheus.GaugeValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "config"),
"libbeat.config.module",
nil, prometheus.Labels{"module": "starts"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Config.Module.Starts
},
valType: prometheus.GaugeValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "config"),
"libbeat.config.module",
nil, prometheus.Labels{"module": "stops"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Config.Module.Stops
},
valType: prometheus.GaugeValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "output_read_bytes"),
"libbeat.output.read.bytes",
nil, nil,
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Output.Read.Bytes
},
valType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "output_read_errors"),
"libbeat.output.read.errors",
nil, nil,
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Output.Read.Errors
},
valType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "output_write_bytes"),
"libbeat.output.write.bytes",
nil, nil,
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Output.Write.Bytes
},
valType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "output_write_errors"),
"libbeat.output.write.errors",
nil, nil,
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Output.Write.Errors
},
valType: prometheus.CounterValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "output_events"),
"libbeat.output.events",
nil, prometheus.Labels{"type": "acked"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Output.Events.Acked
},
valType: prometheus.UntypedValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "output_events"),
"libbeat.output.events",
nil, prometheus.Labels{"type": "active"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Output.Events.Active
},
valType: prometheus.UntypedValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "output_events"),
"libbeat.output.events",
nil, prometheus.Labels{"type": "batches"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Output.Events.Batches
},
valType: prometheus.UntypedValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "output_events"),
"libbeat.output.events",
nil, prometheus.Labels{"type": "dropped"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Output.Events.Dropped
},
valType: prometheus.UntypedValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "output_events"),
"libbeat.output.events",
nil, prometheus.Labels{"type": "duplicates"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Output.Events.Duplicates
},
valType: prometheus.UntypedValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "output_events"),
"libbeat.output.events",
nil, prometheus.Labels{"type": "failed"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Output.Events.Failed
},
valType: prometheus.UntypedValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "pipeline_clients"),
"libbeat.pipeline.clients",
nil, nil,
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Pipeline.Clients
},
valType: prometheus.GaugeValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "pipeline_queue"),
"libbeat.pipeline.queue",
nil, prometheus.Labels{"type": "acked"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Pipeline.Queue.Acked
},
valType: prometheus.UntypedValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "pipeline_events"),
"libbeat.pipeline.events",
nil, prometheus.Labels{"type": "active"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Pipeline.Events.Active
},
valType: prometheus.UntypedValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "pipeline_events"),
"libbeat.pipeline.events",
nil, prometheus.Labels{"type": "dropped"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Pipeline.Events.Dropped
},
valType: prometheus.UntypedValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "pipeline_events"),
"libbeat.pipeline.events",
nil, prometheus.Labels{"type": "failed"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Pipeline.Events.Failed
},
valType: prometheus.UntypedValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "pipeline_events"),
"libbeat.pipeline.events",
nil, prometheus.Labels{"type": "filtered"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Pipeline.Events.Filtered
},
valType: prometheus.UntypedValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "pipeline_events"),
"libbeat.pipeline.events",
nil, prometheus.Labels{"type": "published"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Pipeline.Events.Published
},
valType: prometheus.UntypedValue,
},
{
desc: prometheus.NewDesc(
prometheus.BuildFQName(beatInfo.Beat, "libbeat", "pipeline_events"),
"libbeat.pipeline.events",
nil, prometheus.Labels{"type": "retry"},
),
eval: func(stats *Stats) float64 {
return stats.LibBeat.Pipeline.Events.Retry
},
valType: prometheus.UntypedValue,
},
},
}
} |
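// --- Added wiring sketch (not part of the original file) ----------------------
// The constructor returns a plain prometheus.Collector, so it is registered the
// same way as any other collector (beatInfo and stats come from the exporter's
// own setup code):
//
//	prometheus.MustRegister(NewLibBeatCollector(beatInfo, stats))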
apigatewayv2.rs | #![cfg(feature = "apigatewayv2")]
extern crate rusoto_apigatewayv2;
extern crate rusoto_core;
use rusoto_apigatewayv2::{ApiGatewayV2, ApiGatewayV2Client};
use rusoto_core::Region;
#[test]
fn should_work() {
let client = ApiGatewayV2Client::new(Region::UsEast1);
let response = client
.get_apis(Default::default()) | .expect("expected an ok response");
println!("response is {:#?}", response);
} | .sync() |
plane.rs | use rand::rngs::StdRng;
use super::{HitRecord, Ray, Shape};
/// A plane represented by the linear equation x • normal = value
#[derive(Copy, Clone)]
pub struct Plane {
/// The normal vector
pub normal: glm::DVec3,
/// The distance from the origin
pub value: f64,
}
impl Shape for Plane {
/// Ray-plane intersection
fn intersect(&self, ray: &Ray, t_min: f64, record: &mut HitRecord) -> bool {
let cosine = self.normal.dot(&ray.dir);
if cosine.abs() < 1e-8 {
// Parallel ray and plane
return false;
}
let time = (self.value - self.normal.dot(&ray.origin)) / cosine;
if time >= t_min && time < record.time {
record.time = time;
record.normal = -self.normal.normalize() * cosine.signum();
true
} else {
false
}
}
fn sample(&self, _target: &glm::DVec3, _rng: &mut StdRng) -> (glm::DVec3, glm::DVec3, f64) {
unimplemented!() | }
} |
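// --- Added derivation note (not part of the original file) --------------------
// For the intersection above: substituting the ray r(t) = origin + t * dir into
// the plane equation x · normal = value gives (origin + t * dir) · normal = value,
// hence t = (value - origin · normal) / (dir · normal). That quotient is the
// `time` computed in `intersect`, and the |cosine| < 1e-8 guard rejects rays that
// are (nearly) parallel to the plane, where the denominator vanishes.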
|
download_IXI_HH.py | # -*- coding: utf-8 -*-
"""Download and extract the IXI Hammersmith Hospital 3T dataset
url: http://brain-development.org/ixi-dataset/
ref: IXI – Information eXtraction from Images (EPSRC GR/S21533/02)
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.standard_library import install_aliases  # py 2/3 compatibility
install_aliases()
from urllib.request import FancyURLopener
import os.path
import tarfile
import pandas as pd
import glob
import SimpleITK as sitk
import numpy as np
DOWNLOAD_IMAGES = True
EXTRACT_IMAGES = True
PROCESS_OTHER = True
RESAMPLE_IMAGES = True
CLEAN_UP = True
def resample_image(itk_image, out_spacing=(1.0, 1.0, 1.0), is_label=False):
original_spacing = itk_image.GetSpacing()
original_size = itk_image.GetSize()
out_size = [int(np.round(original_size[0]*(original_spacing[0]/out_spacing[0]))),
int(np.round(original_size[1]*(original_spacing[1]/out_spacing[1]))),
int(np.round(original_size[2]*(original_spacing[2]/out_spacing[2])))]
resample = sitk.ResampleImageFilter()
resample.SetOutputSpacing(out_spacing)
resample.SetSize(out_size)
resample.SetOutputDirection(itk_image.GetDirection())
resample.SetOutputOrigin(itk_image.GetOrigin())
resample.SetTransform(sitk.Transform())
resample.SetDefaultPixelValue(itk_image.GetPixelIDValue())
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
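# --- Added usage sketch (not part of the original script) ---------------------
# resample_image() rescales a volume onto an isotropic grid while keeping its
# origin and direction; a typical call looks like this (the input path is
# illustrative only):
#
#   img = sitk.ReadImage('some_subject_T2.nii.gz')
#   img_1mm = resample_image(img, out_spacing=(1.0, 1.0, 1.0))
#   img_2mm = resample_image(img, out_spacing=[2.0, 2.0, 2.0], is_label=False)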
def reslice_image(itk_image, itk_ref, is_label=False):
re |
urls = {}
urls['t1'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T1.tar'
urls['t2'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-T2.tar'
urls['pd'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-PD.tar'
urls['mra'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI-MRA.tar'
urls['demographic'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI.xls'
fnames = {}
fnames['t1'] = 't1.tar'
fnames['t2'] = 't2.tar'
fnames['pd'] = 'pd.tar'
fnames['mra'] = 'mra.tar'
fnames['demographic'] = 'demographic.xls'
if DOWNLOAD_IMAGES:
# Download all IXI data
for key, url in urls.items():
if not os.path.isfile(fnames[key]):
print('Downloading {} from {}'.format(fnames[key], url))
curr_file = FancyURLopener()
curr_file.retrieve(url, fnames[key])
else:
print('File {} already exists. Skipping download.'.format(
fnames[key]))
if EXTRACT_IMAGES:
# Extract the HH subset of IXI
for key, fname in fnames.items():
if (fname.endswith('.tar')):
print('Extracting IXI HH data from {}.'.format(fnames[key]))
output_dir = os.path.join('./orig/', key)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
t = tarfile.open(fname, 'r')
for member in t.getmembers():
if '-HH-' in member.name:
t.extract(member, output_dir)
if PROCESS_OTHER:
# Process the demographic xls data and save to csv
xls = pd.ExcelFile('demographic.xls')
print(xls.sheet_names)
df = xls.parse('Table')
for index, row in df.iterrows():
IXI_id = 'IXI{:03d}'.format(row['IXI_ID'])
df.loc[index, 'IXI_ID'] = IXI_id
t1_exists = len(glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id)))
t2_exists = len(glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id)))
pd_exists = len(glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id)))
mra_exists = len(glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id)))
# Check if each entry is complete and drop if not
# if not t1_exists and not t2_exists and not pd_exists and not mra
# exists:
if not (t1_exists and t2_exists and pd_exists and mra_exists):
df.drop(index, inplace=True)
# Write to csv file
df.to_csv('demographic_HH.csv', index=False)
if RESAMPLE_IMAGES:
# Resample the IXI HH T2 images to 1mm isotropic and reslice all
# others to it
df = pd.read_csv('demographic_HH.csv', dtype=object, keep_default_na=False,
na_values=[]).as_matrix()
for i in df:
IXI_id = i[0]
print('Resampling {}'.format(IXI_id))
t1_fn = glob.glob('./orig/t1/{}*.nii.gz'.format(IXI_id))[0]
t2_fn = glob.glob('./orig/t2/{}*.nii.gz'.format(IXI_id))[0]
pd_fn = glob.glob('./orig/pd/{}*.nii.gz'.format(IXI_id))[0]
mra_fn = glob.glob('./orig/mra/{}*.nii.gz'.format(IXI_id))[0]
t1 = sitk.ReadImage(t1_fn)
t2 = sitk.ReadImage(t2_fn)
pd = sitk.ReadImage(pd_fn)
mra = sitk.ReadImage(mra_fn)
# Resample to 1mm isotropic resolution
t2_1mm = resample_image(t2)
t1_1mm = reslice_image(t1, t2_1mm)
pd_1mm = reslice_image(pd, t2_1mm)
mra_1mm = reslice_image(mra, t2_1mm)
output_dir = os.path.join('./1mm/', IXI_id)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('T1: {} {}'.format(t1_1mm.GetSize(), t1_1mm.GetSpacing()))
print('T2: {} {}'.format(t2_1mm.GetSize(), t2_1mm.GetSpacing()))
print('PD: {} {}'.format(pd_1mm.GetSize(), pd_1mm.GetSpacing()))
print('MRA: {} {}'.format(mra_1mm.GetSize(), mra_1mm.GetSpacing()))
sitk.WriteImage(t1_1mm, os.path.join(output_dir, 'T1_1mm.nii.gz'))
sitk.WriteImage(t2_1mm, os.path.join(output_dir, 'T2_1mm.nii.gz'))
sitk.WriteImage(pd_1mm, os.path.join(output_dir, 'PD_1mm.nii.gz'))
sitk.WriteImage(mra_1mm, os.path.join(output_dir, 'MRA_1mm.nii.gz'))
# Resample to 2mm isotropic resolution
t2_2mm = resample_image(t2, out_spacing=[2.0, 2.0, 2.0])
t1_2mm = reslice_image(t1, t2_2mm)
pd_2mm = reslice_image(pd, t2_2mm)
mra_2mm = reslice_image(mra, t2_2mm)
output_dir = os.path.join('./2mm/', IXI_id)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('T1: {} {}'.format(t2_2mm.GetSize(), t1_2mm.GetSpacing()))
print('T2: {} {}'.format(t2_2mm.GetSize(), t2_2mm.GetSpacing()))
print('PD: {} {}'.format(pd_2mm.GetSize(), pd_2mm.GetSpacing()))
print('MRA: {} {}'.format(mra_2mm.GetSize(), mra_2mm.GetSpacing()))
sitk.WriteImage(t1_2mm, os.path.join(output_dir, 'T1_2mm.nii.gz'))
sitk.WriteImage(t2_2mm, os.path.join(output_dir, 'T2_2mm.nii.gz'))
sitk.WriteImage(pd_2mm, os.path.join(output_dir, 'PD_2mm.nii.gz'))
sitk.WriteImage(mra_2mm, os.path.join(output_dir, 'MRA_2mm.nii.gz'))
if CLEAN_UP:
# Remove the .tar files
for key, fname in fnames.items():
if (fname.endswith('.tar')):
os.remove(fname)
# Remove all data in original resolution
os.system('rm -rf orig')
| sample = sitk.ResampleImageFilter()
resample.SetReferenceImage(itk_ref)
if is_label:
resample.SetInterpolator(sitk.sitkNearestNeighbor)
else:
resample.SetInterpolator(sitk.sitkBSpline)
return resample.Execute(itk_image)
|
prune_resnet_tools.py | '''
This file contains functions for pruning ResNet-like models at the layer level
1. prune_resconv_layer (resnet: conv layers)
2. prune_resnet_lconv_layer (resnet: lconv means identity layer)
3. prune_rbconv_by_indices (resnet: rbconv means right path's bottom layer)
4. prune_rbconv_by_number (resnet: used when you prune lconv but next block/layer cannot absorb your effect)
5. prune_ruconv1_layer (resnet: for resnet normal conv1 layers (i.e. right path's first upper layers))
6. prune_ruconv2_layer (resnet: for resnet normal conv2 layers (i.e. right path's second upper layers))
Author: xuhuahuang as intern in YouTu 07/2018
'''
import torch
from torch.autograd import Variable
from torchvision import models
import cv2
cv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in DataLoader
# OpenCL may be enabled by default in OpenCV3;
# disable it because it's not thread safe and causes unwanted GPU memory allocations
cv2.ocl.setUseOpenCL(False)
import sys
import numpy as np
from models.resnet import BasicBlock, Bottleneck
def replace_layers(model, i, indexes, layers):
if i in indexes:
# layers and indexes store new layers used to update old layers
return layers[indexes.index(i)]
# if i not in indexes, use old layers
return model[i]
# helper function
'''
Helper function for updating the immediately following layer/block's input channels
Args:
    model: model after pruning the current layer/block
    layer_index: index of the current layer, i.e. the block/layer whose filters are being pruned NOW
    filters_to_prune: the output channel indices being pruned
**Note**
    Does not handle the case described by prune_rbconv_by_number()
    Does not handle the cases inside prune_ruconv1_layer() and prune_ruconv2_layer() because they are inside the same block
'''
def update_next_layers(model, layer_index, filters_to_prune):
# only need to change in_channels for all following objects based on filters_to_prune
next_conv = None
next_blk = None
next_ds = None # if next one is a block, and this block has downsample path, you need to update both residual and downsample path
offset = 1
# search for the next conv, based on current conv with id = (layer_index, filter_index)
while layer_index + offset < len(model.base._modules.items()):
res = list(model.base._modules.items())[layer_index+offset] # name, module
if isinstance(res[1], torch.nn.modules.conv.Conv2d):
next_name, next_conv = res
next_is_block = False
break
elif isinstance(res[1], (BasicBlock, Bottleneck)):
next_is_block = True
next_blk = res[1]
if res[1].downsample is None:
next_conv = res[1].conv1
next_ds = None
else:
next_conv = res[1].conv1
next_ds = res[1].downsample
break
offset = offset + 1
if next_conv is None:
print("No filter will be prunned for this layer (last layer)")
return model
if len(filters_to_prune) == 0:
print("No filter will be prunned for this layer")
return model
cut = len(filters_to_prune)
# next_conv must exists
next_new_conv = \
torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\
out_channels = next_conv.out_channels, \
kernel_size = next_conv.kernel_size, \
stride = next_conv.stride,
padding = next_conv.padding,
dilation = next_conv.dilation,
groups = next_conv.groups,
bias = next_conv.bias is not None)
old_weights = next_conv.weight.data.cpu().numpy()
new_weights = next_new_conv.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()
if next_conv.bias is not None:
next_new_conv.bias.data = next_conv.bias.data
# next_ds exists or not is okay, no matter next_is_block is True or not
if next_ds is not None:
old_conv_in_next_ds = next_ds[0]
new_conv_in_next_new_ds = \
torch.nn.Conv2d(in_channels = old_conv_in_next_ds.in_channels - cut,\
out_channels = old_conv_in_next_ds.out_channels, \
kernel_size = old_conv_in_next_ds.kernel_size, \
stride = old_conv_in_next_ds.stride,
padding = old_conv_in_next_ds.padding,
dilation = old_conv_in_next_ds.dilation,
groups = old_conv_in_next_ds.groups,
bias = old_conv_in_next_ds.bias is not None)
old_weights = old_conv_in_next_ds.weight.data.cpu().numpy()
new_weights = new_conv_in_next_new_ds.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
new_conv_in_next_new_ds.weight.data = torch.from_numpy(new_weights).cuda()
if old_conv_in_next_ds.bias is not None:
new_conv_in_next_new_ds.bias.data = old_conv_in_next_ds.bias.data # bias won't change
next_new_ds = torch.nn.Sequential(new_conv_in_next_new_ds, next_ds[1]) # BN keeps unchanged
else:
next_new_ds = None
# next_new_ds and next_new_conv are ready now, create a next_new_block for replace_layers()
if next_is_block: #same as next_blk is not None:
if isinstance(next_blk, BasicBlock):
            # rely on conv1 of old block to get in_planes, out_planes, stride
next_new_block = BasicBlock(next_blk.conv1.in_channels - cut, \
next_blk.conv1.out_channels, next_blk.stride, downsample = next_new_ds)
next_new_block.conv1 = next_new_conv # only update in_channels
next_new_block.bn1 = next_blk.bn1
next_new_block.relu = next_blk.relu
next_new_block.conv2 = next_blk.conv2
next_new_block.bn2 = next_blk.bn2
else:
next_new_block = Bottleneck(next_blk.conv1.in_channels - cut, \
next_blk.conv1.out_channels, next_blk.stride, downsample = next_new_ds)
next_new_block.conv1 = next_new_conv # only update in_channels
next_new_block.bn1 = next_blk.bn1
next_new_block.conv2 = next_blk.conv2
next_new_block.bn2 = next_blk.bn2
next_new_block.conv3 = next_blk.conv3
next_new_block.bn3 = next_blk.bn3
next_new_block.relu = next_blk.relu
if not next_is_block:
base = torch.nn.Sequential(
*(replace_layers(model.base, i, [layer_index+offset], \
[next_new_conv]) for i, _ in enumerate(model.base)))
else:
base = torch.nn.Sequential(
*(replace_layers(model.base, i, [layer_index+offset], \
[next_new_block]) for i, _ in enumerate(model.base)))
del model.base # delete and replace with brand new one
model.base = base
print("Finished update next layers.")
return model
'''
--------------------------------------------------------------------------------
1. Prune conv layers in resnet with/without BN (only support layers stored in model.base for now)
Args:
model: model for pruning
layer_index: index the pruned layer's location within model
cut_ratio: the ratio of filters you want to prune from this layer (e.g. 20% - cut 20% lowest weights layers)
Adapted from: https://github.com/jacobgil/pytorch-pruning
'''
def prune_resconv_layer(model, layer_index, cut_ratio=0.2, use_bn = True):
|
'''
--------------------------------------------------------------------------------
2. Prune identity conv layers without/with BN in a resnet block
(*Note: NOT used for a normal layer; the 'layer' here must be located inside a block indexed by block_index)
Args:
    block_index: a block, also called a 'layer' in the torchvision implementation; locates the lconv layer
*Note:
    The index criterion is based on a single-block unit, i.e. one index represents one BasicBlock/Bottleneck rather than one torchvision 'layer' (3-6 blocks)
Return:
    cut_indices: the filters_to_prune in this layer, to be reused by function 3 (prune_rbconv_by_indices)
'''
def prune_resnet_lconv_layer(model, block_index, cut_ratio=0.2, use_bn = True):
_, blk = list(model.base._modules.items())[block_index]
cut_indices = None
if not use_bn:
print("ResNet without BN is not supported for prunning")
return cut_indices, model
    # check whether the left path has a conv layer for pruning
if blk.downsample == None:
print("No filters will be prunned because lconv doesn't exist")
return cut_indices, model
if not isinstance(blk, (BasicBlock, Bottleneck)):
print("Only support for ResNet with BasicBlock or Bottleneck defined in torchvision")
return cut_indices, model
# get old conv and bn on the left
lconv = blk.downsample[0] # nn.Sequential for (lconv, lbn)
lbn = blk.downsample[1]
next_conv = None
offset = 1
# search for the next conv, can be conv1 within next block, or a normal conv layer
while block_index + offset < len(model.base._modules.items()):
res = list(model.base._modules.items())[block_index+offset] # name, module
if isinstance(res[1], torch.nn.modules.conv.Conv2d):
next_name, next_conv = res
break
elif isinstance(res[1], (BasicBlock, Bottleneck)):
next_conv = res[1].conv1
break
offset = offset + 1
if next_conv is None:
print("No filters will be prunned because this is the last block")
return cut_indices, model
num_filters = lconv.weight.data.size(0) # out_channels x in_channels x 3 x 3
# skip the layer with only one filter left
if num_filters <= 1:
print("No filter will be prunned for this layer (num_filters<=1)")
return cut_indices, model
cut = int(cut_ratio * num_filters)
if cut < 1:
print("No filter will be prunned for this layer (cut<1)")
return cut_indices, model
if (num_filters - cut) < 1:
print("No filter will be prunned for this layer (no filter left after cutting)")
return cut_indices, model
# rank the filters within this layer and store into filter_ranks
abs_wgt = torch.abs(lconv.weight.data)
values = \
torch.sum(abs_wgt, dim = 1, keepdim = True).\
sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data
# Normalize the sum of weight by the filter dimensions in x 3 x 3
values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)
print("Ranking filters.. ")
filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest
print("Filters that will be prunned", filters_to_prune)
print("Pruning filters.. ")
# the updated conv for old lconv, with cut output channels being pruned
new_conv = \
torch.nn.Conv2d(in_channels = lconv.in_channels, \
out_channels = lconv.out_channels - cut,
kernel_size = lconv.kernel_size, \
stride = lconv.stride,
padding = lconv.padding,
dilation = lconv.dilation,
groups = lconv.groups,
bias = lconv.bias is not None) #(out_channels)
old_weights = lconv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]
new_weights = new_conv.weight.data.cpu().numpy()
# skip that filter's weight inside old_weights and store others into new_weights
new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
new_conv.weight.data = torch.from_numpy(new_weights).cuda()
if lconv.bias is not None:
bias_numpy = lconv.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune, axis = None)
new_conv.bias.data = torch.from_numpy(bias).cuda()
# new BN layer after new_conv
new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
eps=lbn.eps, momentum=lbn.momentum, affine=lbn.affine)
# old_bn.affine == True, need to copy learnable gamma and beta to new_bn
# gamma: size = (num_features)
old_weights = lbn.weight.data.cpu().numpy()
new_weights = new_bn.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune)
new_bn.weight.data = torch.from_numpy(new_weights).cuda()
# beta: size = (num_features)
bias_numpy = lbn.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune)
new_bn.bias.data = torch.from_numpy(bias).cuda()
# replace
# update current left conv + left BN layer, have BN by default
new_ds = torch.nn.Sequential(
*(replace_layers(blk.downsample, i, [0, 1], \
[new_conv, new_bn]) for i, _ in enumerate(blk.downsample)))
# delete current and replace with a brand new BLOCK
if isinstance(blk, BasicBlock):
        # rely on conv1 of old block to get in_planes, out_planes, stride
new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \
blk.stride, downsample = new_ds)
        # keep all layers in residual path unchanged temporarily
new_blk.conv1 = blk.conv1
new_blk.bn1 = blk.bn1
new_blk.relu = blk.relu
new_blk.conv2 = blk.conv2
new_blk.bn2 = blk.bn2
else:
new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
blk.stride, downsample = new_ds)
        # keep all layers in residual path unchanged temporarily
new_blk.conv1 = blk.conv1
new_blk.bn1 = blk.bn1
new_blk.conv2 = blk.conv2
new_blk.bn2 = blk.bn2
new_blk.conv3 = blk.conv3
new_blk.bn3 = blk.bn3
new_blk.relu = blk.relu
# now new_blk is ready, it can act as a layer and replace old blk with replace_layers()
base = torch.nn.Sequential(
*(replace_layers(model.base, i, [block_index], \
[new_blk]) for i, _ in enumerate(model.base)))
# delete and replace with brand new one
del model.base # delete the things pointed by pointer
del blk
model.base = base # update current layer
model = update_next_layers(model, block_index, filters_to_prune) # update following layers
cut_indices = filters_to_prune
message = str(100*float(cut) / num_filters) + "%"
print("Filters prunned", str(message))
return cut_indices, model
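# --- Added usage sketch (not part of the original file) -----------------------
# Typical pairing (block index and ratio are illustrative): prune the identity
# (downsample) conv of a block first, then prune that block's residual bottom
# conv with the very same indices so both paths still produce matching channels.
#
#   cut_indices, model = prune_resnet_lconv_layer(model, block_index=6, cut_ratio=0.2)
#   if cut_indices is not None:
#       model = prune_rbconv_by_indices(model, block_index=6, filters_to_prune=cut_indices)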
'''
--------------------------------------------------------------------------------
3. Prune residual conv layer, the one at the bottom of residual side with/without BN
(*Note: MUST be called after you prune an identity path that has a downsample conv; otherwise the sizes won't fit, because the functions above only update the left path)
Args:
    block_index: the index of the BasicBlock or Bottleneck block where this layer is located
    filters_to_prune: the indices of the filters to be pruned
use_bn: use Batch Norm or not
'''
def prune_rbconv_by_indices(model, block_index, filters_to_prune, use_bn = True):
_, blk = list(model.base._modules.items())[block_index]
if not use_bn:
print("ResNet without BN is not supported for prunning")
return model
    # check whether the left path has a conv layer for pruning
if blk.downsample == None:
print("Only support pruning for rbconv after lconv was pruned")
return model
if not isinstance(blk, (BasicBlock, Bottleneck)):
print("Only support for ResNet with BasicBlock or Bottleneck defined in torchvision")
return model
if isinstance(blk, BasicBlock):
# when it is BasicBlock, the rbconv is conv2, and its bn is bn2
conv = blk.conv2
bn = blk.bn2
else:
# when it is Bottleneck, the rbconv is conv3, and its bn is bn3
conv = blk.conv3
bn = blk.bn3
# only need to update itself, no need to care about others such as next_ds/next_conv
new_conv = \
torch.nn.Conv2d(in_channels = conv.in_channels, \
out_channels = conv.out_channels - len(filters_to_prune),
kernel_size = conv.kernel_size, \
stride = conv.stride,
padding = conv.padding,
dilation = conv.dilation,
groups = conv.groups,
bias = conv.bias is not None) #(out_channels)
old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]
new_weights = new_conv.weight.data.cpu().numpy()
# skip that filter's weight inside old_weights and store others into new_weights
new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
new_conv.weight.data = torch.from_numpy(new_weights).cuda()
if conv.bias is not None:
bias_numpy = conv.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - len(filters_to_prune)), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune, axis = None)
new_conv.bias.data = torch.from_numpy(bias).cuda()
# new BN layer after new_conv
new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
eps=bn.eps, momentum=bn.momentum, affine=bn.affine)
# old_bn.affine == True, need to copy learnable gamma and beta to new_bn
# gamma: size = (num_features)
old_weights = bn.weight.data.cpu().numpy()
new_weights = new_bn.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune)
new_bn.weight.data = torch.from_numpy(new_weights).cuda()
# beta: size = (num_features)
bias_numpy = bn.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - len(filters_to_prune)), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune)
new_bn.bias.data = torch.from_numpy(bias).cuda()
if isinstance(blk, BasicBlock):
# replace with new block
new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \
blk.stride, downsample = blk.downsample)
        # keep all layers in residual path unchanged temporarily
new_blk.conv1 = blk.conv1
new_blk.bn1 = blk.bn1
new_blk.relu = blk.relu
new_blk.conv2 = new_conv # update with new conv
new_blk.bn2 = new_bn # update with new bn
else:
# replace with new block
new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
blk.stride, downsample = blk.downsample)
        # keep all layers in residual path unchanged temporarily
new_blk.conv1 = blk.conv1
new_blk.bn1 = blk.bn1
new_blk.conv2 = blk.conv2
new_blk.bn2 = blk.bn2
new_blk.conv3 = new_conv
new_blk.bn3 = new_bn
new_blk.relu = blk.relu
base = torch.nn.Sequential(
*(replace_layers(model.base, i, [block_index], \
[new_blk]) for i, _ in enumerate(model.base)))
# delete and replace
del model.base
model.base = base
print("Filters prunned for rb layer:", filters_to_prune)
return model
'''
--------------------------------------------------------------------------------
4. Prune residual conv layer, the one at the bottom of residual side with/without BN, based on its own weights
(*Note: MUST be called when you prune an lconv layer and
the immediately following block/conv cannot absorb the change because its left path is empty)
Args:
    block_index: the index of the BasicBlock or Bottleneck block where this layer is located
    num_cut: the number of filters to be pruned
use_bn: use Batch Norm or not
'''
def prune_rbconv_by_number(model, block_index, num_cut, use_bn = True):
_, blk = list(model.base._modules.items())[block_index]
if not use_bn:
print("ResNet without BN is not supported for prunning")
return model
if not isinstance(blk, (BasicBlock, Bottleneck)):
print("Only support for ResNet with BasicBlock or Bottleneck defined in torchvision")
return model
if isinstance(blk, BasicBlock):
# when it is BasicBlock, the rbconv is conv2, and its bn is bn2
conv = blk.conv2
bn = blk.bn2
else:
# when it is Bottleneck, the rbconv is conv3, and its bn is bn3
conv = blk.conv3
bn = blk.bn3
num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3
# skip the layer with only one filter left
if num_filters <= 1:
print("No filter will be prunned for this layer (num_filters<=1)")
return model
if num_cut < 1:
print("Error: No filter will be prunned for this layer (cut<1)")
return model
if (num_filters - num_cut) < 1:
print("Error: No filter will be prunned for this layer (no filter left after cutting)")
return model
# rank the filters within this layer and store into filter_ranks
abs_wgt = torch.abs(conv.weight.data)
values = \
torch.sum(abs_wgt, dim = 1, keepdim = True).\
sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data
# Normalize the sum of weight by the filter dimensions in x 3 x 3
values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)
print("Ranking filters.. ")
filters_to_prune = np.argsort(values.cpu().numpy())[:num_cut] # order from smallest to largest
print("Filters that will be prunned", filters_to_prune)
print("Pruning filters.. ")
# only need to update itself, no need to care about others such as next_ds/next_conv
new_conv = \
torch.nn.Conv2d(in_channels = conv.in_channels, \
out_channels = conv.out_channels - num_cut,
kernel_size = conv.kernel_size, \
stride = conv.stride,
padding = conv.padding,
dilation = conv.dilation,
groups = conv.groups,
bias = conv.bias is not None) #(out_channels)
old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]
new_weights = new_conv.weight.data.cpu().numpy()
# skip that filter's weight inside old_weights and store others into new_weights
new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
new_conv.weight.data = torch.from_numpy(new_weights).cuda()
if conv.bias is not None:
bias_numpy = conv.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - num_cut), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune, axis = None)
new_conv.bias.data = torch.from_numpy(bias).cuda()
# new BN layer after new_conv
new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
eps=bn.eps, momentum=bn.momentum, affine=bn.affine)
# old_bn.affine == True, need to copy learnable gamma and beta to new_bn
# gamma: size = (num_features)
old_weights = bn.weight.data.cpu().numpy()
new_weights = new_bn.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune)
new_bn.weight.data = torch.from_numpy(new_weights).cuda()
# beta: size = (num_features)
bias_numpy = bn.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - num_cut), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune)
new_bn.bias.data = torch.from_numpy(bias).cuda()
if isinstance(blk, BasicBlock):
# replace with new block
new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \
blk.stride, downsample = blk.downsample)
        # keep all layers in the residual path unchanged temporarily
new_blk.conv1 = blk.conv1
new_blk.bn1 = blk.bn1
new_blk.relu = blk.relu
new_blk.conv2 = new_conv # update with new conv
new_blk.bn2 = new_bn # update with new bn
else:
# replace with new block
new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
blk.stride, downsample = blk.downsample)
        # keep all layers in the residual path unchanged temporarily
new_blk.conv1 = blk.conv1
new_blk.bn1 = blk.bn1
new_blk.conv2 = blk.conv2
new_blk.bn2 = blk.bn2
new_blk.conv3 = new_conv
new_blk.bn3 = new_bn
new_blk.relu = blk.relu
base = torch.nn.Sequential(
*(replace_layers(model.base, i, [block_index], \
[new_blk]) for i, _ in enumerate(model.base)))
# delete and replace
del model.base
del blk
model.base = base
model = update_next_layers(model, block_index, filters_to_prune) # update following layers
print("Filters prunned for rb layer:", filters_to_prune)
return model
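# Usage sketch for prune_rbconv_by_number (illustrative only: the block index and
# num_cut below are hypothetical values, and it is assumed that model.base wraps a
# torchvision ResNet whose entry at that index is a BasicBlock/Bottleneck):
#   model = prune_rbconv_by_number(model, block_index=5, num_cut=8, use_bn=True)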
'''
--------------------------------------------------------------------------------
5. Prune a normal residual conv layer (the FIRST one, at the top of the residual branch), with/without BN
Args:
    block_index: index of the BasicBlock or Bottleneck block that contains this layer
    cut_ratio: the ratio of conv1 filters to prune (conv2's input channels are shrunk to match)
    use_bn: whether Batch Norm is used
'''
def prune_ruconv1_layer(model, block_index, cut_ratio=0.2, use_bn = True):
_, blk = list(model.base._modules.items())[block_index]
if not use_bn:
print("ResNet without BN is not supported for prunning")
return model
if not isinstance(blk, (BasicBlock, Bottleneck)):
print("Conv1 only for ResNet with BasicBlock or Bottleneck defined in torchvision")
return model
# cut conv1, and next conv is conv2
conv = blk.conv1
bn = blk.bn1
next_conv = blk.conv2
num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3
# skip the layer with only one filter left
if num_filters <= 1:
print("No filter will be prunned for this layer (num_filters<=1)")
return model
cut = int(cut_ratio * num_filters)
if cut < 1:
print("No filter will be prunned for this layer (cut<1)")
return model
if (num_filters - cut) < 1:
print("No filter will be prunned for this layer (no filter left after cutting)")
return model
# rank the filters within this layer and store into filter_ranks
abs_wgt = torch.abs(conv.weight.data)
values = \
torch.sum(abs_wgt, dim = 1, keepdim = True).\
sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data
    # Normalize the sum of weights by the filter dimensions (in_channels x k_h x k_w)
    values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)
    print("Ranking filters.. ")
    filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest
    print("Filters that will be pruned", filters_to_prune)
print("Pruning filters.. ")
# the updated conv for current conv, with cut output channels being pruned
new_conv = \
torch.nn.Conv2d(in_channels = conv.in_channels, \
out_channels = conv.out_channels - cut,
kernel_size = conv.kernel_size, \
stride = conv.stride,
padding = conv.padding,
dilation = conv.dilation,
groups = conv.groups,
bias = conv.bias is not None) #(out_channels)
    old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1])
new_weights = new_conv.weight.data.cpu().numpy()
# skip that filter's weight inside old_weights and store others into new_weights
new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
new_conv.weight.data = torch.from_numpy(new_weights).cuda()
if conv.bias is not None:
bias_numpy = conv.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune, axis = None)
new_conv.bias.data = torch.from_numpy(bias).cuda() # new conv1
# BatchNorm layer
new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
eps=bn.eps, momentum=bn.momentum, affine=bn.affine)
# gamma: size = (num_features)
old_weights = bn.weight.data.cpu().numpy()
    new_weights = new_bn.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune)
new_bn.weight.data = torch.from_numpy(new_weights).cuda()
# beta: size = (num_features)
bias_numpy = bn.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune)
new_bn.bias.data = torch.from_numpy(bias).cuda() # new bn1
# new conv for next_conv
next_new_conv = \
torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\
out_channels = next_conv.out_channels, \
kernel_size = next_conv.kernel_size, \
stride = next_conv.stride,
padding = next_conv.padding,
dilation = next_conv.dilation,
groups = next_conv.groups,
bias = next_conv.bias is not None)
old_weights = next_conv.weight.data.cpu().numpy()
new_weights = next_new_conv.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()
if next_conv.bias is not None:
next_new_conv.bias.data = next_conv.bias.data # new conv2
# replace with new block
if isinstance(blk, BasicBlock):
new_blk = BasicBlock(blk.conv1.in_channels, blk.conv1.out_channels, \
blk.stride, downsample = blk.downsample)
        # keep all layers in the residual path unchanged temporarily
        new_blk.conv1 = new_conv
        new_blk.bn1 = new_bn
        new_blk.relu = blk.relu
        new_blk.conv2 = next_new_conv # conv2 now takes fewer input channels
        new_blk.bn2 = blk.bn2 # bn2 unchanged (conv2's output channels stay the same)
else:
new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
blk.stride, downsample = blk.downsample)
        # keep all layers in the residual path unchanged temporarily
new_blk.conv1 = new_conv
new_blk.bn1 = new_bn
new_blk.conv2 = next_new_conv
new_blk.bn2 = blk.bn2
new_blk.conv3 = blk.conv3
new_blk.bn3 = blk.bn3
new_blk.relu = blk.relu
base = torch.nn.Sequential(
*(replace_layers(model.base, i, [block_index], \
[new_blk]) for i, _ in enumerate(model.base)))
# delete and replace
del model.base
model.base = base
print("Filters prunned:", filters_to_prune)
return model
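# Usage sketch for prune_ruconv1_layer (illustrative only; the block index is
# hypothetical). It prunes roughly 20% of conv1's filters in that block and
# shrinks conv2's input channels to match:
#   model = prune_ruconv1_layer(model, block_index=5, cut_ratio=0.2, use_bn=True)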
'''
--------------------------------------------------------------------------------
6. Prune a normal residual conv layer (the SECOND one, at the top of the residual branch), with/without BN
   (*for Bottleneck only)
Args:
    block_index: index of the Bottleneck block that contains this layer
    cut_ratio: the ratio of conv2 filters to prune (conv3's input channels are shrunk to match)
    use_bn: whether Batch Norm is used
'''
def prune_ruconv2_layer(model, block_index, cut_ratio=0.2, use_bn = True):
_, blk = list(model.base._modules.items())[block_index]
if not use_bn:
print("ResNet without BN is not supported for prunning")
return model
if not isinstance(blk, Bottleneck):
print("Conv2 only for ResNet with Bottleneck defined in torchvision")
return model
    # cut conv2, and the next conv is conv3
conv = blk.conv2
bn = blk.bn2
next_conv = blk.conv3
num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3
# skip the layer with only one filter left
if num_filters <= 1:
print("No filter will be prunned for this layer (num_filters<=1)")
return model
cut = int(cut_ratio * num_filters)
if cut < 1:
print("No filter will be prunned for this layer (cut<1)")
return model
if (num_filters - cut) < 1:
print("No filter will be prunned for this layer (no filter left after cutting)")
return model
# rank the filters within this layer and store into filter_ranks
abs_wgt = torch.abs(conv.weight.data)
values = \
torch.sum(abs_wgt, dim = 1, keepdim = True).\
sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data
    # Normalize the sum of weights by the filter dimensions (in_channels x k_h x k_w)
    values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)
    print("Ranking filters.. ")
    filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest
    print("Filters that will be pruned", filters_to_prune)
print("Pruning filters.. ")
# the updated conv for current conv, with cut output channels being pruned
new_conv = \
torch.nn.Conv2d(in_channels = conv.in_channels, \
out_channels = conv.out_channels - cut,
kernel_size = conv.kernel_size, \
stride = conv.stride,
padding = conv.padding,
dilation = conv.dilation,
groups = conv.groups,
bias = conv.bias is not None) #(out_channels)
    old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1])
new_weights = new_conv.weight.data.cpu().numpy()
# skip that filter's weight inside old_weights and store others into new_weights
new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
new_conv.weight.data = torch.from_numpy(new_weights).cuda()
if conv.bias is not None:
bias_numpy = conv.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune, axis = None)
new_conv.bias.data = torch.from_numpy(bias).cuda() # new conv2
# BatchNorm layer
new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
eps=bn.eps, momentum=bn.momentum, affine=bn.affine)
# gamma: size = (num_features)
old_weights = bn.weight.data.cpu().numpy()
    new_weights = new_bn.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune)
new_bn.weight.data = torch.from_numpy(new_weights).cuda()
# beta: size = (num_features)
bias_numpy = bn.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune)
new_bn.bias.data = torch.from_numpy(bias).cuda() # new bn2
# new conv for next_conv
next_new_conv = \
torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\
out_channels = next_conv.out_channels, \
kernel_size = next_conv.kernel_size, \
stride = next_conv.stride,
padding = next_conv.padding,
dilation = next_conv.dilation,
groups = next_conv.groups,
bias = next_conv.bias is not None)
old_weights = next_conv.weight.data.cpu().numpy()
new_weights = next_new_conv.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()
if next_conv.bias is not None:
next_new_conv.bias.data = next_conv.bias.data # new conv3
# replace with new block
new_blk = Bottleneck(blk.conv1.in_channels, blk.conv1.out_channels, \
blk.stride, downsample = blk.downsample)
    # keep all layers in the residual path unchanged temporarily
new_blk.conv1 = blk.conv1
new_blk.bn1 = blk.bn1
new_blk.conv2 = new_conv
new_blk.bn2 = new_bn
new_blk.conv3 = next_new_conv
new_blk.bn3 = blk.bn3
new_blk.relu = blk.relu
base = torch.nn.Sequential(
*(replace_layers(model.base, i, [block_index], \
[new_blk]) for i, _ in enumerate(model.base)))
# delete and replace
del model.base
model.base = base
print("Filters prunned:", filters_to_prune)
return model
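# Usage sketch for prune_ruconv2_layer (illustrative only; Bottleneck blocks only,
# and the block index is hypothetical):
#   model = prune_ruconv2_layer(model, block_index=5, cut_ratio=0.2, use_bn=True)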
| _, conv = list(model.base._modules.items())[layer_index]
if use_bn:
_, old_bn = list(model.base._modules.items())[layer_index + 1]
next_conv = None
offset = 1
# search for the next conv, based on current conv with id = (layer_index, filter_index)
while layer_index + offset < len(model.base._modules.items()):
res = list(model.base._modules.items())[layer_index+offset] # name, module
if isinstance(res[1], torch.nn.modules.conv.Conv2d):
next_name, next_conv = res
break
elif isinstance(res[1], (BasicBlock, Bottleneck)):
next_conv = res[1].conv1
break
offset = offset + 1
if next_conv is None:
print("No filter will be prunned for this layer (last layer)")
return model
num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3
# skip the layer with only one filter left
if num_filters <= 1:
print("No filter will be prunned for this layer (num_filters<=1)")
return model
cut = int(cut_ratio * num_filters)
if cut < 1:
print("No filter will be prunned for this layer (cut<1)")
return model
if (num_filters - cut) < 1:
print("No filter will be prunned for this layer (no filter left after cutting)")
return model
# rank the filters within this layer and store into filter_ranks
abs_wgt = torch.abs(conv.weight.data)
values = \
torch.sum(abs_wgt, dim = 1, keepdim = True).\
sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data
    # Normalize the sum of weights by the filter dimensions (in_channels x k_h x k_w)
    values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)
    print("Ranking filters.. ")
    filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest
    print("Filters that will be pruned", filters_to_prune)
print("Pruning filters.. ")
# the updated conv for current conv, with cut output channels being pruned
new_conv = \
torch.nn.Conv2d(in_channels = conv.in_channels, \
out_channels = conv.out_channels - cut,
kernel_size = conv.kernel_size, \
stride = conv.stride,
padding = conv.padding,
dilation = conv.dilation,
groups = conv.groups,
bias = conv.bias is not None) #(out_channels)
    old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1])
new_weights = new_conv.weight.data.cpu().numpy()
# skip that filter's weight inside old_weights and store others into new_weights
new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
new_conv.weight.data = torch.from_numpy(new_weights).cuda()
    if conv.bias is not None: # conv layers before BN typically carry no bias
bias_numpy = conv.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune, axis = None)
new_conv.bias.data = torch.from_numpy(bias).cuda()
# BatchNorm modification
# TODO: Extract this function outside as a separate func.
if use_bn:
new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
eps=old_bn.eps, momentum=old_bn.momentum, affine=old_bn.affine)
        # old_bn.affine == True, need to copy learnable gamma and beta to new_bn
# gamma: size = (num_features)
old_weights = old_bn.weight.data.cpu().numpy()
new_weights = new_bn.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune)
new_bn.weight.data = torch.from_numpy(new_weights).cuda()
# beta: size = (num_features)
bias_numpy = old_bn.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune)
new_bn.bias.data = torch.from_numpy(bias).cuda()
if use_bn:
# BatchNorm modification
base = torch.nn.Sequential(
*(replace_layers(model.base, i, [layer_index, layer_index+1], \
[new_conv, new_bn]) for i, _ in enumerate(model.base)))
del old_bn
else:
# replace current layer and next_conv with new_conv and next_new_conv respectively
base = torch.nn.Sequential(
*(replace_layers(model.base, i, [layer_index], \
[new_conv]) for i, _ in enumerate(model.base)))
del model.base # delete and replace with brand new one
del conv
model.base = base # update current layer
model = update_next_layers(model, layer_index, filters_to_prune) # update following layers
message = str(100*float(cut) / num_filters) + "%"
print("Filters prunned", str(message))
return model |
context.rs | //! See `CompletionContext` structure.
use std::iter;
use base_db::SourceDatabaseExt;
use hir::{
HasAttrs, Local, Name, PathResolution, ScopeDef, Semantics, SemanticsScope, Type, TypeInfo,
};
use ide_db::{
active_parameter::ActiveParameter,
base_db::{FilePosition, SourceDatabase},
famous_defs::FamousDefs,
FxHashMap, FxHashSet, RootDatabase,
};
use syntax::{
algo::{find_node_at_offset, non_trivia_sibling},
ast::{self, AttrKind, HasName, NameOrNameRef},
match_ast, AstNode, NodeOrToken,
SyntaxKind::{self, *},
SyntaxNode, SyntaxToken, TextRange, TextSize, T,
};
use text_edit::Indel;
use crate::{
patterns::{
determine_location, determine_prev_sibling, is_in_loop_body, is_in_token_of_for_loop,
previous_token, ImmediateLocation, ImmediatePrevSibling,
},
CompletionConfig,
};
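// This fake identifier is spliced into a copy of the file at the cursor position
// (see `CompletionContext::new`) so the parser produces a complete tree even though
// the user is mid-edit; the original file is still used for the actual completion.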
const COMPLETION_MARKER: &str = "intellijRulezz";
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(crate) enum PatternRefutability {
Refutable,
Irrefutable,
}
pub(crate) enum Visible {
Yes,
Editable,
No,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub(super) enum PathKind {
Expr,
Type,
Attr { kind: AttrKind, annotated_item_kind: Option<SyntaxKind> },
Derive,
// This should be removed in favor of `has_macro_bang` in PathCompletionContext
Mac,
Pat,
Vis { has_in_token: bool },
Use,
}
#[derive(Debug)]
pub(crate) struct PathCompletionCtx {
/// If this is a call with () already there (or {} in case of record patterns)
pub(super) has_call_parens: bool,
    /// Whether this path starts with a `::`.
pub(super) is_absolute_path: bool,
/// The qualifier of the current path if it exists.
pub(super) qualifier: Option<PathQualifierCtx>,
pub(super) kind: Option<PathKind>,
/// Whether the path segment has type args or not.
pub(super) has_type_args: bool,
    /// `true` if this path is a statement or the last expression in the block.
pub(super) can_be_stmt: bool,
pub(super) in_loop_body: bool,
}
#[derive(Debug)]
pub(crate) struct PathQualifierCtx {
pub(crate) path: ast::Path,
pub(crate) resolution: Option<PathResolution>,
/// Whether this path consists solely of `super` segments
pub(crate) is_super_chain: bool,
/// Whether the qualifier comes from a use tree parent or not
pub(crate) use_tree_parent: bool,
}
#[derive(Debug)]
pub(super) struct PatternContext {
pub(super) refutability: PatternRefutability,
pub(super) param_ctx: Option<(ast::ParamList, ast::Param, ParamKind)>,
pub(super) has_type_ascription: bool,
pub(super) parent_pat: Option<ast::Pat>,
pub(super) ref_token: Option<SyntaxToken>,
pub(super) mut_token: Option<SyntaxToken>,
}
#[derive(Debug)]
pub(super) enum LifetimeContext {
LifetimeParam { is_decl: bool, param: ast::LifetimeParam },
Lifetime,
LabelRef,
LabelDef,
}
#[derive(Debug)]
#[allow(dead_code)]
pub(super) enum NameContext {
Const,
ConstParam,
Enum,
Function,
IdentPat,
MacroDef,
MacroRules,
/// Fake node
Module(ast::Module),
RecordField,
Rename,
SelfParam,
Static,
Struct,
Trait,
TypeAlias,
TypeParam,
Union,
Variant,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum ParamKind {
Function(ast::Fn),
Closure(ast::ClosureExpr),
}
/// `CompletionContext` is created early during completion to figure out where
/// exactly the cursor is, syntax-wise.
#[derive(Debug)]
pub(crate) struct CompletionContext<'a> {
pub(super) sema: Semantics<'a, RootDatabase>,
pub(super) scope: SemanticsScope<'a>,
pub(super) db: &'a RootDatabase,
pub(super) config: &'a CompletionConfig,
pub(super) position: FilePosition,
/// The token before the cursor, in the original file.
pub(super) original_token: SyntaxToken,
/// The token before the cursor, in the macro-expanded file.
pub(super) token: SyntaxToken,
/// The crate of the current file.
pub(super) krate: hir::Crate,
/// The module of the `scope`.
pub(super) module: hir::Module,
/// The expected name of what we are completing.
/// This is usually the parameter name of the function argument we are completing.
pub(super) expected_name: Option<NameOrNameRef>,
/// The expected type of what we are completing.
pub(super) expected_type: Option<Type>,
/// The parent function of the cursor position if it exists.
pub(super) function_def: Option<ast::Fn>,
/// The parent impl of the cursor position if it exists.
pub(super) impl_def: Option<ast::Impl>,
/// The NameLike under the cursor in the original file if it exists.
pub(super) name_syntax: Option<ast::NameLike>,
/// Are we completing inside a let statement with a missing semicolon?
pub(super) incomplete_let: bool,
pub(super) completion_location: Option<ImmediateLocation>,
pub(super) prev_sibling: Option<ImmediatePrevSibling>,
pub(super) fake_attribute_under_caret: Option<ast::Attr>,
pub(super) previous_token: Option<SyntaxToken>,
pub(super) name_ctx: Option<NameContext>,
pub(super) lifetime_ctx: Option<LifetimeContext>,
pub(super) pattern_ctx: Option<PatternContext>,
pub(super) path_context: Option<PathCompletionCtx>,
pub(super) existing_derives: FxHashSet<hir::Macro>,
pub(super) locals: FxHashMap<Name, Local>,
}
impl<'a> CompletionContext<'a> {
/// The range of the identifier that is being completed.
pub(crate) fn source_range(&self) -> TextRange {
// check kind of macro-expanded token, but use range of original token
let kind = self.token.kind();
match kind {
CHAR => {
// assume we are completing a lifetime but the user has only typed the '
cov_mark::hit!(completes_if_lifetime_without_idents);
TextRange::at(self.original_token.text_range().start(), TextSize::from(1))
}
IDENT | LIFETIME_IDENT | UNDERSCORE => self.original_token.text_range(),
_ if kind.is_keyword() => self.original_token.text_range(),
_ => TextRange::empty(self.position.offset),
}
}
pub(crate) fn name_ref(&self) -> Option<&ast::NameRef> {
self.name_syntax.as_ref().and_then(ast::NameLike::as_name_ref)
}
pub(crate) fn lifetime(&self) -> Option<&ast::Lifetime> {
self.name_syntax.as_ref().and_then(ast::NameLike::as_lifetime)
}
pub(crate) fn previous_token_is(&self, kind: SyntaxKind) -> bool {
self.previous_token.as_ref().map_or(false, |tok| tok.kind() == kind)
}
pub(crate) fn famous_defs(&self) -> FamousDefs {
FamousDefs(&self.sema, self.krate)
}
pub(crate) fn dot_receiver(&self) -> Option<&ast::Expr> {
match &self.completion_location {
Some(
ImmediateLocation::MethodCall { receiver, .. }
| ImmediateLocation::FieldAccess { receiver, .. },
) => receiver.as_ref(),
_ => None,
}
}
pub(crate) fn has_dot_receiver(&self) -> bool {
matches!(
&self.completion_location,
Some(ImmediateLocation::FieldAccess { receiver, .. } | ImmediateLocation::MethodCall { receiver,.. })
if receiver.is_some()
)
}
pub(crate) fn expects_assoc_item(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::Trait | ImmediateLocation::Impl))
}
pub(crate) fn expects_variant(&self) -> bool {
matches!(self.name_ctx, Some(NameContext::Variant))
}
pub(crate) fn expects_non_trait_assoc_item(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::Impl))
}
pub(crate) fn expects_item(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::ItemList))
}
pub(crate) fn expects_generic_arg(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::GenericArgList(_)))
}
pub(crate) fn has_block_expr_parent(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::StmtList))
}
pub(crate) fn expects_ident_ref_expr(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::RefExpr))
}
pub(crate) fn expect_field(&self) -> bool {
matches!(self.completion_location, Some(ImmediateLocation::TupleField))
|| matches!(self.name_ctx, Some(NameContext::RecordField))
}
pub(crate) fn has_impl_or_trait_prev_sibling(&self) -> bool {
matches!(
self.prev_sibling,
Some(ImmediatePrevSibling::ImplDefType | ImmediatePrevSibling::TraitDefName)
)
}
pub(crate) fn has_impl_prev_sibling(&self) -> bool {
matches!(self.prev_sibling, Some(ImmediatePrevSibling::ImplDefType))
}
pub(crate) fn has_visibility_prev_sibling(&self) -> bool {
matches!(self.prev_sibling, Some(ImmediatePrevSibling::Visibility))
}
pub(crate) fn after_if(&self) -> bool {
matches!(self.prev_sibling, Some(ImmediatePrevSibling::IfExpr))
}
pub(crate) fn is_path_disallowed(&self) -> bool {
self.previous_token_is(T![unsafe])
|| matches!(
self.prev_sibling,
Some(ImmediatePrevSibling::Attribute | ImmediatePrevSibling::Visibility)
)
|| matches!(
self.completion_location,
Some(ImmediateLocation::RecordPat(_) | ImmediateLocation::RecordExpr(_))
)
|| matches!(self.name_ctx, Some(NameContext::Module(_) | NameContext::Rename))
}
pub(crate) fn expects_expression(&self) -> bool {
matches!(self.path_context, Some(PathCompletionCtx { kind: Some(PathKind::Expr), .. }))
}
pub(crate) fn expects_type(&self) -> bool {
matches!(self.path_context, Some(PathCompletionCtx { kind: Some(PathKind::Type), .. }))
}
pub(crate) fn path_is_call(&self) -> bool {
self.path_context.as_ref().map_or(false, |it| it.has_call_parens)
}
pub(crate) fn is_non_trivial_path(&self) -> bool {
matches!(
self.path_context,
Some(
PathCompletionCtx { is_absolute_path: true, .. }
| PathCompletionCtx { qualifier: Some(_), .. }
)
)
}
pub(crate) fn path_qual(&self) -> Option<&ast::Path> {
self.path_context.as_ref().and_then(|it| it.qualifier.as_ref().map(|it| &it.path))
}
pub(crate) fn path_kind(&self) -> Option<PathKind> {
self.path_context.as_ref().and_then(|it| it.kind)
}
pub(crate) fn is_immediately_after_macro_bang(&self) -> bool {
self.token.kind() == BANG && self.token.parent().map_or(false, |it| it.kind() == MACRO_CALL)
}
/// Checks if an item is visible and not `doc(hidden)` at the completion site.
pub(crate) fn is_visible<I>(&self, item: &I) -> Visible
where
I: hir::HasVisibility + hir::HasAttrs + hir::HasCrate + Copy,
{
self.is_visible_impl(&item.visibility(self.db), &item.attrs(self.db), item.krate(self.db))
}
pub(crate) fn is_scope_def_hidden(&self, scope_def: ScopeDef) -> bool {
if let (Some(attrs), Some(krate)) = (scope_def.attrs(self.db), scope_def.krate(self.db)) {
return self.is_doc_hidden(&attrs, krate);
}
false
}
/// Check if an item is `#[doc(hidden)]`.
pub(crate) fn is_item_hidden(&self, item: &hir::ItemInNs) -> bool {
let attrs = item.attrs(self.db);
let krate = item.krate(self.db);
match (attrs, krate) {
(Some(attrs), Some(krate)) => self.is_doc_hidden(&attrs, krate),
_ => false,
}
}
/// Whether the given trait is an operator trait or not.
pub(crate) fn is_ops_trait(&self, trait_: hir::Trait) -> bool {
match trait_.attrs(self.db).lang() {
Some(lang) => OP_TRAIT_LANG_NAMES.contains(&lang.as_str()),
None => false,
}
}
/// A version of [`SemanticsScope::process_all_names`] that filters out `#[doc(hidden)]` items.
pub(crate) fn process_all_names(&self, f: &mut dyn FnMut(Name, ScopeDef)) {
let _p = profile::span("CompletionContext::process_all_names");
self.scope.process_all_names(&mut |name, def| {
if self.is_scope_def_hidden(def) {
return;
}
f(name, def);
});
}
pub(crate) fn process_all_names_raw(&self, f: &mut dyn FnMut(Name, ScopeDef)) {
let _p = profile::span("CompletionContext::process_all_names_raw");
self.scope.process_all_names(&mut |name, def| f(name, def));
}
fn is_visible_impl(
&self,
vis: &hir::Visibility,
attrs: &hir::Attrs,
defining_crate: hir::Crate,
) -> Visible {
if !vis.is_visible_from(self.db, self.module.into()) {
if !self.config.enable_private_editable {
return Visible::No;
}
// If the definition location is editable, also show private items
let root_file = defining_crate.root_file(self.db);
let source_root_id = self.db.file_source_root(root_file);
let is_editable = !self.db.source_root(source_root_id).is_library;
return if is_editable { Visible::Editable } else { Visible::No };
}
if self.is_doc_hidden(attrs, defining_crate) {
Visible::No
} else {
Visible::Yes
}
}
fn is_doc_hidden(&self, attrs: &hir::Attrs, defining_crate: hir::Crate) -> bool {
// `doc(hidden)` items are only completed within the defining crate.
self.krate != defining_crate && attrs.has_doc_hidden()
}
}
// CompletionContext construction
impl<'a> CompletionContext<'a> {
pub(super) fn new(
db: &'a RootDatabase,
position @ FilePosition { file_id, offset }: FilePosition,
config: &'a CompletionConfig,
) -> Option<CompletionContext<'a>> {
let _p = profile::span("CompletionContext::new");
let sema = Semantics::new(db);
let original_file = sema.parse(file_id);
// Insert a fake ident to get a valid parse tree. We will use this file
// to determine context, though the original_file will be used for
// actual completion.
let file_with_fake_ident = {
let parse = db.parse(file_id);
let edit = Indel::insert(offset, COMPLETION_MARKER.to_string());
parse.reparse(&edit).tree()
};
let fake_ident_token =
file_with_fake_ident.syntax().token_at_offset(offset).right_biased()?;
let original_token = original_file.syntax().token_at_offset(offset).left_biased()?;
let token = sema.descend_into_macros_single(original_token.clone());
let scope = sema.scope_at_offset(&token.parent()?, offset)?;
let krate = scope.krate();
let module = scope.module();
let mut locals = FxHashMap::default();
scope.process_all_names(&mut |name, scope| {
if let ScopeDef::Local(local) = scope {
locals.insert(name, local);
}
});
let mut ctx = CompletionContext {
sema,
scope,
db,
config,
position,
original_token,
token,
krate,
module,
expected_name: None,
expected_type: None,
function_def: None,
impl_def: None,
name_syntax: None,
lifetime_ctx: None,
pattern_ctx: None,
name_ctx: None,
completion_location: None,
prev_sibling: None,
fake_attribute_under_caret: None,
previous_token: None,
path_context: None,
locals,
incomplete_let: false,
existing_derives: Default::default(),
};
ctx.expand_and_fill(
original_file.syntax().clone(),
file_with_fake_ident.syntax().clone(),
offset,
fake_ident_token,
);
Some(ctx)
}
/// Expand attributes and macro calls at the current cursor position for both the original file
    /// and fake file repeatedly. As soon as one of the two expansions fails we stop so that the original
/// and speculative states stay in sync.
fn expand_and_fill(
&mut self,
mut original_file: SyntaxNode,
mut speculative_file: SyntaxNode,
mut offset: TextSize,
mut fake_ident_token: SyntaxToken,
) {
let _p = profile::span("CompletionContext::expand_and_fill");
let mut derive_ctx = None;
'expansion: loop {
let parent_item =
|item: &ast::Item| item.syntax().ancestors().skip(1).find_map(ast::Item::cast);
let ancestor_items = iter::successors(
Option::zip(
find_node_at_offset::<ast::Item>(&original_file, offset),
find_node_at_offset::<ast::Item>(&speculative_file, offset),
),
|(a, b)| parent_item(a).zip(parent_item(b)),
);
// first try to expand attributes as these are always the outermost macro calls
'ancestors: for (actual_item, item_with_fake_ident) in ancestor_items {
match (
self.sema.expand_attr_macro(&actual_item),
self.sema.speculative_expand_attr_macro(
&actual_item,
&item_with_fake_ident,
fake_ident_token.clone(),
),
) {
// maybe parent items have attributes, so continue walking the ancestors
(None, None) => continue 'ancestors,
// successful expansions
(Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) => {
let new_offset = fake_mapped_token.text_range().start();
if new_offset > actual_expansion.text_range().end() {
// offset outside of bounds from the original expansion,
// stop here to prevent problems from happening
break 'expansion;
}
original_file = actual_expansion;
speculative_file = fake_expansion;
fake_ident_token = fake_mapped_token;
offset = new_offset;
continue 'expansion;
}
// exactly one expansion failed, inconsistent state so stop expanding completely
_ => break 'expansion,
}
}
// No attributes have been expanded, so look for macro_call! token trees or derive token trees
let orig_tt = match find_node_at_offset::<ast::TokenTree>(&original_file, offset) {
Some(it) => it,
None => break 'expansion,
};
let spec_tt = match find_node_at_offset::<ast::TokenTree>(&speculative_file, offset) {
Some(it) => it,
None => break 'expansion,
};
// Expand pseudo-derive expansion
if let (Some(orig_attr), Some(spec_attr)) = (
orig_tt.syntax().parent().and_then(ast::Meta::cast).and_then(|it| it.parent_attr()),
spec_tt.syntax().parent().and_then(ast::Meta::cast).and_then(|it| it.parent_attr()),
) {
if let (Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) = (
self.sema.expand_derive_as_pseudo_attr_macro(&orig_attr),
self.sema.speculative_expand_derive_as_pseudo_attr_macro(
&orig_attr,
&spec_attr,
fake_ident_token.clone(),
),
) {
derive_ctx = Some((
actual_expansion,
fake_expansion,
fake_mapped_token.text_range().start(),
orig_attr,
));
}
// at this point we won't have any more successful expansions, so stop
break 'expansion;
}
// Expand fn-like macro calls
if let (Some(actual_macro_call), Some(macro_call_with_fake_ident)) = (
orig_tt.syntax().ancestors().find_map(ast::MacroCall::cast),
spec_tt.syntax().ancestors().find_map(ast::MacroCall::cast),
) {
let mac_call_path0 = actual_macro_call.path().as_ref().map(|s| s.syntax().text());
let mac_call_path1 =
macro_call_with_fake_ident.path().as_ref().map(|s| s.syntax().text());
// inconsistent state, stop expanding
if mac_call_path0 != mac_call_path1 {
break 'expansion;
}
let speculative_args = match macro_call_with_fake_ident.token_tree() {
Some(tt) => tt,
None => break 'expansion,
};
match (
self.sema.expand(&actual_macro_call),
self.sema.speculative_expand(
&actual_macro_call,
&speculative_args,
fake_ident_token.clone(),
),
) {
// successful expansions
(Some(actual_expansion), Some((fake_expansion, fake_mapped_token))) => {
let new_offset = fake_mapped_token.text_range().start();
if new_offset > actual_expansion.text_range().end() {
// offset outside of bounds from the original expansion,
// stop here to prevent problems from happening
break 'expansion;
}
original_file = actual_expansion;
speculative_file = fake_expansion;
fake_ident_token = fake_mapped_token;
offset = new_offset;
continue 'expansion;
}
                    // at least one expansion failed, we won't have anything to expand from this point
// onwards so break out
_ => break 'expansion,
}
}
// none of our states have changed so stop the loop
break 'expansion;
}
self.fill(&original_file, speculative_file, offset, derive_ctx);
}
/// Calculate the expected type and name of the cursor position.
fn expected_type_and_name(&self) -> (Option<Type>, Option<NameOrNameRef>) {
let mut node = match self.token.parent() {
Some(it) => it,
None => return (None, None),
};
loop {
break match_ast! {
match node {
ast::LetStmt(it) => {
cov_mark::hit!(expected_type_let_with_leading_char);
cov_mark::hit!(expected_type_let_without_leading_char);
let ty = it.pat()
.and_then(|pat| self.sema.type_of_pat(&pat))
.or_else(|| it.initializer().and_then(|it| self.sema.type_of_expr(&it)))
.map(TypeInfo::original);
let name = match it.pat() {
Some(ast::Pat::IdentPat(ident)) => ident.name().map(NameOrNameRef::Name),
Some(_) | None => None,
};
(ty, name)
},
ast::LetExpr(it) => {
cov_mark::hit!(expected_type_if_let_without_leading_char);
let ty = it.pat()
.and_then(|pat| self.sema.type_of_pat(&pat))
.or_else(|| it.expr().and_then(|it| self.sema.type_of_expr(&it)))
.map(TypeInfo::original);
(ty, None)
},
ast::ArgList(_) => {
cov_mark::hit!(expected_type_fn_param);
ActiveParameter::at_token(
&self.sema,
self.token.clone(),
).map(|ap| {
let name = ap.ident().map(NameOrNameRef::Name);
let ty = if has_ref(&self.token) {
cov_mark::hit!(expected_type_fn_param_ref);
ap.ty.remove_ref()
} else {
Some(ap.ty)
};
(ty, name)
})
.unwrap_or((None, None))
},
ast::RecordExprFieldList(it) => {
// wouldn't try {} be nice...
(|| {
if self.token.kind() == T![..]
|| self.token.prev_token().map(|t| t.kind()) == Some(T![..])
{
cov_mark::hit!(expected_type_struct_func_update);
let record_expr = it.syntax().parent().and_then(ast::RecordExpr::cast)?;
let ty = self.sema.type_of_expr(&record_expr.into())?;
Some((
Some(ty.original),
None
))
} else {
cov_mark::hit!(expected_type_struct_field_without_leading_char);
let expr_field = self.token.prev_sibling_or_token()?
.into_node()
.and_then(ast::RecordExprField::cast)?;
let (_, _, ty) = self.sema.resolve_record_field(&expr_field)?;
Some((
Some(ty),
expr_field.field_name().map(NameOrNameRef::NameRef),
))
}
})().unwrap_or((None, None))
},
ast::RecordExprField(it) => {
if let Some(expr) = it.expr() {
cov_mark::hit!(expected_type_struct_field_with_leading_char);
(
self.sema.type_of_expr(&expr).map(TypeInfo::original),
it.field_name().map(NameOrNameRef::NameRef),
)
} else {
cov_mark::hit!(expected_type_struct_field_followed_by_comma);
let ty = self.sema.resolve_record_field(&it)
.map(|(_, _, ty)| ty);
(
ty,
it.field_name().map(NameOrNameRef::NameRef),
)
}
},
ast::MatchExpr(it) => {
cov_mark::hit!(expected_type_match_arm_without_leading_char);
let ty = it.expr().and_then(|e| self.sema.type_of_expr(&e)).map(TypeInfo::original);
(ty, None)
},
ast::IfExpr(it) => {
let ty = it.condition()
.and_then(|e| self.sema.type_of_expr(&e))
.map(TypeInfo::original);
(ty, None)
},
ast::IdentPat(it) => {
cov_mark::hit!(expected_type_if_let_with_leading_char);
cov_mark::hit!(expected_type_match_arm_with_leading_char);
let ty = self.sema.type_of_pat(&ast::Pat::from(it)).map(TypeInfo::original);
(ty, None)
},
ast::Fn(it) => {
cov_mark::hit!(expected_type_fn_ret_with_leading_char);
cov_mark::hit!(expected_type_fn_ret_without_leading_char);
let def = self.sema.to_def(&it);
(def.map(|def| def.ret_type(self.db)), None)
},
ast::ClosureExpr(it) => {
let ty = self.sema.type_of_expr(&it.into());
ty.and_then(|ty| ty.original.as_callable(self.db))
.map(|c| (Some(c.return_type()), None))
.unwrap_or((None, None))
},
ast::ParamList(_) => (None, None),
ast::Stmt(_) => (None, None),
ast::Item(_) => (None, None),
_ => {
match node.parent() {
Some(n) => {
node = n;
continue;
},
None => (None, None),
}
},
}
};
}
}
/// Fill the completion context, this is what does semantic reasoning about the surrounding context
/// of the completion location.
fn fill(
&mut self,
original_file: &SyntaxNode,
file_with_fake_ident: SyntaxNode,
offset: TextSize,
derive_ctx: Option<(SyntaxNode, SyntaxNode, TextSize, ast::Attr)>,
) {
let fake_ident_token = file_with_fake_ident.token_at_offset(offset).right_biased().unwrap();
let syntax_element = NodeOrToken::Token(fake_ident_token);
if is_in_token_of_for_loop(syntax_element.clone()) {
// for pat $0
// there is nothing to complete here except `in` keyword
// don't bother populating the context
// FIXME: the completion calculations should end up good enough
// such that this special case becomes unnecessary
return;
}
self.previous_token = previous_token(syntax_element.clone());
self.fake_attribute_under_caret = syntax_element.ancestors().find_map(ast::Attr::cast);
self.incomplete_let =
syntax_element.ancestors().take(6).find_map(ast::LetStmt::cast).map_or(false, |it| {
it.syntax().text_range().end() == syntax_element.text_range().end()
});
(self.expected_type, self.expected_name) = self.expected_type_and_name();
// Overwrite the path kind for derives
if let Some((original_file, file_with_fake_ident, offset, origin_attr)) = derive_ctx {
self.existing_derives = self
.sema
.resolve_derive_macro(&origin_attr)
.into_iter()
.flatten()
.flatten()
.collect();
if let Some(ast::NameLike::NameRef(name_ref)) =
find_node_at_offset(&file_with_fake_ident, offset)
{
self.name_syntax =
find_node_at_offset(&original_file, name_ref.syntax().text_range().start());
if let Some((path_ctx, _)) =
Self::classify_name_ref(&self.sema, &original_file, name_ref)
{
self.path_context =
Some(PathCompletionCtx { kind: Some(PathKind::Derive), ..path_ctx });
}
}
return;
}
let name_like = match find_node_at_offset(&file_with_fake_ident, offset) {
Some(it) => it,
None => return,
};
self.completion_location =
determine_location(&self.sema, original_file, offset, &name_like);
self.prev_sibling = determine_prev_sibling(&name_like);
self.name_syntax =
find_node_at_offset(original_file, name_like.syntax().text_range().start());
self.impl_def = self
.sema
.token_ancestors_with_macros(self.token.clone())
.take_while(|it| it.kind() != SOURCE_FILE && it.kind() != MODULE)
.find_map(ast::Impl::cast);
self.function_def = self
.sema
.token_ancestors_with_macros(self.token.clone())
.take_while(|it| it.kind() != SOURCE_FILE && it.kind() != MODULE)
.find_map(ast::Fn::cast);
match name_like {
ast::NameLike::Lifetime(lifetime) => {
self.lifetime_ctx = Self::classify_lifetime(&self.sema, original_file, lifetime);
}
ast::NameLike::NameRef(name_ref) => {
if let Some((path_ctx, pat_ctx)) =
Self::classify_name_ref(&self.sema, original_file, name_ref)
{
self.path_context = Some(path_ctx);
self.pattern_ctx = pat_ctx;
}
}
ast::NameLike::Name(name) => {
if let Some((name_ctx, pat_ctx)) =
Self::classify_name(&self.sema, original_file, name)
{
self.pattern_ctx = pat_ctx;
self.name_ctx = Some(name_ctx);
}
}
}
}
fn classify_lifetime(
_sema: &Semantics<RootDatabase>,
_original_file: &SyntaxNode,
lifetime: ast::Lifetime,
) -> Option<LifetimeContext> {
let parent = lifetime.syntax().parent()?;
if parent.kind() == ERROR {
return None;
}
Some(match_ast! {
match parent {
ast::LifetimeParam(param) => LifetimeContext::LifetimeParam {
is_decl: param.lifetime().as_ref() == Some(&lifetime),
param
},
ast::BreakExpr(_) => LifetimeContext::LabelRef,
ast::ContinueExpr(_) => LifetimeContext::LabelRef,
ast::Label(_) => LifetimeContext::LabelDef,
_ => LifetimeContext::Lifetime,
}
})
}
fn classify_name(
_sema: &Semantics<RootDatabase>,
original_file: &SyntaxNode,
name: ast::Name,
) -> Option<(NameContext, Option<PatternContext>)> {
let parent = name.syntax().parent()?;
let mut pat_ctx = None;
let name_ctx = match_ast! {
match parent {
ast::Const(_) => NameContext::Const,
ast::ConstParam(_) => NameContext::ConstParam,
ast::Enum(_) => NameContext::Enum,
ast::Fn(_) => NameContext::Function,
ast::IdentPat(bind_pat) => {
let is_name_in_field_pat = bind_pat
.syntax()
.parent()
.and_then(ast::RecordPatField::cast)
.map_or(false, |pat_field| pat_field.name_ref().is_none());
if !is_name_in_field_pat {
pat_ctx = Some(pattern_context_for(original_file, bind_pat.into()));
}
NameContext::IdentPat
},
ast::MacroDef(_) => NameContext::MacroDef,
ast::MacroRules(_) => NameContext::MacroRules,
ast::Module(module) => NameContext::Module(module),
ast::RecordField(_) => NameContext::RecordField,
ast::Rename(_) => NameContext::Rename,
ast::SelfParam(_) => NameContext::SelfParam,
ast::Static(_) => NameContext::Static,
ast::Struct(_) => NameContext::Struct,
ast::Trait(_) => NameContext::Trait,
ast::TypeAlias(_) => NameContext::TypeAlias,
ast::TypeParam(_) => NameContext::TypeParam,
ast::Union(_) => NameContext::Union,
ast::Variant(_) => NameContext::Variant,
_ => return None,
}
};
Some((name_ctx, pat_ctx))
}
fn classify_name_ref(
sema: &Semantics<RootDatabase>,
original_file: &SyntaxNode,
name_ref: ast::NameRef,
) -> Option<(PathCompletionCtx, Option<PatternContext>)> {
let parent = name_ref.syntax().parent()?;
let segment = ast::PathSegment::cast(parent)?;
let path = segment.parent_path();
let mut path_ctx = PathCompletionCtx {
has_call_parens: false,
is_absolute_path: false,
qualifier: None,
has_type_args: false,
can_be_stmt: false,
in_loop_body: false,
kind: None,
};
let mut pat_ctx = None;
path_ctx.in_loop_body = is_in_loop_body(name_ref.syntax());
path_ctx.kind = path.syntax().ancestors().find_map(|it| {
            // using Option<Option<PathKind>> as extra control flow
let kind = match_ast! {
match it {
ast::PathType(_) => Some(PathKind::Type),
ast::PathExpr(it) => {
path_ctx.has_call_parens = it.syntax().parent().map_or(false, |it| ast::CallExpr::can_cast(it.kind()));
Some(PathKind::Expr)
},
ast::TupleStructPat(it) => {
path_ctx.has_call_parens = true;
pat_ctx = Some(pattern_context_for(original_file, it.into()));
Some(PathKind::Pat)
},
ast::RecordPat(it) => {
path_ctx.has_call_parens = true;
pat_ctx = Some(pattern_context_for(original_file, it.into()));
Some(PathKind::Pat)
},
ast::PathPat(it) => {
pat_ctx = Some(pattern_context_for(original_file, it.into()));
Some(PathKind::Pat)
},
ast::MacroCall(it) => it.excl_token().and(Some(PathKind::Mac)),
ast::Meta(meta) => (|| {
let attr = meta.parent_attr()?;
let kind = attr.kind();
let attached = attr.syntax().parent()?;
let is_trailing_outer_attr = kind != AttrKind::Inner
&& non_trivia_sibling(attr.syntax().clone().into(), syntax::Direction::Next).is_none();
let annotated_item_kind = if is_trailing_outer_attr {
None
} else {
Some(attached.kind())
};
Some(PathKind::Attr {
kind,
annotated_item_kind,
})
})(),
ast::Visibility(it) => Some(PathKind::Vis { has_in_token: it.in_token().is_some() }),
ast::UseTree(_) => Some(PathKind::Use),
_ => return None,
}
};
Some(kind)
}).flatten();
path_ctx.has_type_args = segment.generic_arg_list().is_some();
if let Some((path, use_tree_parent)) = path_or_use_tree_qualifier(&path) {
if !use_tree_parent {
path_ctx.is_absolute_path =
path.top_path().segment().map_or(false, |it| it.coloncolon_token().is_some());
}
let path = path
.segment()
.and_then(|it| find_node_in_file(original_file, &it))
.map(|it| it.parent_path());
path_ctx.qualifier = path.map(|path| {
let res = sema.resolve_path(&path);
let is_super_chain = iter::successors(Some(path.clone()), |p| p.qualifier())
.all(|p| p.segment().and_then(|s| s.super_token()).is_some());
PathQualifierCtx { path, resolution: res, is_super_chain, use_tree_parent }
});
return Some((path_ctx, pat_ctx));
}
| }
}
// Find either enclosing expr statement (thing with `;`) or a
// block. If block, check that we are the last expr.
path_ctx.can_be_stmt = name_ref
.syntax()
.ancestors()
.find_map(|node| {
if let Some(stmt) = ast::ExprStmt::cast(node.clone()) {
return Some(stmt.syntax().text_range() == name_ref.syntax().text_range());
}
if let Some(stmt_list) = ast::StmtList::cast(node) {
return Some(
stmt_list.tail_expr().map(|e| e.syntax().text_range())
== Some(name_ref.syntax().text_range()),
);
}
None
})
.unwrap_or(false);
Some((path_ctx, pat_ctx))
}
}
fn pattern_context_for(original_file: &SyntaxNode, pat: ast::Pat) -> PatternContext {
let mut is_param = None;
let (refutability, has_type_ascription) =
pat
.syntax()
.ancestors()
.skip_while(|it| ast::Pat::can_cast(it.kind()))
.next()
.map_or((PatternRefutability::Irrefutable, false), |node| {
let refutability = match_ast! {
match node {
ast::LetStmt(let_) => return (PatternRefutability::Irrefutable, let_.ty().is_some()),
ast::Param(param) => {
let has_type_ascription = param.ty().is_some();
is_param = (|| {
let fake_param_list = param.syntax().parent().and_then(ast::ParamList::cast)?;
let param_list = find_node_in_file_compensated(original_file, &fake_param_list)?;
let param_list_owner = param_list.syntax().parent()?;
let kind = match_ast! {
match param_list_owner {
ast::ClosureExpr(closure) => ParamKind::Closure(closure),
ast::Fn(fn_) => ParamKind::Function(fn_),
_ => return None,
}
};
Some((param_list, param, kind))
})();
return (PatternRefutability::Irrefutable, has_type_ascription)
},
ast::MatchArm(_) => PatternRefutability::Refutable,
ast::LetExpr(_) => PatternRefutability::Refutable,
ast::ForExpr(_) => PatternRefutability::Irrefutable,
_ => PatternRefutability::Irrefutable,
}
};
(refutability, false)
});
let (ref_token, mut_token) = match &pat {
ast::Pat::IdentPat(it) => (it.ref_token(), it.mut_token()),
_ => (None, None),
};
PatternContext {
refutability,
param_ctx: is_param,
has_type_ascription,
parent_pat: pat.syntax().parent().and_then(ast::Pat::cast),
mut_token,
ref_token,
}
}
/// Attempts to find `node` inside `syntax` via `node`'s text range.
fn find_node_in_file<N: AstNode>(syntax: &SyntaxNode, node: &N) -> Option<N> {
let syntax_range = syntax.text_range();
let range = node.syntax().text_range();
let intersection = range.intersect(syntax_range)?;
syntax.covering_element(intersection).ancestors().find_map(N::cast)
}
/// Attempts to find `node` inside `syntax` via `node`'s text range while compensating
/// for the offset introduced by the fake ident.
/// This is wrong if `node` comes before the insertion point! Use `find_node_in_file` instead.
fn find_node_in_file_compensated<N: AstNode>(syntax: &SyntaxNode, node: &N) -> Option<N> {
let syntax_range = syntax.text_range();
let range = node.syntax().text_range();
let end = range.end().checked_sub(TextSize::try_from(COMPLETION_MARKER.len()).ok()?)?;
if end < range.start() {
return None;
}
let range = TextRange::new(range.start(), end);
    // our inserted ident could cause `range` to go outside of the original syntax, so cap it
let intersection = range.intersect(syntax_range)?;
syntax.covering_element(intersection).ancestors().find_map(N::cast)
}
fn path_or_use_tree_qualifier(path: &ast::Path) -> Option<(ast::Path, bool)> {
if let Some(qual) = path.qualifier() {
return Some((qual, false));
}
let use_tree_list = path.syntax().ancestors().find_map(ast::UseTreeList::cast)?;
let use_tree = use_tree_list.syntax().parent().and_then(ast::UseTree::cast)?;
Some((use_tree.path()?, true))
}
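/// Returns `true` if the token at the cursor is preceded by a `&` (optionally with
/// the identifier being typed, whitespace and `mut` in between), i.e. the completed
/// expression is being taken by reference.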
fn has_ref(token: &SyntaxToken) -> bool {
let mut token = token.clone();
for skip in [IDENT, WHITESPACE, T![mut]] {
if token.kind() == skip {
token = match token.prev_token() {
Some(it) => it,
None => return false,
}
}
}
token.kind() == T![&]
}
const OP_TRAIT_LANG_NAMES: &[&str] = &[
"add_assign",
"add",
"bitand_assign",
"bitand",
"bitor_assign",
"bitor",
"bitxor_assign",
"bitxor",
"deref_mut",
"deref",
"div_assign",
"div",
"eq",
"fn_mut",
"fn_once",
"fn",
"index_mut",
"index",
"mul_assign",
"mul",
"neg",
"not",
"partial_ord",
"rem_assign",
"rem",
"shl_assign",
"shl",
"shr_assign",
"shr",
"sub",
];
#[cfg(test)]
mod tests {
use expect_test::{expect, Expect};
use hir::HirDisplay;
use crate::tests::{position, TEST_CONFIG};
use super::CompletionContext;
fn check_expected_type_and_name(ra_fixture: &str, expect: Expect) {
let (db, pos) = position(ra_fixture);
let config = TEST_CONFIG;
let completion_context = CompletionContext::new(&db, pos, &config).unwrap();
let ty = completion_context
.expected_type
.map(|t| t.display_test(&db).to_string())
.unwrap_or("?".to_owned());
let name = completion_context
.expected_name
.map_or_else(|| "?".to_owned(), |name| name.to_string());
expect.assert_eq(&format!("ty: {}, name: {}", ty, name));
}
#[test]
fn expected_type_let_without_leading_char() {
cov_mark::check!(expected_type_let_without_leading_char);
check_expected_type_and_name(
r#"
fn foo() {
let x: u32 = $0;
}
"#,
expect![[r#"ty: u32, name: x"#]],
);
}
#[test]
fn expected_type_let_with_leading_char() {
cov_mark::check!(expected_type_let_with_leading_char);
check_expected_type_and_name(
r#"
fn foo() {
let x: u32 = c$0;
}
"#,
expect![[r#"ty: u32, name: x"#]],
);
}
#[test]
fn expected_type_let_pat() {
check_expected_type_and_name(
r#"
fn foo() {
let x$0 = 0u32;
}
"#,
expect![[r#"ty: u32, name: ?"#]],
);
check_expected_type_and_name(
r#"
fn foo() {
let $0 = 0u32;
}
"#,
expect![[r#"ty: u32, name: ?"#]],
);
}
#[test]
fn expected_type_fn_param() {
cov_mark::check!(expected_type_fn_param);
check_expected_type_and_name(
r#"
fn foo() { bar($0); }
fn bar(x: u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
check_expected_type_and_name(
r#"
fn foo() { bar(c$0); }
fn bar(x: u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
}
#[test]
fn expected_type_fn_param_ref() {
cov_mark::check!(expected_type_fn_param_ref);
check_expected_type_and_name(
r#"
fn foo() { bar(&$0); }
fn bar(x: &u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
check_expected_type_and_name(
r#"
fn foo() { bar(&mut $0); }
fn bar(x: &mut u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
check_expected_type_and_name(
r#"
fn foo() { bar(& c$0); }
fn bar(x: &u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
check_expected_type_and_name(
r#"
fn foo() { bar(&mut c$0); }
fn bar(x: &mut u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
check_expected_type_and_name(
r#"
fn foo() { bar(&c$0); }
fn bar(x: &u32) {}
"#,
expect![[r#"ty: u32, name: x"#]],
);
}
#[test]
fn expected_type_struct_field_without_leading_char() {
cov_mark::check!(expected_type_struct_field_without_leading_char);
check_expected_type_and_name(
r#"
struct Foo { a: u32 }
fn foo() {
Foo { a: $0 };
}
"#,
expect![[r#"ty: u32, name: a"#]],
)
}
#[test]
fn expected_type_struct_field_followed_by_comma() {
cov_mark::check!(expected_type_struct_field_followed_by_comma);
check_expected_type_and_name(
r#"
struct Foo { a: u32 }
fn foo() {
Foo { a: $0, };
}
"#,
expect![[r#"ty: u32, name: a"#]],
)
}
#[test]
fn expected_type_generic_struct_field() {
check_expected_type_and_name(
r#"
struct Foo<T> { a: T }
fn foo() -> Foo<u32> {
Foo { a: $0 }
}
"#,
expect![[r#"ty: u32, name: a"#]],
)
}
#[test]
fn expected_type_struct_field_with_leading_char() {
cov_mark::check!(expected_type_struct_field_with_leading_char);
check_expected_type_and_name(
r#"
struct Foo { a: u32 }
fn foo() {
Foo { a: c$0 };
}
"#,
expect![[r#"ty: u32, name: a"#]],
);
}
#[test]
fn expected_type_match_arm_without_leading_char() {
cov_mark::check!(expected_type_match_arm_without_leading_char);
check_expected_type_and_name(
r#"
enum E { X }
fn foo() {
match E::X { $0 }
}
"#,
expect![[r#"ty: E, name: ?"#]],
);
}
#[test]
fn expected_type_match_arm_with_leading_char() {
cov_mark::check!(expected_type_match_arm_with_leading_char);
check_expected_type_and_name(
r#"
enum E { X }
fn foo() {
match E::X { c$0 }
}
"#,
expect![[r#"ty: E, name: ?"#]],
);
}
#[test]
fn expected_type_if_let_without_leading_char() {
cov_mark::check!(expected_type_if_let_without_leading_char);
check_expected_type_and_name(
r#"
enum Foo { Bar, Baz, Quux }
fn foo() {
let f = Foo::Quux;
if let $0 = f { }
}
"#,
expect![[r#"ty: Foo, name: ?"#]],
)
}
#[test]
fn expected_type_if_let_with_leading_char() {
cov_mark::check!(expected_type_if_let_with_leading_char);
check_expected_type_and_name(
r#"
enum Foo { Bar, Baz, Quux }
fn foo() {
let f = Foo::Quux;
if let c$0 = f { }
}
"#,
expect![[r#"ty: Foo, name: ?"#]],
)
}
#[test]
fn expected_type_fn_ret_without_leading_char() {
cov_mark::check!(expected_type_fn_ret_without_leading_char);
check_expected_type_and_name(
r#"
fn foo() -> u32 {
$0
}
"#,
expect![[r#"ty: u32, name: ?"#]],
)
}
#[test]
fn expected_type_fn_ret_with_leading_char() {
cov_mark::check!(expected_type_fn_ret_with_leading_char);
check_expected_type_and_name(
r#"
fn foo() -> u32 {
c$0
}
"#,
expect![[r#"ty: u32, name: ?"#]],
)
}
#[test]
fn expected_type_fn_ret_fn_ref_fully_typed() {
check_expected_type_and_name(
r#"
fn foo() -> u32 {
foo$0
}
"#,
expect![[r#"ty: u32, name: ?"#]],
)
}
#[test]
fn expected_type_closure_param_return() {
// FIXME: make this work with `|| $0`
check_expected_type_and_name(
r#"
//- minicore: fn
fn foo() {
bar(|| a$0);
}
fn bar(f: impl FnOnce() -> u32) {}
"#,
expect![[r#"ty: u32, name: ?"#]],
);
}
#[test]
fn expected_type_generic_function() {
check_expected_type_and_name(
r#"
fn foo() {
bar::<u32>($0);
}
fn bar<T>(t: T) {}
"#,
expect![[r#"ty: u32, name: t"#]],
);
}
#[test]
fn expected_type_generic_method() {
check_expected_type_and_name(
r#"
fn foo() {
S(1u32).bar($0);
}
struct S<T>(T);
impl<T> S<T> {
fn bar(self, t: T) {}
}
"#,
expect![[r#"ty: u32, name: t"#]],
);
}
#[test]
fn expected_type_functional_update() {
cov_mark::check!(expected_type_struct_func_update);
check_expected_type_and_name(
r#"
struct Foo { field: u32 }
fn foo() {
Foo {
..$0
}
}
"#,
expect![[r#"ty: Foo, name: ?"#]],
);
}
#[test]
fn expected_type_param_pat() {
check_expected_type_and_name(
r#"
struct Foo { field: u32 }
fn foo(a$0: Foo) {}
"#,
expect![[r#"ty: Foo, name: ?"#]],
);
check_expected_type_and_name(
r#"
struct Foo { field: u32 }
fn foo($0: Foo) {}
"#,
// FIXME make this work, currently fails due to pattern recovery eating the `:`
expect![[r#"ty: ?, name: ?"#]],
);
}
} | if let Some(segment) = path.segment() {
if segment.coloncolon_token().is_some() {
path_ctx.is_absolute_path = true;
return Some((path_ctx, pat_ctx)); |
dietcharts.routing.ts | import { Routes, RouterModule } from '@angular/router';
import { ModuleWithProviders } from '@angular/core';
import { DietchartsComponent } from './dietcharts.component';
// noinspection TypeScriptValidateTypes | component: DietchartsComponent,
}
];
export const dietChartsRouting: ModuleWithProviders = RouterModule.forChild(routes); | export const routes: Routes = [
{
path: '', |
test_star.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 9 16:50:42 2019
@author: dberke
Tests for star.py.
"""
import datetime as dt
from pathlib import Path
import numpy as np
import pytest
import unyt as u
import varconlib as vcl
from varconlib.exceptions import StarDirectoryNotFoundError
from varconlib.star import Star
pytestmark = pytest.mark.filterwarnings(("ignore::DeprecationWarning"))
base_test_dir = vcl.data_dir / f'spectra/HD117618'
if not base_test_dir.exists():
pytest.skip('Test directory not available.', allow_module_level=True)
@pytest.fixture(scope='module')
def test_dir():
return base_test_dir
@pytest.fixture(scope='module')
def tmp_dir(tmp_path_factory):
tmpdir = Path(tmp_path_factory.mktemp('test_star'))
return tmpdir
class TestStar(object):
@pytest.fixture(scope='class')
def test_star(self, test_dir):
return Star('HD117618', star_dir=test_dir,
load_data=False, init_params="Casagrande2011",
perform_model_correction=True)
def testNonExistentDir(self):
with pytest.raises(StarDirectoryNotFoundError):
Star('HD117618', star_dir='/nonsensical_dir_that_should_not_exist')
def testIndexMethods(self):
s = Star('HD1111')
with pytest.raises(KeyError):
s.p_index('')
with pytest.raises(KeyError):
s.t_index('')
with pytest.raises(KeyError):
s.od_index('')
def testLabelMethods(self):
s = Star('HD1111')
with pytest.raises(KeyError):
s.p_label('')
with pytest.raises(KeyError):
s.t_label('')
with pytest.raises(KeyError):
s.od_date('')
def testName(self, test_star):
assert test_star.name == 'HD117618'
def testFiberSplitIndex(self, test_star):
assert test_star.fiberSplitIndex is None
    def testObservationDateBidict(self, test_star):
assert test_star.od_index('2005-05-02T03:49:08.735') == 0
assert test_star.od_index(dt.datetime(year=2005, month=5,
day=2, hour=3,
minute=49, second=8,
microsecond=735000)) == 0
with pytest.raises(KeyError):
test_star.od_index(dt.datetime(year=2000, month=1,
day=1, hour=0,
minute=0, second=0))
assert test_star.od_date(2) == '2010-04-21T03:55:19.107'
with pytest.raises(KeyError):
test_star.od_date(3)
def testNumObs(self, test_star):
assert test_star.getNumObs(slice(None, None)) == 3
@pytest.mark.parametrize("norm", [True, False])
def testTransitionOffsetPattern(self, test_star, norm):
assert len(test_star.getTransitionOffsetPattern(slice(None, None),
normalized=norm)[0])\
== len(test_star._transition_bidict.keys())
@pytest.mark.parametrize('obs_num,expected',
[(0, -0.13005375),
(1, 24.92201306),
(2, 4.58199186)])
def testBERV(self, test_star, obs_num, expected):
assert test_star.bervArray[obs_num] ==\
pytest.approx(expected * u.km / u.s)
def testSaveAndRestoreData(self, test_star, tmp_dir):
star_name = test_star.name
tmp_file_path = tmp_dir
test_star.saveDataToDisk(tmp_file_path)
new_star = Star(star_name, star_dir=tmp_dir,
init_params='Nordstrom2004',
load_data=True)
| if np.any(np.isnan(getattr(test_star, name))):
assert np.all(np.isnan(getattr(test_star, name)) ==
np.isnan(getattr(new_star, name)))
else:
assert u.array.allclose_units(getattr(new_star, name),
getattr(test_star, name))
assert getattr(new_star, name).units == getattr(test_star,
name).units
assert new_star._obs_date_bidict == test_star._obs_date_bidict
assert new_star._transition_bidict == test_star.\
_transition_bidict
assert new_star._pair_bidict == test_star._pair_bidict | for name in test_star.unyt_arrays.values(): |
generate.go | package dns
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
)
// Parse the $GENERATE statement as used in BIND9 zones.
// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
// We are called after '$GENERATE '. After which we expect:
// * the range (12-24/2)
// * lhs (ownername)
// * [[ttl][class]]
// * type
// * rhs (rdata)
// But we are lazy here: only the range is parsed; *all* occurrences
// of $ after that are interpreted.
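// As a rough illustration (not taken from this file), a zone line such as
//
//	$GENERATE 1-3 host$ A 10.0.0.$
//
// is expanded by this parser into the equivalent of:
//
//	host1 A 10.0.0.1
//	host2 A 10.0.0.2
//	host3 A 10.0.0.3
//
// and a modifier like ${0,2,d} would substitute the zero-padded value instead.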
func (zp *ZoneParser) generate(l lex) (RR, bool) {
token := l.token
step := int64(1)
if i := strings.IndexByte(token, '/'); i >= 0 {
if i+1 == len(token) {
return zp.setParseError("bad step in $GENERATE range", l)
}
s, err := strconv.ParseInt(token[i+1:], 10, 64)
if err != nil || s <= 0 {
return zp.setParseError("bad step in $GENERATE range", l)
}
step = s
token = token[:i]
}
sx := strings.SplitN(token, "-", 2)
if len(sx) != 2 {
return zp.setParseError("bad start-stop in $GENERATE range", l)
}
start, err := strconv.ParseInt(sx[0], 10, 64)
if err != nil {
return zp.setParseError("bad start in $GENERATE range", l)
}
end, err := strconv.ParseInt(sx[1], 10, 64)
if err != nil {
return zp.setParseError("bad stop in $GENERATE range", l)
}
if end < 0 || start < 0 || end < start || (end-start)/step > 65535 {
return zp.setParseError("bad range in $GENERATE range", l)
}
// _BLANK
l, ok := zp.c.Next()
if !ok || l.value != zBlank {
return zp.setParseError("garbage after $GENERATE range", l)
}
// Create a complete new string, which we then parse again.
var s string
for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
if l.err {
return zp.setParseError("bad data in $GENERATE directive", l)
}
if l.value == zNewline {
break
}
s += l.token
}
r := &generateReader{
s: s,
cur: int(start),
start: int(start),
end: int(end),
step: int(step),
file: zp.file,
lex: &l,
}
zp.sub = NewZoneParser(r, zp.origin, zp.file)
zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed
zp.sub.generateDisallowed = true
zp.sub.SetDefaultTTL(defaultTtl)
return zp.subNext()
}
type generateReader struct {
s string
si int
cur int
start int
end int
step int
mod bytes.Buffer
escape bool
eof bool
file string
lex *lex
}
func (r *generateReader) parseError(msg string, end int) *ParseError {
r.eof = true // Make errors sticky.
l := *r.lex
l.token = r.s[r.si-1 : end]
l.column += r.si // l.column starts one zBLANK before r.s
return &ParseError{r.file, msg, l}
}
func (r *generateReader) Read(p []byte) (int, error) {
// NewZLexer, through NewZoneParser, should use ReadByte and
// not end up here.
panic("not implemented")
}
func (r *generateReader) ReadByte() (byte, error) {
if r.eof {
return 0, io.EOF
}
if r.mod.Len() > 0 {
return r.mod.ReadByte()
}
if r.si >= len(r.s) {
r.si = 0
r.cur += r.step
r.eof = r.cur > r.end || r.cur < 0
return '\n', nil
}
si := r.si
r.si++
switch r.s[si] {
case '\\':
if r.escape {
r.escape = false
return '\\', nil
}
r.escape = true
return r.ReadByte()
case '$':
if r.escape {
r.escape = false
return '$', nil
}
mod := "%d"
if si >= len(r.s)-1 {
// End of the string
fmt.Fprintf(&r.mod, mod, r.cur)
return r.mod.ReadByte()
}
if r.s[si+1] == '$' {
r.si++
return '$', nil
}
var offset int
// Search for { and }
if r.s[si+1] == '{' {
// Modifier block
sep := strings.Index(r.s[si+2:], "}")
if sep < 0 {
return 0, r.parseError("bad modifier in $GENERATE", len(r.s))
}
var errMsg string
mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep])
if errMsg != "" {
return 0, r.parseError(errMsg, si+3+sep)
}
			if r.start+offset < 0 || int64(r.end)+int64(offset) > 1<<31-1 {
return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
}
r.si += 2 + sep // Jump to it
}
fmt.Fprintf(&r.mod, mod, r.cur+offset)
return r.mod.ReadByte()
default:
if r.escape { // Pretty useless here
r.escape = false
return r.ReadByte()
}
return r.s[si], nil
}
}
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
func modToPrintf(s string) (string, int, string) {
// Modifier is { offset [ ,width [ ,base ] ] } - provide default
// values for optional width and type, if necessary.
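	// For example (illustrative, mirroring the switch below): "0,0,d" yields
	// format "%d" with offset 0, while "2,3,x" yields "%03x" with offset 2.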
var offStr, widthStr, base string
switch xs := strings.Split(s, ","); len(xs) {
case 1:
offStr, widthStr, base = xs[0], "0", "d"
case 2:
offStr, widthStr, base = xs[0], xs[1], "d"
case 3:
offStr, widthStr, base = xs[0], xs[1], xs[2]
default:
return "", 0, "bad modifier in $GENERATE"
}
switch base { | return "", 0, "bad base in $GENERATE"
}
offset, err := strconv.ParseInt(offStr, 10, 64)
if err != nil {
return "", 0, "bad offset in $GENERATE"
}
width, err := strconv.ParseInt(widthStr, 10, 64)
if err != nil || width < 0 || width > 255 {
return "", 0, "bad width in $GENERATE"
}
if width == 0 {
return "%" + base, int(offset), ""
}
return "%0" + widthStr + base, int(offset), ""
} | case "o", "d", "x", "X":
default: |
ast.rs | // Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
use crate::media_type::MediaType;
use crate::tsc_config;
use deno_core::error::AnyError;
use deno_core::resolve_url_or_path;
use deno_core::serde_json;
use deno_core::ModuleSpecifier;
use std::error::Error;
use std::fmt;
use std::ops::Range;
use std::rc::Rc;
use std::sync::Arc;
use std::sync::Mutex;
use swc_common::chain;
use swc_common::comments::Comment;
use swc_common::comments::CommentKind;
use swc_common::comments::SingleThreadedComments;
use swc_common::errors::Diagnostic;
use swc_common::errors::DiagnosticBuilder;
use swc_common::errors::Emitter;
use swc_common::errors::Handler;
use swc_common::errors::HandlerFlags;
use swc_common::FileName;
use swc_common::Globals;
use swc_common::Loc;
use swc_common::SourceFile;
use swc_common::SourceMap;
use swc_common::Span;
use swc_ecmascript::ast::Module;
use swc_ecmascript::ast::Program;
use swc_ecmascript::codegen::text_writer::JsWriter;
use swc_ecmascript::codegen::Node;
use swc_ecmascript::dep_graph::analyze_dependencies;
use swc_ecmascript::dep_graph::DependencyDescriptor;
use swc_ecmascript::parser::lexer::Lexer;
use swc_ecmascript::parser::token::Token;
use swc_ecmascript::parser::EsConfig;
use swc_ecmascript::parser::JscTarget;
use swc_ecmascript::parser::StringInput;
use swc_ecmascript::parser::Syntax;
use swc_ecmascript::parser::TsConfig;
use swc_ecmascript::transforms::fixer;
use swc_ecmascript::transforms::helpers;
use swc_ecmascript::transforms::hygiene;
use swc_ecmascript::transforms::pass::Optional;
use swc_ecmascript::transforms::proposals;
use swc_ecmascript::transforms::react;
use swc_ecmascript::transforms::typescript;
use swc_ecmascript::visit::FoldWith;
static TARGET: JscTarget = JscTarget::Es2020;
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Location {
pub filename: String,
pub line: usize,
pub col: usize,
}
impl From<swc_common::Loc> for Location {
fn from(swc_loc: swc_common::Loc) -> Self {
use swc_common::FileName::*;
let filename = match &swc_loc.file.name {
Real(path_buf) => path_buf.to_string_lossy().to_string(),
Custom(str_) => str_.to_string(),
_ => panic!("invalid filename"),
};
Location {
filename,
line: swc_loc.line,
col: swc_loc.col_display,
}
}
}
impl From<Location> for ModuleSpecifier {
fn from(loc: Location) -> Self {
resolve_url_or_path(&loc.filename).unwrap()
}
}
impl std::fmt::Display for Location {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}:{}:{}", self.filename, self.line, self.col)
}
}
/// A buffer for collecting diagnostic messages from the AST parser.
#[derive(Debug)]
pub struct DiagnosticBuffer(Vec<String>);
impl Error for DiagnosticBuffer {}
impl fmt::Display for DiagnosticBuffer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let s = self.0.join(",");
f.pad(&s)
}
}
impl DiagnosticBuffer {
pub fn from_error_buffer<F>(error_buffer: ErrorBuffer, get_loc: F) -> Self
where
F: Fn(Span) -> Loc,
{
let s = error_buffer.0.lock().unwrap().clone();
let diagnostics = s
.iter()
.map(|d| {
let mut msg = d.message();
if let Some(span) = d.span.primary_span() {
let loc = get_loc(span);
let file_name = match &loc.file.name {
FileName::Custom(n) => n,
_ => unreachable!(),
};
msg = format!(
"{} at {}:{}:{}",
msg, file_name, loc.line, loc.col_display
);
}
msg
})
.collect::<Vec<String>>();
Self(diagnostics)
}
}
/// A buffer for collecting errors from the AST parser.
#[derive(Debug, Clone, Default)]
pub struct ErrorBuffer(Arc<Mutex<Vec<Diagnostic>>>);
impl Emitter for ErrorBuffer {
fn emit(&mut self, db: &DiagnosticBuilder) {
self.0.lock().unwrap().push((**db).clone());
}
}
fn get_es_config(jsx: bool) -> EsConfig {
EsConfig {
class_private_methods: true,
class_private_props: true,
class_props: true,
dynamic_import: true,
export_default_from: true,
export_namespace_from: true,
import_meta: true,
jsx,
nullish_coalescing: true,
num_sep: true,
optional_chaining: true,
top_level_await: true,
..EsConfig::default()
}
}
fn get_ts_config(tsx: bool, dts: bool) -> TsConfig {
TsConfig {
decorators: true,
dts,
dynamic_import: true,
tsx,
..TsConfig::default()
}
}
pub fn get_syntax(media_type: &MediaType) -> Syntax {
match media_type {
MediaType::JavaScript => Syntax::Es(get_es_config(false)),
MediaType::Jsx => Syntax::Es(get_es_config(true)),
MediaType::TypeScript => Syntax::Typescript(get_ts_config(false, false)),
MediaType::Dts => Syntax::Typescript(get_ts_config(false, true)),
MediaType::Tsx => Syntax::Typescript(get_ts_config(true, false)),
_ => Syntax::Es(get_es_config(false)),
}
}
#[derive(Debug, Clone)]
pub enum ImportsNotUsedAsValues {
Remove,
Preserve,
Error,
}
/// Options which can be adjusted when transpiling a module.
#[derive(Debug, Clone)]
pub struct EmitOptions {
/// Indicate if JavaScript is being checked/transformed as well, or if it is
/// only TypeScript.
pub check_js: bool,
/// When emitting a legacy decorator, also emit experimental decorator meta
/// data. Defaults to `false`.
pub emit_metadata: bool,
/// What to do with import statements that only import types i.e. whether to
/// remove them (`Remove`), keep them as side-effect imports (`Preserve`)
/// or error (`Error`). Defaults to `Remove`.
pub imports_not_used_as_values: ImportsNotUsedAsValues,
/// Should the source map be inlined in the emitted code file, or provided
/// as a separate file. Defaults to `true`.
pub inline_source_map: bool,
/// When transforming JSX, what value should be used for the JSX factory.
/// Defaults to `React.createElement`.
pub jsx_factory: String,
/// When transforming JSX, what value should be used for the JSX fragment
/// factory. Defaults to `React.Fragment`.
pub jsx_fragment_factory: String,
/// Should JSX be transformed or preserved. Defaults to `true`.
pub transform_jsx: bool,
}
impl Default for EmitOptions {
fn default() -> Self {
EmitOptions {
check_js: false,
emit_metadata: false,
imports_not_used_as_values: ImportsNotUsedAsValues::Remove,
inline_source_map: true,
jsx_factory: "React.createElement".into(),
jsx_fragment_factory: "React.Fragment".into(),
transform_jsx: true,
}
}
}
impl From<tsc_config::TsConfig> for EmitOptions {
fn from(config: tsc_config::TsConfig) -> Self {
let options: tsc_config::EmitConfigOptions =
serde_json::from_value(config.0).unwrap();
let imports_not_used_as_values =
match options.imports_not_used_as_values.as_str() {
"preserve" => ImportsNotUsedAsValues::Preserve,
"error" => ImportsNotUsedAsValues::Error,
_ => ImportsNotUsedAsValues::Remove,
};
EmitOptions {
check_js: options.check_js,
emit_metadata: options.emit_decorator_metadata,
imports_not_used_as_values,
inline_source_map: options.inline_source_map,
jsx_factory: options.jsx_factory,
jsx_fragment_factory: options.jsx_fragment_factory,
transform_jsx: options.jsx == "react",
}
}
}
fn strip_config_from_emit_options(
options: &EmitOptions,
) -> typescript::strip::Config {
typescript::strip::Config {
import_not_used_as_values: match options.imports_not_used_as_values {
ImportsNotUsedAsValues::Remove => {
typescript::strip::ImportsNotUsedAsValues::Remove
}
ImportsNotUsedAsValues::Preserve => {
typescript::strip::ImportsNotUsedAsValues::Preserve
}
// `Error` only affects the type-checking stage. Fall back to `Remove` here.
ImportsNotUsedAsValues::Error => {
typescript::strip::ImportsNotUsedAsValues::Remove
}
},
use_define_for_class_fields: true,
}
}
/// A logical structure to hold the value of a parsed module for further
/// processing.
#[derive(Clone)]
pub struct ParsedModule {
comments: SingleThreadedComments,
leading_comments: Vec<Comment>,
pub module: Module,
pub source_map: Rc<SourceMap>,
source_file: Rc<SourceFile>,
}
impl fmt::Debug for ParsedModule {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ParsedModule")
.field("comments", &self.comments)
.field("leading_comments", &self.leading_comments)
.field("module", &self.module)
.finish()
}
}
impl ParsedModule {
/// Return a vector of dependencies for the module.
pub fn analyze_dependencies(&self) -> Vec<DependencyDescriptor> {
analyze_dependencies(&self.module, &self.source_map, &self.comments)
}
/// Get the module's leading comments, where triple slash directives might
/// be located.
pub fn get_leading_comments(&self) -> Vec<Comment> {
self.leading_comments.clone()
}
/// Get a location for a given span within the module.
pub fn get_location(&self, span: &Span) -> Location {
self.source_map.lookup_char_pos(span.lo).into()
}
/// Transform a TypeScript file into a JavaScript file, based on the supplied
/// options.
///
/// The result is a tuple of the code and optional source map as strings.
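  /// A rough usage sketch (it mirrors the tests at the bottom of this file;
  /// `specifier` and `source` are placeholders):
  ///
  /// ```ignore
  /// let module = parse(specifier, source, &MediaType::TypeScript)?;
  /// let (code, maybe_map) = module.transpile(&EmitOptions::default())?;
  /// ```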
pub fn transpile(
self,
options: &EmitOptions,
) -> Result<(String, Option<String>), AnyError> {
let program = Program::Module(self.module);
let jsx_pass = react::react(
self.source_map.clone(),
Some(&self.comments),
react::Options {
pragma: options.jsx_factory.clone(),
pragma_frag: options.jsx_fragment_factory.clone(),
// this will use `Object.assign()` instead of the `_extends` helper
// when spreading props.
use_builtins: true,
..Default::default()
},
);
let mut passes = chain!(
Optional::new(jsx_pass, options.transform_jsx),
proposals::decorators::decorators(proposals::decorators::Config {
legacy: true,
emit_metadata: options.emit_metadata
}),
helpers::inject_helpers(),
typescript::strip::strip_with_config(strip_config_from_emit_options(
options
)),
fixer(Some(&self.comments)),
hygiene(),
);
let program = swc_common::GLOBALS.set(&Globals::new(), || {
helpers::HELPERS.set(&helpers::Helpers::new(false), || {
program.fold_with(&mut passes)
})
});
let mut src_map_buf = vec![];
let mut buf = vec![];
{
let writer = Box::new(JsWriter::new(
self.source_map.clone(),
"\n",
&mut buf,
Some(&mut src_map_buf),
));
let config = swc_ecmascript::codegen::Config { minify: false };
let mut emitter = swc_ecmascript::codegen::Emitter {
cfg: config,
comments: Some(&self.comments),
cm: self.source_map.clone(),
wr: writer,
};
program.emit_with(&mut emitter)?;
}
let mut src = String::from_utf8(buf)?;
let mut map: Option<String> = None;
{
let mut buf = Vec::new();
self
.source_map
.build_source_map_from(&mut src_map_buf, None)
.to_writer(&mut buf)?;
if options.inline_source_map {
src.push_str("//# sourceMappingURL=data:application/json;base64,");
let encoded_map = base64::encode(buf);
src.push_str(&encoded_map);
} else {
map = Some(String::from_utf8(buf)?);
}
}
Ok((src, map))
}
}
pub fn parse_with_source_map(
specifier: &str,
source: &str,
media_type: &MediaType,
source_map: Rc<SourceMap>,
) -> Result<ParsedModule, AnyError> {
let source_file = source_map.new_source_file(
FileName::Custom(specifier.to_string()),
source.to_string(),
);
let error_buffer = ErrorBuffer::default();
let syntax = get_syntax(media_type);
let input = StringInput::from(&*source_file);
let comments = SingleThreadedComments::default();
let handler = Handler::with_emitter_and_flags(
Box::new(error_buffer.clone()),
HandlerFlags {
can_emit_warnings: true,
dont_buffer_diagnostics: true,
..HandlerFlags::default()
},
);
let lexer = Lexer::new(syntax, TARGET, input, Some(&comments));
let mut parser = swc_ecmascript::parser::Parser::new_from(lexer);
let sm = &source_map;
let module = parser.parse_module().map_err(move |err| {
let mut diagnostic = err.into_diagnostic(&handler);
diagnostic.emit();
DiagnosticBuffer::from_error_buffer(error_buffer, |span| {
sm.lookup_char_pos(span.lo)
})
})?;
let leading_comments =
comments.with_leading(module.span.lo, |comments| comments.to_vec());
Ok(ParsedModule {
comments,
leading_comments,
module,
source_map,
source_file,
})
}
/// For a given specifier, source, and media type, parse the source of the
/// module and return a representation which can be further processed.
///
/// # Arguments
///
/// - `specifier` - The module specifier for the module.
/// - `source` - The source code for the module.
/// - `media_type` - The media type for the module.
///
// NOTE(bartlomieju): `specifier` has `&str` type instead of
// `&ModuleSpecifier` because runtime compiler APIs don't
// require valid module specifiers
pub fn parse(
specifier: &str,
source: &str,
media_type: &MediaType,
) -> Result<ParsedModule, AnyError> {
let source_map = Rc::new(SourceMap::default());
parse_with_source_map(specifier, source, media_type, source_map)
}
pub enum TokenOrComment {
Token(Token),
Comment { kind: CommentKind, text: String },
}
pub struct LexedItem {
pub span: Span,
pub inner: TokenOrComment,
}
impl LexedItem {
pub fn span_as_range(&self) -> Range<usize> {
self.span.lo.0 as usize..self.span.hi.0 as usize
}
}
fn flatten_comments(
comments: SingleThreadedComments,
) -> impl Iterator<Item = Comment> {
let (leading, trailing) = comments.take_all();
let mut comments = (*leading).clone().into_inner();
comments.extend((*trailing).clone().into_inner());
comments.into_iter().flat_map(|el| el.1)
}
pub fn lex(
specifier: &str,
source: &str,
media_type: &MediaType,
) -> Vec<LexedItem> {
let source_map = SourceMap::default();
let source_file = source_map.new_source_file(
FileName::Custom(specifier.to_string()),
source.to_string(),
);
let comments = SingleThreadedComments::default();
let lexer = Lexer::new(
get_syntax(media_type),
TARGET,
StringInput::from(source_file.as_ref()),
Some(&comments),
);
let mut tokens: Vec<LexedItem> = lexer
.map(|token| LexedItem {
span: token.span,
inner: TokenOrComment::Token(token.token),
})
.collect();
tokens.extend(flatten_comments(comments).map(|comment| LexedItem {
span: comment.span,
inner: TokenOrComment::Comment {
kind: comment.kind,
text: comment.text,
},
}));
tokens.sort_by_key(|item| item.span.lo.0);
tokens
}
/// A low level function which transpiles a source module into an swc
/// SourceFile.
pub fn transpile_module(
filename: &str,
src: &str,
media_type: &MediaType,
emit_options: &EmitOptions,
globals: &Globals,
cm: Rc<SourceMap>,
) -> Result<(Rc<SourceFile>, Module), AnyError> {
let parsed_module =
parse_with_source_map(filename, src, media_type, cm.clone())?;
let jsx_pass = react::react(
cm,
Some(&parsed_module.comments),
react::Options {
pragma: emit_options.jsx_factory.clone(),
pragma_frag: emit_options.jsx_fragment_factory.clone(),
// this will use `Object.assign()` instead of the `_extends` helper
// when spreading props.
use_builtins: true,
..Default::default()
},
);
let mut passes = chain!(
Optional::new(jsx_pass, emit_options.transform_jsx),
proposals::decorators::decorators(proposals::decorators::Config {
legacy: true,
emit_metadata: emit_options.emit_metadata
}),
helpers::inject_helpers(),
typescript::strip::strip_with_config(strip_config_from_emit_options(
emit_options
)),
fixer(Some(&parsed_module.comments)),
);
let source_file = parsed_module.source_file.clone();
let module = parsed_module.module;
let module = swc_common::GLOBALS.set(globals, || {
helpers::HELPERS.set(&helpers::Helpers::new(false), || {
module.fold_with(&mut passes)
})
});
Ok((source_file, module))
}
pub struct BundleHook;
impl swc_bundler::Hook for BundleHook {
fn get_import_meta_props(
&self,
span: swc_common::Span,
module_record: &swc_bundler::ModuleRecord,
) -> Result<Vec<swc_ecmascript::ast::KeyValueProp>, AnyError> {
use swc_ecmascript::ast;
    // we use custom file names, and swc "wraps" these in `<` and `>`, so we
    // want to strip those back out.
let mut value = module_record.file_name.to_string();
value.pop();
value.remove(0);
Ok(vec![
ast::KeyValueProp {
key: ast::PropName::Ident(ast::Ident::new("url".into(), span)),
value: Box::new(ast::Expr::Lit(ast::Lit::Str(ast::Str {
span,
value: value.into(),
kind: ast::StrKind::Synthesized,
has_escape: false,
}))),
},
ast::KeyValueProp {
key: ast::PropName::Ident(ast::Ident::new("main".into(), span)),
value: Box::new(if module_record.is_entry {
ast::Expr::Member(ast::MemberExpr {
span,
obj: ast::ExprOrSuper::Expr(Box::new(ast::Expr::MetaProp(
ast::MetaPropExpr {
meta: ast::Ident::new("import".into(), span),
prop: ast::Ident::new("meta".into(), span),
},
))),
prop: Box::new(ast::Expr::Ident(ast::Ident::new(
"main".into(),
span,
))),
computed: false,
})
} else {
ast::Expr::Lit(ast::Lit::Bool(ast::Bool { span, value: false }))
}),
},
])
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
use swc_ecmascript::dep_graph::DependencyKind;
#[test]
fn test_parsed_module_analyze_dependencies() {
let specifier = resolve_url_or_path("https://deno.land/x/mod.js").unwrap();
let source = r#"import * as bar from "./test.ts";
const foo = await import("./foo.ts");
"#;
let parsed_module =
parse(specifier.as_str(), source, &MediaType::JavaScript)
.expect("could not parse module");
let actual = parsed_module.analyze_dependencies();
assert_eq!(
actual,
vec![
DependencyDescriptor {
kind: DependencyKind::Import,
is_dynamic: false,
leading_comments: Vec::new(),
col: 0,
line: 1,
specifier: "./test.ts".into(),
specifier_col: 21,
specifier_line: 1,
import_assertions: HashMap::default(),
},
DependencyDescriptor {
kind: DependencyKind::Import,
is_dynamic: true,
leading_comments: Vec::new(),
col: 22,
line: 2,
specifier: "./foo.ts".into(),
specifier_col: 29,
specifier_line: 2,
import_assertions: HashMap::default(),
}
]
);
}
#[test]
fn test_transpile() {
let specifier = resolve_url_or_path("https://deno.land/x/mod.ts")
.expect("could not resolve specifier");
let source = r#"
enum D {
A,
B,
C,
}
export class A {
private b: string;
protected c: number = 1;
e: "foo";
constructor (public d = D.A) {
const e = "foo" as const; | "#;
let module = parse(specifier.as_str(), source, &MediaType::TypeScript)
.expect("could not parse module");
let (code, maybe_map) = module
.transpile(&EmitOptions::default())
.expect("could not strip types");
assert!(code.starts_with("var D;\n(function(D) {\n"));
assert!(
code.contains("\n//# sourceMappingURL=data:application/json;base64,")
);
assert!(maybe_map.is_none());
}
#[test]
fn test_transpile_tsx() {
let specifier = resolve_url_or_path("https://deno.land/x/mod.ts")
.expect("could not resolve specifier");
let source = r#"
export class A {
render() {
return <div><span></span></div>
}
}
"#;
let module = parse(specifier.as_str(), source, &MediaType::Tsx)
.expect("could not parse module");
let (code, _) = module
.transpile(&EmitOptions::default())
.expect("could not strip types");
assert!(code.contains("React.createElement(\"div\", null"));
}
#[test]
fn test_transpile_decorators() {
let specifier = resolve_url_or_path("https://deno.land/x/mod.ts")
.expect("could not resolve specifier");
let source = r#"
function enumerable(value: boolean) {
return function (
_target: any,
_propertyKey: string,
descriptor: PropertyDescriptor,
) {
descriptor.enumerable = value;
};
}
export class A {
@enumerable(false)
a() {
Test.value;
}
}
"#;
let module = parse(specifier.as_str(), source, &MediaType::TypeScript)
.expect("could not parse module");
let (code, _) = module
.transpile(&EmitOptions::default())
.expect("could not strip types");
assert!(code.contains("_applyDecoratedDescriptor("));
}
} | this.e = e;
}
} |
sim.rs | use fighter_simulator::*;
use rayon::prelude::*;
use std::{sync::atomic::*, time::Instant};
fn main() {
const FIGHT_COUNT: usize = 1000;
let time = Instant::now();
let fighters = gen_fighters();
let results = {
let mut v = Vec::with_capacity(fighters.len());
for _ in 0..fighters.len() {
v.push((
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
));
}
v
};
println!("Simulating {} fighters.", fighters.len());
fighters.par_iter().enumerate().for_each(|(i1, f1)| {
for (i2, f2) in (i1 + 1..fighters.len()).map(|i2| (i2, &fighters[i2])) {
for _ in 0..FIGHT_COUNT {
let fight = Fight::new(f1, f2);
let mut report = WinnerLogger { winner: None };
fight.run(&mut report);
if let Some(winner) = report.winner {
if winner as *const _ == f1 as *const _ {
results[i1].0.fetch_add(1, Ordering::Relaxed);
results[i2].2.fetch_add(1, Ordering::Relaxed);
} else {
results[i2].0.fetch_add(1, Ordering::Relaxed);
results[i1].2.fetch_add(1, Ordering::Relaxed);
}
} else {
results[i1].1.fetch_add(1, Ordering::Relaxed);
results[i2].1.fetch_add(1, Ordering::Relaxed);
}
}
}
});
let fight_count = (fighters.len() - 1) * FIGHT_COUNT;
let mut final_results = fighters
.iter()
.zip(
results
.into_iter()
.map(|(w, t, l)| (w.into_inner(), t.into_inner(), l.into_inner())),
)
.collect::<Vec<_>>();
final_results.sort_by_key(|&(_, (w, _, _))| w);
let final_time = Instant::now() - time;
println!("health,skill,speed,strength,resist,wins,ties,losses");
for (f, (w, t, l)) in final_results {
assert!(w + t + l == fight_count);
//let win_rate = (w as f64) / (fight_count as f64) * 100.0;
println!("{},{},{},{}", f.name(), w, t, l);
}
println!("{:?}", final_time);
}
fn gen_fighters() -> Vec<Fighter> {
let mut fighters = Vec::new();
for health in 0..MAX_STAT_POINTS {
for skill in 0..MAX_STAT_POINTS {
for speed in 0..MAX_STAT_POINTS {
for strength in 0..MAX_STAT_POINTS {
for resist in 0..MAX_STAT_POINTS {
let name =
format!("{},{},{},{},{}", health, skill, speed, strength, resist);
if let Ok(fighter) =
Fighter::new(name, health, skill, speed, strength, resist)
{
fighters.push(fighter);
}
}
}
}
}
}
fighters
}
struct WinnerLogger<'a> {
winner: Option<&'a Fighter>,
}
impl<'a> FightObserver<'a> for WinnerLogger<'a> {
fn attack_starting(&mut self, _: &'a Fighter, _: &'a Fighter) {}
fn rolls(&mut self, _: &[StatValue]) |
fn adjusts(&mut self, _: &[StatValue]) {}
fn finalize_attack(&mut self, _: StatValue, _: SignedStatValue) {}
fn winner(&mut self, winner: Option<&'a Fighter>) {
self.winner = winner;
}
}
| {} |
repo.go | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"encoding/base64"
"errors"
"fmt"
"github.com/gogits/git"
"path"
"path/filepath"
"strings"
"github.com/go-martini/martini"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/modules/auth"
"github.com/gogits/gogs/modules/base"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/middleware"
)
func Create(ctx *middleware.Context) {
ctx.Data["Title"] = "Create repository"
ctx.Data["PageIsNewRepo"] = true
ctx.Data["LanguageIgns"] = models.LanguageIgns
ctx.Data["Licenses"] = models.Licenses
ctx.HTML(200, "repo/create")
}
func CreatePost(ctx *middleware.Context, form auth.CreateRepoForm) {
ctx.Data["Title"] = "Create repository"
ctx.Data["PageIsNewRepo"] = true
ctx.Data["LanguageIgns"] = models.LanguageIgns
ctx.Data["Licenses"] = models.Licenses
if ctx.HasError() {
ctx.HTML(200, "repo/create")
return
}
repo, err := models.CreateRepository(ctx.User, form.RepoName, form.Description,
form.Language, form.License, form.Private, false, form.InitReadme)
if err == nil {
log.Trace("%s Repository created: %s/%s", ctx.Req.RequestURI, ctx.User.LowerName, form.RepoName)
ctx.Redirect("/" + ctx.User.Name + "/" + form.RepoName)
return
} else if err == models.ErrRepoAlreadyExist {
ctx.RenderWithErr("Repository name has already been used", "repo/create", &form)
return
} else if err == models.ErrRepoNameIllegal {
ctx.RenderWithErr(models.ErrRepoNameIllegal.Error(), "repo/create", &form)
return
}
if repo != nil {
if errDelete := models.DeleteRepository(ctx.User.Id, repo.Id, ctx.User.Name); errDelete != nil {
log.Error("repo.MigratePost(CreatePost): %v", errDelete)
}
}
ctx.Handle(500, "repo.Create", err)
}
func Migrate(ctx *middleware.Context) {
ctx.Data["Title"] = "Migrate repository"
ctx.Data["PageIsNewRepo"] = true
ctx.HTML(200, "repo/migrate")
}
func MigratePost(ctx *middleware.Context, form auth.MigrateRepoForm) {
ctx.Data["Title"] = "Migrate repository"
ctx.Data["PageIsNewRepo"] = true
if ctx.HasError() {
ctx.HTML(200, "repo/migrate")
return
}
url := strings.Replace(form.Url, "://", fmt.Sprintf("://%s:%s@", form.AuthUserName, form.AuthPasswd), 1)
repo, err := models.MigrateRepository(ctx.User, form.RepoName, form.Description, form.Private,
form.Mirror, url)
if err == nil {
log.Trace("%s Repository migrated: %s/%s", ctx.Req.RequestURI, ctx.User.LowerName, form.RepoName)
ctx.Redirect("/" + ctx.User.Name + "/" + form.RepoName)
return
} else if err == models.ErrRepoAlreadyExist {
ctx.RenderWithErr("Repository name has already been used", "repo/migrate", &form)
return
} else if err == models.ErrRepoNameIllegal {
ctx.RenderWithErr(models.ErrRepoNameIllegal.Error(), "repo/migrate", &form)
return
}
if repo != nil {
if errDelete := models.DeleteRepository(ctx.User.Id, repo.Id, ctx.User.Name); errDelete != nil {
log.Error("repo.MigratePost(DeleteRepository): %v", errDelete)
}
}
if strings.Contains(err.Error(), "Authentication failed") {
ctx.RenderWithErr(err.Error(), "repo/migrate", &form)
return
}
ctx.Handle(500, "repo.Migrate", err)
}
func Single(ctx *middleware.Context, params martini.Params) {
branchName := ctx.Repo.BranchName
userName := ctx.Repo.Owner.Name
repoName := ctx.Repo.Repository.Name
repoLink := ctx.Repo.RepoLink
branchLink := ctx.Repo.RepoLink + "/src/" + branchName
rawLink := ctx.Repo.RepoLink + "/raw/" + branchName
// Get tree path
treename := params["_1"]
if len(treename) > 0 && treename[len(treename)-1] == '/' {
ctx.Redirect(repoLink + "/src/" + branchName + "/" + treename[:len(treename)-1])
return
}
ctx.Data["IsRepoToolbarSource"] = true
isViewBranch := ctx.Repo.IsBranch
ctx.Data["IsViewBranch"] = isViewBranch
treePath := treename
if len(treePath) != 0 {
treePath = treePath + "/"
}
entry, err := ctx.Repo.Commit.GetTreeEntryByPath(treename)
if err != nil && err != git.ErrNotExist {
ctx.Handle(404, "repo.Single(GetTreeEntryByPath)", err)
return
}
if len(treename) != 0 && entry == nil {
ctx.Handle(404, "repo.Single", nil)
return
}
if entry != nil && !entry.IsDir() {
blob := entry.Blob()
if data, err := blob.Data(); err != nil {
ctx.Handle(404, "repo.Single(blob.Data)", err)
} else {
ctx.Data["FileSize"] = blob.Size()
ctx.Data["IsFile"] = true
ctx.Data["FileName"] = blob.Name()
ext := path.Ext(blob.Name())
if len(ext) > 0 {
ext = ext[1:]
}
ctx.Data["FileExt"] = ext
ctx.Data["FileLink"] = rawLink + "/" + treename
_, isTextFile := base.IsTextFile(data)
_, isImageFile := base.IsImageFile(data)
ctx.Data["FileIsText"] = isTextFile
if isImageFile {
ctx.Data["IsImageFile"] = true
} else {
readmeExist := base.IsMarkdownFile(blob.Name()) || base.IsReadmeFile(blob.Name())
ctx.Data["ReadmeExist"] = readmeExist
if readmeExist {
ctx.Data["FileContent"] = string(base.RenderMarkdown(data, ""))
} else {
if isTextFile |
}
}
}
} else {
// Directory and file list.
tree, err := ctx.Repo.Commit.SubTree(treename)
if err != nil {
ctx.Handle(404, "repo.Single(SubTree)", err)
return
}
entries := tree.ListEntries()
entries.Sort()
files := make([][]interface{}, 0, len(entries))
for _, te := range entries {
c, err := ctx.Repo.Commit.GetCommitOfRelPath(filepath.Join(treePath, te.Name()))
if err != nil {
ctx.Handle(404, "repo.Single(SubTree)", err)
return
}
files = append(files, []interface{}{te, c})
}
ctx.Data["Files"] = files
var readmeFile *git.Blob
for _, f := range entries {
if f.IsDir() || !base.IsReadmeFile(f.Name()) {
continue
} else {
readmeFile = f.Blob()
break
}
}
if readmeFile != nil {
ctx.Data["ReadmeInSingle"] = true
ctx.Data["ReadmeExist"] = true
if data, err := readmeFile.Data(); err != nil {
ctx.Handle(404, "repo.Single(readmeFile.LookupBlob)", err)
return
} else {
ctx.Data["FileSize"] = readmeFile.Size
ctx.Data["FileLink"] = rawLink + "/" + treename
_, isTextFile := base.IsTextFile(data)
ctx.Data["FileIsText"] = isTextFile
ctx.Data["FileName"] = readmeFile.Name()
if isTextFile {
ctx.Data["FileContent"] = string(base.RenderMarkdown(data, branchLink))
}
}
}
}
ctx.Data["Username"] = userName
ctx.Data["Reponame"] = repoName
var treenames []string
Paths := make([]string, 0)
if len(treename) > 0 {
treenames = strings.Split(treename, "/")
		for i := range treenames {
Paths = append(Paths, strings.Join(treenames[0:i+1], "/"))
}
ctx.Data["HasParentPath"] = true
if len(Paths)-2 >= 0 {
ctx.Data["ParentPath"] = "/" + Paths[len(Paths)-2]
}
}
ctx.Data["LastCommit"] = ctx.Repo.Commit
ctx.Data["Paths"] = Paths
ctx.Data["Treenames"] = treenames
ctx.Data["TreePath"] = treePath
ctx.Data["BranchLink"] = branchLink
ctx.HTML(200, "repo/single")
}
func basicEncode(username, password string) string {
auth := username + ":" + password
return base64.StdEncoding.EncodeToString([]byte(auth))
}
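// For example (illustrative): basicEncode("user", "pass") returns "dXNlcjpwYXNz",
// and basicDecode("dXNlcjpwYXNz") gives back ("user", "pass", nil).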
func basicDecode(encoded string) (user string, name string, err error) {
var s []byte
s, err = base64.StdEncoding.DecodeString(encoded)
if err != nil {
return
}
a := strings.Split(string(s), ":")
if len(a) == 2 {
user, name = a[0], a[1]
} else {
err = errors.New("decode failed")
}
return
}
func authRequired(ctx *middleware.Context) {
ctx.ResponseWriter.Header().Set("WWW-Authenticate", "Basic realm=\".\"")
ctx.Data["ErrorMsg"] = "no basic auth and digit auth"
ctx.HTML(401, fmt.Sprintf("status/401"))
}
func Setting(ctx *middleware.Context, params martini.Params) {
if !ctx.Repo.IsOwner {
ctx.Handle(404, "repo.Setting", nil)
return
}
ctx.Data["IsRepoToolbarSetting"] = true
var title string
if t, ok := ctx.Data["Title"].(string); ok {
title = t
}
ctx.Data["Title"] = title + " - settings"
ctx.HTML(200, "repo/setting")
}
func SettingPost(ctx *middleware.Context) {
if !ctx.Repo.IsOwner {
ctx.Error(404)
return
}
ctx.Data["IsRepoToolbarSetting"] = true
switch ctx.Query("action") {
case "update":
newRepoName := ctx.Query("name")
// Check if repository name has been changed.
if ctx.Repo.Repository.Name != newRepoName {
isExist, err := models.IsRepositoryExist(ctx.Repo.Owner, newRepoName)
if err != nil {
ctx.Handle(500, "repo.SettingPost(update: check existence)", err)
return
} else if isExist {
ctx.RenderWithErr("Repository name has been taken in your repositories.", "repo/setting", nil)
return
} else if err = models.ChangeRepositoryName(ctx.Repo.Owner.Name, ctx.Repo.Repository.Name, newRepoName); err != nil {
ctx.Handle(500, "repo.SettingPost(change repository name)", err)
return
}
log.Trace("%s Repository name changed: %s/%s -> %s", ctx.Req.RequestURI, ctx.User.Name, ctx.Repo.Repository.Name, newRepoName)
ctx.Repo.Repository.Name = newRepoName
}
br := ctx.Query("branch")
if git.IsBranchExist(models.RepoPath(ctx.User.Name, ctx.Repo.Repository.Name), br) {
ctx.Repo.Repository.DefaultBranch = br
}
ctx.Repo.Repository.Description = ctx.Query("desc")
ctx.Repo.Repository.Website = ctx.Query("site")
ctx.Repo.Repository.IsPrivate = ctx.Query("private") == "on"
ctx.Repo.Repository.IsGoget = ctx.Query("goget") == "on"
if err := models.UpdateRepository(ctx.Repo.Repository); err != nil {
ctx.Handle(404, "repo.SettingPost(update)", err)
return
}
log.Trace("%s Repository updated: %s/%s", ctx.Req.RequestURI, ctx.Repo.Owner.Name, ctx.Repo.Repository.Name)
if ctx.Repo.Repository.IsMirror {
if len(ctx.Query("interval")) > 0 {
var err error
ctx.Repo.Mirror.Interval, err = base.StrTo(ctx.Query("interval")).Int()
if err != nil {
log.Error("repo.SettingPost(get mirror interval): %v", err)
} else if err = models.UpdateMirror(ctx.Repo.Mirror); err != nil {
log.Error("repo.SettingPost(UpdateMirror): %v", err)
}
}
}
ctx.Flash.Success("Repository options has been successfully updated.")
ctx.Redirect(fmt.Sprintf("/%s/%s/settings", ctx.Repo.Owner.Name, ctx.Repo.Repository.Name))
case "transfer":
if len(ctx.Repo.Repository.Name) == 0 || ctx.Repo.Repository.Name != ctx.Query("repository") {
ctx.RenderWithErr("Please make sure you entered repository name is correct.", "repo/setting", nil)
return
}
newOwner := ctx.Query("owner")
// Check if new owner exists.
isExist, err := models.IsUserExist(newOwner)
if err != nil {
ctx.Handle(500, "repo.SettingPost(transfer: check existence)", err)
return
} else if !isExist {
ctx.RenderWithErr("Please make sure you entered owner name is correct.", "repo/setting", nil)
return
} else if err = models.TransferOwnership(ctx.User, newOwner, ctx.Repo.Repository); err != nil {
ctx.Handle(500, "repo.SettingPost(transfer repository)", err)
return
}
log.Trace("%s Repository transfered: %s/%s -> %s", ctx.Req.RequestURI, ctx.User.Name, ctx.Repo.Repository.Name, newOwner)
ctx.Redirect("/")
case "delete":
if len(ctx.Repo.Repository.Name) == 0 || ctx.Repo.Repository.Name != ctx.Query("repository") {
ctx.RenderWithErr("Please make sure you entered repository name is correct.", "repo/setting", nil)
return
}
if err := models.DeleteRepository(ctx.User.Id, ctx.Repo.Repository.Id, ctx.User.LowerName); err != nil {
ctx.Handle(500, "repo.Delete", err)
return
}
log.Trace("%s Repository deleted: %s/%s", ctx.Req.RequestURI, ctx.User.LowerName, ctx.Repo.Repository.LowerName)
ctx.Redirect("/")
}
}
func Action(ctx *middleware.Context, params martini.Params) {
var err error
switch params["action"] {
case "watch":
err = models.WatchRepo(ctx.User.Id, ctx.Repo.Repository.Id, true)
case "unwatch":
err = models.WatchRepo(ctx.User.Id, ctx.Repo.Repository.Id, false)
case "desc":
if !ctx.Repo.IsOwner {
ctx.Error(404)
return
}
ctx.Repo.Repository.Description = ctx.Query("desc")
ctx.Repo.Repository.Website = ctx.Query("site")
err = models.UpdateRepository(ctx.Repo.Repository)
}
if err != nil {
log.Error("repo.Action(%s): %v", params["action"], err)
ctx.JSON(200, map[string]interface{}{
"ok": false,
"err": err.Error(),
})
return
}
ctx.JSON(200, map[string]interface{}{
"ok": true,
})
}
| {
ctx.Data["FileContent"] = string(data)
} |
store.rs | use crate::{
aggregate::{Aggregate, AggregateEvent, AggregateId},
types::{
CqrsError, EventNumber, Precondition, Since, SnapshotRecommendation, Version,
VersionedAggregate, VersionedEvent,
},
};
/// A source for reading/loading events.
pub trait EventSource<A, E>
where
A: Aggregate,
E: AggregateEvent<A>,
{
/// Represents the sequence of events read from the event source.
type Events: IntoIterator<Item = VersionedEvent<E>>;
/// The error type.
type Error: CqrsError;
/// Reads events from the event source for a given identifier.
///
/// Only loads events after the event number provided in `since` (See [Since]), and will only load a maximum of
/// `max_count` events, if given. If not given, will read all remaining events.
fn read_events<I>(
&self,
id: &I,
since: Since,
max_count: Option<u64>,
) -> Result<Option<Self::Events>, Self::Error>
where
I: AggregateId<A>;
}
/// A sink for writing/persisting events with associated metadata.
pub trait EventSink<A, E, M>
where
A: Aggregate,
E: AggregateEvent<A>,
{
/// The error type.
type Error: CqrsError;
/// Appends events to a given source, with an optional precondition, and associated metadata.
///
/// The associated metadata is applied to all events in the append group.
fn append_events<I>(
&self,
id: &I,
events: &[E],
precondition: Option<Precondition>,
metadata: M,
) -> Result<EventNumber, Self::Error>
where
I: AggregateId<A>;
}
/// A source for reading/loading snapshots of aggregates.
pub trait SnapshotSource<A>
where
A: Aggregate,
{
/// The error type.
type Error: CqrsError;
/// Loads a versioned aggregate from the snapshot source.
fn get_snapshot<I>(&self, id: &I) -> Result<Option<VersionedAggregate<A>>, Self::Error>
where
I: AggregateId<A>;
}
/// A sink for writing/persisting snapshots of aggregates.
pub trait SnapshotSink<A>
where
A: Aggregate,
{
/// The error type.
type Error: CqrsError;
/// Writes an aggregate with its version to the sink. Returns the version number of the latest snapshot.
fn persist_snapshot<I>(
&self,
id: &I,
aggregate: &A,
version: Version,
last_snapshot_version: Option<Version>,
) -> Result<Version, Self::Error>
where
I: AggregateId<A>;
}
/// A strategy determining when to recommend a snapshot be taken.
pub trait SnapshotStrategy {
/// Gives the sink's recommendation on whether or not to perform a snapshot
fn snapshot_recommendation(
&self,
version: Version,
last_snapshot_version: Option<Version>,
) -> SnapshotRecommendation;
}
/// A snapshot strategy that will never recommend taking a snapshot.
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq)]
pub struct NeverSnapshot;
impl SnapshotStrategy for NeverSnapshot {
fn snapshot_recommendation(&self, _: Version, _: Option<Version>) -> SnapshotRecommendation {
SnapshotRecommendation::DoNotSnapshot
}
}
/// A snapshot strategy that will always recommend taking a snapshot.
#[derive(Clone, Copy, Debug, Default, Hash, PartialEq, Eq)]
pub struct AlwaysSnapshot;
impl SnapshotStrategy for AlwaysSnapshot {
fn snapshot_recommendation(&self, _: Version, _: Option<Version>) -> SnapshotRecommendation {
SnapshotRecommendation::ShouldSnapshot
}
}
| use void::Void;
/// A test aggregate with no state
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct TestAggregate;
/// A test event with no data
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct TestEvent;
/// A test command with no data
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct TestCommand;
/// A test metadata with no data
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct TestMetadata;
impl Aggregate for TestAggregate {
fn aggregate_type() -> &'static str {
"test"
}
}
impl AggregateEvent<TestAggregate> for TestEvent {
fn apply_to(self, _aggregate: &mut TestAggregate) {}
}
impl AggregateCommand<TestAggregate> for TestCommand {
type Error = Void;
type Event = TestEvent;
type Events = Vec<TestEvent>;
fn execute_on(self, _aggregate: &TestAggregate) -> Result<Self::Events, Self::Error> {
Ok(Vec::default())
}
}
impl Event for TestEvent {
fn event_type(&self) -> &'static str {
"test"
}
}
} | #[cfg(test)]
mod tests {
use super::*;
use crate::{AggregateCommand, AggregateEvent, Event}; |
main.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"os"
"strings"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/prometheus/common/log"
//cni "github.com/containernetworking/cni/pkg/types"
networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
nmstate "github.com/nmstate/kubernetes-nmstate/api/v1alpha1"
virtv1 "kubevirt.io/client-go/api/v1"
metal3v1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
machinev1beta1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
sriovnetworkv1 "github.com/openshift/sriov-network-operator/api/v1"
ospdirectorv1beta1 "github.com/openstack-k8s-operators/osp-director-operator/api/v1beta1"
"github.com/openstack-k8s-operators/osp-director-operator/controllers"
//cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1"
//templatev1 "github.com/openshift/api/template/v1"
// +kubebuilder:scaffold:imports
)
const (
// WebhookPort -
WebhookPort = 4343
// WebhookCertDir -
WebhookCertDir = "/apiserver.local.config/certificates"
// WebhookCertName -
WebhookCertName = "apiserver.crt"
// WebhookKeyName -
WebhookKeyName = "apiserver.key"
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(ospdirectorv1beta1.AddToScheme(scheme))
//utilruntime.Must(templatev1.AddToScheme(scheme))
utilruntime.Must(virtv1.AddToScheme(scheme))
utilruntime.Must(nmstate.AddToScheme(scheme))
utilruntime.Must(networkv1.AddToScheme(scheme))
//utilruntime.Must(cdiv1.AddToScheme(scheme))
utilruntime.Must(metal3v1alpha1.AddToScheme(scheme))
utilruntime.Must(machinev1beta1.AddToScheme(scheme))
utilruntime.Must(sriovnetworkv1.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
func main() |
// getWatchNamespace returns the Namespace the operator should be watching for changes
func getWatchNamespace() (string, error) {
// WatchNamespaceEnvVar is the constant for env variable WATCH_NAMESPACE
// which specifies the Namespace to watch.
// An empty value means the operator is running with cluster scope.
var watchNamespaceEnvVar = "WATCH_NAMESPACE"
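	// For example (illustrative values): WATCH_NAMESPACE=openstack watches a
	// single namespace, WATCH_NAMESPACE=openstack,other makes main() build a
	// multi-namespace cache, and an unset variable is returned as an error here.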
ns, found := os.LookupEnv(watchNamespaceEnvVar)
if !found {
return "", fmt.Errorf("%s must be set", watchNamespaceEnvVar)
}
return ns, nil
}
| {
var metricsAddr string
var enableLeaderElection bool
var enableWebhooks bool
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
namespace, err := getWatchNamespace()
if err != nil {
setupLog.Error(err, "failed to get WatchNamespace")
os.Exit(1)
}
options := ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
LeaderElection: enableLeaderElection,
LeaderElectionID: "576d6738.openstack.org",
}
// create multi namespace cache if list of namespaces
if strings.Contains(namespace, ",") {
options.Namespace = ""
options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ","))
log.Info(fmt.Sprintf("Namespaces added to the cache: %s", namespace))
} else {
options.Namespace = namespace
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), options)
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
cfg, err := config.GetConfig()
if err != nil {
log.Error(err, "")
os.Exit(1)
}
kclient, err := kubernetes.NewForConfig(cfg)
if err != nil {
log.Error(err, "")
os.Exit(1)
}
if strings.ToLower(os.Getenv("ENABLE_WEBHOOKS")) != "false" {
enableWebhooks = true
// We're just getting a pointer here and overriding the default values
srv := mgr.GetWebhookServer()
srv.CertDir = WebhookCertDir
srv.CertName = WebhookCertName
srv.KeyName = WebhookKeyName
srv.Port = WebhookPort
}
if err = (&controllers.OpenStackControlPlaneReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackControlPlane"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackControlPlane")
os.Exit(1)
}
if err = (&controllers.OpenStackVMSetReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackVMSet"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackVMSet")
os.Exit(1)
}
if err = (&controllers.OpenStackProvisionServerReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackProvisionServer"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackProvisionServer")
os.Exit(1)
}
if err = (&controllers.OpenStackBaremetalSetReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackBaremetalSet"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackBaremetalSet")
os.Exit(1)
}
if err = (&controllers.OpenStackClientReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackClient"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackClient")
os.Exit(1)
}
if err = (&controllers.OpenStackNetReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackNet"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackNet")
os.Exit(1)
}
if err = (&controllers.OpenStackIPSetReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackIPSet"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackIPSet")
os.Exit(1)
}
if err = (&controllers.OpenStackPlaybookGeneratorReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackPlaybookGenerator"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackPlaybookGenerator")
os.Exit(1)
}
if err = (&controllers.OpenStackEphemeralHeatReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackEphemeralHeat"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackEphemeralHeat")
os.Exit(1)
}
if err = (&controllers.OpenStackMACAddressReconciler{
Client: mgr.GetClient(),
Kclient: kclient,
Log: ctrl.Log.WithName("controllers").WithName("OpenStackMACAddress"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenStackMACAddress")
os.Exit(1)
}
if enableWebhooks {
if err = (&ospdirectorv1beta1.OpenStackBaremetalSet{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackBaremetalSet")
os.Exit(1)
}
if err = (&ospdirectorv1beta1.OpenStackControlPlane{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackControlPlane")
os.Exit(1)
}
if err = (&ospdirectorv1beta1.OpenStackVMSet{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackVMSet")
os.Exit(1)
}
if err = (&ospdirectorv1beta1.OpenStackNet{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackNet")
os.Exit(1)
}
if err = (&ospdirectorv1beta1.OpenStackEphemeralHeat{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackEphemeralHeat")
os.Exit(1)
}
if err = (&ospdirectorv1beta1.OpenStackProvisionServer{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenStackProvisionServer")
os.Exit(1)
}
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
} |
HealthServiceFamilyDoctorDrugDTO.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class HealthServiceFamilyDoctorDrugDTO(object):
def __init__(self):
self._catalogue_listed = None
self._dosage_forms = None
self._drug_classification = None
self._general_name = None
self._inventory = None
self._item_id = None
self._item_name = None
self._manufacturer_name = None
self._max_purchase_quantity = None
self._min_purchase_quantity = None
self._price = None
self._specifications = None
self._support_emergency_delivery = None
self._usage_dosage = None
@property
def catalogue_listed(self):
return self._catalogue_listed
@catalogue_listed.setter
def catalogue_listed(self, value):
self._catalogue_listed = value
@property
def dosage_forms(self):
return self._dosage_forms
@dosage_forms.setter
def dosage_forms(self, value):
self._dosage_forms = value
@property
def drug_classification(self):
return self._drug_classification
@drug_classification.setter
def drug_classification(self, value):
self._drug_classification = value
@property
def general_name(self):
return self._general_name
@general_name.setter
def general_name(self, value):
self._general_name = value
@property
def inventory(self):
return self._inventory
@inventory.setter
def inventory(self, value):
self._inventory = value
@property
def item_id(self):
return self._item_id
@item_id.setter
def item_id(self, value):
self._item_id = value
@property
def item_name(self):
return self._item_name
@item_name.setter
def item_name(self, value):
self._item_name = value
@property
def manufacturer_name(self):
return self._manufacturer_name
@manufacturer_name.setter
def manufacturer_name(self, value):
self._manufacturer_name = value
@property
def max_purchase_quantity(self):
return self._max_purchase_quantity
@max_purchase_quantity.setter
def max_purchase_quantity(self, value):
self._max_purchase_quantity = value
@property
def min_purchase_quantity(self):
return self._min_purchase_quantity
@min_purchase_quantity.setter
def min_purchase_quantity(self, value):
self._min_purchase_quantity = value
@property
def price(self):
return self._price
@price.setter
def price(self, value):
self._price = value
@property
def specifications(self):
return self._specifications
@specifications.setter
def specifications(self, value):
self._specifications = value
@property
def support_emergency_delivery(self):
return self._support_emergency_delivery
@support_emergency_delivery.setter
def support_emergency_delivery(self, value):
self._support_emergency_delivery = value
@property
def usage_dosage(self):
return self._usage_dosage
@usage_dosage.setter
def usage_dosage(self, value):
self._usage_dosage = value
def to_alipay_dict(self):
params = dict()
if self.catalogue_listed:
if hasattr(self.catalogue_listed, 'to_alipay_dict'):
params['catalogue_listed'] = self.catalogue_listed.to_alipay_dict()
else:
params['catalogue_listed'] = self.catalogue_listed
if self.dosage_forms:
if hasattr(self.dosage_forms, 'to_alipay_dict'):
params['dosage_forms'] = self.dosage_forms.to_alipay_dict()
else:
params['dosage_forms'] = self.dosage_forms
if self.drug_classification:
if hasattr(self.drug_classification, 'to_alipay_dict'):
params['drug_classification'] = self.drug_classification.to_alipay_dict()
else:
params['drug_classification'] = self.drug_classification
if self.general_name:
if hasattr(self.general_name, 'to_alipay_dict'):
params['general_name'] = self.general_name.to_alipay_dict()
else:
params['general_name'] = self.general_name | params['inventory'] = self.inventory
if self.item_id:
if hasattr(self.item_id, 'to_alipay_dict'):
params['item_id'] = self.item_id.to_alipay_dict()
else:
params['item_id'] = self.item_id
if self.item_name:
if hasattr(self.item_name, 'to_alipay_dict'):
params['item_name'] = self.item_name.to_alipay_dict()
else:
params['item_name'] = self.item_name
if self.manufacturer_name:
if hasattr(self.manufacturer_name, 'to_alipay_dict'):
params['manufacturer_name'] = self.manufacturer_name.to_alipay_dict()
else:
params['manufacturer_name'] = self.manufacturer_name
if self.max_purchase_quantity:
if hasattr(self.max_purchase_quantity, 'to_alipay_dict'):
params['max_purchase_quantity'] = self.max_purchase_quantity.to_alipay_dict()
else:
params['max_purchase_quantity'] = self.max_purchase_quantity
if self.min_purchase_quantity:
if hasattr(self.min_purchase_quantity, 'to_alipay_dict'):
params['min_purchase_quantity'] = self.min_purchase_quantity.to_alipay_dict()
else:
params['min_purchase_quantity'] = self.min_purchase_quantity
if self.price:
if hasattr(self.price, 'to_alipay_dict'):
params['price'] = self.price.to_alipay_dict()
else:
params['price'] = self.price
if self.specifications:
if hasattr(self.specifications, 'to_alipay_dict'):
params['specifications'] = self.specifications.to_alipay_dict()
else:
params['specifications'] = self.specifications
if self.support_emergency_delivery:
if hasattr(self.support_emergency_delivery, 'to_alipay_dict'):
params['support_emergency_delivery'] = self.support_emergency_delivery.to_alipay_dict()
else:
params['support_emergency_delivery'] = self.support_emergency_delivery
if self.usage_dosage:
if hasattr(self.usage_dosage, 'to_alipay_dict'):
params['usage_dosage'] = self.usage_dosage.to_alipay_dict()
else:
params['usage_dosage'] = self.usage_dosage
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = HealthServiceFamilyDoctorDrugDTO()
if 'catalogue_listed' in d:
o.catalogue_listed = d['catalogue_listed']
if 'dosage_forms' in d:
o.dosage_forms = d['dosage_forms']
if 'drug_classification' in d:
o.drug_classification = d['drug_classification']
if 'general_name' in d:
o.general_name = d['general_name']
if 'inventory' in d:
o.inventory = d['inventory']
if 'item_id' in d:
o.item_id = d['item_id']
if 'item_name' in d:
o.item_name = d['item_name']
if 'manufacturer_name' in d:
o.manufacturer_name = d['manufacturer_name']
if 'max_purchase_quantity' in d:
o.max_purchase_quantity = d['max_purchase_quantity']
if 'min_purchase_quantity' in d:
o.min_purchase_quantity = d['min_purchase_quantity']
if 'price' in d:
o.price = d['price']
if 'specifications' in d:
o.specifications = d['specifications']
if 'support_emergency_delivery' in d:
o.support_emergency_delivery = d['support_emergency_delivery']
if 'usage_dosage' in d:
o.usage_dosage = d['usage_dosage']
return o | if self.inventory:
if hasattr(self.inventory, 'to_alipay_dict'):
params['inventory'] = self.inventory.to_alipay_dict()
else: |
shared_mempool_test.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
mocks::MockSharedMempool,
shared_mempool::types::TransactionSummary,
tests::common::{batch_add_signed_txn, TestTransaction},
ConsensusRequest,
};
use diem_types::transaction::Transaction;
use futures::{channel::oneshot, executor::block_on, sink::SinkExt};
use mempool_notifications::MempoolNotificationSender;
use tokio::runtime::Builder;
#[test]
fn test_consensus_events_rejected_txns() {
let smp = MockSharedMempool::new(None);
    // Add txns 1, 2, 3
// Txn 1: committed successfully
// Txn 2: not committed but older than gc block timestamp
// Txn 3: not committed and newer than block timestamp
let committed_txn =
TestTransaction::new(0, 0, 1).make_signed_transaction_with_expiration_time(0);
let kept_txn = TestTransaction::new(1, 0, 1).make_signed_transaction(); // not committed or cleaned out by block timestamp gc
let txns = vec![
committed_txn.clone(),
TestTransaction::new(0, 1, 1).make_signed_transaction_with_expiration_time(0),
kept_txn.clone(),
];
// Add txns to mempool
{
let mut pool = smp.mempool.lock();
assert!(batch_add_signed_txn(&mut pool, txns).is_ok());
}
let transactions = vec![TransactionSummary {
sender: committed_txn.sender(),
sequence_number: committed_txn.sequence_number(),
}];
let (callback, callback_rcv) = oneshot::channel();
let req = ConsensusRequest::RejectNotification(transactions, callback);
let mut consensus_sender = smp.consensus_sender.clone();
block_on(async {
assert!(consensus_sender.send(req).await.is_ok());
assert!(callback_rcv.await.is_ok());
});
let mut pool = smp.mempool.lock();
let (timeline, _) = pool.read_timeline(0, 10);
assert_eq!(timeline.len(), 1);
assert_eq!(timeline.get(0).unwrap(), &kept_txn);
}
#[test]
fn test_mempool_notify_committed_txns() {
// Create runtime for the mempool notifier and listener
let runtime = Builder::new_multi_thread().enable_all().build().unwrap();
let _enter = runtime.enter();
// Create a new mempool notifier, listener and shared mempool
let (mempool_notifier, mempool_listener) =
mempool_notifications::new_mempool_notifier_listener_pair();
let smp = MockSharedMempool::new(Some(mempool_listener));
    // Add txns 1, 2, 3
// Txn 1: committed successfully
// Txn 2: not committed but older than gc block timestamp
// Txn 3: not committed and newer than block timestamp
let committed_txn =
TestTransaction::new(0, 0, 1).make_signed_transaction_with_expiration_time(0); | committed_txn.clone(),
TestTransaction::new(0, 1, 1).make_signed_transaction_with_expiration_time(0),
kept_txn.clone(),
];
// Add txns to mempool
{
let mut pool = smp.mempool.lock();
assert!(batch_add_signed_txn(&mut pool, txns).is_ok());
}
let committed_txns = vec![Transaction::UserTransaction(committed_txn)];
block_on(async {
assert!(mempool_notifier
.notify_new_commit(committed_txns, 1, 1000)
.await
.is_ok());
});
let mut pool = smp.mempool.lock();
let (timeline, _) = pool.read_timeline(0, 10);
assert_eq!(timeline.len(), 1);
assert_eq!(timeline.get(0).unwrap(), &kept_txn);
} | let kept_txn = TestTransaction::new(1, 0, 1).make_signed_transaction(); // not committed or cleaned out by block timestamp gc
let txns = vec![ |
info.rs | use std::{
cmp,
io::{self, Write},
};
use noodles_vcf as vcf;
use crate::{
header::string_maps::StringStringMap,
record::value::{Float, Int16, Int32, Int8, Value},
writer::{string_map::write_string_map_index, value::write_value},
};
const MISSING_VALUE: char = '.';
const DELIMITER: char = ',';
pub fn write_info<W>(
writer: &mut W,
string_string_map: &StringStringMap,
info: &vcf::record::Info,
) -> io::Result<()>
where
W: Write,
{
for field in info.values() {
write_info_field(writer, string_string_map, field)?;
}
Ok(())
}
fn write_info_field<W>(
writer: &mut W,
string_string_map: &StringStringMap,
field: &vcf::record::info::Field,
) -> io::Result<()>
where
W: Write,
{
write_info_field_key(writer, string_string_map, field.key())?;
write_info_field_value(writer, field.value())?;
Ok(())
}
fn write_info_field_key<W>(
writer: &mut W,
string_string_map: &StringStringMap,
key: &vcf::header::info::Key,
) -> io::Result<()>
where
W: Write,
{
string_string_map
.get_index_of(key.as_ref())
.ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
format!("info key missing from string map: {:?}", key),
)
})
.and_then(|i| write_string_map_index(writer, i))
}
fn write_info_field_value<W>(
writer: &mut W,
value: Option<&vcf::record::info::field::Value>,
) -> io::Result<()>
where
W: Write,
{
use vcf::record::info::field;
match value {
Some(field::Value::Integer(n)) => write_info_field_integer_value(writer, *n),
Some(field::Value::Float(n)) => write_info_field_float_value(writer, *n),
Some(field::Value::Flag) => write_info_field_flag_value(writer),
Some(field::Value::Character(c)) => write_info_field_character_value(writer, *c),
Some(field::Value::String(s)) => write_info_field_string_value(writer, s),
Some(field::Value::IntegerArray(values)) => {
write_info_field_integer_array_value(writer, values)
}
Some(field::Value::FloatArray(values)) => {
write_info_field_float_array_value(writer, values)
}
Some(field::Value::CharacterArray(values)) => {
write_info_field_character_array_value(writer, values)
}
Some(field::Value::StringArray(values)) => {
write_info_field_string_array_value(writer, values)
}
_ => todo!("unhandled INFO field value: {:?}", value),
}
}
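// Scalar integers are written with the narrowest BCF integer type that can hold the
// value (Int8, then Int16, then Int32). The usable minimum of each type sits above the
// primitive minimum because the lowest values are reserved (e.g. for the missing
// sentinel), hence the asymmetric bounds checks below.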
fn write_info_field_integer_value<W>(writer: &mut W, n: i32) -> io::Result<()>
where
W: Write,
{
if n >= 0 {
if n <= i32::from(Int8::MAX_VALUE) {
write_value(writer, Some(Value::Int8(Some(Int8::Value(n as i8)))))
} else if n <= i32::from(Int16::MAX_VALUE) {
write_value(writer, Some(Value::Int16(Some(Int16::Value(n as i16)))))
} else {
write_value(writer, Some(Value::Int32(Some(Int32::Value(n)))))
}
} else if n >= i32::from(Int8::MIN_VALUE) {
write_value(writer, Some(Value::Int8(Some(Int8::Value(n as i8)))))
} else if n >= i32::from(Int16::MIN_VALUE) {
write_value(writer, Some(Value::Int16(Some(Int16::Value(n as i16)))))
} else if n >= Int32::MIN_VALUE {
write_value(writer, Some(Value::Int32(Some(Int32::Value(n)))))
} else {
Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!("invalid info field integer value: {}", n),
))
}
}
fn write_info_field_float_value<W>(writer: &mut W, n: f32) -> io::Result<()>
where
W: Write,
{
write_value(writer, Some(Value::Float(Some(Float::Value(n)))))
}
fn write_info_field_flag_value<W>(writer: &mut W) -> io::Result<()>
where
W: Write,
{
write_value(writer, None)
}
fn write_info_field_character_value<W>(writer: &mut W, c: char) -> io::Result<()>
where
W: Write,
{
write_value(writer, Some(Value::String(Some(c.into()))))
}
fn write_info_field_string_value<W>(writer: &mut W, s: &str) -> io::Result<()>
where
W: Write,
{
write_value(writer, Some(Value::String(Some(s.into()))))
}
fn write_info_field_integer_array_value<W>(writer: &mut W, values: &[Option<i32>]) -> io::Result<()>
where
W: Write,
{
if values.is_empty() {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"info field integer array cannot be empty",
));
}
let (mut min, mut max) = (i32::MAX, i32::MIN);
for value in values {
let n = value.unwrap_or_default();
min = cmp::min(min, n);
max = cmp::max(max, n);
}
if min >= i32::from(Int8::MIN_VALUE) {
if max <= i32::from(Int8::MAX_VALUE) {
write_info_field_int8_array_value(writer, values)
} else if max <= i32::from(Int16::MAX_VALUE) {
write_info_field_int16_array_value(writer, values)
} else {
write_info_field_int32_array_value(writer, values)
}
} else if min >= i32::from(Int16::MIN_VALUE) {
if max <= i32::from(Int16::MAX_VALUE) {
write_info_field_int16_array_value(writer, values)
} else {
write_info_field_int32_array_value(writer, values)
}
} else if min >= Int32::MIN_VALUE {
write_info_field_int32_array_value(writer, values)
} else {
Err(io::Error::new(
io::ErrorKind::InvalidInput,
format!("invalid info field integer array value: {}", min),
))
}
}
fn write_info_field_int8_array_value<W>(writer: &mut W, values: &[Option<i32>]) -> io::Result<()>
where
W: Write,
{
let mut vs = Vec::with_capacity(values.len());
for value in values {
let v = match value {
Some(n) => i8::try_from(*n)
.map(Int8::from)
.map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?,
None => Int8::Missing,
};
let n = match v {
Int8::Value(n) => n,
Int8::Missing => i8::from(v),
_ => todo!("unhandled i16 array value: {:?}", v),
};
vs.push(n);
}
write_value(writer, Some(Value::Int8Array(vs)))
}
fn write_info_field_int16_array_value<W>(writer: &mut W, values: &[Option<i32>]) -> io::Result<()>
where
W: Write,
{
let mut vs = Vec::with_capacity(values.len());
for value in values {
let v = match value {
Some(n) => i16::try_from(*n)
.map(Int16::from)
.map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?,
None => Int16::Missing,
};
let n = match v {
Int16::Value(n) => n,
Int16::Missing => i16::from(v),
_ => todo!("unhandled i16 array value: {:?}", v),
};
vs.push(n); | }
fn write_info_field_int32_array_value<W>(writer: &mut W, values: &[Option<i32>]) -> io::Result<()>
where
W: Write,
{
let vs = values
.iter()
.map(|value| value.map(Int32::from).unwrap_or(Int32::Missing))
.map(|value| match value {
Int32::Value(n) => n,
Int32::Missing => i32::from(value),
_ => todo!("unhandled i32 array value: {:?}", value),
})
.collect();
write_value(writer, Some(Value::Int32Array(vs)))
}
fn write_info_field_float_array_value<W>(writer: &mut W, values: &[Option<f32>]) -> io::Result<()>
where
W: Write,
{
let vs = values
.iter()
.map(|value| value.map(Float::from).unwrap_or(Float::Missing))
.map(|value| match value {
Float::Value(n) => n,
Float::Missing => f32::from(value),
_ => todo!("unhandled f32 array value: {:?}", value),
})
.collect();
write_value(writer, Some(Value::FloatArray(vs)))
}
fn write_info_field_character_array_value<W>(
writer: &mut W,
values: &[Option<char>],
) -> io::Result<()>
where
W: Write,
{
let mut s = String::new();
for (i, value) in values.iter().enumerate() {
if i > 0 {
s.push(DELIMITER);
}
let c = value.unwrap_or(MISSING_VALUE);
s.push(c);
}
write_value(writer, Some(Value::String(Some(s))))
}
fn write_info_field_string_array_value<W>(
writer: &mut W,
values: &[Option<String>],
) -> io::Result<()>
where
W: Write,
{
let mut s = String::new();
for (i, value) in values.iter().enumerate() {
if i > 0 {
s.push(DELIMITER);
}
if let Some(t) = value {
s.push_str(t);
} else {
s.push(MISSING_VALUE);
}
}
write_value(writer, Some(Value::String(Some(s))))
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_write_info_field_value_with_integer_value() -> io::Result<()> {
use vcf::record::info::field;
fn t(buf: &mut Vec<u8>, value: &field::Value, expected: &[u8]) -> io::Result<()> {
buf.clear();
write_info_field_value(buf, Some(value))?;
assert_eq!(buf, expected);
Ok(())
}
let mut buf = Vec::new();
let value = field::Value::Integer(-2147483641);
buf.clear();
assert!(matches!(
write_info_field_value(&mut buf, Some(&value)),
Err(ref e) if e.kind() == io::ErrorKind::InvalidInput
));
let value = field::Value::Integer(-2147483640);
t(&mut buf, &value, &[0x13, 0x08, 0x00, 0x00, 0x80])?;
let value = field::Value::Integer(-32761);
t(&mut buf, &value, &[0x13, 0x07, 0x80, 0xff, 0xff])?;
let value = field::Value::Integer(-32760);
t(&mut buf, &value, &[0x12, 0x08, 0x80])?;
let value = field::Value::Integer(-121);
t(&mut buf, &value, &[0x12, 0x87, 0xff])?;
let value = field::Value::Integer(-120);
t(&mut buf, &value, &[0x11, 0x88])?;
let value = field::Value::Integer(0);
t(&mut buf, &value, &[0x11, 0x00])?;
let value = field::Value::Integer(127);
t(&mut buf, &value, &[0x11, 0x7f])?;
let value = field::Value::Integer(128);
t(&mut buf, &value, &[0x12, 0x80, 0x00])?;
let value = field::Value::Integer(32767);
t(&mut buf, &value, &[0x12, 0xff, 0x7f])?;
let value = field::Value::Integer(32768);
t(&mut buf, &value, &[0x13, 0x00, 0x80, 0x00, 0x00])?;
let value = field::Value::Integer(2147483647);
t(&mut buf, &value, &[0x13, 0xff, 0xff, 0xff, 0x7f])?;
Ok(())
}
#[test]
fn test_write_info_field_value_with_float_value() -> io::Result<()> {
use vcf::record::info::field;
let mut buf = Vec::new();
let value = field::Value::Float(0.0);
write_info_field_value(&mut buf, Some(&value))?;
let expected = [0x15, 0x00, 0x00, 0x00, 0x00];
assert_eq!(buf, expected);
Ok(())
}
#[test]
fn test_write_info_field_value_with_flag_value() -> io::Result<()> {
use vcf::record::info::field;
let mut buf = Vec::new();
let value = field::Value::Flag;
write_info_field_value(&mut buf, Some(&value))?;
let expected = [0x00];
assert_eq!(buf, expected);
Ok(())
}
#[test]
fn test_write_info_field_value_with_character_value() -> io::Result<()> {
use vcf::record::info::field;
let mut buf = Vec::new();
let value = field::Value::Character('n');
write_info_field_value(&mut buf, Some(&value))?;
let expected = [0x17, 0x6e];
assert_eq!(buf, expected);
Ok(())
}
#[test]
fn test_write_info_field_value_with_string_value() -> io::Result<()> {
use vcf::record::info::field;
let mut buf = Vec::new();
let value = field::Value::String(String::from("ndls"));
write_info_field_value(&mut buf, Some(&value))?;
let expected = [0x47, 0x6e, 0x64, 0x6c, 0x73];
assert_eq!(buf, expected);
Ok(())
}
#[test]
fn test_write_info_field_value_with_integer_array_value() -> io::Result<()> {
use vcf::record::info::field;
fn t(buf: &mut Vec<u8>, value: Option<&field::Value>, expected: &[u8]) -> io::Result<()> {
buf.clear();
write_info_field_value(buf, value)?;
assert_eq!(buf, expected);
Ok(())
}
let mut buf = Vec::new();
let value = field::Value::IntegerArray(vec![Some(-2147483641), Some(-2147483640)]);
buf.clear();
assert!(matches!(
write_info_field_value(&mut buf, Some(&value)),
Err(ref e) if e.kind() == io::ErrorKind::InvalidInput
));
let value = field::Value::IntegerArray(vec![Some(-2147483640), Some(-2147483639)]);
t(
&mut buf,
Some(&value),
&[0x23, 0x08, 0x00, 0x00, 0x80, 0x09, 0x00, 0x00, 0x80],
)?;
let value = field::Value::IntegerArray(vec![Some(-2147483640), None]);
t(
&mut buf,
Some(&value),
&[0x23, 0x08, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80],
)?;
let value = field::Value::IntegerArray(vec![Some(-32761), Some(-32760)]);
t(
&mut buf,
Some(&value),
&[0x23, 0x07, 0x80, 0xff, 0xff, 0x08, 0x80, 0xff, 0xff],
)?;
let value = field::Value::IntegerArray(vec![Some(-32761), None]);
t(
&mut buf,
Some(&value),
&[0x23, 0x07, 0x80, 0xff, 0xff, 0x00, 0x00, 0x00, 0x80],
)?;
let value = field::Value::IntegerArray(vec![Some(-32760), Some(-32759)]);
t(&mut buf, Some(&value), &[0x22, 0x08, 0x80, 0x09, 0x80])?;
let value = field::Value::IntegerArray(vec![Some(-32760), None]);
t(&mut buf, Some(&value), &[0x22, 0x08, 0x80, 0x00, 0x80])?;
let value = field::Value::IntegerArray(vec![Some(-121), Some(-120)]);
t(&mut buf, Some(&value), &[0x22, 0x87, 0xff, 0x88, 0xff])?;
let value = field::Value::IntegerArray(vec![Some(-121), None]);
t(&mut buf, Some(&value), &[0x22, 0x87, 0xff, 0x00, 0x80])?;
let value = field::Value::IntegerArray(vec![Some(-120), Some(-119)]);
t(&mut buf, Some(&value), &[0x21, 0x88, 0x89])?;
let value = field::Value::IntegerArray(vec![Some(-120), None]);
t(&mut buf, Some(&value), &[0x21, 0x88, 0x80])?;
let value = field::Value::IntegerArray(vec![None, Some(0), Some(1)]);
t(&mut buf, Some(&value), &[0x31, 0x80, 0x00, 0x01])?;
let value = field::Value::IntegerArray(vec![Some(-1), Some(0), Some(1)]);
t(&mut buf, Some(&value), &[0x31, 0xff, 0x00, 0x01])?;
let value = field::Value::IntegerArray(vec![Some(-1), Some(0), None]);
t(&mut buf, Some(&value), &[0x31, 0xff, 0x00, 0x80])?;
let value = field::Value::IntegerArray(vec![Some(126), Some(127)]);
t(&mut buf, Some(&value), &[0x21, 0x7e, 0x7f])?;
let value = field::Value::IntegerArray(vec![None, Some(127)]);
t(&mut buf, Some(&value), &[0x21, 0x80, 0x7f])?;
let value = field::Value::IntegerArray(vec![Some(127), Some(128)]);
t(&mut buf, Some(&value), &[0x22, 0x7f, 0x00, 0x80, 0x00])?;
let value = field::Value::IntegerArray(vec![None, Some(128)]);
t(&mut buf, Some(&value), &[0x22, 0x00, 0x80, 0x80, 0x00])?;
let value = field::Value::IntegerArray(vec![Some(32766), Some(32767)]);
t(&mut buf, Some(&value), &[0x22, 0xfe, 0x7f, 0xff, 0x7f])?;
let value = field::Value::IntegerArray(vec![None, Some(32767)]);
t(&mut buf, Some(&value), &[0x22, 0x00, 0x80, 0xff, 0x7f])?;
let value = field::Value::IntegerArray(vec![Some(32767), Some(32768)]);
t(
&mut buf,
Some(&value),
&[0x23, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00],
)?;
let value = field::Value::IntegerArray(vec![None, Some(32768)]);
t(
&mut buf,
Some(&value),
&[0x23, 0x00, 0x00, 0x00, 0x80, 0x00, 0x80, 0x00, 0x00],
)?;
let value = field::Value::IntegerArray(vec![Some(2147483646), Some(2147483647)]);
t(
&mut buf,
Some(&value),
&[0x23, 0xfe, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f],
)?;
let value = field::Value::IntegerArray(vec![None, Some(2147483647)]);
t(
&mut buf,
Some(&value),
&[0x23, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0x7f],
)?;
Ok(())
}
#[test]
fn test_write_info_field_value_with_float_array_value() -> io::Result<()> {
use vcf::record::info::field;
fn t(buf: &mut Vec<u8>, value: Option<&field::Value>, expected: &[u8]) -> io::Result<()> {
buf.clear();
write_info_field_value(buf, value)?;
assert_eq!(buf, expected);
Ok(())
}
let mut buf = Vec::new();
let value = field::Value::FloatArray(vec![Some(0.0), Some(1.0)]);
t(
&mut buf,
Some(&value),
&[0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f],
)?;
let value = field::Value::FloatArray(vec![Some(0.0), None]);
t(
&mut buf,
Some(&value),
&[0x25, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x80, 0x7f],
)?;
Ok(())
}
#[test]
fn test_write_info_field_value_with_character_array_value() -> io::Result<()> {
use vcf::record::info::field;
fn t(buf: &mut Vec<u8>, value: Option<&field::Value>, expected: &[u8]) -> io::Result<()> {
buf.clear();
write_info_field_value(buf, value)?;
assert_eq!(buf, expected);
Ok(())
}
let mut buf = Vec::new();
let value = field::Value::CharacterArray(vec![Some('n'), Some('d'), Some('l'), Some('s')]);
t(
&mut buf,
Some(&value),
&[0x77, 0x6e, 0x2c, 0x64, 0x2c, 0x6c, 0x2c, 0x73],
)?;
let value = field::Value::CharacterArray(vec![Some('n'), Some('d'), Some('l'), None]);
t(
&mut buf,
Some(&value),
&[0x77, 0x6e, 0x2c, 0x64, 0x2c, 0x6c, 0x2c, 0x2e],
)?;
Ok(())
}
#[test]
fn test_write_info_field_value_with_string_array_value() -> io::Result<()> {
use vcf::record::info::field;
fn t(buf: &mut Vec<u8>, value: Option<&field::Value>, expected: &[u8]) -> io::Result<()> {
buf.clear();
write_info_field_value(buf, value)?;
assert_eq!(buf, expected);
Ok(())
}
let mut buf = Vec::new();
let value =
field::Value::StringArray(vec![Some(String::from("nd")), Some(String::from("ls"))]);
t(
&mut buf,
Some(&value),
&[0x57, 0x6e, 0x64, 0x2c, 0x6c, 0x73],
)?;
let value = field::Value::StringArray(vec![Some(String::from("nd")), None]);
t(&mut buf, Some(&value), &[0x47, 0x6e, 0x64, 0x2c, 0x2e])?;
Ok(())
}
} | }
write_value(writer, Some(Value::Int16Array(vs))) |
roaring.rs | use crate::colors::storage::serializer::{ColorsFlushProcessing, ColorsIndexEntry};
use crate::colors::storage::ColorsSerializerImpl;
use crate::colors::ColorIndexType;
use crate::hashes::dummy_hasher::{DummyHasher, DummyHasherBuilder};
use crate::io::chunks_writer::ChunksWriter;
use crate::io::varint::{decode_varint, encode_varint};
use crate::utils::async_slice_queue::AsyncSliceQueue;
use crate::KEEP_FILES;
use byteorder::ReadBytesExt;
use dashmap::DashMap;
use desse::{Desse, DesseSized};
use parking_lot::Mutex;
use rand::{thread_rng, RngCore};
use roaring::RoaringBitmap;
use serde::{Deserialize, Serialize};
use siphasher::sip128::{Hash128, Hasher128, SipHasher13};
use std::cell::UnsafeCell;
use std::cmp::max;
use std::fs::File;
use std::hash::{Hash, Hasher};
use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
use std::mem::{swap, transmute};
use std::ops::{Deref, DerefMut};
use std::path::Path;
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
struct RoaringBitmapInstance {
bitmap: RoaringBitmap,
offset: ColorIndexType,
colors_count: u64,
checkpoint_distance: u64,
stride: ColorIndexType,
last_color: ColorIndexType,
}
impl RoaringBitmapInstance {
fn new(
colors_count: u64,
checkpoint_distance: u64,
offset: ColorIndexType,
stride: ColorIndexType,
) -> Self {
Self {
bitmap: RoaringBitmap::new(),
offset,
colors_count,
checkpoint_distance,
stride,
last_color: 0,
}
}
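    /// Appends the subset `colors` for `color_index`, which must belong to this bitmap
    /// (i.e. be congruent to `offset` modulo `stride`). Returns `false` if an earlier
    /// color assigned to this bitmap has not been appended yet, so the caller can back
    /// off and retry.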
fn try_append(
&mut self,
color_index: ColorIndexType,
colors: impl Iterator<Item = ColorIndexType>,
writer: &ColorsFlushProcessing,
) -> bool {
        let base_color = color_index - self.offset;
        // An earlier color assigned to this bitmap has not been appended yet;
        // return false so the caller can yield and retry once it arrives.
if base_color > self.last_color + self.stride |
self.last_color = base_color;
assert_eq!(base_color % self.stride, 0);
let strided_color = base_color / self.stride;
let local_position = strided_color * (self.colors_count as u32);
self.bitmap.append(colors.map(|c| local_position + c));
// Flush the partial bitmap
if strided_color >= self.checkpoint_distance as u32 {
println!("Flushing with offset: {}", self.offset);
self.flush(writer);
}
true
}
fn flush(&mut self, writer: &ColorsFlushProcessing) {
let mut pdata = writer.start_processing();
self.bitmap.serialize_into(writer.get_stream(&mut pdata));
writer.end_processing(pdata, self.offset, self.stride);
self.offset += self.last_color;
self.last_color = 0;
self.bitmap.clear();
}
}
pub struct RoaringColorsSerializer {
colors_count: u64,
roaring_bitmaps: Vec<Mutex<RoaringBitmapInstance>>,
writer: ColorsFlushProcessing,
colors_index: AtomicU32,
checkpoint_distance: usize,
}
impl ColorsSerializerImpl for RoaringColorsSerializer {
fn decode_color(
reader: impl Read,
entry_info: ColorsIndexEntry,
color: ColorIndexType,
) -> Vec<u32> {
todo!()
}
fn new(writer: ColorsFlushProcessing, checkpoint_distance: usize, colors_count: u64) -> Self {
let stride = rayon::current_num_threads() as ColorIndexType;
Self {
roaring_bitmaps: (0..stride)
.map(|off| {
Mutex::new(RoaringBitmapInstance::new(
colors_count,
checkpoint_distance as u64,
off,
stride,
))
})
.collect(),
writer,
colors_index: AtomicU32::new(0),
checkpoint_distance,
colors_count,
}
}
fn serialize_colors(&self, colors: &[ColorIndexType]) -> ColorIndexType {
let color_index = self.colors_index.fetch_add(1, Ordering::Relaxed);
let target_bitmap = color_index % self.roaring_bitmaps.len() as ColorIndexType;
loop {
let mut bitmap_lock = self.roaring_bitmaps[target_bitmap as usize].lock();
if bitmap_lock.try_append(color_index, colors.iter().copied(), &self.writer) {
break;
}
drop(bitmap_lock);
std::thread::yield_now();
}
color_index
}
fn get_subsets_count(&self) -> u64 {
self.colors_index.load(Ordering::Relaxed) as u64
}
fn print_stats(&self) {
println!("Subsets count: {}", self.get_subsets_count());
}
fn finalize(mut self) -> ColorsFlushProcessing {
for bitmap in self.roaring_bitmaps {
bitmap.lock().flush(&mut self.writer);
}
self.writer
}
}
| {
return false;
} |
zenodo.py | # script to upload a file to zenodo sandbox via api
# separate sandbox and real-zenodo accounts, each with its own ACCESS_TOKEN, need to be created
# to adapt this script to real-zenodo (from sandbox implementation):
# update urls to zenodo.org from sandbox.zenodo.org
# update SANDBOX_TOKEN to an ACCESS_TOKEN from real-zenodo
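# for reference, a hedged sketch of the only lines that differ between the two
# environments (the base-url constant below is illustrative, not part of this script):
#   ZENODO_BASE = 'https://zenodo.org/api'           # production
#   ZENODO_BASE = 'https://sandbox.zenodo.org/api'   # sandbox
#   r = requests.post(ZENODO_BASE + '/deposit/depositions', params={'access_token': ACCESS_TOKEN}, json={}, headers=headers)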
import sys, json, requests
import pandas as pd
studyid = sys.argv[1]
file_dir = sys.argv[2]
access_token = sys.argv[3]
data_dir = file_dir+'/ewas-sum-stats/to-add/'+studyid
zfile=data_dir+'/zenodo.csv'
try:
zdata = pd.read_csv(zfile)
except FileNotFoundError:
print("Can't find the file "+zfile)
sys.exit()
print('Starting Zenodo upload process')
# specify ACCESS_TOKEN
# this needs to be generated for each sandbox/real account
ACCESS_TOKEN = access_token
# create empty upload
headers = {"Content-Type": "application/json"}
r = requests.post('https://zenodo.org/api/deposit/depositions', params={'access_token': ACCESS_TOKEN}, json={}, headers=headers)
# r = requests.post('https://sandbox.zenodo.org/api/deposit/depositions', params={'access_token': ACCESS_TOKEN}, json={}, headers=headers)
r.status_code
r.json()
# Get the deposition id from the previous response
# Upload the file to be deposited to Zenodo
deposition_id = r.json()['id']
data = {'name': 'results.csv'}
files = {'file': open(data_dir+'/results.csv', 'rb')}
r = requests.post('https://zenodo.org/api/deposit/depositions/%s/files' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=data, files=files)
# r = requests.post('https://sandbox.zenodo.org/api/deposit/depositions/%s/files' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=data, files=files)
r.status_code
r.json()
# specify and attach the metadata for the upload
title = zdata.loc[0, 'title']
authors = zdata.loc[0, 'authors']
desc = zdata.loc[0, 'desc']
desc = desc + '\n\n' + 'Upload of this dataset was completed by The EWAS Catalog team. The data can be queried along with hundreds of other EWAS at ewascatalog.org. To upload your EWAS summary statistics and have a zenodo DOI generated for you go to ewascatalog.org/upload'
data = {'metadata': | 'description': desc,
'creators': [{'name': authors}]}}
r = requests.put('https://zenodo.org/api/deposit/depositions/%s' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=json.dumps(data), headers=headers)
# r = requests.put('https://sandbox.zenodo.org/api/deposit/depositions/%s' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=json.dumps(data), headers=headers)
r.status_code
r.json()
# publish
r = requests.post('https://zenodo.org/api/deposit/depositions/%s/actions/publish' % deposition_id, params={'access_token': ACCESS_TOKEN} )
# r = requests.post('https://sandbox.zenodo.org/api/deposit/depositions/%s/actions/publish' % deposition_id, params={'access_token': ACCESS_TOKEN} )
status_code = r.status_code
if status_code != 202:
raise ValueError("Status code was" + str(status_code) + " and it should be 202. Check zenodo")
else:
print("Status code is 202. Happy days!")
# should be: 202 | {'title': title,
'upload_type': 'dataset', |
rdi.py | import numpy as np
import xarray as xr
from .. import time as tmlib
import warnings
from os.path import getsize
from ._read_bin import bin_reader
from .base import _find_userdata, _create_dataset, _abspath
from ..rotate.rdi import _calc_beam_orientmat, _calc_orientmat
from ..rotate.base import _set_coords
from ..rotate.api import set_declination
def read_rdi(fname, userdata=None, nens=None, debug=0):
"""Read a TRDI binary data file.
Parameters
----------
    fname : string
Filename of TRDI file to read.
userdata : True, False, or string of userdata.json filename (default ``True``)
Whether to read the '<base-filename>.userdata.json' file.
nens : None (default: read entire file), int, or 2-element tuple (start, stop)
Number of pings to read from the file
Returns
-------
ds : xarray.Dataset
An xarray dataset from the binary instrument data
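    Examples
    --------
    A minimal usage sketch (the file name below is hypothetical):
    >>> ds = read_rdi('adcp_data.000', nens=(0, 1000))
    >>> ds['vel']  # velocity, in the coordinate system given by ds.coord_sys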
"""
# Reads into a dictionary of dictionaries using netcdf naming conventions
# Should be easier to debug
with _RdiReader(fname, debug_level=debug) as ldr:
dat = ldr.load_data(nens=nens)
# Read in userdata
userdata = _find_userdata(fname, userdata)
for nm in userdata:
dat['attrs'][nm] = userdata[nm]
if 'time_gps' in dat['coords']:
# GPS data not necessarily sampling at the same rate as ADCP DAQ.
dat = _remove_gps_duplicates(dat)
# Create xarray dataset from upper level dictionary
ds = _create_dataset(dat)
ds = _set_coords(ds, ref_frame=ds.coord_sys)
# Create orientation matrices
if 'beam2inst_orientmat' not in ds:
ds['beam2inst_orientmat'] = xr.DataArray(_calc_beam_orientmat(
ds.beam_angle,
ds.beam_pattern == 'convex'),
coords={'x': [1, 2, 3, 4],
'x*': [1, 2, 3, 4]},
dims=['x', 'x*'])
if 'orientmat' not in ds:
ds['orientmat'] = xr.DataArray(_calc_orientmat(ds),
coords={'earth': ['E', 'N', 'U'],
'inst': ['X', 'Y', 'Z'],
'time': ds['time']},
dims=['earth', 'inst', 'time'])
# Check magnetic declination if provided via software and/or userdata
_set_rdi_declination(ds, fname, inplace=True)
# VMDAS applies gps correction on velocity in .ENX files only
if fname.rsplit('.')[-1] == 'ENX':
ds.attrs['vel_gps_corrected'] = 1
else: # (not ENR or ENS) or WinRiver files
ds.attrs['vel_gps_corrected'] = 0
# Convert time coords to dt64
t_coords = [t for t in ds.coords if 'time' in t]
for ky in t_coords:
dt = tmlib.epoch2dt64(ds[ky])
ds = ds.assign_coords({ky: dt})
# Convert time vars to dt64
t_data = [t for t in ds.data_vars if 'time' in t]
for ky in t_data:
dt = tmlib.epoch2dt64(ds[ky])
ds[ky].data = dt
return ds
def _remove_gps_duplicates(dat):
"""
Removes duplicate and nan timestamp values in 'time_gps' coordinate, and
    adds the hardware (ADCP DAQ) timestamp corresponding to GPS acquisition
(in addition to the GPS unit's timestamp).
"""
dat['data_vars']['hdwtime_gps'] = dat['coords']['time']
    dat['units']['hdwtime_gps'] = 'seconds since 1970-01-01 00:00:00'
# Remove duplicate timestamp values, if applicable
dat['coords']['time_gps'], idx = np.unique(dat['coords']['time_gps'],
return_index=True)
# Remove nan values, if applicable
nan = np.zeros(dat['coords']['time'].shape, dtype=bool)
if any(np.isnan(dat['coords']['time_gps'])):
nan = np.isnan(dat['coords']['time_gps'])
dat['coords']['time_gps'] = dat['coords']['time_gps'][~nan]
for key in dat['data_vars']:
if 'gps' in key:
dat['data_vars'][key] = dat['data_vars'][key][idx]
if sum(nan) > 0:
dat['data_vars'][key] = dat['data_vars'][key][~nan]
return dat
def _set_rdi_declination(dat, fname, inplace):
# If magnetic_var_deg is set, this means that the declination is already
# included in the heading and in the velocity data.
declin = dat.attrs.pop('declination', None) # userdata declination
if dat.attrs['magnetic_var_deg'] != 0: # from TRDI software if set
dat.attrs['declination'] = dat.attrs['magnetic_var_deg']
dat.attrs['declination_in_orientmat'] = 1 # logical
if dat.attrs['magnetic_var_deg'] != 0 and declin is not None:
warnings.warn(
"'magnetic_var_deg' is set to {:.2f} degrees in the binary "
"file '{}', AND 'declination' is set in the 'userdata.json' "
"file. DOLfYN WILL USE THE VALUE of {:.2f} degrees in "
"userdata.json. If you want to use the value in "
"'magnetic_var_deg', delete the value from userdata.json and "
"re-read the file."
.format(dat.attrs['magnetic_var_deg'], fname, declin))
dat.attrs['declination'] = declin
if declin is not None:
set_declination(dat, declin, inplace)
century = 2000
data_defs = {'number': ([], 'data_vars', 'uint32', ''),
'rtc': ([7], 'sys', 'uint16', ''),
'builtin_test_fail': ([], 'data_vars', 'bool', ''),
'c_sound': ([], 'data_vars', 'float32', 'm/s'),
'depth': ([], 'data_vars', 'float32', 'm'),
'pitch': ([], 'data_vars', 'float32', 'deg'),
'roll': ([], 'data_vars', 'float32', 'deg'),
'heading': ([], 'data_vars', 'float32', 'deg'),
'temp': ([], 'data_vars', 'float32', 'C'),
'salinity': ([], 'data_vars', 'float32', 'psu'),
'min_preping_wait': ([], 'data_vars', 'float32', 's'),
'heading_std': ([], 'data_vars', 'float32', 'deg'),
'pitch_std': ([], 'data_vars', 'float32', 'deg'),
'roll_std': ([], 'data_vars', 'float32', 'deg'),
'adc': ([8], 'sys', 'uint8', ''),
'error_status_wd': ([], 'attrs', 'float32', ''),
'pressure': ([], 'data_vars', 'float32', 'dbar'),
'pressure_std': ([], 'data_vars', 'float32', 'dbar'),
'vel': (['nc', 4], 'data_vars', 'float32', 'm/s'),
'amp': (['nc', 4], 'data_vars', 'uint8', 'counts'),
'corr': (['nc', 4], 'data_vars', 'uint8', 'counts'),
'prcnt_gd': (['nc', 4], 'data_vars', 'uint8', '%'),
'status': (['nc', 4], 'data_vars', 'float32', ''),
'dist_bt': ([4], 'data_vars', 'float32', 'm'),
'vel_bt': ([4], 'data_vars', 'float32', 'm/s'),
'corr_bt': ([4], 'data_vars', 'uint8', 'counts'),
'amp_bt': ([4], 'data_vars', 'uint8', 'counts'),
'prcnt_gd_bt': ([4], 'data_vars', 'uint8', '%'),
'time': ([], 'coords', 'float64', ''),
'etime_gps': ([], 'coords', 'float64', ''),
'elatitude_gps': ([], 'data_vars', 'float64', 'deg'),
'elongitude_gps': ([], 'data_vars', 'float64', 'deg'),
'time_gps': ([], 'coords', 'float64', ''),
'latitude_gps': ([], 'data_vars', 'float64', 'deg'),
'longitude_gps': ([], 'data_vars', 'float64', 'deg'),
'ntime': ([], 'coords', 'float64', ''),
'flags': ([], 'data_vars', 'float32', ''),
}
def _get(dat, nm):
grp = data_defs[nm][1]
if grp is None:
return dat[nm]
else:
return dat[grp][nm]
def _in_group(dat, nm):
grp = data_defs[nm][1]
if grp is None:
return nm in dat
else:
return nm in dat[grp]
def _pop(dat, nm):
grp = data_defs[nm][1]
if grp is None:
dat.pop(nm)
else:
dat[grp].pop(nm)
def _setd(dat, nm, val):
grp = data_defs[nm][1]
if grp is None:
dat[nm] = val
else:
dat[grp][nm] = val
def _idata(dat, nm, sz):
group = data_defs[nm][1]
dtype = data_defs[nm][2]
units = data_defs[nm][3]
arr = np.empty(sz, dtype=dtype)
if dtype.startswith('float'):
arr[:] = np.NaN
dat[group][nm] = arr
dat['units'][nm] = units
return dat
def _get_size(name, n=None, ncell=0):
sz = list(data_defs[name][0]) # create a copy!
if 'nc' in sz:
sz.insert(sz.index('nc'), ncell)
sz.remove('nc')
if n is None:
return tuple(sz)
return tuple(sz + [n])
class _variable_setlist(set):
def __iadd__(self, vals):
if vals[0] not in self:
self |= set(vals)
return self
class _ensemble():
n_avg = 1
k = -1 # This is the counter for filling the ensemble object
def __getitem__(self, nm):
return getattr(self, nm)
def __init__(self, navg, n_cells):
if navg is None or navg == 0:
navg = 1
self.n_avg = navg
for nm in data_defs:
setattr(self, nm,
np.zeros(_get_size(nm, n=navg, ncell=n_cells),
dtype=data_defs[nm][2]))
def clean_data(self,):
self['vel'][self['vel'] == -32.768] = np.NaN
class _RdiReader():
_n_beams = 4 # Placeholder for 5-beam adcp, not currently used.
_pos = 0
progress = 0
_cfgnames = dict.fromkeys([4, 5], 'bb-adcp')
_cfgnames.update(dict.fromkeys([8, 9, 16], 'wh-adcp'))
_cfgnames.update(dict.fromkeys([14, 23], 'os-adcp'))
_cfac = 180 / 2 ** 31
_source = 0
_fixoffset = 0
_nbyte = 0
_winrivprob = False
    _search_num = 30000  # Maximum number of bytes to search for the next valid header
_debug7f79 = None
extrabytes = 0
def __init__(self, fname, navg=1, debug_level=0):
self.fname = _abspath(fname)
print('\nReading file {} ...'.format(fname))
self._debug_level = debug_level
self.cfg = {}
self.cfg['name'] = 'wh-adcp'
self.cfg['sourceprog'] = 'instrument'
self.cfg['prog_ver'] = 0
self.hdr = {}
self.f = bin_reader(self.fname)
self.read_hdr()
self.read_cfg()
self.f.seek(self._pos, 0)
self.n_avg = navg
self.ensemble = _ensemble(self.n_avg, self.cfg['n_cells'])
self._filesize = getsize(self.fname)
self._npings = int(self._filesize / (self.hdr['nbyte'] + 2 +
self.extrabytes))
self.vars_read = _variable_setlist(['time'])
if self._debug_level > 0:
print(' %d pings estimated in this file' % self._npings)
def read_hdr(self,):
fd = self.f
cfgid = list(fd.read_ui8(2))
nread = 0
if self._debug_level > 2:
print(self.f.pos)
print(' cfgid0: [{:x}, {:x}]'.format(*cfgid))
while (cfgid[0] != 127 or cfgid[1] != 127) or not self.checkheader():
nextbyte = fd.read_ui8(1)
pos = fd.tell()
nread += 1
cfgid[1] = cfgid[0]
cfgid[0] = nextbyte
if not pos % 1000:
print(' Still looking for valid cfgid at file '
'position %d ...' % pos)
self._pos = self.f.tell() - 2
if self._debug_level > 0:
print(fd.tell())
self.read_hdrseg()
def read_cfg(self,):
cfgid = self.f.read_ui16(1)
self.read_cfgseg()
def init_data(self,):
outd = {'data_vars': {}, 'coords': {},
'attrs': {}, 'units': {}, 'sys': {}}
outd['attrs']['inst_make'] = 'TRDI'
outd['attrs']['inst_model'] = 'Workhorse'
outd['attrs']['inst_type'] = 'ADCP'
outd['attrs']['rotate_vars'] = ['vel', ]
# Currently RDI doesn't use IMUs
outd['attrs']['has_imu'] = 0
for nm in data_defs:
outd = _idata(outd, nm,
sz=_get_size(nm, self._nens, self.cfg['n_cells']))
self.outd = outd
def mean(self, dat):
if self.n_avg == 1:
return dat[..., 0]
return np.nanmean(dat, axis=-1)
def load_data(self, nens=None):
if nens is None:
self._nens = int(self._npings / self.n_avg)
self._ens_range = (0, self._nens)
elif (nens.__class__ is tuple or nens.__class__ is list) and \
len(nens) == 2:
nens = list(nens)
if nens[1] == -1:
nens[1] = self._npings
self._nens = int((nens[1] - nens[0]) / self.n_avg)
self._ens_range = nens
self.f.seek((self.hdr['nbyte'] + 2 + self.extrabytes) *
self._ens_range[0], 1)
else:
self._nens = nens
self._ens_range = (0, nens)
if self._debug_level > 0:
print(' taking data from pings %d - %d' % tuple(self._ens_range))
print(' %d ensembles will be produced.' % self._nens)
self.init_data()
dat = self.outd
dat['coords']['range'] = (self.cfg['bin1_dist_m'] +
np.arange(self.cfg['n_cells']) *
self.cfg['cell_size'])
for nm in self.cfg:
dat['attrs'][nm] = self.cfg[nm]
for iens in range(self._nens):
try:
self.read_buffer()
except:
self.remove_end(iens)
break
self.ensemble.clean_data()
# Fix the 'real-time-clock' century
clock = self.ensemble.rtc[:, :]
if clock[0, 0] < 100:
clock[0, :] += century
# Copy the ensemble to the dataset.
for nm in self.vars_read:
_get(dat, nm)[..., iens] = self.mean(self.ensemble[nm])
try:
dats = tmlib.date2epoch(
tmlib.datetime(*clock[:6, 0],
microsecond=clock[6, 0] * 10000))[0]
except ValueError:
warnings.warn("Invalid time stamp in ping {}.".format(
int(self.ensemble.number[0])))
dat['coords']['time'][iens] = np.NaN
else:
dat['coords']['time'][iens] = np.median(dats)
self.finalize()
if 'vel_bt' in dat['data_vars']:
dat['attrs']['rotate_vars'].append('vel_bt')
return dat
def read_buffer(self,):
fd = self.f
self.ensemble.k = -1 # so that k+=1 gives 0 on the first loop.
self.print_progress()
hdr = self.hdr
while self.ensemble.k < self.ensemble.n_avg - 1:
self.search_buffer()
startpos = fd.tell() - 2
self.read_hdrseg()
byte_offset = self._nbyte + 2
for n in range(len(hdr['dat_offsets'])):
id = fd.read_ui16(1)
self._winrivprob = False
self.print_pos()
retval = self.read_dat(id)
if retval == 'FAIL':
break
byte_offset += self._nbyte
if n < (len(hdr['dat_offsets']) - 1):
oset = hdr['dat_offsets'][n + 1] - byte_offset
if oset != 0:
if self._debug_level > 0:
print(' %s: Adjust location by %d\n' % (id, oset))
fd.seek(oset, 1)
byte_offset = hdr['dat_offsets'][n + 1]
else:
if hdr['nbyte'] - 2 != byte_offset:
if not self._winrivprob:
if self._debug_level > 0:
print(' {:d}: Adjust location by {:d}\n'
.format(id, hdr['nbyte'] - 2 - byte_offset))
self.f.seek(hdr['nbyte'] - 2 - byte_offset, 1)
byte_offset = hdr['nbyte'] - 2
readbytes = fd.tell() - startpos
offset = hdr['nbyte'] + 2 - byte_offset
self.check_offset(offset, readbytes)
self.print_pos(byte_offset=byte_offset)
def search_buffer(self):
"""
Check to see if the next bytes indicate the beginning of a
data block. If not, search for the next data block, up to
_search_num times.
"""
id1 = list(self.f.read_ui8(2))
search_cnt = 0
fd = self.f
if self._debug_level > 3:
print(' -->In search_buffer...')
while (search_cnt < self._search_num and
((id1[0] != 127 or id1[1] != 127) or
not self.checkheader())):
search_cnt += 1
nextbyte = fd.read_ui8(1)
id1[1] = id1[0]
id1[0] = nextbyte
if search_cnt == self._search_num:
raise Exception(
'Searched {} entries... Bad data encountered. -> {}'
.format(search_cnt, id1))
elif search_cnt > 0:
if self._debug_level > 0:
print(' WARNING: Searched {} bytes to find next '
'valid ensemble start [{:x}, {:x}]'.format(search_cnt,
*id1))
def checkheader(self,):
if self._debug_level > 1:
print(" ###In checkheader.")
fd = self.f
valid = 0
# print(self.f.pos)
numbytes = fd.read_i16(1)
if numbytes > 0:
fd.seek(numbytes - 2, 1)
cfgid = fd.read_ui8(2)
if len(cfgid) == 2:
fd.seek(-numbytes - 2, 1)
if cfgid[0] == 127 and cfgid[1] in [127, 121]:
if cfgid[1] == 121 and self._debug7f79 is None:
self._debug7f79 = True
valid = 1
else:
fd.seek(-2, 1)
if self._debug_level > 1:
print(" ###Leaving checkheader.")
return valid
def read_hdrseg(self,):
|
def print_progress(self,):
self.progress = self.f.tell()
if self._debug_level > 1:
print(' pos %0.0fmb/%0.0fmb\n' %
(self.f.tell() / 1048576., self._filesize / 1048576.))
if (self.f.tell() - self.progress) < 1048576:
return
def print_pos(self, byte_offset=-1):
"""Print the position in the file, used for debugging.
"""
if self._debug_level > 3:
if hasattr(self, 'ensemble'):
k = self.ensemble.k
else:
k = 0
print(' pos: %d, pos_: %d, nbyte: %d, k: %d, byte_offset: %d' %
(self.f.tell(), self._pos, self._nbyte, k, byte_offset))
def check_offset(self, offset, readbytes):
fd = self.f
if offset != 4 and self._fixoffset == 0:
if self._debug_level >= 1:
print('\n ********************************************\n')
if fd.tell() == self._filesize:
print(' EOF reached unexpectedly - discarding this last ensemble\n')
else:
print(" Adjust location by {:d} (readbytes={:d},hdr['nbyte']={:d}\n"
.format(offset, readbytes, self.hdr['nbyte']))
print("""
NOTE - If this appears at the beginning of the file, it may be
a dolfyn problem. Please report this message, with details here:
https://github.com/lkilcher/dolfyn/issues/8
- If this appears at the end of the file it means
The file is corrupted and only a partial record
has been read\n
""")
print('\n ********************************************\n')
self._fixoffset = offset - 4
fd.seek(4 + self._fixoffset, 1)
def read_dat(self, id):
function_map = {0: (self.read_fixed, []), # 0000
128: (self.read_var, []), # 0080
256: (self.read_vel, []), # 0100
512: (self.read_corr, []), # 0200
768: (self.read_amp, []), # 0300
1024: (self.read_prcnt_gd, []), # 0400
1280: (self.read_status, []), # 0500
1536: (self.read_bottom, []), # 0600
8192: (self.read_vmdas, []), # 2000
8226: (self.read_winriver2, []), # 2022
8448: (self.read_winriver, [38]), # 2100
8449: (self.read_winriver, [97]), # 2101
8450: (self.read_winriver, [45]), # 2102
8451: (self.read_winriver, [60]), # 2103
8452: (self.read_winriver, [38]), # 2104
# Loading of these data is currently not implemented:
1793: (self.skip_Ncol, [4]), # 0701 number of pings
1794: (self.skip_Ncol, [4]), # 0702 sum of squared vel
1795: (self.skip_Ncol, [4]), # 0703 sum of velocities
2560: (self.skip_Ncol, []), # 0A00 Beam 5 velocity
# 0301 Beam 5 Number of good pings
769: (self.skip_Ncol, []),
# 0302 Beam 5 Sum of squared velocities
770: (self.skip_Ncol, []),
# 0303 Beam 5 Sum of velocities
771: (self.skip_Ncol, []),
# 020C Ambient sound profile
524: (self.skip_Nbyte, [4]),
12288: (self.skip_Nbyte, [32]),
# 3000 Fixed attitude data format for OS-ADCPs
}
# Call the correct function:
if id in function_map:
if self._debug_level >= 2:
print(' Reading code {}...'.format(hex(id)), end='')
retval = function_map.get(id)[0](*function_map[id][1])
if retval:
return retval
if self._debug_level >= 2:
print(' success!')
else:
self.read_nocode(id)
def read_fixed(self,):
if hasattr(self, 'configsize'):
self.f.seek(self.configsize, 1)
self._nbyte = self.configsize
else:
self.read_cfgseg()
if self._debug_level >= 1:
print(self._pos)
self._nbyte += 2
def read_cfgseg(self,):
cfgstart = self.f.tell()
cfg = self.cfg
fd = self.f
tmp = fd.read_ui8(5)
prog_ver0 = tmp[0]
cfg['prog_ver'] = tmp[0] + tmp[1] / 100.
cfg['name'] = self._cfgnames.get(tmp[0],
'unrecognized firmware version')
config = tmp[2:4]
cfg['beam_angle'] = [15, 20, 30][(config[1] & 3)]
#cfg['numbeams'] = [4, 5][int((config[1] & 16) == 16)]
cfg['freq'] = ([75, 150, 300, 600, 1200, 2400, 38][(config[0] & 7)])
cfg['beam_pattern'] = (['concave',
'convex'][int((config[0] & 8) == 8)])
cfg['orientation'] = ['down', 'up'][int((config[0] & 128) == 128)]
#cfg['simflag'] = ['real', 'simulated'][tmp[4]]
fd.seek(1, 1)
cfg['n_beams'] = fd.read_ui8(1)
cfg['n_cells'] = fd.read_ui8(1)
cfg['pings_per_ensemble'] = fd.read_ui16(1)
cfg['cell_size'] = fd.read_ui16(1) * .01
cfg['blank'] = fd.read_ui16(1) * .01
cfg['prof_mode'] = fd.read_ui8(1)
cfg['corr_threshold'] = fd.read_ui8(1)
cfg['prof_codereps'] = fd.read_ui8(1)
cfg['min_pgood'] = fd.read_ui8(1)
cfg['evel_threshold'] = fd.read_ui16(1)
cfg['sec_between_ping_groups'] = (
np.sum(np.array(fd.read_ui8(3)) *
np.array([60., 1., .01])))
coord_sys = fd.read_ui8(1)
cfg['coord_sys'] = (['beam', 'inst',
'ship', 'earth'][((coord_sys >> 3) & 3)])
cfg['use_pitchroll'] = ['no', 'yes'][(coord_sys & 4) == 4]
cfg['use_3beam'] = ['no', 'yes'][(coord_sys & 2) == 2]
cfg['bin_mapping'] = ['no', 'yes'][(coord_sys & 1) == 1]
cfg['xducer_misalign_deg'] = fd.read_i16(1) * .01
cfg['magnetic_var_deg'] = fd.read_i16(1) * .01
cfg['sensors_src'] = np.binary_repr(fd.read_ui8(1), 8)
cfg['sensors_avail'] = np.binary_repr(fd.read_ui8(1), 8)
cfg['bin1_dist_m'] = fd.read_ui16(1) * .01
cfg['xmit_pulse'] = fd.read_ui16(1) * .01
cfg['water_ref_cells'] = list(fd.read_ui8(2)) # list for attrs
cfg['fls_target_threshold'] = fd.read_ui8(1)
fd.seek(1, 1)
cfg['xmit_lag_m'] = fd.read_ui16(1) * .01
self._nbyte = 40
self.configsize = self.f.tell() - cfgstart
def read_var(self,):
""" Read variable leader """
fd = self.f
self.ensemble.k += 1
ens = self.ensemble
k = ens.k
self.vars_read += ['number',
'rtc',
'number',
'builtin_test_fail',
'c_sound',
'depth',
'heading',
'pitch',
'roll',
'salinity',
'temp',
'min_preping_wait',
'heading_std',
'pitch_std',
'roll_std',
'adc']
ens.number[k] = fd.read_ui16(1)
ens.rtc[:, k] = fd.read_ui8(7)
ens.number[k] += 65535 * fd.read_ui8(1)
ens.builtin_test_fail[k] = fd.read_ui16(1)
ens.c_sound[k] = fd.read_ui16(1)
ens.depth[k] = fd.read_ui16(1) * 0.1
ens.heading[k] = fd.read_ui16(1) * 0.01
ens.pitch[k] = fd.read_i16(1) * 0.01
ens.roll[k] = fd.read_i16(1) * 0.01
ens.salinity[k] = fd.read_i16(1)
ens.temp[k] = fd.read_i16(1) * 0.01
ens.min_preping_wait[k] = (fd.read_ui8(
3) * np.array([60, 1, .01])).sum()
ens.heading_std[k] = fd.read_ui8(1)
ens.pitch_std[k] = fd.read_ui8(1) * 0.1
ens.roll_std[k] = fd.read_ui8(1) * 0.1
ens.adc[:, k] = fd.read_i8(8)
self._nbyte = 2 + 40
def read_vel(self,):
ens = self.ensemble
self.vars_read += ['vel']
k = ens.k
ens['vel'][:, :, k] = np.array(
self.f.read_i16(4 * self.cfg['n_cells'])
).reshape((self.cfg['n_cells'], 4)) * .001
self._nbyte = 2 + 4 * self.cfg['n_cells'] * 2
def read_corr(self,):
k = self.ensemble.k
self.vars_read += ['corr']
self.ensemble.corr[:, :, k] = np.array(
self.f.read_ui8(4 * self.cfg['n_cells'])
).reshape((self.cfg['n_cells'], 4))
self._nbyte = 2 + 4 * self.cfg['n_cells']
def read_amp(self,):
k = self.ensemble.k
self.vars_read += ['amp']
self.ensemble.amp[:, :, k] = np.array(
self.f.read_ui8(4 * self.cfg['n_cells'])
).reshape((self.cfg['n_cells'], 4))
self._nbyte = 2 + 4 * self.cfg['n_cells']
def read_prcnt_gd(self,):
self.vars_read += ['prcnt_gd']
self.ensemble.prcnt_gd[:, :, self.ensemble.k] = np.array(
self.f.read_ui8(4 * self.cfg['n_cells'])
).reshape((self.cfg['n_cells'], 4))
self._nbyte = 2 + 4 * self.cfg['n_cells']
def read_status(self,):
self.vars_read += ['status']
self.ensemble.status[:, :, self.ensemble.k] = np.array(
self.f.read_ui8(4 * self.cfg['n_cells'])
).reshape((self.cfg['n_cells'], 4))
self._nbyte = 2 + 4 * self.cfg['n_cells']
def read_bottom(self,):
self.vars_read += ['dist_bt', 'vel_bt', 'corr_bt', 'amp_bt',
'prcnt_gd_bt']
fd = self.f
ens = self.ensemble
k = ens.k
cfg = self.cfg
if self._source == 2:
self.vars_read += ['latitude_gps', 'longitude_gps']
fd.seek(2, 1)
long1 = fd.read_ui16(1)
fd.seek(6, 1)
ens.latitude_gps[k] = fd.read_i32(1) * self._cfac
if ens.latitude_gps[k] == 0:
ens.latitude_gps[k] = np.NaN
else:
fd.seek(14, 1)
ens.dist_bt[:, k] = fd.read_ui16(4) * 0.01
ens.vel_bt[:, k] = fd.read_i16(4) * 0.001
ens.corr_bt[:, k] = fd.read_ui8(4)
ens.amp_bt[:, k] = fd.read_ui8(4)
ens.prcnt_gd_bt[:, k] = fd.read_ui8(4)
if self._source == 2:
fd.seek(2, 1)
ens.longitude_gps[k] = (
long1 + 65536 * fd.read_ui16(1)) * self._cfac
if ens.longitude_gps[k] > 180:
ens.longitude_gps[k] = ens.longitude_gps[k] - 360
if ens.longitude_gps[k] == 0:
ens.longitude_gps[k] = np.NaN
fd.seek(16, 1)
qual = fd.read_ui8(1)
if qual == 0:
print(' qual==%d,%f %f' % (qual,
ens.latitude_gps[k],
ens.longitude_gps[k]))
ens.latitude_gps[k] = np.NaN
ens.longitude_gps[k] = np.NaN
fd.seek(71 - 45 - 16 - 17, 1)
self._nbyte = 2 + 68
else:
fd.seek(71 - 45, 1)
self._nbyte = 2 + 68
if cfg['prog_ver'] >= 5.3:
fd.seek(78 - 71, 1)
ens.dist_bt[:, k] = ens.dist_bt[:, k] + fd.read_ui8(4) * 655.36
self._nbyte += 11
if cfg['name'] == 'wh-adcp':
if cfg['prog_ver'] >= 16.20:
fd.seek(4, 1)
self._nbyte += 4
def read_vmdas(self,):
""" Read something from VMDAS """
fd = self.f
# The raw files produced by VMDAS contain a binary navigation data
# block.
self.cfg['sourceprog'] = 'VMDAS'
ens = self.ensemble
k = ens.k
if self._source != 1 and self._debug_level >= 1:
print(' \n***** Apparently a VMDAS file \n\n')
self._source = 1
self.vars_read += ['time_gps',
'latitude_gps',
'longitude_gps',
'etime_gps',
'elatitude_gps',
'elongitude_gps',
'flags',
'ntime', ]
utim = fd.read_ui8(4)
date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])
# This byte is in hundredths of seconds (10s of milliseconds):
time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))
fd.seek(4, 1) # "PC clock offset from UTC" - clock drift in ms?
ens.time_gps[k] = tmlib.date2epoch(date + time)[0]
ens.latitude_gps[k] = fd.read_i32(1) * self._cfac
ens.longitude_gps[k] = fd.read_i32(1) * self._cfac
ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(
milliseconds=int(fd.read_ui32(1) * 10)))[0]
ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac
ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac
fd.seek(12, 1)
ens.flags[k] = fd.read_ui16(1)
fd.seek(6, 1)
utim = fd.read_ui8(4)
date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])
ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(
milliseconds=int(fd.read_ui32(1) / 10)))[0]
fd.seek(16, 1)
self._nbyte = 2 + 76
def read_winriver2(self, ):
startpos = self.f.tell()
self._winrivprob = True
self.cfg['sourceprog'] = 'WINRIVER'
ens = self.ensemble
k = ens.k
if self._source != 3 and self._debug_level >= 1:
warnings.warn(' \n***** Apparently a WINRIVER2 file\n'
'***** WARNING: Raw NMEA data '
'handler not yet fully implemented\n\n')
self._source = 3
spid = self.f.read_ui16(1)
if spid == 104:
sz = self.f.read_ui16(1)
dtime = self.f.read_f64(1)
start_string = self.f.reads(6)
_ = self.f.reads(1)
if start_string != '$GPGGA':
if self._debug_level > 1:
warnings.warn(f'Invalid GPGGA string found in ensemble {k},'
' skipping...')
return 'FAIL'
gga_time = str(self.f.reads(9))
time = tmlib.timedelta(hours=int(gga_time[0:2]),
minutes=int(gga_time[2:4]),
seconds=int(gga_time[4:6]),
milliseconds=int(gga_time[7:])*100)
clock = self.ensemble.rtc[:, :]
if clock[0, 0] < 100:
clock[0, :] += century
ens.time_gps[k] = tmlib.date2epoch(tmlib.datetime(
*clock[:3, 0]) + time)[0]
self.f.seek(1, 1)
ens.latitude_gps[k] = self.f.read_f64(1)
tcNS = self.f.reads(1)
if tcNS == 'S':
ens.latitude_gps[k] *= -1
elif tcNS != 'N':
if self._debug_level > 1:
warnings.warn(f'Invalid GPGGA string found in ensemble {k},'
' skipping...')
return 'FAIL'
ens.longitude_gps[k] = self.f.read_f64(1)
tcEW = self.f.reads(1)
if tcEW == 'W':
ens.longitude_gps[k] *= -1
elif tcEW != 'E':
if self._debug_level > 1:
warnings.warn(f'Invalid GPGGA string found in ensemble {k},'
' skipping...')
return 'FAIL'
ucqual, n_sat = self.f.read_ui8(2)
tmp = self.f.read_float(2)
ens.hdop, ens.altitude = tmp
if self.f.reads(1) != 'M':
if self._debug_level > 1:
warnings.warn(f'Invalid GPGGA string found in ensemble {k},'
' skipping...')
return 'FAIL'
ggeoid_sep = self.f.read_float(1)
if self.f.reads(1) != 'M':
if self._debug_level > 1:
warnings.warn(f'Invalid GPGGA string found in ensemble {k},'
' skipping...')
return 'FAIL'
gage = self.f.read_float(1)
gstation_id = self.f.read_ui16(1)
# 4 unknown bytes (2 reserved+2 checksum?)
# 78 bytes for GPGGA string (including \r\n)
# 2 reserved + 2 checksum
self.vars_read += ['longitude_gps', 'latitude_gps', 'time_gps']
self._nbyte = self.f.tell() - startpos + 2
if self._debug_level >= 5:
print('')
print(sz, ens.longitude_gps[k])
def read_winriver(self, nbt):
self._winrivprob = True
self.cfg['sourceprog'] = 'WINRIVER'
if self._source not in [2, 3]:
if self._debug_level >= 1:
warnings.warn('\n ***** Apparently a WINRIVER file - '
'Raw NMEA data handler not yet implemented\n\n')
self._source = 2
startpos = self.f.tell()
sz = self.f.read_ui16(1)
tmp = self.f.reads(sz)
self._nbyte = self.f.tell() - startpos + 2
def skip_Ncol(self, n_skip=1):
self.f.seek(n_skip * self.cfg['n_cells'], 1)
self._nbyte = 2 + n_skip * self.cfg['n_cells']
def skip_Nbyte(self, n_skip):
self.f.seek(n_skip, 1)
        self._nbyte = 2 + n_skip
def read_nocode(self, id):
# Skipping bytes from codes 0340-30FC, commented if needed
# hxid = hex(id)
# if hxid[2:4] == '30':
# raise Exception("")
# # I want to count the number of 1s in the middle 4 bits
# # of the 2nd two bytes.
# # 60 is a 0b00111100 mask
# nflds = (bin(int(hxid[3]) & 60).count('1') +
# bin(int(hxid[4]) & 60).count('1'))
# # I want to count the number of 1s in the highest
# # 2 bits of byte 3
# # 3 is a 0b00000011 mask:
# dfac = bin(int(hxid[3], 0) & 3).count('1')
# self.skip_Nbyte(12 * nflds * dfac)
# else:
print(' Unrecognized ID code: %0.4X\n' % id)
def remove_end(self, iens):
dat = self.outd
print(' Encountered end of file. Cleaning up data.')
for nm in self.vars_read:
_setd(dat, nm, _get(dat, nm)[..., :iens])
def finalize(self, ):
"""Remove the attributes from the data that were never loaded.
"""
dat = self.outd
for nm in set(data_defs.keys()) - self.vars_read:
_pop(dat, nm)
for nm in self.cfg:
dat['attrs'][nm] = self.cfg[nm]
dat['attrs']['fs'] = (dat['attrs']['sec_between_ping_groups'] *
dat['attrs']['pings_per_ensemble']) ** (-1)
for nm in data_defs:
shp = data_defs[nm][0]
if len(shp) and shp[0] == 'nc' and _in_group(dat, nm):
_setd(dat, nm, np.swapaxes(_get(dat, nm), 0, 1))
def __exit__(self, type, value, traceback):
self.f.close()
def __enter__(self,):
return self
| fd = self.f
hdr = self.hdr
hdr['nbyte'] = fd.read_i16(1)
if self._debug_level > 2:
print(fd.tell())
fd.seek(1, 1)
ndat = fd.read_i8(1)
hdr['dat_offsets'] = fd.read_i16(ndat)
self._nbyte = 4 + ndat * 2 |
api_op_CreateDefaultVpc.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package ec2
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/ec2/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Creates a default VPC with a size /16 IPv4 CIDR block and a default subnet in
// each Availability Zone. For more information about the components of a default
// VPC, see Default VPC and default subnets
// (https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html) in the
// Amazon Virtual Private Cloud User Guide. You cannot specify the components of
// the default VPC yourself. If you deleted your previous default VPC, you can
// create a default VPC. You cannot have more than one default VPC per Region. If
// your account supports EC2-Classic, you cannot use this action to create a
// default VPC in a Region that supports EC2-Classic. If you want a default VPC in
// a Region that supports EC2-Classic, see "I really want a default VPC for my
// existing EC2 account. Is that possible?" in the Default VPCs FAQ
// (http://aws.amazon.com/vpc/faqs/#Default_VPCs).
func (c *Client) CreateDefaultVpc(ctx context.Context, params *CreateDefaultVpcInput, optFns ...func(*Options)) (*CreateDefaultVpcOutput, error) {
if params == nil {
params = &CreateDefaultVpcInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateDefaultVpc", params, optFns, c.addOperationCreateDefaultVpcMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateDefaultVpcOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateDefaultVpcInput struct {
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have the
// required permissions, the error response is DryRunOperation. Otherwise, it is
// UnauthorizedOperation.
DryRun *bool
noSmithyDocumentSerde
}
type CreateDefaultVpcOutput struct {
// Information about the VPC.
Vpc *types.Vpc
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateDefaultVpcMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsEc2query_serializeOpCreateDefaultVpc{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsEc2query_deserializeOpCreateDefaultVpc{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err | }
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateDefaultVpc(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opCreateDefaultVpc(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "ec2",
OperationName: "CreateDefaultVpc",
}
} | |
baas_sub_account_vo.py | # coding: utf-8
"""
Hydrogen Integration API
The Hydrogen Integration API # noqa: E501
OpenAPI spec version: 1.2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class BaasSubAccountVO(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_number': 'str',
'account_status': 'str',
'message': 'str',
'nucleus_portfolio_id': 'str',
'status': 'str',
'vendor_name': 'object',
'vendor_response': 'object'
}
attribute_map = {
'account_number': 'account_number',
'account_status': 'account_status',
'message': 'message',
'nucleus_portfolio_id': 'nucleus_portfolio_id',
'status': 'status',
'vendor_name': 'vendor_name',
'vendor_response': 'vendor_response'
}
def __init__(self, account_number=None, account_status=None, message=None, nucleus_portfolio_id=None, status=None, vendor_name=None, vendor_response=None): # noqa: E501
"""BaasSubAccountVO - a model defined in Swagger""" # noqa: E501
self._account_number = None
self._account_status = None
self._message = None
self._nucleus_portfolio_id = None
self._status = None
self._vendor_name = None
self._vendor_response = None
self.discriminator = None
if account_number is not None:
self.account_number = account_number
if account_status is not None:
self.account_status = account_status
if message is not None:
self.message = message
if nucleus_portfolio_id is not None:
self.nucleus_portfolio_id = nucleus_portfolio_id
if status is not None:
self.status = status
if vendor_name is not None:
self.vendor_name = vendor_name
if vendor_response is not None:
self.vendor_response = vendor_response
@property
def account_number(self):
"""Gets the account_number of this BaasSubAccountVO. # noqa: E501
:return: The account_number of this BaasSubAccountVO. # noqa: E501
:rtype: str
"""
return self._account_number
@account_number.setter
def account_number(self, account_number):
"""Sets the account_number of this BaasSubAccountVO.
:param account_number: The account_number of this BaasSubAccountVO. # noqa: E501
:type: str
"""
self._account_number = account_number
@property
def account_status(self):
"""Gets the account_status of this BaasSubAccountVO. # noqa: E501
:return: The account_status of this BaasSubAccountVO. # noqa: E501
:rtype: str
"""
return self._account_status
@account_status.setter
def account_status(self, account_status):
"""Sets the account_status of this BaasSubAccountVO.
:param account_status: The account_status of this BaasSubAccountVO. # noqa: E501
:type: str
"""
self._account_status = account_status
@property
def | (self):
"""Gets the message of this BaasSubAccountVO. # noqa: E501
:return: The message of this BaasSubAccountVO. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this BaasSubAccountVO.
:param message: The message of this BaasSubAccountVO. # noqa: E501
:type: str
"""
self._message = message
@property
def nucleus_portfolio_id(self):
"""Gets the nucleus_portfolio_id of this BaasSubAccountVO. # noqa: E501
:return: The nucleus_portfolio_id of this BaasSubAccountVO. # noqa: E501
:rtype: str
"""
return self._nucleus_portfolio_id
@nucleus_portfolio_id.setter
def nucleus_portfolio_id(self, nucleus_portfolio_id):
"""Sets the nucleus_portfolio_id of this BaasSubAccountVO.
:param nucleus_portfolio_id: The nucleus_portfolio_id of this BaasSubAccountVO. # noqa: E501
:type: str
"""
self._nucleus_portfolio_id = nucleus_portfolio_id
@property
def status(self):
"""Gets the status of this BaasSubAccountVO. # noqa: E501
:return: The status of this BaasSubAccountVO. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this BaasSubAccountVO.
:param status: The status of this BaasSubAccountVO. # noqa: E501
:type: str
"""
self._status = status
@property
def vendor_name(self):
"""Gets the vendor_name of this BaasSubAccountVO. # noqa: E501
:return: The vendor_name of this BaasSubAccountVO. # noqa: E501
:rtype: object
"""
return self._vendor_name
@vendor_name.setter
def vendor_name(self, vendor_name):
"""Sets the vendor_name of this BaasSubAccountVO.
:param vendor_name: The vendor_name of this BaasSubAccountVO. # noqa: E501
:type: object
"""
self._vendor_name = vendor_name
@property
def vendor_response(self):
"""Gets the vendor_response of this BaasSubAccountVO. # noqa: E501
:return: The vendor_response of this BaasSubAccountVO. # noqa: E501
:rtype: object
"""
return self._vendor_response
@vendor_response.setter
def vendor_response(self, vendor_response):
"""Sets the vendor_response of this BaasSubAccountVO.
:param vendor_response: The vendor_response of this BaasSubAccountVO. # noqa: E501
:type: object
"""
self._vendor_response = vendor_response
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BaasSubAccountVO, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BaasSubAccountVO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| message |
sign.go | /*
Copyright AppsCode Inc. and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
time "time"
kmsv1alpha1 "kubeform.dev/provider-oci-api/apis/kms/v1alpha1"
versioned "kubeform.dev/provider-oci-api/client/clientset/versioned"
internalinterfaces "kubeform.dev/provider-oci-api/client/informers/externalversions/internalinterfaces"
v1alpha1 "kubeform.dev/provider-oci-api/client/listers/kms/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// SignInformer provides access to a shared informer and lister for
// Signs.
type SignInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha1.SignLister
}
type signInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewSignInformer constructs a new informer for Sign type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewSignInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredSignInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredSignInformer constructs a new informer for Sign type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredSignInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.KmsV1alpha1().Signs(namespace).List(context.TODO(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.KmsV1alpha1().Signs(namespace).Watch(context.TODO(), options)
},
},
&kmsv1alpha1.Sign{},
resyncPeriod,
indexers,
)
}
func (f *signInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredSignInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *signInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&kmsv1alpha1.Sign{}, f.defaultInformer)
}
func (f *signInformer) Lister() v1alpha1.SignLister { | return v1alpha1.NewSignLister(f.Informer().GetIndexer())
} |
|
loop-labeled-break-value.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() | {
'outer: loop {
let _: i32 = loop { break 'outer };
}
'outer: loop {
let _: i32 = loop { loop { break 'outer } };
}
} |
|
date.js | // cached date formatting instance.
// See https://github.com/hypothesis/h/issues/2820#issuecomment-166285361
let formatter;
/**
* Returns a standard human-readable representation
* of a date and time.
*/
export function format(date) {
if (typeof Intl !== 'undefined' && Intl.DateTimeFormat) {
if (!formatter) {
formatter = new Intl.DateTimeFormat(undefined, {
year: 'numeric',
month: 'short', | minute: '2-digit',
});
}
return formatter.format(date);
} else {
// IE < 11, Safari <= 9.0.
// In English, this generates the string most similar to
// the toLocaleDateString() result above.
return date.toDateString() + ' ' + date.toLocaleTimeString();
}
} | day: '2-digit',
weekday: 'long',
hour: '2-digit', |
adaptation_model_stage1.py | from models.base_model import BaseModel
import torch.nn as nn
import torch.nn.functional as F
import os, sys
import torch
import numpy as np
import itertools
from torch.autograd import Variable
from optimizers import get_optimizer
from schedulers import get_scheduler
from models.sync_batchnorm import SynchronizedBatchNorm2d, DataParallelWithCallback
from models.deeplab_multimodal import DeepLab
from models.decoder import Decoder
from models.aspp import ASPP
from models.discriminator import FCDiscriminator, FCDiscriminator_low, FCDiscriminator_out, FCDiscriminator_class
from loss import get_loss_function
from .utils import freeze_bn, GradReverse, normalisation_pooling
from metrics import runningScore
import pdb
def multimodal_merger(multi_modal_data, is_upsample=False, up_size=None):
"""
[Func Handler] multimodal_merger:
@Input Params:
multi_modal_data: dict.
examples: {
"feat_cls": feat_cls,
"output": output,
}
@Return:
merge_out: dict.
examples: {
"feat_cls": feat_cls,
"output_comb": output_comb,
"output": output,
}
"""
feat_cls = multi_modal_data['feat_cls']
# merge class features
feat_cls_cat = torch.cat(feat_cls, 1) # concat
# merge output pred
output = multi_modal_data['output']
output_comb = 0
for _i in range(len(output)):
if is_upsample:
output[_i] = F.interpolate(output[_i], size=up_size, mode='bilinear', align_corners=True)
output_comb += output[_i]
merge_out = {
'feat_cls': feat_cls,
'feat_cls_cat': feat_cls_cat,
'output_comb': output_comb,
'output': output,
}
return merge_out
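# A minimal usage sketch for multimodal_merger. The tensor shapes below are
# illustrative assumptions, not taken from this file:
#   feats = [torch.randn(2, 256, 128, 256) for _ in range(2)]
#   outs = [torch.randn(2, 19, 128, 256) for _ in range(2)]
#   merged = multimodal_merger({'feat_cls': feats, 'output': outs},
#                              is_upsample=True, up_size=(512, 1024))
#   # merged['feat_cls_cat'].shape -> (2, 512, 128, 256)
#   # merged['output_comb'].shape  -> (2, 19, 512, 1024)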
class CustomMetricsMultimodalMerger():
"""
[Func Handler] objective_vectors_multimodal_merger:
@Input Params:
multi_modal_data: dict.
examples: {
"class_threshold_group": [model.class_threshold_group[modal_idx][i], ...]
"objective_vectors_group": [model.objective_vectors_group[modal_idx][i], ...],
}
cate_idx: int. 0 ~ 18
modal_ids: list.
examples: [0, 1] or [0,]
@Return:
merge_out: dict.
examples: {
"class_threshold": class_threshold,
"objective_vectors": objective_vectors,
}
"""
def __init__(self, modal_num, category_num, model):
self.modal_num = modal_num
self.category_num = category_num
self._model = model
def initialize_model(self, model):
self._model = model
def merge_class_threshold(self, modal_ids=[]):
assert self._model is not None, "[ERROR] Deeplab Model not initialized before use!"
_class_threshold_group = self._model.class_threshold_group[modal_ids]
return torch.mean(_class_threshold_group, dim=0) # modal_num x 19 --> 19
def merge_clu_threshold(self, clu_threshold, modal_ids=[]):
_clu_threshold_group = clu_threshold[modal_ids]
return torch.mean(_clu_threshold_group, dim=0)
def merge_objective_vectors(self, modal_ids=[]):
assert self._model is not None, "[ERROR] Deeplab Model not initialized before use!"
_modal_num, _cate_num, _feat_dim = self._model.objective_vectors_group.size()
_objective_vectors = self._model.objective_vectors_group[modal_ids]
# modal_num x 19 x 256 --> 19 x modal_num x 256 --> 19 x (modal_num x 256)
assert _objective_vectors.dim() == 3, "objective_vector dimension != 3"
_objective_vectors = _objective_vectors.permute(1, 0, 2).contiguous()
return _objective_vectors.view(_cate_num, -1)
class CustomMetrics():
def __init__(self, numbers=19, modal_num=3, model=None):
self.class_numbers = numbers
self.classes_recall_thr = np.zeros([19, 3])
self.classes_recall_thr_num = np.zeros([19])
self.classes_recall_clu = np.zeros([19, 3])
self.classes_recall_clu_num = np.zeros([19])
self.running_metrics_val_threshold = runningScore(self.class_numbers)
self.running_metrics_val_clusters = runningScore(self.class_numbers)
self.clu_threshold = torch.full((modal_num + 1, 19), 2.5).cuda()
self.multimodal_merger = CustomMetricsMultimodalMerger(
modal_num=modal_num + 1, category_num=numbers, model=model
)
def update(self, feat_cls, outputs, labels, modal_ids=[0,]):
'''calculate accuracy. caring about recall but not IoU'''
batch, width, height = labels.shape
labels = labels.reshape([batch, 1, width, height]).float()
labels = F.interpolate(labels, size=feat_cls.size()[2:], mode='nearest')
outputs_threshold = outputs.clone()
outputs_threshold = F.softmax(outputs_threshold, dim=1)
#self.running_metrics_val_threshold.update(labels.cpu().numpy(), outputs_threshold.argmax(1).cpu().numpy())
self.running_metrics_val_threshold.update(labels, outputs_threshold.argmax(1))
_class_threshold_set = self.multimodal_merger.merge_class_threshold(modal_ids=modal_ids)
for i in range(19):
outputs_threshold[:, i, :, :] = torch.where(outputs_threshold[:, i, :, :] > _class_threshold_set[i], torch.Tensor([1]).cuda(), torch.Tensor([0]).cuda())
_batch, _channel, _w, _h = outputs_threshold.shape
_tmp = torch.full([_batch, 1, _w, _h], 0.2,).cuda()
_tmp = torch.cat((outputs_threshold, _tmp), 1)
threshold_arg = _tmp.argmax(1, keepdim=True)
threshold_arg[threshold_arg == 19] = 250 #ignore index
truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), threshold_arg.cpu().int().numpy())
self.classes_recall_thr[:, 0] += truth
self.classes_recall_thr[:, 2] += pred_all
self.classes_recall_thr[:, 1] += truth_all
outputs_cluster = outputs.clone()
_objective_vectors_set = self.multimodal_merger.merge_objective_vectors(modal_ids=modal_ids)
for i in range(19):
outputs_cluster[:, i, :, :] = torch.norm( _objective_vectors_set[i].reshape(-1,1,1).expand(-1,128,256) - feat_cls, 2, dim=1,)
outputs_cluster_min, outputs_cluster_arg = outputs_cluster.min(dim=1, keepdim=True)
outputs_cluster_second = outputs_cluster.scatter_(1, outputs_cluster_arg, 100)
if torch.unique(outputs_cluster_second.argmax(1) - outputs_cluster_arg.squeeze()).squeeze().item() != 0:
raise NotImplementedError('wrong when computing L2 norm!!')
outputs_cluster_secondmin, outputs_cluster_secondarg = outputs_cluster_second.min(dim=1, keepdim=True)
#self.running_metrics_val_clusters.update(labels.cpu().numpy(), outputs_cluster_arg.cpu().numpy())
self.running_metrics_val_clusters.update(labels, outputs_cluster_arg)
tmp_arg = outputs_cluster_arg.clone()
pdb.set_trace()
_clu_thresholds = self.multimodal_merger.merge_clu_threshold(self.clu_threshold, modal_ids=modal_ids)
outputs_cluster_arg[(outputs_cluster_secondmin - outputs_cluster_min) < _clu_thresholds] = 250
truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), outputs_cluster_arg.cpu().int().numpy())
self.classes_recall_clu[:, 0] += truth
self.classes_recall_clu[:, 2] += pred_all
self.classes_recall_clu[:, 1] += truth_all
return threshold_arg, outputs_cluster_arg
def calc_recall(self, gt, argmax):
truth = np.zeros([self.class_numbers])
pred_all = np.zeros([self.class_numbers])
truth_all = np.zeros([self.class_numbers])
for i in range(self.class_numbers):
truth[i] = (gt == i)[argmax == i].sum()
pred_all[i] = (argmax == i).sum()
truth_all[i] = (gt == i).sum()
pass
return truth, pred_all, truth_all
def calc_mean_Clu_recall(self, ):
return np.mean(self.classes_recall_clu[:, 0] / self.classes_recall_clu[:, 1])
def calc_mean_Thr_recall(self, ):
return np.mean(self.classes_recall_thr[:, 0] / self.classes_recall_thr[:, 1])
def reset(self, ):
self.running_metrics_val_clusters.reset()
self.running_metrics_val_threshold.reset()
self.classes_recall_clu = np.zeros([19, 3])
self.classes_recall_thr = np.zeros([19, 3])
class CustomModel():
def __init__(self, cfg, writer, logger, use_pseudo_label=False, modal_num=3):
self.cfg = cfg
self.writer = writer
self.class_numbers = 19
self.logger = logger
cfg_model = cfg['model']
self.cfg_model = cfg_model
self.best_iou = -100
self.iter = 0
self.nets = []
self.split_gpu = 0
self.default_gpu = cfg['model']['default_gpu']
self.PredNet_Dir = None
self.valid_classes = cfg['training']['valid_classes']
self.G_train = True
self.cls_feature_weight = cfg['training']['cls_feature_weight']
self.use_pseudo_label = use_pseudo_label
self.modal_num = modal_num
# cluster vectors & cuda initialization
self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()
self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()
self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()
self.class_threshold_group = torch.full([self.modal_num + 1, 19], 0.95).cuda()
#self.metrics = CustomMetrics(self.class_numbers)
self.metrics = CustomMetrics(self.class_numbers, modal_num=self.modal_num, model=self)
bn = cfg_model['bn']
if bn == 'sync_bn':
BatchNorm = SynchronizedBatchNorm2d
elif bn == 'bn':
BatchNorm = nn.BatchNorm2d
elif bn == 'gn':
BatchNorm = nn.GroupNorm
else:
raise NotImplementedError('batch norm choice {} is not implemented'.format(bn))
if use_pseudo_label:
self.PredNet = DeepLab(
num_classes=19,
backbone=cfg_model['basenet']['version'],
output_stride=16,
bn=cfg_model['bn'],
freeze_bn=True,
modal_num=self.modal_num
).cuda()
self.load_PredNet(cfg, writer, logger, dir=None, net=self.PredNet)
self.PredNet_DP = self.init_device(self.PredNet, gpu_id=self.default_gpu, whether_DP=True)
self.PredNet.eval()
self.PredNet_num = 0
self.BaseNet = DeepLab(
num_classes=19,
backbone=cfg_model['basenet']['version'],
output_stride=16,
bn=cfg_model['bn'],
freeze_bn=True,
modal_num=self.modal_num
)
logger.info('the backbone is {}'.format(cfg_model['basenet']['version']))
self.BaseNet_DP = self.init_device(self.BaseNet, gpu_id=self.default_gpu, whether_DP=True)
self.nets.extend([self.BaseNet])
self.nets_DP = [self.BaseNet_DP]
# Discriminator
self.SOURCE_LABEL = 0
self.TARGET_LABEL = 1
self.DNets = []
self.DNets_DP = []
for _ in range(self.modal_num+1):
_net_d = FCDiscriminator(inplanes=19)
self.DNets.append(_net_d)
_net_d_DP = self.init_device(_net_d, gpu_id=self.default_gpu, whether_DP=True)
self.DNets_DP.append(_net_d_DP)
self.nets.extend(self.DNets)
self.nets_DP.extend(self.DNets_DP)
self.optimizers = []
self.schedulers = []
optimizer_cls = torch.optim.SGD
optimizer_params = {k:v for k, v in cfg['training']['optimizer'].items()
if k != 'name'}
optimizer_cls_D = torch.optim.Adam
optimizer_params_D = {k:v for k, v in cfg['training']['optimizer_D'].items()
if k != 'name'}
if self.use_pseudo_label:
self.BaseOpti = optimizer_cls(self.BaseNet.parameters(), **optimizer_params)
else:
self.BaseOpti = optimizer_cls(self.BaseNet.optim_parameters(cfg['training']['optimizer']['lr']), **optimizer_params)
self.optimizers.extend([self.BaseOpti])
self.DiscOptis = []
for _d_net in self.DNets:
self.DiscOptis.append(
optimizer_cls_D(_d_net.parameters(), **optimizer_params_D)
)
self.optimizers.extend(self.DiscOptis)
self.schedulers = []
if self.use_pseudo_label:
self.BaseSchedule = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])
self.schedulers.extend([self.BaseSchedule])
else:
"""BaseSchedule detail see FUNC: scheduler_step()"""
self.learning_rate = cfg['training']['optimizer']['lr']
self.gamma = cfg['training']['lr_schedule']['gamma']
self.num_steps = cfg['training']['lr_schedule']['max_iter']
self._BaseSchedule_nouse = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])
self.schedulers.extend([self._BaseSchedule_nouse])
self.DiscSchedules = []
for _disc_opt in self.DiscOptis:
self.DiscSchedules.append(
get_scheduler(_disc_opt, cfg['training']['lr_schedule'])
)
self.schedulers.extend(self.DiscSchedules)
self.setup(cfg, writer, logger)
self.adv_source_label = 0
self.adv_target_label = 1
self.bceloss = nn.BCEWithLogitsLoss(reduce=False)
self.loss_fn = get_loss_function(cfg)
self.mseloss = nn.MSELoss()
self.l1loss = nn.L1Loss()
self.smoothloss = nn.SmoothL1Loss()
self.triplet_loss = nn.TripletMarginLoss()
def create_PredNet(self,):
ss = DeepLab(
num_classes=19,
backbone=self.cfg_model['basenet']['version'],
output_stride=16,
bn=self.cfg_model['bn'],
freeze_bn=True,
modal_num=self.modal_num,
).cuda()
ss.eval()
return ss
def setup(self, cfg, writer, logger):
'''
set optimizer and load pretrained model
'''
for net in self.nets:
# name = net.__class__.__name__
self.init_weights(cfg['model']['init'], logger, net)
print("Initializition completed")
if hasattr(net, '_load_pretrained_model') and cfg['model']['pretrained']:
print("loading pretrained model for {}".format(net.__class__.__name__))
net._load_pretrained_model()
'''load pretrained model
'''
if cfg['training']['resume_flag']:
self.load_nets(cfg, writer, logger)
pass
def lr_poly(self):
return self.learning_rate * ((1 - float(self.iter) / self.num_steps) ** (self.gamma))
def adjust_basenet_learning_rate(self):
lr = self.lr_poly()
self.BaseOpti.param_groups[0]['lr'] = lr
if len(self.BaseOpti.param_groups) > 1:
self.BaseOpti.param_groups[1]['lr'] = lr * 10
def forward(self, input):
feat, feat_low, att_mask, feat_cls, output = self.BaseNet_DP(input)
return feat, feat_low, feat_cls, output
def forward_Up(self, input):
feat, feat_low, feat_cls, outputs = self.forward(input)
output = F.interpolate(outputs[-1], size=input.size()[2:], mode='bilinear', align_corners=True)
return feat, feat_low, feat_cls, output
def PredNet_Forward(self, input):
with torch.no_grad():
_, _, att_mask, feat_cls, output_result = self.PredNet_DP(input)
return _, _, feat_cls, output_result
def calculate_mean_vector(self, feat_cls, outputs, labels, ):
outputs_softmax = F.softmax(outputs, dim=1)
outputs_argmax = outputs_softmax.argmax(dim=1, keepdim=True)
outputs_argmax = self.process_label(outputs_argmax.float())
labels_expanded = self.process_label(labels)
outputs_pred = labels_expanded * outputs_argmax
scale_factor = F.adaptive_avg_pool2d(outputs_pred, 1)
vectors = []
ids = []
for n in range(feat_cls.size()[0]):
for t in range(self.class_numbers):
if scale_factor[n][t].item()==0:
continue
if (outputs_pred[n][t] > 0).sum() < 10:
continue
s = feat_cls[n] * outputs_pred[n][t]
scale = torch.sum(outputs_pred[n][t]) / labels.shape[2] / labels.shape[3] * 2
s = normalisation_pooling()(s, scale)
s = F.adaptive_avg_pool2d(s, 1) / scale_factor[n][t]
vectors.append(s)
ids.append(t)
return vectors, ids
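# Sketch of what calculate_mean_vector returns (shapes illustrative): if classes
# 0 and 5 are both predicted and present in the downsampled labels for a sample,
# the lists hold two (256, 1, 1) mean feature vectors and ids == [0, 5]; classes
# covering fewer than 10 pixels are skipped.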
def step(self, source_x, source_label, source_modal_ids, target_x, target_label, target_modal_ids, use_pseudo_loss=False):
assert len(source_modal_ids) == source_x.size(0), "modal_ids' batchsize != source_x's batchsize"
_, _, source_feat_cls, source_output = self.forward(input=source_x)
"""source_output: [B x 19 x W x H, ...]
select modal-branch output in each batchsize
Specific-modal output
"""
source_output_modal_k = torch.stack(
[
source_output[_modal_i][_batch_i]
for _batch_i, _modal_i in enumerate(source_modal_ids)
],
dim=0,
)
# attention output & specific-modal output
source_output_comb = torch.cat([source_output_modal_k, source_output[-1]], dim=0)
source_label_comb = torch.cat([source_label, source_label.clone()], dim=0)
source_outputUp = F.interpolate(source_output_comb, size=source_x.size()[-2:], mode='bilinear', align_corners=True)
loss_GTA = self.loss_fn(input=source_outputUp, target=source_label_comb)
#self.PredNet.eval()
# adversarial loss
# -----------------------------
"""Generator (segmentation)"""
# -----------------------------
# On Source Domain
loss_adv = torch.Tensor([0]).cuda()
_batch_size = 0
_, _, _, target_output = self.forward(target_x)
target_modal_ids_tensor = torch.Tensor(target_modal_ids).cuda()
for t_out, _d_net_DP, _d_net, modal_idx in zip(target_output, self.DNets_DP, self.DNets, range(len(target_output))):
# set grad false
self.set_requires_grad(self.logger, _d_net, requires_grad = False)
# true/false discriminator
t_D_out = _d_net_DP(F.softmax(t_out))
#source_modal_ids
loss_temp = torch.mean(self.bceloss(
t_D_out,
torch.FloatTensor(t_D_out.data.size()).fill_(1.0).cuda()
), [1,2,3])
if modal_idx >= self.modal_num:
loss_adv += torch.mean(loss_temp)
elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:
loss_adv += 0.0
else:
loss_adv += torch.mean(torch.masked_select(loss_temp, target_modal_ids_tensor==modal_idx))
_batch_size += t_out.size(0)
#loss_adv /= _batch_size
loss_adv *= self.cfg['training']['loss_adv_lambda']
loss_G = torch.Tensor([0]).cuda()
loss_G = loss_G + loss_GTA + loss_adv
self.BaseOpti.zero_grad()
if loss_G.item() != 0:
loss_G.backward()
self.BaseOpti.step()
# -----------------------------
"""Discriminator """
# -----------------------------
_batch_size = 0
loss_D_comb = torch.Tensor([0]).cuda()
source_modal_ids_tensor = torch.Tensor(source_modal_ids).cuda()
for s_out, t_out, _d_net_DP, _d_net, _disc_opt, modal_idx in zip(source_output, target_output, self.DNets_DP, self.DNets, self.DiscOptis, range(len(source_output))):
self.set_requires_grad(self.logger, _d_net, requires_grad = True)
_batch_size = 0
loss_D = torch.Tensor([0]).cuda()
# source domain
s_D_out = _d_net_DP(F.softmax(s_out.detach()))
loss_temp_s = torch.mean(self.bceloss(
s_D_out,
torch.FloatTensor(s_D_out.data.size()).fill_(1.0).cuda()
), [1,2,3])
if modal_idx >= self.modal_num:
loss_D += torch.mean(loss_temp_s)
elif torch.mean(torch.as_tensor((modal_idx==source_modal_ids_tensor), dtype=torch.float32)) == 0:
loss_D += 0.0
else:
loss_D += torch.mean(torch.masked_select(loss_temp_s, source_modal_ids_tensor==modal_idx))
# target domain
_batch_size += (s_out.size(0) + t_out.size(0))
t_D_out = _d_net_DP(F.softmax(t_out.detach()))
loss_temp_t = torch.mean(self.bceloss(
t_D_out,
torch.FloatTensor(t_D_out.data.size()).fill_(0.0).cuda()
), [1,2,3])
if modal_idx >= self.modal_num:
loss_D += torch.mean(loss_temp_t)
elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:
loss_D += 0.0
else:
loss_D += torch.mean(torch.masked_select(loss_temp_t, target_modal_ids_tensor==modal_idx))
loss_D *= self.cfg['training']['loss_adv_lambda']*0.5
loss_D_comb += loss_D
_disc_opt.zero_grad()
if loss_D_comb.item() != 0:
loss_D_comb.backward()
_disc_opt.step()
return loss_GTA, loss_adv, loss_D_comb
def process_label(self, label):
batch, channel, w, h = label.size()
pred1 = torch.zeros(batch, 20, w, h).cuda()
id = torch.where(label < 19, label, torch.Tensor([19]).cuda())
pred1 = pred1.scatter_(1, id.long(), 1)
return pred1
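# Shape sketch for process_label (sizes illustrative): a (B, 1, H, W) label map
# with class ids in [0, 18] plus an ignore value (e.g. 250) becomes a
# (B, 20, H, W) one-hot tensor, where channel 19 collects every out-of-range /
# ignore pixel.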
def class_vectors_alignment(self, ids, vectors, modal_ids=[0,]):
#loss = torch.Tensor([0]).cuda(self.default_gpu)
loss = torch.Tensor([0]).cuda()
"""construct category objective vectors"""
# objective_vectors_group 2 x 19 x 256 --> 19 x 512
_objective_vectors_set = self.metrics.multimodal_merger.merge_objective_vectors(modal_ids=modal_ids)
for i in range(len(ids)):
if ids[i] not in self.valid_classes:
continue
new_loss = self.smoothloss(vectors[i].squeeze().cuda(), _objective_vectors_set[ids[i]])
while (new_loss.item() > 5):
new_loss = new_loss / 10
loss = loss + new_loss
loss = loss / len(ids) * 10
return loss
def freeze_bn_apply(self):
for net in self.nets:
net.apply(freeze_bn)
for net in self.nets_DP:
net.apply(freeze_bn)
def scheduler_step(self):
if self.use_pseudo_label:
for scheduler in self.schedulers:
scheduler.step()
else:
"""skipped _BaseScheduler_nouse"""
for scheduler in self.schedulers[1:]:
scheduler.step()
# baseNet scheduler
self.adjust_basenet_learning_rate()
def optimizer_zerograd(self):
for optimizer in self.optimizers:
optimizer.zero_grad()
def optimizer_step(self):
for opt in self.optimizers:
opt.step()
def init_device(self, net, gpu_id=None, whether_DP=False):
gpu_id = gpu_id or self.default_gpu
device = torch.device("cuda:{}".format(gpu_id) if torch.cuda.is_available() else 'cpu')
net = net.to(device)
# if torch.cuda.is_available():
if whether_DP:
net = DataParallelWithCallback(net, device_ids=range(torch.cuda.device_count()))
return net
def eval(self, net=None, logger=None):
"""Make specific models eval mode during test time"""
if net == None:
for net in self.nets:
net.eval()
for net in self.nets_DP:
net.eval()
if logger!=None:
logger.info("Successfully set the model eval mode")
else:
net.eval()
if logger!=None:
logger("Successfully set {} eval mode".format(net.__class__.__name__))
return
def train(self, net=None, logger=None):
if net==None:
for net in self.nets:
net.train()
for net in self.nets_DP:
net.train()
else:
net.train()
return
def set_requires_grad(self, logger, net, requires_grad = False):
"""Set requires_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
net (BaseModel) -- the network which will be operated on
requires_grad (bool) -- whether the networks require gradients or not
"""
for parameter in net.parameters():
parameter.requires_grad = requires_grad
def set_requires_grad_layer(self, logger, net, layer_type='batchnorm', requires_grad=False):
''' set whether layers of the given type require grad
'''
# print('Warning: all the BatchNorm params are fixed!')
# logger.info('Warning: all the BatchNorm params are fixed!')
for net in self.nets:
for _i in net.modules():
if _i.__class__.__name__.lower().find(layer_type.lower()) != -1:
_i.weight.requires_grad = requires_grad
return
def init_weights(self, cfg, logger, net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
init_type = cfg.get('init_type', init_type)
init_gain = cfg.get('init_gain', init_gain)
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias.data, 0.0)
elif isinstance(m, SynchronizedBatchNorm2d) or classname.find('BatchNorm2d') != -1 \
or isinstance(m, nn.GroupNorm):
m.weight.data.fill_(1)
m.bias.data.zero_() # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
print('initialize {} with {}'.format(init_type, net.__class__.__name__))
logger.info('initialize {} with {}'.format(init_type, net.__class__.__name__))
net.apply(init_func) # apply the initialization function <init_func>
pass
def adaptive_load_nets(self, net, model_weight):
model_dict = net.state_dict()
pretrained_dict = {k : v for k, v in model_weight.items() if k in model_dict}
# print("[INFO] Pretrained dict:", pretrained_dict.keys())
model_dict.update(pretrained_dict)
net.load_state_dict(model_dict)
def load_nets(self, cfg, writer, logger): # load pretrained weights on the net
if os.path.isfile(cfg['training']['resume']):
logger.info(
"Loading model and optimizer from checkpoint '{}'".format(cfg['training']['resume'])
)
checkpoint = torch.load(cfg['training']['resume'])
_k = -1
net_state_no = {}
for net in self.nets:
name = net.__class__.__name__
if name not in net_state_no:
net_state_no[name] = 0
else:
net_state_no[name] += 1
_k += 1
if checkpoint.get(name) == None:
continue
if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
continue
if isinstance(checkpoint[name], list):
self.adaptive_load_nets(net, checkpoint[name][net_state_no[name]]["model_state"])
else:
print("*****************************************")
print("[WARNING] Using depreciated load version! Model {}".format(name))
print("*****************************************")
self.adaptive_load_nets(net, checkpoint[name]["model_state"])
if cfg['training']['optimizer_resume']:
if isinstance(checkpoint[name], list):
self.adaptive_load_nets(self.optimizers[_k], checkpoint[name][net_state_no[name]]["optimizer_state"])
self.adaptive_load_nets(self.schedulers[_k], checkpoint[name][net_state_no[name]]["scheduler_state"])
else:
self.adaptive_load_nets(self.optimizers[_k], checkpoint[name]["optimizer_state"])
self.adaptive_load_nets(self.schedulers[_k], checkpoint[name]["scheduler_state"])
self.iter = checkpoint["iter"]
#self.best_iou = checkpoint['best_iou']
logger.info(
"Loaded checkpoint '{}' (iter {})".format(
cfg['training']['resume'], checkpoint["iter"]
)
)
else:
raise Exception("No checkpoint found at '{}'".format(cfg['training']['resume']))
def | (self, cfg, writer, logger, dir=None, net=None): # load pretrained weights on the net
dir = dir or cfg['training']['Pred_resume']
best_iou = 0
if os.path.isfile(dir):
logger.info(
"Loading model and optimizer from checkpoint '{}'".format(dir)
)
checkpoint = torch.load(dir)
name = net.__class__.__name__
if checkpoint.get(name) == None:
return
if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
return
if isinstance(checkpoint[name], list):
self.adaptive_load_nets(net, checkpoint[name][0]["model_state"])
else:
self.adaptive_load_nets(net, checkpoint[name]["model_state"])
iter = checkpoint["iter"]
best_iou = checkpoint['best_iou']
logger.info(
"Loaded checkpoint '{}' (iter {}) (best iou {}) for PredNet".format(
dir, checkpoint["iter"], best_iou
)
)
else:
raise Exception("No checkpoint found at '{}'".format(dir))
if hasattr(net, 'best_iou'):
#net.best_iou = best_iou
pass
return best_iou
def set_optimizer(self, optimizer): #set optimizer to all nets
pass
def reset_objective_SingleVector(self,):
self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()
self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()
self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()
def update_objective_SingleVector(self, vectors, vectors_num, name='moving_average'):
#vector = vector.squeeze().detach()
if torch.sum(vectors) == 0:
return
if name == 'moving_average':
self.objective_vectors_group = self.objective_vectors_group * 0.9999 + 0.0001 * vectors
self.objective_vectors_num_group += vectors_num
self.objective_vectors_num_group = torch.clamp(self.objective_vectors_num_group, max=3000)
elif name == 'mean':
self.objective_vectors_group = self.objective_vectors_group * self.objective_vectors_num_group + vectors
self.objective_vectors_num_group += vectors_num
self.objective_vectors_group = self.objective_vectors_group / self.objective_vectors_num_group
self.objective_vectors_num_group = torch.clamp(self.objective_vectors_num_group, max=3000)
else:
raise NotImplementedError('no such updating way of objective vectors {}'.format(name))
def grad_reverse(x):
return GradReverse()(x)
| load_PredNet |
net-listeners-callout.py | #!/usr/bin/env python3
""" Output a colorized list of listening addresses with owners.
This tool parses the output of ``netstat`` directly to obtain the list
of IPv4 and IPv6 addresses listening on tcp, tcp6, udp, and udp6 ports
also with pids of processes responsible for the listening.
The downside here is that to obtain the full command name (netstat truncates
at 20 characters), we need to call ``ps`` again for each pid we have,
which means even more external commands.
Must be run as root because netstat needs root for pid-to-socket mappings.
See ``net-listeners-proc.py`` for a much faster implementation; it
parses /proc directly and doesn't need to call out to external processes."""
import subprocess
import re
NETSTAT_LISTENING = "/bin/netstat --numeric-hosts --listening --program --tcp --udp --inet --inet6"
TERMINAL_WIDTH = "/usr/bin/tput cols" # could also be "stty size"
class Color:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
COLOR_HEADER = Color.HEADER
COLOR_OKAY = Color.OKBLUE
COLOR_WARNING = Color.FAIL
COLOR_END = Color.END
# This should capture:
# 127.0.0.0/8
# 192.168.0.0/16
# 10.0.0.0/8
# 169.254.0.0/16
# 172.16.0.0/12
# ::1
# fe80::/10
# fc00::/7
# fd00::/8
NON_ROUTABLE_REGEX = r"""^((127\.) |
(192\.168\.) |
(10\.) |
(169\.254\.) |
(172\.1[6-9]\.) |
(172\.2[0-9]\.) |
(172\.3[0-1]\.) |
(::1) |
([fF][eE]80) |
([fF][cCdD]))"""
likelyLocalOnly = re.compile(NON_ROUTABLE_REGEX, re.VERBOSE)
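# Quick illustration of the intent (addresses are made-up examples):
#   likelyLocalOnly.match("127.0.0.1")    -> match  (loopback, shown in the "okay" color)
#   likelyLocalOnly.match("192.168.1.10") -> match  (RFC1918, shown in the "okay" color)
#   likelyLocalOnly.match("0.0.0.0")      -> None   (possibly world-visible, warning color)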
def run(thing):
""" Run any string as an async command invocation. """
# We don't use subprocess.check_output because we want to run all
# processes async
return subprocess.Popen(thing.split(), stdout=subprocess.PIPE)
def | (ranCommand):
""" Return array of rows split by newline from previous invocation. """
stdout, stderr = ranCommand.communicate()
return stdout.decode('utf-8').strip().splitlines()
def checkListenersSystemTools():
# We intentionally don't check the output of these until after they
# all run so they'll likely run in parallel without blocking.
listening = run(NETSTAT_LISTENING)
terminalWidth = run(TERMINAL_WIDTH)
listening = readOutput(listening)
try:
cols = readOutput(terminalWidth)[0]
cols = int(cols)
except BaseException:
cols = 80
# Remove first two header lines
listening = listening[2:]
# This is slightly ugly, but 'udp' has one column missing in the
# middle so our pid indices don't line up.
grandResult = []
for line in listening:
parts = line.split()
# "udp" rows have one less column in the middle, so
# our pid offset is lower than "tcp" rows:
if parts[0].startswith("udp"):
pid = parts[5].split('/')[0]
else:
pid = parts[6].split('/')[0]
proto = parts[0]
addr = parts[3]
grandResult.append([int(pid), addr, proto])
# Build map of pids to names...
# This dict is pid -> completedCommand
processes = {}
for row in grandResult:
pid = row[0]
# Don't do redundant work.
# We don't expect pid names to change across calls.
if pid not in processes:
processes[pid] = run(f"/bin/ps -p {pid} -o command=")
# Now generate the result dict of pid -> pidName
processName = {}
for pid in processes:
processName[pid] = readOutput(processes[pid])[0]
# Print our own custom output header...
proto = "Proto"
addr = "Listening"
pid = "PID"
process = "Process"
print(f"{COLOR_HEADER}{proto:^5} {addr:^25} {pid:>5} {process:^30}")
# Sort results by pid...
for row in sorted(grandResult, key=lambda x: x[0]):
pid = row[0]
addr = row[1]
proto = row[2]
process = processName[pid]
# If IP address looks like it could be visible to the world,
# throw up a color.
# Note: due to port forwarding and NAT and other issues,
# this clearly isn't exhaustive.
if re.match(likelyLocalOnly, addr):
colorNotice = COLOR_OKAY
else:
colorNotice = COLOR_WARNING
output = f"{colorNotice}{proto:5} {addr:25} {pid:5} {process}"
# Be a polite terminal citizen by limiting our width to user's width
# (colors take up non-visible space, so add it to our col count)
print(output[:cols + len(colorNotice)])
print(COLOR_END)
if __name__ == "__main__":
checkListenersSystemTools()
| readOutput |
queries.py | from click import argument
from flask.cli import AppGroup
from sqlalchemy.orm.exc import NoResultFound
manager = AppGroup(help="Queries management commands.")
@manager.command()
@argument("query_id")
@argument("tag")
def add_tag(query_id, tag):
|
@manager.command()
@argument("query_id")
@argument("tag")
def remove_tag(query_id, tag):
from redash import models
query_id = int(query_id)
try:
q = models.Query.get_by_id(query_id)
except NoResultFound:
print("Query not found.")
exit(1)
tags = q.tags
if tags is None:
print("Tag is empty.")
exit(1)
try:
tags.remove(tag)
except ValueError:
print("Tag not found.")
exit(1)
q.tags = list(set(tags))
models.db.session.add(q)
models.db.session.commit()
print("Tag removed.")
| from redash import models
query_id = int(query_id)
try:
q = models.Query.get_by_id(query_id)
except NoResultFound:
print("Query not found.")
exit(1)
tags = q.tags
if tags is None:
tags = []
tags.append(tag)
q.tags = list(set(tags))
models.db.session.add(q)
models.db.session.commit()
print("Tag added.") |
lib.rs | #![warn(clippy::all)]
extern crate curie;
extern crate fastobo;
extern crate horned_owl;
#[macro_use]
extern crate lazy_static;
extern crate ureq;
pub mod constants;
mod into_owl;
mod imports;
mod utils;
pub use into_owl::IntoOwl;
pub use imports::ImportData;
// ---------------------------------------------------------------------------
/// Create a [`curie::PrefixMapping`] instance with default prefixes declared.
///
/// The OBO Format 1.4 reference states that any OBO document translated into
/// OWL has the following prefixes declared implicitly: `xsd`, `owl`,
/// `oboInOwl`, `xml`, `rdf`, `dc` and `rdfs`.
///
/// [`curie::PrefixMapping`]: https://docs.rs/curie/0.0.8/curie/struct.PrefixMapping.html
pub fn | () -> curie::PrefixMapping {
let mut prefixes = curie::PrefixMapping::default();
prefixes.add_prefix("xsd", constants::uri::XSD).unwrap();
prefixes.add_prefix("owl", constants::uri::OWL).unwrap();
prefixes.add_prefix("obo", constants::uri::OBO).unwrap();
prefixes.add_prefix("oboInOwl", constants::uri::OBO_IN_OWL).unwrap();
prefixes.add_prefix("xml", constants::uri::XML).unwrap();
prefixes.add_prefix("rdf", constants::uri::RDF).unwrap();
prefixes.add_prefix("dc", constants::uri::DC).unwrap();
prefixes.add_prefix("rdfs", constants::uri::RDFS).unwrap();
prefixes
}
| obo_prefixes |
user-options.component.ts | import { Component, OnInit } from '@angular/core';
import { Router } from '@angular/router';
import { Hero } from '../hero';
import { HeroService } from '../hero.service';
@Component({
selector: 'user-options',
templateUrl: 'app/user-options/user-options.component.html',
styleUrls: ['app/user-options/user-options.component.css']
})
export class | implements OnInit {
constructor(
private router: Router,
private heroService: HeroService) {
}
ngOnInit() {
}
} | UserOptionsComponent |
features_test.go | package engine
import (
"testing"
"time"
"github.com/keybase/client/go/libkb"
"github.com/keybase/clockwork"
"github.com/stretchr/testify/require"
)
func TestFeatureFlagSet(t *testing.T) | {
tc := SetupEngineTest(t, "features")
defer tc.Cleanup()
fakeClock := clockwork.NewFakeClockAt(time.Now())
tc.G.SetClock(fakeClock)
m := NewMetaContextForTest(tc)
CreateAndSignupFakeUserPaper(tc, "feat")
on, err := tc.G.FeatureFlags.EnabledWithError(m, libkb.FeatureFTL)
require.NoError(t, err)
require.True(t, on)
_, err = tc.G.API.Post(libkb.APIArg{
Endpoint: "test/feature",
SessionType: libkb.APISessionTypeREQUIRED,
Args: libkb.HTTPArgs{
"feature": libkb.S{Val: string(libkb.FeatureFTL)},
"value": libkb.I{Val: 0},
"cache_sec": libkb.I{Val: 100},
},
MetaContext: m,
})
require.NoError(t, err)
// Still on, since it's still cached.
on, err = tc.G.FeatureFlags.EnabledWithError(m, libkb.FeatureFTL)
require.NoError(t, err)
require.True(t, on)
fakeClock.Advance(time.Hour * 10)
for i := 0; i < 2; i++ {
on, err = tc.G.FeatureFlags.EnabledWithError(m, libkb.FeatureFTL)
require.NoError(t, err)
require.False(t, on)
}
} |
|
books.py | import requests
class Books(object):
BASE_URL = \
'https://www.googleapis.com/books/v1/volumes?' \
'q="{}"&projection={}&printType={}&langRestrict={}&maxResults={}'
MAX_RESULTS = 1
PRINT_TYPE = 'books'
PROJECTION = 'full'
LANGUAGE = 'en'
# SEARCH_FIELDS = {
# "title": "intitle",
# "author": "inauthor",
# "publisher": "inpublisher",
# "subject": "subject",
# "isbn": "isbn",
# }
BOOK_FIELDS = [
'title',
'authors',
'categories',
'description',
'imageLinks'
]
def __init__(self):
pass
@staticmethod
def get_attribute(data, attribute, default_value):
return data.get(attribute) or default_value
def process_search(self, data):
book = {}
for field in self.BOOK_FIELDS:
book[field] = self.get_attribute(data, field, '')
if (field == 'authors' or field == 'categories') and book[field] != '':
if len(book[field]) > 1:
book[field] = ', '.join(book[field])
else:
book[field] = book[field][0]
if field == 'imageLinks' and book[field] != '':
book[field] = self.get_attribute(book[field], 'thumbnail', '')
return book
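# Illustrative walk-through of process_search (input values are made up):
#   process_search({'title': 'Dune', 'authors': ['Frank Herbert'],
#                   'imageLinks': {'thumbnail': 'http://example.org/t.jpg'}})
#   -> {'title': 'Dune', 'authors': 'Frank Herbert', 'categories': '',
#       'description': '', 'imageLinks': 'http://example.org/t.jpg'}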
def search(self, field, query):
| """
Search book on Google Books API
Parameters
----------
field
Search field
query
Value to be searched
Returns
-------
JSON
Search results in JSON format if successful, None o/w
"""
if field == 'search':
url = self.BASE_URL.format(query.replace(' ', '+'),
self.PROJECTION,
self.PRINT_TYPE,
self.LANGUAGE,
self.MAX_RESULTS)
else:
return None
try:
response = requests.get(url)
if response.status_code == 200:
response_json = response.json()
if response_json['totalItems'] != 0:
return self.process_search(response_json['items'][0]['volumeInfo'])
else:
return None
except requests.exceptions.RequestException as e:
print(e)
return None |
|
group.go | package models
import (
"errors"
"net/mail"
"time"
"github.com/jinzhu/gorm"
)
// Group contains the fields needed for a user -> group mapping
// Groups contain 1..* Targets
type Group struct {
Id int64 `json:"id"`
UserId int64 `json:"-"`
Name string `json:"name"`
ModifiedDate time.Time `json:"modified_date"`
Targets []Target `json:"targets" sql:"-"`
}
// GroupTarget is used for a many-to-many relationship between 1..* Groups and 1..* Targets
type GroupTarget struct {
GroupId int64 `json:"-"`
TargetId int64 `json:"-"`
}
// Target contains the fields needed for individual targets specified by the user
// Groups contain 1..* Targets, but 1 Target may belong to 1..* Groups
type Target struct {
Id int64 `json:"-"`
FirstName string `json:"first_name"`
LastName string `json:"last_name"`
Email string `json:"email"`
Position string `json:"position"`
}
// ErrNoEmailSpecified is thrown when no email is specified for the Target
var ErrEmailNotSpecified = errors.New("No email address specified")
// ErrGroupNameNotSpecified is thrown when a group name is not specified
var ErrGroupNameNotSpecified = errors.New("Group name not specified")
// ErrNoTargetsSpecified is thrown when no targets are specified by the user
var ErrNoTargetsSpecified = errors.New("No targets specified")
// Validate performs validation on a group given by the user
func (g *Group) Validate() error {
switch {
case g.Name == "":
return ErrGroupNameNotSpecified
case len(g.Targets) == 0:
return ErrNoTargetsSpecified
}
return nil
}
// GetGroups returns the groups owned by the given user.
func GetGroups(uid int64) ([]Group, error) {
gs := []Group{}
err := db.Where("user_id=?", uid).Find(&gs).Error
if err != nil {
Logger.Println(err)
return gs, err
}
for i, _ := range gs {
gs[i].Targets, err = GetTargets(gs[i].Id)
if err != nil {
Logger.Println(err)
}
}
return gs, nil
}
// GetGroup returns the group, if it exists, specified by the given id and user_id.
func GetGroup(id int64, uid int64) (Group, error) {
g := Group{}
err := db.Where("user_id=? and id=?", uid, id).Find(&g).Error
if err != nil {
Logger.Println(err)
return g, err
}
g.Targets, err = GetTargets(g.Id)
if err != nil {
Logger.Println(err)
}
return g, nil
}
// GetGroupByName returns the group, if it exists, specified by the given name and user_id.
func GetGroupByName(n string, uid int64) (Group, error) {
g := Group{}
err := db.Where("user_id=? and name=?", uid, n).Find(&g).Error
if err != nil {
Logger.Println(err)
return g, err
}
g.Targets, err = GetTargets(g.Id)
if err != nil |
return g, err
}
// PostGroup creates a new group in the database.
func PostGroup(g *Group) error {
if err := g.Validate(); err != nil {
return err
}
// Insert the group into the DB
err = db.Save(g).Error
if err != nil {
Logger.Println(err)
return err
}
for _, t := range g.Targets {
insertTargetIntoGroup(t, g.Id)
}
return nil
}
// PutGroup updates the given group if found in the database.
func PutGroup(g *Group) error {
if err := g.Validate(); err != nil {
return err
}
ts := []Target{}
ts, err = GetTargets(g.Id)
if err != nil {
Logger.Printf("Error getting targets from group ID: %d", g.Id)
return err
}
// Enumerate through, removing any entries that are no longer in the group
// For every target in the database
tExists := false
for _, t := range ts {
tExists = false
// Is the target still in the group?
for _, nt := range g.Targets {
if t.Email == nt.Email {
tExists = true
break
}
}
// If the target does not exist in the group any longer, we delete it
if !tExists {
err = db.Where("group_id=? and target_id=?", g.Id, t.Id).Delete(&GroupTarget{}).Error
if err != nil {
Logger.Printf("Error deleting email %s\n", t.Email)
}
}
}
// Insert any entries that are not in the database
// For every target in the new group
for _, nt := range g.Targets {
// Check and see if the target already exists in the db
tExists = false
for _, t := range ts {
if t.Email == nt.Email {
tExists = true
break
}
}
// If the target is not in the db, we add it
if !tExists {
insertTargetIntoGroup(nt, g.Id)
}
}
err = db.Save(g).Error
/*_, err = Conn.Update(g)*/
if err != nil {
Logger.Println(err)
return err
}
return nil
}
// DeleteGroup deletes a given group by group ID and user ID
func DeleteGroup(g *Group) error {
// Delete all the group_targets entries for this group
err := db.Where("group_id=?", g.Id).Delete(&GroupTarget{}).Error
if err != nil {
Logger.Println(err)
return err
}
// Delete the group itself
err = db.Delete(g).Error
if err != nil {
Logger.Println(err)
return err
}
return err
}
func insertTargetIntoGroup(t Target, gid int64) error {
if _, err = mail.ParseAddress(t.Email); err != nil {
Logger.Printf("Invalid email %s\n", t.Email)
return err
}
trans := db.Begin()
trans.Where(t).FirstOrCreate(&t)
if err != nil {
Logger.Printf("Error adding target: %s\n", t.Email)
return err
}
err = trans.Where("group_id=? and target_id=?", gid, t.Id).Find(&GroupTarget{}).Error
if err == gorm.ErrRecordNotFound {
err = trans.Save(&GroupTarget{GroupId: gid, TargetId: t.Id}).Error
if err != nil {
Logger.Println(err)
return err
}
}
if err != nil {
Logger.Printf("Error adding many-many mapping for %s\n", t.Email)
return err
}
err = trans.Commit().Error
if err != nil {
Logger.Printf("Error committing db changes\n")
return err
}
return nil
}
// GetTargets performs a many-to-many select to get all the Targets for a Group
func GetTargets(gid int64) ([]Target, error) {
ts := []Target{}
err := db.Table("targets").Select("targets.id, targets.email, targets.first_name, targets.last_name, targets.position").Joins("left join group_targets gt ON targets.id = gt.target_id").Where("gt.group_id=?", gid).Scan(&ts).Error
return ts, err
}
| {
Logger.Println(err)
} |
callbacks_test.go | package gorm_test
import (
"errors"
"reflect"
"testing"
"github.com/carloshlemos/gorm"
)
func (s *Product) BeforeCreate() (err error) {
if s.Code == "Invalid" {
err = errors.New("invalid product")
}
s.BeforeCreateCallTimes = s.BeforeCreateCallTimes + 1
return
}
func (s *Product) BeforeUpdate() (err error) {
if s.Code == "dont_update" {
err = errors.New("can't update")
} | func (s *Product) BeforeSave() (err error) {
if s.Code == "dont_save" {
err = errors.New("can't save")
}
s.BeforeSaveCallTimes = s.BeforeSaveCallTimes + 1
return
}
func (s *Product) AfterFind() {
s.AfterFindCallTimes = s.AfterFindCallTimes + 1
}
func (s *Product) AfterCreate(tx *gorm.DB) {
tx.Model(s).UpdateColumn(Product{AfterCreateCallTimes: s.AfterCreateCallTimes + 1})
}
func (s *Product) AfterUpdate() {
s.AfterUpdateCallTimes = s.AfterUpdateCallTimes + 1
}
func (s *Product) AfterSave() (err error) {
if s.Code == "after_save_error" {
err = errors.New("can't save")
}
s.AfterSaveCallTimes = s.AfterSaveCallTimes + 1
return
}
func (s *Product) BeforeDelete() (err error) {
if s.Code == "dont_delete" {
err = errors.New("can't delete")
}
s.BeforeDeleteCallTimes = s.BeforeDeleteCallTimes + 1
return
}
func (s *Product) AfterDelete() (err error) {
if s.Code == "after_delete_error" {
err = errors.New("can't delete")
}
s.AfterDeleteCallTimes = s.AfterDeleteCallTimes + 1
return
}
func (s *Product) GetCallTimes() []int64 {
return []int64{s.BeforeCreateCallTimes, s.BeforeSaveCallTimes, s.BeforeUpdateCallTimes, s.AfterCreateCallTimes, s.AfterSaveCallTimes, s.AfterUpdateCallTimes, s.BeforeDeleteCallTimes, s.AfterDeleteCallTimes, s.AfterFindCallTimes}
}
func TestRunCallbacks(t *testing.T) {
p := Product{Code: "unique_code", Price: 100}
DB.Save(&p)
if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 1, 0, 0, 0, 0}) {
t.Errorf("Callbacks should be invoked successfully, %v", p.GetCallTimes())
}
DB.Where("Code = ?", "unique_code").First(&p)
if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 1, 0, 1, 0, 0, 0, 0, 1}) {
t.Errorf("After callbacks values are not saved, %v", p.GetCallTimes())
}
p.Price = 200
DB.Save(&p)
if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 1, 1, 0, 0, 1}) {
t.Errorf("After update callbacks should be invoked successfully, %v", p.GetCallTimes())
}
var products []Product
DB.Find(&products, "code = ?", "unique_code")
if products[0].AfterFindCallTimes != 2 {
t.Errorf("AfterFind callbacks should work with slice")
}
DB.Where("Code = ?", "unique_code").First(&p)
if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 0, 0, 2}) {
t.Errorf("After update callbacks values are not saved, %v", p.GetCallTimes())
}
DB.Delete(&p)
if !reflect.DeepEqual(p.GetCallTimes(), []int64{1, 2, 1, 1, 0, 0, 1, 1, 2}) {
t.Errorf("After delete callbacks should be invoked successfully, %v", p.GetCallTimes())
}
if DB.Where("Code = ?", "unique_code").First(&p).Error == nil {
t.Errorf("Can't find a deleted record")
}
}
func TestCallbacksWithErrors(t *testing.T) {
p := Product{Code: "Invalid", Price: 100}
if DB.Save(&p).Error == nil {
t.Errorf("An error from before create callbacks happened when create with invalid value")
}
if DB.Where("code = ?", "Invalid").First(&Product{}).Error == nil {
t.Errorf("Should not save record that have errors")
}
if DB.Save(&Product{Code: "dont_save", Price: 100}).Error == nil {
t.Errorf("An error from after create callbacks happened when create with invalid value")
}
p2 := Product{Code: "update_callback", Price: 100}
DB.Save(&p2)
p2.Code = "dont_update"
if DB.Save(&p2).Error == nil {
t.Errorf("An error from before update callbacks happened when update with invalid value")
}
if DB.Where("code = ?", "update_callback").First(&Product{}).Error != nil {
t.Errorf("Record Should not be updated due to errors happened in before update callback")
}
if DB.Where("code = ?", "dont_update").First(&Product{}).Error == nil {
t.Errorf("Record Should not be updated due to errors happened in before update callback")
}
p2.Code = "dont_save"
if DB.Save(&p2).Error == nil {
t.Errorf("An error from before save callbacks happened when update with invalid value")
}
p3 := Product{Code: "dont_delete", Price: 100}
DB.Save(&p3)
if DB.Delete(&p3).Error == nil {
t.Errorf("An error from before delete callbacks happened when delete")
}
if DB.Where("Code = ?", "dont_delete").First(&p3).Error != nil {
t.Errorf("An error from before delete callbacks happened")
}
p4 := Product{Code: "after_save_error", Price: 100}
DB.Save(&p4)
if err := DB.First(&Product{}, "code = ?", "after_save_error").Error; err == nil {
t.Errorf("Record should be reverted if get an error in after save callback")
}
p5 := Product{Code: "after_delete_error", Price: 100}
DB.Save(&p5)
if err := DB.First(&Product{}, "code = ?", "after_delete_error").Error; err != nil {
t.Errorf("Record should be found")
}
DB.Delete(&p5)
if err := DB.First(&Product{}, "code = ?", "after_delete_error").Error; err != nil {
t.Errorf("Record shouldn't be deleted because of an error happened in after delete callback")
}
}
func TestGetCallback(t *testing.T) {
scope := DB.NewScope(nil)
if DB.Callback().Create().Get("gorm:test_callback") != nil {
t.Errorf("`gorm:test_callback` should be nil")
}
DB.Callback().Create().Register("gorm:test_callback", func(scope *gorm.Scope) { scope.Set("gorm:test_callback_value", 1) })
callback := DB.Callback().Create().Get("gorm:test_callback")
if callback == nil {
t.Errorf("`gorm:test_callback` should be non-nil")
}
callback(scope)
if v, ok := scope.Get("gorm:test_callback_value"); !ok || v != 1 {
t.Errorf("`gorm:test_callback_value` should be `1, true` but `%v, %v`", v, ok)
}
DB.Callback().Create().Replace("gorm:test_callback", func(scope *gorm.Scope) { scope.Set("gorm:test_callback_value", 2) })
callback = DB.Callback().Create().Get("gorm:test_callback")
if callback == nil {
t.Errorf("`gorm:test_callback` should be non-nil")
}
callback(scope)
if v, ok := scope.Get("gorm:test_callback_value"); !ok || v != 2 {
t.Errorf("`gorm:test_callback_value` should be `2, true` but `%v, %v`", v, ok)
}
DB.Callback().Create().Remove("gorm:test_callback")
if DB.Callback().Create().Get("gorm:test_callback") != nil {
t.Errorf("`gorm:test_callback` should be nil")
}
DB.Callback().Create().Register("gorm:test_callback", func(scope *gorm.Scope) { scope.Set("gorm:test_callback_value", 3) })
callback = DB.Callback().Create().Get("gorm:test_callback")
if callback == nil {
t.Errorf("`gorm:test_callback` should be non-nil")
}
callback(scope)
if v, ok := scope.Get("gorm:test_callback_value"); !ok || v != 3 {
t.Errorf("`gorm:test_callback_value` should be `3, true` but `%v, %v`", v, ok)
}
}
func TestUseDefaultCallback(t *testing.T) {
createCallbackName := "gorm:test_use_default_callback_for_create"
gorm.DefaultCallback.Create().Register(createCallbackName, func(*gorm.Scope) {
// nop
})
if gorm.DefaultCallback.Create().Get(createCallbackName) == nil {
t.Errorf("`%s` expected non-nil, but got nil", createCallbackName)
}
gorm.DefaultCallback.Create().Remove(createCallbackName)
if gorm.DefaultCallback.Create().Get(createCallbackName) != nil {
t.Errorf("`%s` expected nil, but got non-nil", createCallbackName)
}
updateCallbackName := "gorm:test_use_default_callback_for_update"
scopeValueName := "gorm:test_use_default_callback_for_update_value"
gorm.DefaultCallback.Update().Register(updateCallbackName, func(scope *gorm.Scope) {
scope.Set(scopeValueName, 1)
})
gorm.DefaultCallback.Update().Replace(updateCallbackName, func(scope *gorm.Scope) {
scope.Set(scopeValueName, 2)
})
scope := DB.NewScope(nil)
callback := gorm.DefaultCallback.Update().Get(updateCallbackName)
callback(scope)
if v, ok := scope.Get(scopeValueName); !ok || v != 2 {
t.Errorf("`%s` should be `2, true` but `%v, %v`", scopeValueName, v, ok)
}
} | s.BeforeUpdateCallTimes = s.BeforeUpdateCallTimes + 1
return
}
|
icon_bookmark.rs |
pub struct IconBookmark {
props: crate::Props,
}
impl yew::Component for IconBookmark {
type Properties = crate::Props;
type Message = ();
fn | (props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
{
Self { props }
}
fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender
{
true
}
fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender
{
false
}
fn view(&self) -> yew::prelude::Html
{
yew::prelude::html! {
<svg
class=self.props.class.unwrap_or("")
width=self.props.size.unwrap_or(24).to_string()
height=self.props.size.unwrap_or(24).to_string()
viewBox="0 0 24 24"
fill=self.props.fill.unwrap_or("none")
stroke=self.props.color.unwrap_or("currentColor")
stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
>
<svg xmlns="http://www.w3.org/2000/svg" height="24" viewBox="0 0 24 24" width="24"><path d="M0 0h24v24H0V0z" fill="none"/><path d="M19 3H5v18l7-3 7 3V3z"/></svg>
</svg>
}
}
}
| create |
Table.js | /**
* Copyright 2018-2020 Cargill Incorporated
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import React from 'react';
import PropTypes from 'prop-types'; | import TableRow from './TableRow';
import TableHeader from './TableHeader';
import './CircuitsTable.scss';
const CircuitsTable = ({ circuits, dispatch }) => {
const noCircuits = (
<tr>
<td colSpan="5" className="no-circuits-msg">
No circuits found
</td>
</tr>
);
return (
<div className="table-container">
<table className="circuits-table">
<TableHeader dispatch={dispatch} circuits={circuits} />
{circuits.length === 0 ? noCircuits : ''}
{circuits.map(item => {
return <TableRow circuit={item} />;
})}
</table>
</div>
);
};
CircuitsTable.propTypes = {
circuits: PropTypes.arrayOf(Circuit).isRequired,
dispatch: PropTypes.func.isRequired
};
export default CircuitsTable; | import { Circuit } from '../../data/circuits'; |
dynamic.py | from collections import namedtuple
from guillotina import configure
from guillotina import schema
from guillotina.component import get_adapter
from guillotina.exceptions import ComponentLookupError
from guillotina.exceptions import ValueDeserializationError
from guillotina.fields.interfaces import IDynamicField
from guillotina.fields.interfaces import IDynamicFieldOperation
from guillotina.fields.patch import field_converter
from guillotina.fields.patch import PatchDictDel
from guillotina.fields.patch import PatchDictSet
from guillotina.fields.patch import PatchDictUpdate
from guillotina.fields.patch import PatchField
from guillotina.interfaces import IJSONToValue
from guillotina.schema.interfaces import IDict
from zope.interface import implementer
from zope.interface import Interface
@implementer(IDynamicField)
class DynamicField(PatchField):
operation_type = IDynamicFieldOperation
@configure.value_deserializer(IDynamicField)
def dynamic_field_converter(field, value, context):
if not isinstance(value, dict) or "op" not in value:
raise ValueDeserializationError(field, value, "Not valid payload")
return field_converter(field, value, context)
class IDynamicType(Interface):
"""
Used to dynamically bind data to validate
new values against
"""
date = schema.Datetime(required=False)
text = schema.Text(required=False)
integer = schema.Int(required=False)
float = schema.Float(required=False)
boolean = schema.Bool(required=False)
keyword = schema.UnionField(
schema.List(required=False, value_type=schema.Text(), max_length=1000),
schema.Text(required=False),
required=False,
)
def _validate_field(field, context, value):
if "key" not in value or "value" not in value:
raise ValueDeserializationError(field, value, f"Invalid data")
from guillotina.behaviors.dynamic import find_field
field = find_field(context, value["key"])
# now, verify value...
if not field:
raise ValueDeserializationError(field, value, f"Dynamic field not found")
field_type = field.get("type", "unknown")
try:
valid_type = namedtuple("temp_assign_type", [field_type])
ob = valid_type({field_type: None})
bound_field = IDynamicType[field_type].bind(ob)
# validate and convert
real_value = get_adapter(bound_field, IJSONToValue, args=[value["value"], ob])
bound_field.validate(real_value)
value["value"] = real_value
except (KeyError, ComponentLookupError):
raise ValueDeserializationError(field, value, f"Invalid type {field_type}")
@configure.adapter(for_=IDict, provides=IDynamicFieldOperation, name="assign")
class DynamicDictSet(PatchDictSet):
def __call__(self, context, value):
if "key" in value and "value" in value:
_validate_field(self.field, context, value)
return super().__call__(context, value)
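# Illustrative example (the field name "my_number" is hypothetical, not taken
# from the codebase): the deserializer above expects a patch-style payload with
# an "op" key; "assign" routes to DynamicDictSet, whose _validate_field call
# looks up the dynamic field definition for value["key"] and validates
# value["value"] against the matching IDynamicType schema field (date, text,
# integer, float, boolean or keyword).
_EXAMPLE_ASSIGN_PAYLOAD = {"op": "assign", "value": {"key": "my_number", "value": 42}}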
@configure.adapter(for_=IDict, provides=IDynamicFieldOperation, name="update")
class DynamicDictUpdate(PatchDictUpdate):
def __call__(self, context, value):
if not isinstance(value, list):
raise ValueDeserializationError(
self.field, value, f"Invalid type patch data, must be list of updates"
)
for item in value:
_validate_field(self.field, context, item)
return super().__call__(context, value)
@configure.adapter(for_=IDict, provides=IDynamicFieldOperation, name="del")
class DynamicDictDel(PatchDictDel):
| """ """ |
|
getDetail.js | let detail = {
"id": 0,
"name": "", // 菜谱名称
"img": "", // 图片描述
"times": 0, // 做过的次数
"评分": 0, // 吃完后对于菜的评分
"createTime": "", // 创建菜谱的时间
"modifyTime": "", // 菜谱更新时间
"material": [{ // 用料(包含做菜的所有原材料)
"id": 0,
"name": "",
"unit": "", // 单位
"amount": "",
"price": "" // 单价,可以根据选择的数量计算中总价
}],
"progress": [{
"description": "",
"img": ""
}], | "tips": "" // 备注信息
}
export default detail; | |
dbtables.py | #-----------------------------------------------------------------------
#
# Copyright (C) 2000, 2001 by Autonomous Zone Industries
# Copyright (C) 2002 Gregory P. Smith
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
# this header remains intact and that you do not claim any
# rights of ownership or authorship of this software. This
# software has been tested, but no warranty is expressed or
# implied.
#
# -- Gregory P. Smith <[email protected]>
# This provides a simple database table interface built on top of
# the Python BerkeleyDB 3 interface.
#
_cvsid = '$Id: dbtables.py,v 1.11 2004/08/08 00:54:20 tim_one Exp $'
import re
import sys
import copy
import xdrlib
import random
from types import ListType, StringType
import cPickle as pickle
try:
# For Pythons w/distutils pybsddb
from bsddb3.db import *
except ImportError:
# For Python 2.3
from bsddb.db import *
class TableDBError(StandardError):
pass
class TableAlreadyExists(TableDBError): |
class Cond:
"""This condition matches everything"""
def __call__(self, s):
return 1
class ExactCond(Cond):
"""Acts as an exact match condition function"""
def __init__(self, strtomatch):
self.strtomatch = strtomatch
def __call__(self, s):
return s == self.strtomatch
class PrefixCond(Cond):
"""Acts as a condition function for matching a string prefix"""
def __init__(self, prefix):
self.prefix = prefix
def __call__(self, s):
return s[:len(self.prefix)] == self.prefix
class PostfixCond(Cond):
"""Acts as a condition function for matching a string postfix"""
def __init__(self, postfix):
self.postfix = postfix
def __call__(self, s):
return s[-len(self.postfix):] == self.postfix
class LikeCond(Cond):
"""
Acts as a function that will match using an SQL 'LIKE' style
string. Case insensitive and % signs are wild cards.
This isn't perfect but it should work for the simple common cases.
"""
def __init__(self, likestr, re_flags=re.IGNORECASE):
# escape python re characters
chars_to_escape = '.*+()[]?'
for char in chars_to_escape :
likestr = likestr.replace(char, '\\'+char)
# convert %s to wildcards
self.likestr = likestr.replace('%', '.*')
self.re = re.compile('^'+self.likestr+'$', re_flags)
def __call__(self, s):
return self.re.match(s)
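# Illustrative usage sketch (the table and column names, and the bsdTableDB
# instance `db`, are made up): instances of the condition classes above are what
# bsdTableDB.Select()/Delete()/Modify() expect in their `conditions`
# dictionaries, keyed on column name, e.g.
#
#   db.Select('people', ['name', 'city'],
#             conditions={'name': PrefixCond('Smi'),
#                         'city': LikeCond('new%york')})
#
# LikeCond('new%york') matches 'New York' because '%' is translated into the
# regex wildcard '.*' and the match is case-insensitive.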
#
# keys used to store database metadata
#
_table_names_key = '__TABLE_NAMES__' # list of the tables in this db
_columns = '._COLUMNS__' # table_name+this key contains a list of columns
def _columns_key(table):
return table + _columns
#
# these keys are found within table sub databases
#
_data = '._DATA_.' # this+column+this+rowid key contains table data
_rowid = '._ROWID_.' # this+rowid+this key contains a unique entry for each
# row in the table. (no data is stored)
_rowid_str_len = 8 # length in bytes of the unique rowid strings
def _data_key(table, col, rowid):
return table + _data + col + _data + rowid
def _search_col_data_key(table, col):
return table + _data + col + _data
def _search_all_data_key(table):
return table + _data
def _rowid_key(table, rowid):
return table + _rowid + rowid + _rowid
def _search_rowid_key(table):
return table + _rowid
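# Worked example of the key layout produced by the helpers above, for a table
# named 'mytable', a column named 'city' and a (hypothetical) 8-byte rowid `rid`:
#
#   _columns_key('mytable')             -> 'mytable._COLUMNS__'
#   _data_key('mytable', 'city', rid)   -> 'mytable._DATA_.city._DATA_.' + rid
#   _rowid_key('mytable', rid)          -> 'mytable._ROWID_.' + rid + '._ROWID_.'
#
# Every data key of a table therefore shares the prefix returned by
# _search_all_data_key('mytable'), which is what Drop() scans on, while
# __Select() uses the narrower per-column prefix from _search_col_data_key().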
def contains_metastrings(s) :
"""Verify that the given string does not contain any
metadata strings that might interfere with dbtables database operation.
"""
if (s.find(_table_names_key) >= 0 or
s.find(_columns) >= 0 or
s.find(_data) >= 0 or
s.find(_rowid) >= 0):
# Then
return 1
else:
return 0
class bsdTableDB :
def __init__(self, filename, dbhome, create=0, truncate=0, mode=0600,
recover=0, dbflags=0):
"""bsdTableDB.open(filename, dbhome, create=0, truncate=0, mode=0600)
Open database name in the dbhome BerkeleyDB directory.
Use keyword arguments when calling this constructor.
"""
self.db = None
myflags = DB_THREAD
if create:
myflags |= DB_CREATE
flagsforenv = (DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG |
DB_INIT_TXN | dbflags)
# DB_AUTO_COMMIT isn't a valid flag for env.open()
try:
dbflags |= DB_AUTO_COMMIT
except AttributeError:
pass
if recover:
flagsforenv = flagsforenv | DB_RECOVER
self.env = DBEnv()
# enable auto deadlock avoidance
self.env.set_lk_detect(DB_LOCK_DEFAULT)
self.env.open(dbhome, myflags | flagsforenv)
if truncate:
myflags |= DB_TRUNCATE
self.db = DB(self.env)
# this code relies on DBCursor.set* methods to raise exceptions
# rather than returning None
self.db.set_get_returns_none(1)
# allow duplicate entries [warning: be careful w/ metadata]
self.db.set_flags(DB_DUP)
self.db.open(filename, DB_BTREE, dbflags | myflags, mode)
self.dbfilename = filename
# Initialize the table names list if this is a new database
txn = self.env.txn_begin()
try:
if not self.db.has_key(_table_names_key, txn):
self.db.put(_table_names_key, pickle.dumps([], 1), txn=txn)
# Yes, bare except
except:
txn.abort()
raise
else:
txn.commit()
# TODO verify more of the database's metadata?
self.__tablecolumns = {}
def __del__(self):
self.close()
def close(self):
if self.db is not None:
self.db.close()
self.db = None
if self.env is not None:
self.env.close()
self.env = None
def checkpoint(self, mins=0):
try:
self.env.txn_checkpoint(mins)
except DBIncompleteError:
pass
def sync(self):
try:
self.db.sync()
except DBIncompleteError:
pass
def _db_print(self) :
"""Print the database to stdout for debugging"""
print "******** Printing raw database for debugging ********"
cur = self.db.cursor()
try:
key, data = cur.first()
while 1:
print repr({key: data})
next = cur.next()
if next:
key, data = next
else:
cur.close()
return
except DBNotFoundError:
cur.close()
def CreateTable(self, table, columns):
"""CreateTable(table, columns) - Create a new table in the database
raises TableDBError if it already exists or for other DB errors.
"""
assert isinstance(columns, ListType)
txn = None
try:
# checking sanity of the table and column names here on
# table creation will prevent problems elsewhere.
if contains_metastrings(table):
raise ValueError(
"bad table name: contains reserved metastrings")
for column in columns :
if contains_metastrings(column):
raise ValueError(
"bad column name: contains reserved metastrings")
columnlist_key = _columns_key(table)
if self.db.has_key(columnlist_key):
raise TableAlreadyExists, "table already exists"
txn = self.env.txn_begin()
# store the table's column info
self.db.put(columnlist_key, pickle.dumps(columns, 1), txn=txn)
# add the table name to the tablelist
tablelist = pickle.loads(self.db.get(_table_names_key, txn=txn,
flags=DB_RMW))
tablelist.append(table)
# delete 1st, in case we opened with DB_DUP
self.db.delete(_table_names_key, txn)
self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn)
txn.commit()
txn = None
except DBError, dberror:
if txn:
txn.abort()
raise TableDBError, dberror[1]
def ListTableColumns(self, table):
"""Return a list of columns in the given table.
[] if the table doesn't exist.
"""
assert isinstance(table, StringType)
if contains_metastrings(table):
raise ValueError, "bad table name: contains reserved metastrings"
columnlist_key = _columns_key(table)
if not self.db.has_key(columnlist_key):
return []
pickledcolumnlist = self.db.get(columnlist_key)
if pickledcolumnlist:
return pickle.loads(pickledcolumnlist)
else:
return []
def ListTables(self):
"""Return a list of tables in this database."""
pickledtablelist = self.db.get(_table_names_key)
if pickledtablelist:
return pickle.loads(pickledtablelist)
else:
return []
def CreateOrExtendTable(self, table, columns):
"""CreateOrExtendTable(table, columns)
- Create a new table in the database.
If a table of this name already exists, extend it to have any
additional columns present in the given list as well as
all of its current columns.
"""
assert isinstance(columns, ListType)
try:
self.CreateTable(table, columns)
except TableAlreadyExists:
# the table already existed, add any new columns
txn = None
try:
columnlist_key = _columns_key(table)
txn = self.env.txn_begin()
# load the current column list
oldcolumnlist = pickle.loads(
self.db.get(columnlist_key, txn=txn, flags=DB_RMW))
# create a hash table for fast lookups of column names in the
# loop below
oldcolumnhash = {}
for c in oldcolumnlist:
oldcolumnhash[c] = c
# create a new column list containing both the old and new
# column names
newcolumnlist = copy.copy(oldcolumnlist)
for c in columns:
if not oldcolumnhash.has_key(c):
newcolumnlist.append(c)
# store the table's new extended column list
if newcolumnlist != oldcolumnlist :
# delete the old one first since we opened with DB_DUP
self.db.delete(columnlist_key, txn)
self.db.put(columnlist_key,
pickle.dumps(newcolumnlist, 1),
txn=txn)
txn.commit()
txn = None
self.__load_column_info(table)
except DBError, dberror:
if txn:
txn.abort()
raise TableDBError, dberror[1]
def __load_column_info(self, table) :
"""initialize the self.__tablecolumns dict"""
# check the column names
try:
tcolpickles = self.db.get(_columns_key(table))
except DBNotFoundError:
raise TableDBError, "unknown table: %r" % (table,)
if not tcolpickles:
raise TableDBError, "unknown table: %r" % (table,)
self.__tablecolumns[table] = pickle.loads(tcolpickles)
def __new_rowid(self, table, txn) :
"""Create a new unique row identifier"""
unique = 0
while not unique:
# Generate a random 64-bit row ID string
# (note: this code has <64 bits of randomness
# but it's plenty for our database id needs!)
p = xdrlib.Packer()
p.pack_int(int(random.random()*2147483647))
p.pack_int(int(random.random()*2147483647))
newid = p.get_buffer()
# Guarantee uniqueness by adding this key to the database
try:
self.db.put(_rowid_key(table, newid), None, txn=txn,
flags=DB_NOOVERWRITE)
except DBKeyExistError:
pass
else:
unique = 1
return newid
def Insert(self, table, rowdict) :
"""Insert(table, datadict) - Insert a new row into the table
using the keys+values from rowdict as the column values.
"""
txn = None
try:
if not self.db.has_key(_columns_key(table)):
raise TableDBError, "unknown table"
# check the validity of each column name
if not self.__tablecolumns.has_key(table):
self.__load_column_info(table)
for column in rowdict.keys() :
if not self.__tablecolumns[table].count(column):
raise TableDBError, "unknown column: %r" % (column,)
# get a unique row identifier for this row
txn = self.env.txn_begin()
rowid = self.__new_rowid(table, txn=txn)
# insert the row values into the table database
for column, dataitem in rowdict.items():
# store the value
self.db.put(_data_key(table, column, rowid), dataitem, txn=txn)
txn.commit()
txn = None
except DBError, dberror:
# WIBNI we could just abort the txn and re-raise the exception?
# But no, because TableDBError is not related to DBError via
# inheritance, so it would be backwards incompatible. Do the next
# best thing.
info = sys.exc_info()
if txn:
txn.abort()
self.db.delete(_rowid_key(table, rowid))
raise TableDBError, dberror[1], info[2]
def Modify(self, table, conditions={}, mappings={}):
"""Modify(table, conditions) - Modify in rows matching 'conditions'
using mapping functions in 'mappings'
* conditions is a dictionary keyed on column names
containing condition functions expecting the data string as an
argument and returning a boolean.
* mappings is a dictionary keyed on column names containing mapping
functions expecting the data string as an argument and returning the
new string for that column.
"""
try:
matching_rowids = self.__Select(table, [], conditions)
# modify only requested columns
columns = mappings.keys()
for rowid in matching_rowids.keys():
txn = None
try:
for column in columns:
txn = self.env.txn_begin()
# modify the requested column
try:
dataitem = self.db.get(
_data_key(table, column, rowid),
txn)
self.db.delete(
_data_key(table, column, rowid),
txn)
except DBNotFoundError:
# XXXXXXX row key somehow didn't exist, assume no
# error
dataitem = None
dataitem = mappings[column](dataitem)
if dataitem <> None:
self.db.put(
_data_key(table, column, rowid),
dataitem, txn=txn)
txn.commit()
txn = None
except DBError, dberror:
if txn:
txn.abort()
raise
except DBError, dberror:
raise TableDBError, dberror[1]
def Delete(self, table, conditions={}):
"""Delete(table, conditions) - Delete items matching the given
conditions from the table.
* conditions is a dictionary keyed on column names
containing condition functions expecting the data string as an
argument and returning a boolean.
"""
try:
matching_rowids = self.__Select(table, [], conditions)
# delete row data from all columns
columns = self.__tablecolumns[table]
for rowid in matching_rowids.keys():
txn = None
try:
txn = self.env.txn_begin()
for column in columns:
# delete the data key
try:
self.db.delete(_data_key(table, column, rowid),
txn)
except DBNotFoundError:
# XXXXXXX column may not exist, assume no error
pass
try:
self.db.delete(_rowid_key(table, rowid), txn)
except DBNotFoundError:
# XXXXXXX row key somehow didn't exist, assume no error
pass
txn.commit()
txn = None
except DBError, dberror:
if txn:
txn.abort()
raise
except DBError, dberror:
raise TableDBError, dberror[1]
def Select(self, table, columns, conditions={}):
"""Select(table, conditions) - retrieve specific row data
Returns a list of row column->value mapping dictionaries.
* columns is a list of which column data to return. If
columns is None, all columns will be returned.
* conditions is a dictionary keyed on column names
containing callable conditions expecting the data string as an
argument and returning a boolean.
"""
try:
if not self.__tablecolumns.has_key(table):
self.__load_column_info(table)
if columns is None:
columns = self.__tablecolumns[table]
matching_rowids = self.__Select(table, columns, conditions)
except DBError, dberror:
raise TableDBError, dberror[1]
# return the matches as a list of dictionaries
return matching_rowids.values()
def __Select(self, table, columns, conditions):
"""__Select() - Used to implement Select and Delete (above)
Returns a dictionary keyed on rowids containing dicts
holding the row data for columns listed in the columns param
that match the given conditions.
* conditions is a dictionary keyed on column names
containing callable conditions expecting the data string as an
argument and returning a boolean.
"""
# check the validity of each column name
if not self.__tablecolumns.has_key(table):
self.__load_column_info(table)
if columns is None:
columns = self.__tablecolumns[table]
for column in (columns + conditions.keys()):
if not self.__tablecolumns[table].count(column):
raise TableDBError, "unknown column: %r" % (column,)
# keyed on rowids that match so far, containing dicts keyed on
# column names containing the data for that row and column.
matching_rowids = {}
# keys are rowids that do not match
rejected_rowids = {}
# attempt to sort the conditions in such a way as to minimize full
# column lookups
def cmp_conditions(atuple, btuple):
a = atuple[1]
b = btuple[1]
if type(a) is type(b):
if isinstance(a, PrefixCond) and isinstance(b, PrefixCond):
# longest prefix first
return cmp(len(b.prefix), len(a.prefix))
if isinstance(a, LikeCond) and isinstance(b, LikeCond):
# longest likestr first
return cmp(len(b.likestr), len(a.likestr))
return 0
if isinstance(a, ExactCond):
return -1
if isinstance(b, ExactCond):
return 1
if isinstance(a, PrefixCond):
return -1
if isinstance(b, PrefixCond):
return 1
# leave all unknown condition callables alone as equals
return 0
conditionlist = conditions.items()
conditionlist.sort(cmp_conditions)
# Apply conditions to column data to find what we want
cur = self.db.cursor()
column_num = -1
for column, condition in conditionlist:
column_num = column_num + 1
searchkey = _search_col_data_key(table, column)
# speedup: don't linear search columns within loop
if column in columns:
savethiscolumndata = 1 # save the data for return
else:
savethiscolumndata = 0 # data only used for selection
try:
key, data = cur.set_range(searchkey)
while key[:len(searchkey)] == searchkey:
# extract the rowid from the key
rowid = key[-_rowid_str_len:]
if not rejected_rowids.has_key(rowid):
# if no condition was specified or the condition
# succeeds, add row to our match list.
if not condition or condition(data):
if not matching_rowids.has_key(rowid):
matching_rowids[rowid] = {}
if savethiscolumndata:
matching_rowids[rowid][column] = data
else:
if matching_rowids.has_key(rowid):
del matching_rowids[rowid]
rejected_rowids[rowid] = rowid
key, data = cur.next()
except DBError, dberror:
if dberror[0] != DB_NOTFOUND:
raise
continue
cur.close()
# we're done selecting rows, garbage collect the reject list
del rejected_rowids
# extract any remaining desired column data from the
# database for the matching rows.
if len(columns) > 0:
for rowid, rowdata in matching_rowids.items():
for column in columns:
if rowdata.has_key(column):
continue
try:
rowdata[column] = self.db.get(
_data_key(table, column, rowid))
except DBError, dberror:
if dberror[0] != DB_NOTFOUND:
raise
rowdata[column] = None
# return the matches
return matching_rowids
def Drop(self, table):
"""Remove an entire table from the database"""
txn = None
try:
txn = self.env.txn_begin()
# delete the column list
self.db.delete(_columns_key(table), txn)
cur = self.db.cursor(txn)
# delete all keys containing this tables column and row info
table_key = _search_all_data_key(table)
while 1:
try:
key, data = cur.set_range(table_key)
except DBNotFoundError:
break
# only delete items in this table
if key[:len(table_key)] != table_key:
break
cur.delete()
# delete all rowids used by this table
table_key = _search_rowid_key(table)
while 1:
try:
key, data = cur.set_range(table_key)
except DBNotFoundError:
break
# only delete items in this table
if key[:len(table_key)] != table_key:
break
cur.delete()
cur.close()
# delete the tablename from the table name list
tablelist = pickle.loads(
self.db.get(_table_names_key, txn=txn, flags=DB_RMW))
try:
tablelist.remove(table)
except ValueError:
# hmm, it wasn't there, oh well, that's what we want.
pass
# delete 1st, in case we opened with DB_DUP
self.db.delete(_table_names_key, txn)
self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn)
txn.commit()
txn = None
if self.__tablecolumns.has_key(table):
del self.__tablecolumns[table]
except DBError, dberror:
if txn:
txn.abort()
raise TableDBError, dberror[1] | pass
|
simple-requester.ts | import { Injectable } from '@angular/core';
import { Http, Headers, RequestOptions, XHRBackend, Response, ResponseContentType } from '@angular/http';
import { Observable } from 'rxjs/Rx';
import 'rxjs/add/operator/map';
import { CookieService } from 'angular2-cookie/core';
import { environment } from '../../environments/environment';
import { NotificationsService } from 'angular2-notifications';
import { Router, ActivatedRoute } from '@angular/router';
import { GlobalDataService } from './globaldata.service';
import { ATError } from '../shared/models/error';
@Injectable()
export class | extends Http {
public pendingRequests = 0;
public showLoading = false;
public withChildsQP = 'withChildren=1';
public api: string = environment.host ? environment.host : `${location.origin}/api`;
constructor(
protected route: ActivatedRoute,
private notificationsService: NotificationsService,
private backend: XHRBackend,
private defaultOptions: RequestOptions,
protected globaldata: GlobalDataService,
protected router: Router,
protected cookieService: CookieService) {
super(backend, defaultOptions);
}
public isAuthHeaderExists() {
return !!this.cookieService.get('iio78');
}
public handleError(err:Response) {
let errorMessage: ATError = err.json()
if (errorMessage.message.startsWith('Duplicate entry')) {
const matches = errorMessage.message.match(/'([^']+)'/);
errorMessage.message = `The '${matches[1]}' value is duplicated by another entity!`;
}
if (+err.status === 401) {
this.cookieService.remove('iio78');
}
this.notificationsService.error(
`Ooops! ${err.status} code`,
errorMessage.message
);
}
public handleSimpleError(header: string, message: string) {
this.notificationsService.error(
header,
message
);
}
public handleWarning(header: string, message: string) {
this.notificationsService.warn(
header, message
);
}
public handleSuccess(message: string) {
this.notificationsService.success(
`Successful`,
message
);
}
public handleInfo(message: string) {
this.notificationsService.info(
`Information`,
message
);
}
protected doPost(url: string, object?, params: { [key: string]: any | any[]; } = null, showLoading: boolean = false) {
let jsonString = JSON.stringify(object);
if (typeof jsonString === 'undefined') { jsonString = '{}'; }
const headers = new Headers();
this.createAuthorizationHeader(headers);
return this.intercept(super.post(this.api + url, jsonString, { headers: headers, params }), showLoading);
}
protected doPut(url: string, object?) {
let jsonString = JSON.stringify(object);
if (typeof jsonString === 'undefined') { jsonString = '{}'; }
const headers = new Headers();
this.createAuthorizationHeader(headers);
return this.intercept(super.put(this.api + url, jsonString, { headers: headers }), true);
}
protected doDelete(url: string, params: { [key: string]: any | any[]; } = null, body: any = undefined) {
const headers = new Headers();
this.createAuthorizationHeader(headers);
return this.intercept(super.delete(this.api + url, { headers, params, body }), false);
}
protected doPostFiles(url: string, fileList: File[], params: { [key: string]: any | any[]; }) {
this.turnOnModal();
const formData: FormData = new FormData();
let i = 0;
for (const file of fileList) {
formData.append('uploadFile' + i, file, file.name);
i++;
}
const headers = new Headers();
this.createAuthorizationHeader(headers);
return this.intercept(super.post(this.api + url, formData, { headers: headers, params: params }), true);
}
protected doGetWithoutAuthHeader(url: string) {
return this.intercept(super.get(this.api + url), false);
}
protected doGet(url: string, params: { [key: string]: any | any[]; } = null, showLoading: boolean = false, handleError: boolean = true) {
const headers = new Headers();
this.createAuthorizationHeader(headers);
return this.intercept(super.get(this.api + url, { headers: headers, params: params }), showLoading, handleError);
}
protected doDownload(url: string) {
const headers = new Headers();
this.createAuthorizationHeader(headers);
return this.intercept(super.get(this.api + url, { headers: headers, responseType: ResponseContentType.Blob }), false);
}
private createAuthorizationHeader(headers: Headers) {
if (this.cookieService.get('iio78')) {
headers.append('Authorization', 'Basic ' + this.cookieService.get('iio78'));
}
}
private intercept(observable: Observable<Response>, showLoading: boolean, handleError: boolean = true): Observable<Response> {
if (showLoading) {
this.globaldata.requestQuery++;
this.turnOnModal();
}
return observable
.catch((err, source) => {
if (handleError) {
this.handleError(err);
}
return Observable.throw(err.statusText);
})
.do((res: Response) => {
}, (err: any) => { })
.finally(() => {
const timer = Observable.timer(1);
timer.subscribe(t => {
if (showLoading) {
this.globaldata.requestQuery--;
this.turnOffModal();
}
});
});
}
private turnOnModal() {
this.globaldata.setLoaderVisibility(true);
}
private turnOffModal() {
if (this.globaldata.requestQuery === 0) {
this.globaldata.setLoaderVisibility(false);
}
}
}
| SimpleRequester |
2015_14b.py | input = """Vixen can fly 19 km/s for 7 seconds, but then must rest for 124 seconds.
Rudolph can fly 3 km/s for 15 seconds, but then must rest for 28 seconds.
Donner can fly 19 km/s for 9 seconds, but then must rest for 164 seconds.
Blitzen can fly 19 km/s for 9 seconds, but then must rest for 158 seconds.
Comet can fly 13 km/s for 7 seconds, but then must rest for 82 seconds.
Cupid can fly 25 km/s for 6 seconds, but then must rest for 145 seconds.
Dasher can fly 14 km/s for 3 seconds, but then must rest for 38 seconds.
Dancer can fly 3 km/s for 16 seconds, but then must rest for 37 seconds.
Prancer can fly 25 km/s for 6 seconds, but then must rest for 143 seconds."""
t = 2503
# input = """Comet can fly 14 km/s for 10 seconds, but then must rest for 127 seconds.
# Dancer can fly 16 km/s for 11 seconds, but then must rest for 162 seconds."""
# t = 1000
class Reindeer:
def | (self, name, speed, flying_time, resting_time):
self.distance_covered = 0
self.current_time = 0
self.is_flying = True
self.time_since_last_change = 0
self.points = 0
self.name = name
self.speed = speed
self.flying_time = flying_time
self.resting_time = resting_time
def advance(self):
self.current_time += 1
self.time_since_last_change += 1
if self.is_flying:
self.distance_covered += self.speed
if self.time_since_last_change >= self.flying_time:
self.time_since_last_change = 0
self.is_flying = False
else:
if self.time_since_last_change >= self.resting_time:
self.time_since_last_change = 0
self.is_flying = True
# print(self.name,
# "is",
# "flying" if self.is_flying else "resting",
# "and has travelled",
# str(self.distance_covered),
# "after",
# str(self.current_time),
# "seconds"
# )
def score_point(self):
# print(self.name, "has scored a point")
self.points += 1
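# Worked example (matches the commented-out sample input above with t = 1000):
# Comet flies 14 km/s for 10 s and then rests 127 s, a 137 s cycle. 1000 s is
# 7 full cycles (959 s) plus 41 s, of which only the first 10 s are flown, so
# Comet covers (7 * 10 + 10) * 14 = 1120 km. The closed form below is only an
# illustrative cross-check; the per-second simulation in Reindeer.advance()
# produces the same distances.
def _distance_after(speed, flying_time, resting_time, t):
    cycle = flying_time + resting_time
    full_cycles, remainder = divmod(t, cycle)
    return speed * (full_cycles * flying_time + min(remainder, flying_time))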
reindeers = []
for line in input.split("\n"):
words = line.split()
name = words[0]
speed = int(words[3])
ft = int(words[6])
rt = int(words[-2])
reindeers.append(Reindeer(name, speed, ft, rt))
for i in range(t):
for r in reindeers:
r.advance()
furthest = max([r.distance_covered for r in reindeers])
for r in reindeers:
if r.distance_covered == furthest:
r.score_point()
print(max([r.points for r in reindeers]))
| __init__ |
main.go | package main
import (
"flag"
"fmt"
"time"
"github.com/kandoo/beehive"
"github.com/kandoo/beehive/Godeps/_workspace/src/github.com/golang/glog"
)
const (
maxSpike = 10000
)
var (
elephantProb float64
)
func createHive(addr string, paddrs []string, minDriver, maxDriver int,
minCol, maxCol int, stickyCollector bool, lockRouter bool, joinCh chan bool) |
func main() {
flag.Float64Var(&elephantProb, "p", 0.1,
"The probability of an elephant flow.")
nswitches := flag.Int("nswitches", 4, "Number of switches.")
nhives := flag.Int("nhives", 4, "Number of hives.")
stickyCol := flag.Bool("stickycollectors", false,
"Whether collectors are sticky.")
centCol := flag.Bool("centralized", false,
"Whether to centralize the collectors")
flag.Parse()
lAddr := "127.0.0.1:%d"
port := 7777
driverPerHive := *nswitches / *nhives
var collectorPerHive int
if *centCol {
collectorPerHive = 0
} else {
collectorPerHive = *nswitches / *nhives
}
joinChannels := make([]chan bool, 0)
var pas []string
for h := 0; h < *nhives; h++ {
addr := fmt.Sprintf(lAddr, port)
port++
jCh := make(chan bool)
joinChannels = append(joinChannels, jCh)
if *centCol && h == 0 {
createHive(addr, pas, h*driverPerHive, (h+1)*driverPerHive,
0, *nswitches, *stickyCol, true, jCh)
time.Sleep(1 * time.Second)
pas = append(pas, addr)
continue
}
createHive(addr, pas, h*driverPerHive, (h+1)*driverPerHive,
h*collectorPerHive, (h+1)*collectorPerHive, *stickyCol, false, jCh)
}
for _, ch := range joinChannels {
<-ch
}
}
| {
h := beehive.NewHive(beehive.Addr(addr), beehive.PeerAddrs(paddrs...))
cOps := []beehive.AppOption{}
if stickyCollector {
cOps = append(cOps, beehive.Sticky())
}
c := h.NewApp("Collector", cOps...)
p := NewPoller(1 * time.Second)
c.Detached(p)
c.Handle(StatResult{}, &Collector{uint64(maxSpike * (1 - elephantProb)), p})
c.Handle(SwitchJoined{}, &SwitchJoinHandler{p})
r := h.NewApp("Router", beehive.Sticky())
r.Handle(MatrixUpdate{}, &UpdateHandler{})
d := h.NewApp("Driver", beehive.Sticky())
driver := NewDriver(minDriver, maxDriver-minDriver)
d.Handle(StatQuery{}, driver)
d.Handle(FlowMod{}, driver)
if lockRouter {
h.Emit(MatrixUpdate{})
}
if maxDriver != minDriver {
glog.Infof("Running driver from %d to %d\n", minDriver, maxDriver-1)
d.Detached(driver)
for i := minDriver; i < maxDriver; i++ {
h.Emit(StatQuery{Switch(i)})
}
}
if maxCol != minCol {
glog.Infof("Running collector from %d to %d\n", minCol, maxCol-1)
for i := minCol; i < maxCol; i++ {
h.Emit(SwitchJoined{Switch(i)})
}
}
h.RegisterMsg(SwitchStats{})
go func() {
h.Start()
<-joinCh
}()
} |
NetworkService.js | /**
* SPDX-License-Identifier: Apache-2.0
*/
const helper = require('../../../common/helper');
const logger = helper.getLogger('NetworkService');
/**
*
*
* @class NetworkService
*/
class NetworkService {
/**
* Creates an instance of NetworkService.
* @param {*} platform
* @memberof NetworkService
*/
constructor(platform) {
this.platform = platform;
}
/**
*
*
* @returns
* @memberof NetworkService | const networks = this.platform.getNetworks();
const iterator = networks.entries();
for (const value of iterator) {
networklist.push(value);
}
logger.log('Network list ', networklist);
return networklist;
}
}
module.exports = NetworkService; | */
async networkList() {
// Get the list of the networks from the configuration that was loaded from the config.json
const networklist = []; |
submit_workflow_job.go | package vod
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// SubmitWorkflowJob invokes the vod.SubmitWorkflowJob API synchronously
// api document: https://help.aliyun.com/api/vod/submitworkflowjob.html
func (client *Client) SubmitWorkflowJob(request *SubmitWorkflowJobRequest) (response *SubmitWorkflowJobResponse, err error) {
response = CreateSubmitWorkflowJobResponse()
err = client.DoAction(request, response)
return
}
// SubmitWorkflowJobWithChan invokes the vod.SubmitWorkflowJob API asynchronously
// api document: https://help.aliyun.com/api/vod/submitworkflowjob.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) SubmitWorkflowJobWithChan(request *SubmitWorkflowJobRequest) (<-chan *SubmitWorkflowJobResponse, <-chan error) {
responseChan := make(chan *SubmitWorkflowJobResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.SubmitWorkflowJob(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil |
return responseChan, errChan
}
// SubmitWorkflowJobWithCallback invokes the vod.SubmitWorkflowJob API asynchronously
// api document: https://help.aliyun.com/api/vod/submitworkflowjob.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) SubmitWorkflowJobWithCallback(request *SubmitWorkflowJobRequest, callback func(response *SubmitWorkflowJobResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *SubmitWorkflowJobResponse
var err error
defer close(result)
response, err = client.SubmitWorkflowJob(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// SubmitWorkflowJobRequest is the request struct for api SubmitWorkflowJob
type SubmitWorkflowJobRequest struct {
*requests.RpcRequest
ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"`
WorkflowId string `position:"Query" name:"WorkflowId"`
ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"`
OwnerId requests.Integer `position:"Query" name:"OwnerId"`
MediaId string `position:"Query" name:"MediaId"`
FileUrl string `position:"Query" name:"FileUrl"`
}
// SubmitWorkflowJobResponse is the response struct for api SubmitWorkflowJob
type SubmitWorkflowJobResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
}
// CreateSubmitWorkflowJobRequest creates a request to invoke SubmitWorkflowJob API
func CreateSubmitWorkflowJobRequest() (request *SubmitWorkflowJobRequest) {
request = &SubmitWorkflowJobRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("vod", "2017-03-21", "SubmitWorkflowJob", "vod", "openAPI")
return
}
// CreateSubmitWorkflowJobResponse creates a response to parse from SubmitWorkflowJob response
func CreateSubmitWorkflowJobResponse() (response *SubmitWorkflowJobResponse) {
response = &SubmitWorkflowJobResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
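// Illustrative usage sketch (not taken from the SDK documentation; the client
// construction and the ID values are assumptions):
//
//	// client, err := NewClientWithAccessKey("cn-shanghai", "<accessKeyId>", "<accessKeySecret>")
//	request := CreateSubmitWorkflowJobRequest()
//	request.WorkflowId = "<your-workflow-id>"
//	request.MediaId = "<your-media-id>"
//	response, err := client.SubmitWorkflowJob(request)
//	if err != nil {
//		// handle the error
//	}
//	fmt.Println(response.RequestId)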
| {
errChan <- err
close(responseChan)
close(errChan)
} |
run.rs | // Copyright © 2016 libmussh developers
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! Runtime
use crate::error::MusshResult;
use crate::logging::Loggers;
use crate::subcmd::{Run, Subcommand};
use clap::{App, Arg};
use libmussh::Config;
use slog::trace;
use slog_try::try_trace;
use std::convert::TryFrom;
use std::env;
use std::path::PathBuf;
crate const MUSSH_CONFIG_FILE_NAME: &str = "mussh.toml";
crate const MUSSH_DB_FILE_NAME: &str = "mussh.db";
fn base_config_dir() -> MusshResult<PathBuf> {
Ok(if let Some(config_dir) = dirs::config_dir() {
config_dir
} else if let Ok(current_dir) = env::current_dir() {
current_dir
} else {
return Err("Unable to determine a suitable config directory!".into());
}
.join(env!("CARGO_PKG_NAME")))
}
crate fn run() -> MusshResult<()> {
// Setup the default config path for use in clap App
let base_path = base_config_dir()?;
let base_path_str = format!("{}", base_path.display());
let matches = app(&base_path_str).get_matches_safe()?;
// Setup the slog Loggers
let (stdout, stderr) = Loggers::try_from(&matches)?.split();
// Grab the mussh config
let config_path = PathBuf::from(matches.value_of("config").unwrap_or_else(|| "./"))
.join(MUSSH_CONFIG_FILE_NAME);
try_trace!(stdout, "Config Path: {}", config_path.display());
let config = Config::try_from(config_path)?;
let db_path =
PathBuf::from(matches.value_of("config").unwrap_or_else(|| "./")).join(MUSSH_DB_FILE_NAME);
if matches.is_present("output") {
try_trace!(stdout, "{:?}", config);
}
// Run, run, run...
match matches.subcommand() {
// 'cmd' subcommand
// ("cmd", Some(sub_m)) => command::cmd(&mut config, sub_m, &stderr),
// 'hostlist' subcommand
// ("hostlist", Some(sub_m)) => hostlist::cmd(&mut config, sub_m, &stderr),
// 'hosts' subcommand
// ("hosts", Some(sub_m)) => hosts::cmd(&mut config, sub_m),
// 'run' subcommand
("run", Some(sub_m)) => Run::new(stdout, stderr, db_path).execute(&config, sub_m),
(cmd, _) => Err(format!("Unknown subcommand {}", cmd).into()),
}
}
fn app<'a, 'b>(default_config_path: &'a str) -> App<'a, 'b> {
App::new(env!("CARGO_PKG_NAME"))
.version(env!("CARGO_PKG_VERSION"))
.author("Jason Ozias <[email protected]>")
.about("ssh multiplexing client")
.arg(
Arg::with_name("config")
.short("c")
.long("config")
.value_name("CONFIG")
.help("Specify a path for the TOML config file.")
.default_value(default_config_path)
.takes_value(true),
)
.arg(
Arg::with_name("dry_run")
.short("d")
.long("dry_run")
.help("Load the configuration and display what would be run"),
)
.arg(
Arg::with_name("verbose")
.short("v")
.multiple(true)
.help("Set the output verbosity level (more v's = more verbose)"),
)
.arg(
Arg::with_name("output")
.short("o")
.long("output")
.help("Show the TOML configuration"),
)
.subcommand(Run::subcommand())
}
#[cfg(test)]
mod test {
use super::app;
use crate::error::MusshResult;
use clap::ArgMatches;
fn check_multiple_arg(m: &ArgMatches<'_>, name: &str, expected: &[&str]) {
assert!(m.is_present(name));
assert_eq!(m.occurrences_of(name), 1); // notice only one occurrence
if let Some(values) = m.values_of(name) {
let values_vec: Vec<_> = values.collect();
assert_eq!(values_vec, expected);
} else {
panic!("no values found!");
}
}
#[test]
fn full_run_subcmd() -> MusshResult<()> {
let app_m = app("").get_matches_from_safe(vec![
"mussh",
"-vvv",
"-c",
"test_cfg",
"--dry_run",
"--output",
"run",
"-c",
"python,nginx,tmux",
"-h",
"all,!m8",
"--sync",
"-s",
"m4",
"-y",
"bar",
])?;
if let ("run", Some(sub_m)) = app_m.subcommand() {
// Check the commands
check_multiple_arg(sub_m, "commands", &["python", "nginx", "tmux"]);
// Check the hosts
check_multiple_arg(sub_m, "hosts", &["all", "!m8"]);
// Check for the presence of sync
assert!(sub_m.is_present("sync"));
// Check the group-cmds
check_multiple_arg(sub_m, "sync_commands", &["bar"]);
// Check the group-pre
check_multiple_arg(sub_m, "sync_hosts", &["m4"]);
} else {
// Either no run subcommand or one not tested for...
panic!("Run subcommand not found!");
}
Ok(())
}
#[test]
fn full_run_subcmd_alt_order_one() -> MusshResult<()> {
let app_m = app("").get_matches_from_safe(vec![
"mussh",
"run",
"-h",
"all,!m8",
"--sync",
"-c",
"python,nginx,tmux",
])?;
if let ("run", Some(sub_m)) = app_m.subcommand() {
// Check the commands
check_multiple_arg(sub_m, "commands", &["python", "nginx", "tmux"]);
// Check the hosts
check_multiple_arg(sub_m, "hosts", &["all", "!m8"]);
// Check for the presence of sync
assert!(sub_m.is_present("sync"));
} else {
// Either no run subcommand or one not tested for...
panic!("Run subcommand not found!");
}
Ok(())
}
#[test]
fn full_run_subcmd_alt_order_two() -> MusshResult<()> {
let app_m = app("").get_matches_from_safe(vec![
"mussh",
"run",
"--sync",
"-h",
"all,!m8",
"-c",
"python,nginx,tmux",
])?;
if let ("run", Some(sub_m)) = app_m.subcommand() {
// Check the commands
check_multiple_arg(sub_m, "commands", &["python", "nginx", "tmux"]);
// Check the hosts
check_multiple_arg(sub_m, "hosts", &["all", "!m8"]);
// Check for the presence of sync
assert!(sub_m.is_present("sync"));
} else {
// Either no run subcommand or one not tested for...
panic!("Run subcommand not found!");
}
Ok(())
}
#[test]
fn run_subcmd_no_sync() -> MusshResult<()> {
let app_m = app("").get_matches_from_safe(vec![
"mussh",
"run",
"-c",
"python,nginx,tmux",
"-h",
"all,!m8",
])?;
if let ("run", Some(sub_m)) = app_m.subcommand() {
// Check the commands
check_multiple_arg(sub_m, "commands", &["python", "nginx", "tmux"]);
// Check the hosts
check_multiple_arg(sub_m, "hosts", &["all", "!m8"]);
// Check for the presence of sync
assert!(!sub_m.is_present("sync"));
} else {
// Either no run subcommand or one not tested for...
panic!("Run subcommand not found!");
}
Ok(())
}
#[test]
fn r | ) {
assert!(app("")
.get_matches_from_safe(vec!["mussh", "run", "-h", "all", "!m8", "-s",])
.is_err());
}
#[test]
fn run_subcommand_missing_hosts() {
assert!(app("")
.get_matches_from_safe(vec!["mussh", "run", "-c", "python", "nginx", "tmux", "-s",])
.is_err());
}
#[test]
fn run_subcommand_missing_all() {
assert!(app("").get_matches_from_safe(vec!["mussh", "run"]).is_err());
}
#[test]
fn run_subcommand_missing_group() {
assert!(app("")
.get_matches_from_safe(vec![
"mussh",
"run",
"--group-cmds",
"bar",
"--group-pre",
"m4"
])
.is_err());
}
#[test]
fn run_subcommand_missing_group_pre() {
assert!(app("")
.get_matches_from_safe(vec![
"mussh",
"run",
"--group-cmds",
"bar",
"--group",
"m1,m2,m3"
])
.is_err());
}
#[test]
fn run_subcommand_missing_group_cmds() {
assert!(app("")
.get_matches_from_safe(vec![
"mussh",
"run",
"--group-pre",
"m4",
"--group",
"m1,m2,m3"
])
.is_err());
}
}
| un_subcommand_missing_commands( |
plugins.go | package filters
import "github.com/zencoder/go-dash/mpd"
type execPluginDASH func(manifest *mpd.MPD)
var (
pluginDASH = map[string]execPluginDASH{
"dvsRoleOverride": dvsRoleOverride,
}
)
func dvsRoleOverride(manifest *mpd.MPD) | {
for _, period := range manifest.Periods {
for _, as := range period.AdaptationSets {
for i, access := range as.AccessibilityElems {
if access != nil && *access.SchemeIdUri == "urn:tva:metadata:cs:AudioPurposeCS:2007" {
as.Roles[i].Value = strptr("description")
}
}
}
}
} |
|
accounts_db.rs | //! Persistent accounts are stored in below path location:
//! <path>/<pid>/data/
//!
//! The persistent store would allow for this mode of operation:
//! - Concurrent single thread append with many concurrent readers.
//!
//! The underlying memory is memory mapped to a file. The accounts would be
//! stored across multiple files and the mappings of file and offset of a
//! particular account would be stored in a shared index. This will allow for
//! concurrent commits without blocking reads, which will sequentially write
//! to memory, ssd or disk, and should be as fast as the hardware allow for.
//! The only required in memory data structure with a write lock is the index,
//! which should be fast to update.
//!
//! AppendVec's only store accounts for single slots. To bootstrap the
//! index from a persistent store of AppendVec's, the entries include
//! a "write_version". A single global atomic `AccountsDb::write_version`
//! tracks the number of commits to the entire data store. So the latest
//! commit for each slot entry would be indexed.
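// Illustrative sketch (not part of the real implementation): the "write_version"
// ordering described above boils down to a single global atomic counter that is
// bumped for every stored account; the returned value is recorded next to the
// (slot, AppendVec id, offset) entry in the index so the latest commit for a
// slot can be identified. The `DEMO_*`/`demo_*` names below are hypothetical
// and unused.
#[allow(dead_code)]
static DEMO_WRITE_VERSION: std::sync::atomic::AtomicU64 = std::sync::atomic::AtomicU64::new(0);
#[allow(dead_code)]
fn demo_next_write_version() -> u64 {
    DEMO_WRITE_VERSION.fetch_add(1, std::sync::atomic::Ordering::Relaxed)
}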
use crate::{
accounts_background_service::{DroppedSlotsSender, SendDroppedBankCallback},
accounts_cache::{AccountsCache, CachedAccount, SlotCache},
accounts_hash::{AccountsHash, CalculateHashIntermediate, HashStats, PreviousPass},
accounts_index::{
AccountIndexGetResult, AccountSecondaryIndexes, AccountsIndex, AccountsIndexRootsStats,
IndexKey, IsCached, ScanResult, SlotList, SlotSlice, ZeroLamport,
},
ancestors::Ancestors,
append_vec::{AppendVec, StoredAccountMeta, StoredMeta, StoredMetaWriteVersion},
contains::Contains,
pubkey_bins::PubkeyBinCalculator16,
read_only_accounts_cache::ReadOnlyAccountsCache,
sorted_storages::SortedStorages,
};
use blake3::traits::digest::Digest;
use crossbeam_channel::{unbounded, Receiver, Sender};
use dashmap::{
mapref::entry::Entry::{Occupied, Vacant},
DashMap, DashSet,
};
use lazy_static::lazy_static;
use log::*;
use rand::{prelude::SliceRandom, thread_rng, Rng};
use rayon::{prelude::*, ThreadPool};
use serde::{Deserialize, Serialize};
use solana_measure::measure::Measure;
use solana_rayon_threadlimit::get_thread_count;
use solana_sdk::{
account::{AccountSharedData, ReadableAccount},
clock::{BankId, Epoch, Slot},
genesis_config::ClusterType,
hash::{Hash, Hasher},
pubkey::Pubkey,
};
use solana_vote_program::vote_state::MAX_LOCKOUT_HISTORY;
use std::{
borrow::{Borrow, Cow},
boxed::Box,
collections::{hash_map::Entry, BTreeSet, HashMap, HashSet},
convert::TryFrom,
io::{Error as IoError, Result as IoResult},
ops::{Range, RangeBounds},
path::{Path, PathBuf},
sync::atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
sync::{Arc, Condvar, Mutex, MutexGuard, RwLock},
thread::Builder,
time::Instant,
};
use tempfile::TempDir;
#[cfg(test)]
use std::{thread::sleep, time::Duration};
const PAGE_SIZE: u64 = 4 * 1024;
const MAX_RECYCLE_STORES: usize = 1000;
const STORE_META_OVERHEAD: usize = 256;
const MAX_CACHE_SLOTS: usize = 200;
const FLUSH_CACHE_RANDOM_THRESHOLD: usize = MAX_LOCKOUT_HISTORY;
const SCAN_SLOT_PAR_ITER_THRESHOLD: usize = 4000;
pub const DEFAULT_FILE_SIZE: u64 = PAGE_SIZE * 1024;
pub const DEFAULT_NUM_THREADS: u32 = 8;
pub const DEFAULT_NUM_DIRS: u32 = 4;
// A specially reserved storage id just for entries in the cache, so that
// operations that take a storage entry can maintain a common interface
// when interacting with cached accounts. This id is "virtual" in that it
// doesn't refer to an actual storage entry.
const CACHE_VIRTUAL_STORAGE_ID: usize = AppendVecId::MAX;
// A specially reserved write version (identifier for ordering writes in an AppendVec)
// for entries in the cache, so that operations that take a storage entry can maintain
// a common interface when interacting with cached accounts. This version is "virtual" in
// that it doesn't actually map to an entry in an AppendVec.
const CACHE_VIRTUAL_WRITE_VERSION: StoredMetaWriteVersion = 0;
// A specially reserved offset (represents an offset into an AppendVec)
// for entries in the cache, so that operations that take a storage entry can maintain
// a common interface when interacting with cached accounts. This offset is "virtual" in
// that it doesn't actually map to an entry in an AppendVec.
const CACHE_VIRTUAL_OFFSET: usize = 0;
const CACHE_VIRTUAL_STORED_SIZE: usize = 0;
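// Taken together, these sentinels let a cached account masquerade as a stored one. As a
// hedged, illustrative sketch (not a constructor that appears in this excerpt), the
// AccountInfo recorded for a cache entry would look like:
//
//     AccountInfo {
//         store_id: CACHE_VIRTUAL_STORAGE_ID,
//         offset: CACHE_VIRTUAL_OFFSET,
//         stored_size: CACHE_VIRTUAL_STORED_SIZE,
//         lamports, // taken from the cached account itself
//     }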
#[cfg(not(test))]
const ABSURD_CONSECUTIVE_FAILED_ITERATIONS: usize = 100;
type DashMapVersionHash = DashMap<Pubkey, (u64, Hash)>;
lazy_static! {
// FROZEN_ACCOUNT_PANIC is used to signal local_cluster that an AccountsDb panic has occurred,
// as |cargo test| cannot observe panics in other threads
pub static ref FROZEN_ACCOUNT_PANIC: Arc<AtomicBool> = Arc::new(AtomicBool::new(false));
}
#[derive(Debug, Clone, Copy)]
pub enum AccountShrinkThreshold {
    /// Measure the total space sparseness across all candidates
    /// and select the candidates by using the top sparse account storage entries to shrink.
/// The value is the overall shrink threshold measured as ratio of the total live bytes
/// over the total bytes.
TotalSpace { shrink_ratio: f64 },
/// Use the following option to shrink all stores whose alive ratio is below
/// the specified threshold.
IndividalStore { shrink_ratio: f64 },
}
pub const DEFAULT_ACCOUNTS_SHRINK_OPTIMIZE_TOTAL_SPACE: bool = true;
pub const DEFAULT_ACCOUNTS_SHRINK_RATIO: f64 = 0.80;
// The default shrink threshold option: optimize total space, using the default shrink ratio.
const DEFAULT_ACCOUNTS_SHRINK_THRESHOLD_OPTION: AccountShrinkThreshold =
AccountShrinkThreshold::TotalSpace {
shrink_ratio: DEFAULT_ACCOUNTS_SHRINK_RATIO,
};
impl Default for AccountShrinkThreshold {
fn default() -> AccountShrinkThreshold {
DEFAULT_ACCOUNTS_SHRINK_THRESHOLD_OPTION
}
}
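// A hedged, worked example of the two strategies above (the numbers are illustrative only):
// with shrink_ratio = 0.80, a store with 100 MB of capacity but only 70 MB of alive bytes
// has an alive ratio of 0.70 < 0.80, so IndividalStore would select it on its own, while
// TotalSpace applies the 0.80 threshold to total-live-bytes / total-bytes summed across
// all candidate stores:
//
//     let per_store = AccountShrinkThreshold::IndividalStore { shrink_ratio: 0.80 };
//     let overall = AccountShrinkThreshold::TotalSpace {
//         shrink_ratio: DEFAULT_ACCOUNTS_SHRINK_RATIO,
//     };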
pub enum ScanStorageResult<R, B> {
Cached(Vec<R>),
Stored(B),
}
#[derive(Debug, Default)]
pub struct ErrorCounters {
pub total: usize,
pub account_in_use: usize,
pub account_loaded_twice: usize,
pub account_not_found: usize,
pub blockhash_not_found: usize,
pub blockhash_too_old: usize,
pub call_chain_too_deep: usize,
pub already_processed: usize,
pub instruction_error: usize,
pub insufficient_funds: usize,
pub invalid_account_for_fee: usize,
pub invalid_account_index: usize,
pub invalid_program_for_execution: usize,
pub not_allowed_during_cluster_maintenance: usize,
}
#[derive(Default, Debug)]
struct GenerateIndexTimings {
pub index_time: u64,
pub scan_time: u64,
pub insertion_time_us: u64,
pub min_bin_size: usize,
pub max_bin_size: usize,
pub total_items: usize,
pub storage_size_accounts_map_us: u64,
pub storage_size_storages_us: u64,
pub storage_size_accounts_map_flatten_us: u64,
}
impl GenerateIndexTimings {
pub fn report(&self) {
datapoint_info!(
"generate_index",
// we cannot accurately measure index insertion time because of many threads and lock contention
("total_us", self.index_time, i64),
("scan_stores_us", self.scan_time, i64),
("insertion_time_us", self.insertion_time_us, i64),
("min_bin_size", self.min_bin_size as i64, i64),
("max_bin_size", self.max_bin_size as i64, i64),
(
"storage_size_accounts_map_us",
self.storage_size_accounts_map_us as i64,
i64
),
(
"storage_size_storages_us",
self.storage_size_storages_us as i64,
i64
),
(
"storage_size_accounts_map_flatten_us",
self.storage_size_accounts_map_flatten_us as i64,
i64
),
);
}
}
#[derive(Default, Debug, PartialEq, Clone)]
pub struct AccountInfo {
/// index identifying the append storage
store_id: AppendVecId,
/// offset into the storage
offset: usize,
/// needed to track shrink candidacy in bytes. Used to update the number
/// of alive bytes in an AppendVec as newer slots purge outdated entries
stored_size: usize,
/// lamports in the account used when squashing kept for optimization
/// purposes to remove accounts with zero balance.
lamports: u64,
}
impl IsCached for AccountInfo {
fn is_cached(&self) -> bool {
self.store_id == CACHE_VIRTUAL_STORAGE_ID
}
}
impl ZeroLamport for AccountInfo {
fn is_zero_lamport(&self) -> bool {
self.lamports == 0
}
}
struct MultiThreadProgress<'a> {
last_update: Instant,
my_last_report_count: u64,
total_count: &'a AtomicU64,
report_delay_secs: u64,
first_caller: bool,
ultimate_count: u64,
}
impl<'a> MultiThreadProgress<'a> {
fn new(total_count: &'a AtomicU64, report_delay_secs: u64, ultimate_count: u64) -> Self |
fn report(&mut self, my_current_count: u64) {
let now = Instant::now();
if now.duration_since(self.last_update).as_secs() >= self.report_delay_secs {
let my_total_newly_processed_slots_since_last_report =
my_current_count - self.my_last_report_count;
self.my_last_report_count = my_current_count;
let previous_total_processed_slots_across_all_threads = self.total_count.fetch_add(
my_total_newly_processed_slots_since_last_report,
Ordering::Relaxed,
);
self.first_caller =
self.first_caller || 0 == previous_total_processed_slots_across_all_threads;
if self.first_caller {
info!(
"generating index: {}/{} slots...",
previous_total_processed_slots_across_all_threads
+ my_total_newly_processed_slots_since_last_report,
self.ultimate_count
);
}
self.last_update = now;
}
}
}
/// An offset into the AccountsDb::storage vector
pub type AppendVecId = usize;
pub type SnapshotStorage = Vec<Arc<AccountStorageEntry>>;
pub type SnapshotStorages = Vec<SnapshotStorage>;
// Each slot has a set of storage entries.
pub(crate) type SlotStores = Arc<RwLock<HashMap<usize, Arc<AccountStorageEntry>>>>;
type AccountSlots = HashMap<Pubkey, HashSet<Slot>>;
type AppendVecOffsets = HashMap<AppendVecId, HashSet<usize>>;
type ReclaimResult = (AccountSlots, AppendVecOffsets);
type StorageFinder<'a> = Box<dyn Fn(Slot, usize) -> Arc<AccountStorageEntry> + 'a>;
type ShrinkCandidates = HashMap<Slot, HashMap<AppendVecId, Arc<AccountStorageEntry>>>;
trait Versioned {
fn version(&self) -> u64;
}
impl Versioned for (u64, Hash) {
fn version(&self) -> u64 {
self.0
}
}
impl Versioned for (u64, AccountInfo) {
fn version(&self) -> u64 {
self.0
}
}
// Some hints about the applicability of additional sanity checks for the do_load fast-path;
// the slower fallback code path will be taken if the fast path has failed past the retry
// threshold, regardless of these hints. Also, unlike before, a load must not fail
// non-deterministically, even under very rare circumstances.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum LoadHint {
// Caller hints that it's loading transactions for a block which is
// descended from the current root, and at the tip of its fork.
    // This further assumes that AccountIndex::max_root will not increase
    // during this load, meaning there should be no squash.
// Overall, this enables us to assert!() strictly while running the fast-path for
// account loading, while maintaining the determinism of account loading and resultant
// transaction execution thereof.
FixedMaxRoot,
    // Caller can't provide the above safety hint. Generally RPC and miscellaneous
    // other call sites fall into this category. The likelihood of taking the slower
    // path is slightly increased as well.
Unspecified,
}
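// Hedged illustration (the real call sites are not shown in this excerpt;
// `caller_is_replay_at_fork_tip` is a hypothetical flag): replay of a bank at the tip of
// its fork would pass LoadHint::FixedMaxRoot to enable the strict fast-path assertions,
// while an RPC-style lookup would pass LoadHint::Unspecified:
//
//     let load_hint = if caller_is_replay_at_fork_tip {
//         LoadHint::FixedMaxRoot
//     } else {
//         LoadHint::Unspecified
//     };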
#[derive(Debug)]
pub enum LoadedAccountAccessor<'a> {
    // StoredAccountMeta can't be held directly here due to its lifetime dependency on
    // AccountStorageEntry
Stored(Option<(Arc<AccountStorageEntry>, usize)>),
// None value in Cached variant means the cache was flushed
Cached(Option<(Pubkey, Cow<'a, CachedAccount>)>),
}
impl<'a> LoadedAccountAccessor<'a> {
fn check_and_get_loaded_account(&mut self) -> LoadedAccount {
        // All of the following .expect() and .unwrap() calls indicate serious logic errors;
        // ideally this invariant would be encoded in the Rust type system...
match self {
LoadedAccountAccessor::Cached(None) | LoadedAccountAccessor::Stored(None) => {
panic!("Should have already been taken care of when creating this LoadedAccountAccessor");
}
LoadedAccountAccessor::Cached(Some(_cached_account)) => {
// Cached(Some(x)) variant always produces `Some` for get_loaded_account() since
// it just returns the inner `x` without additional fetches
self.get_loaded_account().unwrap()
}
LoadedAccountAccessor::Stored(Some(_maybe_storage_entry)) => {
// If we do find the storage entry, we can guarantee that the storage entry is
// safe to read from because we grabbed a reference to the storage entry while it
// was still in the storage map. This means even if the storage entry is removed
// from the storage map after we grabbed the storage entry, the recycler should not
// reset the storage entry until we drop the reference to the storage entry.
self.get_loaded_account()
.expect("If a storage entry was found in the storage map, it must not have been reset yet")
}
}
}
fn get_loaded_account(&mut self) -> Option<LoadedAccount> {
match self {
LoadedAccountAccessor::Cached(cached_account) => {
let cached_account: (Pubkey, Cow<'a, CachedAccount>) =
cached_account.take().expect(
"Cache flushed/purged should be handled before trying to fetch account",
);
Some(LoadedAccount::Cached(cached_account))
}
LoadedAccountAccessor::Stored(maybe_storage_entry) => {
// storage entry may not be present if slot was cleaned up in
// between reading the accounts index and calling this function to
// get account meta from the storage entry here
maybe_storage_entry
.as_ref()
.and_then(|(storage_entry, offset)| {
storage_entry
.get_stored_account_meta(*offset)
.map(LoadedAccount::Stored)
})
}
}
}
}
pub enum LoadedAccount<'a> {
Stored(StoredAccountMeta<'a>),
Cached((Pubkey, Cow<'a, CachedAccount>)),
}
impl<'a> LoadedAccount<'a> {
pub fn owner(&self) -> &Pubkey {
match self {
LoadedAccount::Stored(stored_account_meta) => &stored_account_meta.account_meta.owner,
LoadedAccount::Cached((_, cached_account)) => cached_account.account.owner(),
}
}
pub fn executable(&self) -> bool {
match self {
LoadedAccount::Stored(stored_account_meta) => {
stored_account_meta.account_meta.executable
}
LoadedAccount::Cached((_, cached_account)) => cached_account.account.executable(),
}
}
pub fn loaded_hash(&self) -> Hash {
match self {
LoadedAccount::Stored(stored_account_meta) => *stored_account_meta.hash,
LoadedAccount::Cached((_, cached_account)) => cached_account.hash(),
}
}
pub fn pubkey(&self) -> &Pubkey {
match self {
LoadedAccount::Stored(stored_account_meta) => &stored_account_meta.meta.pubkey,
LoadedAccount::Cached((pubkey, _)) => pubkey,
}
}
pub fn write_version(&self) -> StoredMetaWriteVersion {
match self {
LoadedAccount::Stored(stored_account_meta) => stored_account_meta.meta.write_version,
LoadedAccount::Cached(_) => CACHE_VIRTUAL_WRITE_VERSION,
}
}
pub fn compute_hash(&self, slot: Slot, pubkey: &Pubkey) -> Hash {
match self {
LoadedAccount::Stored(stored_account_meta) => {
AccountsDb::hash_stored_account(slot, stored_account_meta)
}
LoadedAccount::Cached((_, cached_account)) => {
AccountsDb::hash_account(slot, &cached_account.account, pubkey)
}
}
}
pub fn stored_size(&self) -> usize {
match self {
LoadedAccount::Stored(stored_account_meta) => stored_account_meta.stored_size,
LoadedAccount::Cached(_) => CACHE_VIRTUAL_STORED_SIZE,
}
}
pub fn lamports(&self) -> u64 {
match self {
LoadedAccount::Stored(stored_account_meta) => stored_account_meta.account_meta.lamports,
LoadedAccount::Cached((_, cached_account)) => cached_account.account.lamports(),
}
}
pub fn take_account(self) -> AccountSharedData {
match self {
LoadedAccount::Stored(stored_account_meta) => stored_account_meta.clone_account(),
LoadedAccount::Cached((_, cached_account)) => match cached_account {
Cow::Owned(cached_account) => cached_account.account.clone(),
Cow::Borrowed(cached_account) => cached_account.account.clone(),
},
}
}
pub fn is_cached(&self) -> bool {
match self {
LoadedAccount::Stored(_) => false,
LoadedAccount::Cached(_) => true,
}
}
}
#[derive(Clone, Default, Debug)]
pub struct AccountStorage(pub DashMap<Slot, SlotStores>);
impl AccountStorage {
fn get_account_storage_entry(
&self,
slot: Slot,
store_id: AppendVecId,
) -> Option<Arc<AccountStorageEntry>> {
self.get_slot_stores(slot)
.and_then(|storage_map| storage_map.read().unwrap().get(&store_id).cloned())
}
fn get_slot_stores(&self, slot: Slot) -> Option<SlotStores> {
self.0.get(&slot).map(|result| result.value().clone())
}
fn get_slot_storage_entries(&self, slot: Slot) -> Option<Vec<Arc<AccountStorageEntry>>> {
self.get_slot_stores(slot)
.map(|res| res.read().unwrap().values().cloned().collect())
}
fn slot_store_count(&self, slot: Slot, store_id: AppendVecId) -> Option<usize> {
self.get_account_storage_entry(slot, store_id)
.map(|store| store.count())
}
fn all_slots(&self) -> Vec<Slot> {
self.0.iter().map(|iter_item| *iter_item.key()).collect()
}
}
#[derive(Debug, Eq, PartialEq, Copy, Clone, Deserialize, Serialize, AbiExample, AbiEnumVisitor)]
pub enum AccountStorageStatus {
Available = 0,
Full = 1,
Candidate = 2,
}
impl Default for AccountStorageStatus {
fn default() -> Self {
Self::Available
}
}
#[derive(Debug)]
pub enum BankHashVerificationError {
MismatchedAccountHash,
MismatchedBankHash,
MissingBankHash,
MismatchedTotalLamports(u64, u64),
}
#[derive(Default)]
struct CleanKeyTimings {
collect_delta_keys_us: u64,
delta_insert_us: u64,
hashset_to_vec_us: u64,
dirty_store_processing_us: u64,
delta_key_count: u64,
dirty_pubkeys_count: u64,
}
/// Persistent storage structure holding the accounts
#[derive(Debug)]
pub struct AccountStorageEntry {
pub(crate) id: AtomicUsize,
pub(crate) slot: AtomicU64,
/// storage holding the accounts
pub(crate) accounts: AppendVec,
    /// Keeps track of the number of accounts stored in a specific AppendVec.
    /// This is periodically checked to reuse stores that no longer hold
    /// any accounts.
    /// The status corresponding to the storage lets us know when the
    /// append_vec, once maxed out and then emptied, can be reclaimed.
count_and_status: RwLock<(usize, AccountStorageStatus)>,
    /// This is the total number of accounts stored ever since initialization, kept to
    /// track the lifetime count of all store operations. It differs from
    /// count_and_status in that this field is never decremented.
    ///
    /// This is used as a rough estimate for slot shrinking. Given that relaxed
    /// use case, this value is NOT strictly synchronized with count_and_status!
approx_store_count: AtomicUsize,
alive_bytes: AtomicUsize,
}
impl AccountStorageEntry {
pub fn new(path: &Path, slot: Slot, id: usize, file_size: u64) -> Self {
let tail = AppendVec::file_name(slot, id);
let path = Path::new(path).join(tail);
let accounts = AppendVec::new(&path, true, file_size as usize);
Self {
id: AtomicUsize::new(id),
slot: AtomicU64::new(slot),
accounts,
count_and_status: RwLock::new((0, AccountStorageStatus::Available)),
approx_store_count: AtomicUsize::new(0),
alive_bytes: AtomicUsize::new(0),
}
}
pub(crate) fn new_existing(
slot: Slot,
id: AppendVecId,
accounts: AppendVec,
num_accounts: usize,
) -> Self {
Self {
id: AtomicUsize::new(id),
slot: AtomicU64::new(slot),
accounts,
count_and_status: RwLock::new((0, AccountStorageStatus::Available)),
approx_store_count: AtomicUsize::new(num_accounts),
alive_bytes: AtomicUsize::new(0),
}
}
pub fn set_status(&self, mut status: AccountStorageStatus) {
let mut count_and_status = self.count_and_status.write().unwrap();
let count = count_and_status.0;
if status == AccountStorageStatus::Full && count == 0 {
// this case arises when the append_vec is full (store_ptrs fails),
// but all accounts have already been removed from the storage
//
// the only time it's safe to call reset() on an append_vec is when
// every account has been removed
// **and**
// the append_vec has previously been completely full
//
self.accounts.reset();
status = AccountStorageStatus::Available;
}
*count_and_status = (count, status);
}
pub fn recycle(&self, slot: Slot, id: usize) {
let mut count_and_status = self.count_and_status.write().unwrap();
self.accounts.reset();
*count_and_status = (0, AccountStorageStatus::Available);
self.slot.store(slot, Ordering::Release);
self.id.store(id, Ordering::Relaxed);
self.approx_store_count.store(0, Ordering::Relaxed);
self.alive_bytes.store(0, Ordering::Relaxed);
}
pub fn status(&self) -> AccountStorageStatus {
self.count_and_status.read().unwrap().1
}
pub fn count(&self) -> usize {
self.count_and_status.read().unwrap().0
}
pub fn approx_stored_count(&self) -> usize {
self.approx_store_count.load(Ordering::Relaxed)
}
pub fn alive_bytes(&self) -> usize {
self.alive_bytes.load(Ordering::SeqCst)
}
pub fn written_bytes(&self) -> u64 {
self.accounts.len() as u64
}
pub fn total_bytes(&self) -> u64 {
self.accounts.capacity()
}
pub fn has_accounts(&self) -> bool {
self.count() > 0
}
pub fn slot(&self) -> Slot {
self.slot.load(Ordering::Acquire)
}
pub fn append_vec_id(&self) -> AppendVecId {
self.id.load(Ordering::Relaxed)
}
pub fn flush(&self) -> Result<(), IoError> {
self.accounts.flush()
}
fn get_stored_account_meta(&self, offset: usize) -> Option<StoredAccountMeta> {
Some(self.accounts.get_account(offset)?.0)
}
fn add_account(&self, num_bytes: usize) {
let mut count_and_status = self.count_and_status.write().unwrap();
*count_and_status = (count_and_status.0 + 1, count_and_status.1);
self.approx_store_count.fetch_add(1, Ordering::Relaxed);
self.alive_bytes.fetch_add(num_bytes, Ordering::SeqCst);
}
fn try_available(&self) -> bool {
let mut count_and_status = self.count_and_status.write().unwrap();
let (count, status) = *count_and_status;
if status == AccountStorageStatus::Available {
*count_and_status = (count, AccountStorageStatus::Candidate);
true
} else {
false
}
}
pub fn all_accounts(&self) -> Vec<StoredAccountMeta> {
self.accounts.accounts(0)
}
fn remove_account(&self, num_bytes: usize, reset_accounts: bool) -> usize {
let mut count_and_status = self.count_and_status.write().unwrap();
let (mut count, mut status) = *count_and_status;
if count == 1 && status == AccountStorageStatus::Full && reset_accounts {
// this case arises when we remove the last account from the
// storage, but we've learned from previous write attempts that
// the storage is full
//
// the only time it's safe to call reset() on an append_vec is when
// every account has been removed
// **and**
// the append_vec has previously been completely full
//
// otherwise, the storage may be in flight with a store()
// call
self.accounts.reset();
status = AccountStorageStatus::Available;
}
        // Some code path is removing too many accounts; this may result in an
        // unintended reveal of old state for unrelated accounts.
assert!(
count > 0,
"double remove of account in slot: {}/store: {}!!",
self.slot(),
self.append_vec_id(),
);
self.alive_bytes.fetch_sub(num_bytes, Ordering::SeqCst);
count -= 1;
*count_and_status = (count, status);
count
}
pub fn get_path(&self) -> PathBuf {
self.accounts.get_path()
}
}
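// A hedged lifecycle sketch of count_and_status, based only on the methods above
// (`path`, `slot`, `id`, `file_size`, and `num_bytes` are placeholder values):
//
//     let store = AccountStorageEntry::new(&path, slot, id, file_size);
//     assert!(store.try_available());        // Available -> Candidate
//     store.add_account(num_bytes);          // count: 0 -> 1
//     store.set_status(AccountStorageStatus::Full);
//     store.remove_account(num_bytes, true); // count: 1 -> 0; full + empty => reset(),
//                                            // status back to Available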
pub fn get_temp_accounts_paths(count: u32) -> IoResult<(Vec<TempDir>, Vec<PathBuf>)> {
let temp_dirs: IoResult<Vec<TempDir>> = (0..count).map(|_| TempDir::new()).collect();
let temp_dirs = temp_dirs?;
let paths: Vec<PathBuf> = temp_dirs.iter().map(|t| t.path().to_path_buf()).collect();
Ok((temp_dirs, paths))
}
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, AbiExample)]
pub struct BankHashStats {
pub num_updated_accounts: u64,
pub num_removed_accounts: u64,
pub num_lamports_stored: u64,
pub total_data_len: u64,
pub num_executable_accounts: u64,
}
impl BankHashStats {
pub fn update<T: ReadableAccount>(&mut self, account: &T) {
if account.lamports() == 0 {
self.num_removed_accounts += 1;
} else {
self.num_updated_accounts += 1;
}
self.total_data_len = self
.total_data_len
.wrapping_add(account.data().len() as u64);
if account.executable() {
self.num_executable_accounts += 1;
}
self.num_lamports_stored = self.num_lamports_stored.wrapping_add(account.lamports());
}
pub fn merge(&mut self, other: &BankHashStats) {
self.num_updated_accounts += other.num_updated_accounts;
self.num_removed_accounts += other.num_removed_accounts;
self.total_data_len = self.total_data_len.wrapping_add(other.total_data_len);
self.num_lamports_stored = self
.num_lamports_stored
.wrapping_add(other.num_lamports_stored);
self.num_executable_accounts += other.num_executable_accounts;
}
}
#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, AbiExample)]
pub struct BankHashInfo {
pub hash: Hash,
pub snapshot_hash: Hash,
pub stats: BankHashStats,
}
#[derive(Debug)]
struct FrozenAccountInfo {
pub hash: Hash, // Hash generated by hash_frozen_account_data()
pub lamports: u64, // Account balance cannot be lower than this amount
}
#[derive(Default)]
pub struct StoreAccountsTiming {
store_accounts_elapsed: u64,
update_index_elapsed: u64,
handle_reclaims_elapsed: u64,
}
#[derive(Debug, Default)]
struct RecycleStores {
entries: Vec<(Instant, Arc<AccountStorageEntry>)>,
total_bytes: u64,
}
// 30 min should be enough to be certain there won't be any prospective recycle uses for a
// given store entry, because by then ~2500 slots and ~25 passes of AccountsBackgroundService
// have already gone by.
pub const EXPIRATION_TTL_SECONDS: u64 = 1800;
impl RecycleStores {
fn add_entry(&mut self, new_entry: Arc<AccountStorageEntry>) {
self.total_bytes += new_entry.total_bytes();
self.entries.push((Instant::now(), new_entry))
}
fn iter(&self) -> std::slice::Iter<(Instant, Arc<AccountStorageEntry>)> {
self.entries.iter()
}
fn add_entries(&mut self, new_entries: Vec<Arc<AccountStorageEntry>>) {
self.total_bytes += new_entries.iter().map(|e| e.total_bytes()).sum::<u64>();
let now = Instant::now();
for new_entry in new_entries {
self.entries.push((now, new_entry));
}
}
fn expire_old_entries(&mut self) -> Vec<Arc<AccountStorageEntry>> {
let mut expired = vec![];
let now = Instant::now();
let mut expired_bytes = 0;
self.entries.retain(|(recycled_time, entry)| {
if now.duration_since(*recycled_time).as_secs() > EXPIRATION_TTL_SECONDS {
if Arc::strong_count(entry) >= 2 {
warn!(
"Expiring still in-use recycled StorageEntry anyway...: id: {} slot: {}",
entry.append_vec_id(),
entry.slot(),
);
}
expired_bytes += entry.total_bytes();
expired.push(entry.clone());
false
} else {
true
}
});
self.total_bytes -= expired_bytes;
expired
}
fn remove_entry(&mut self, index: usize) -> Arc<AccountStorageEntry> {
let (_added_time, removed_entry) = self.entries.swap_remove(index);
self.total_bytes -= removed_entry.total_bytes();
removed_entry
}
fn entry_count(&self) -> usize {
self.entries.len()
}
fn total_bytes(&self) -> u64 {
self.total_bytes
}
}
/// Removing unrooted slots in Accounts Background Service needs to be synchronized with flushing
/// slots from the Accounts Cache. This keeps track of those slots and the Mutex + Condvar for
/// synchronization.
#[derive(Debug, Default)]
struct RemoveUnrootedSlotsSynchronization {
// slots being flushed from the cache or being purged
slots_under_contention: Mutex<HashSet<Slot>>,
signal: Condvar,
}
type AccountInfoAccountsIndex = AccountsIndex<AccountInfo>;
// This structure handles the load/store of the accounts
#[derive(Debug)]
pub struct AccountsDb {
/// Keeps tracks of index into AppendVec on a per slot basis
pub accounts_index: AccountInfoAccountsIndex,
pub storage: AccountStorage,
pub accounts_cache: AccountsCache,
sender_bg_hasher: Option<Sender<CachedAccount>>,
pub read_only_accounts_cache: ReadOnlyAccountsCache,
recycle_stores: RwLock<RecycleStores>,
/// distribute the accounts across storage lists
pub next_id: AtomicUsize,
/// Set of shrinkable stores organized by map of slot to append_vec_id
pub shrink_candidate_slots: Mutex<ShrinkCandidates>,
/// Legacy shrink slots to support non-cached code-path.
pub shrink_candidate_slots_v1: Mutex<Vec<Slot>>,
pub(crate) write_version: AtomicU64,
/// Set of storage paths to pick from
pub(crate) paths: Vec<PathBuf>,
pub shrink_paths: RwLock<Option<Vec<PathBuf>>>,
/// Directory of paths this accounts_db needs to hold/remove
pub(crate) temp_paths: Option<Vec<TempDir>>,
/// Starting file size of appendvecs
file_size: u64,
    /// Accounts that will cause a panic! if their data is modified or lamports decrease
frozen_accounts: HashMap<Pubkey, FrozenAccountInfo>,
/// Thread pool used for par_iter
pub thread_pool: ThreadPool,
pub thread_pool_clean: ThreadPool,
/// Number of append vecs to create to maximize parallelism when scanning
/// the accounts
min_num_stores: usize,
pub bank_hashes: RwLock<HashMap<Slot, BankHashInfo>>,
stats: AccountsStats,
clean_accounts_stats: CleanAccountsStats,
// Stats for purges called outside of clean_accounts()
external_purge_slots_stats: PurgeStats,
shrink_stats: ShrinkStats,
pub cluster_type: Option<ClusterType>,
pub account_indexes: AccountSecondaryIndexes,
pub caching_enabled: bool,
/// Set of unique keys per slot which is used
/// to drive clean_accounts
/// Generated by get_accounts_delta_hash
uncleaned_pubkeys: DashMap<Slot, Vec<Pubkey>>,
#[cfg(test)]
load_delay: u64,
#[cfg(test)]
load_limit: AtomicU64,
is_bank_drop_callback_enabled: AtomicBool,
/// Set of slots currently being flushed by `flush_slot_cache()` or removed
/// by `remove_unrooted_slot()`. Used to ensure `remove_unrooted_slots(slots)`
/// can safely clear the set of unrooted slots `slots`.
remove_unrooted_slots_synchronization: RemoveUnrootedSlotsSynchronization,
shrink_ratio: AccountShrinkThreshold,
/// Set of stores which are recently rooted or had accounts removed
/// such that potentially a 0-lamport account update could be present which
/// means we can remove the account from the index entirely.
dirty_stores: DashMap<(Slot, AppendVecId), Arc<AccountStorageEntry>>,
}
#[derive(Debug, Default)]
struct AccountsStats {
delta_hash_scan_time_total_us: AtomicU64,
delta_hash_accumulate_time_total_us: AtomicU64,
delta_hash_num: AtomicU64,
last_store_report: AtomicU64,
store_hash_accounts: AtomicU64,
calc_stored_meta: AtomicU64,
store_accounts: AtomicU64,
store_update_index: AtomicU64,
store_handle_reclaims: AtomicU64,
store_append_accounts: AtomicU64,
store_find_store: AtomicU64,
store_num_accounts: AtomicU64,
store_total_data: AtomicU64,
recycle_store_count: AtomicU64,
create_store_count: AtomicU64,
store_get_slot_store: AtomicU64,
store_find_existing: AtomicU64,
dropped_stores: AtomicU64,
store_uncleaned_update: AtomicU64,
}
#[derive(Debug, Default)]
struct PurgeStats {
last_report: AtomicU64,
safety_checks_elapsed: AtomicU64,
remove_cache_elapsed: AtomicU64,
remove_storage_entries_elapsed: AtomicU64,
drop_storage_entries_elapsed: AtomicU64,
num_cached_slots_removed: AtomicUsize,
num_stored_slots_removed: AtomicUsize,
total_removed_storage_entries: AtomicUsize,
total_removed_cached_bytes: AtomicU64,
total_removed_stored_bytes: AtomicU64,
recycle_stores_write_elapsed: AtomicU64,
scan_storages_elasped: AtomicU64,
purge_accounts_index_elapsed: AtomicU64,
handle_reclaims_elapsed: AtomicU64,
}
impl PurgeStats {
fn report(&self, metric_name: &'static str, report_interval_ms: Option<u64>) {
let should_report = report_interval_ms
.map(|report_interval_ms| {
let last = self.last_report.load(Ordering::Relaxed);
let now = solana_sdk::timing::timestamp();
now.saturating_sub(last) > report_interval_ms
&& self.last_report.compare_exchange(
last,
now,
Ordering::Relaxed,
Ordering::Relaxed,
) == Ok(last)
&& last != 0
})
.unwrap_or(true);
if should_report {
datapoint_info!(
metric_name,
(
"safety_checks_elapsed",
self.safety_checks_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"remove_cache_elapsed",
self.remove_cache_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"remove_storage_entries_elapsed",
self.remove_storage_entries_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"drop_storage_entries_elapsed",
self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"num_cached_slots_removed",
self.num_cached_slots_removed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"num_stored_slots_removed",
self.num_stored_slots_removed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_removed_storage_entries",
self.total_removed_storage_entries
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_removed_cached_bytes",
self.total_removed_cached_bytes.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"total_removed_stored_bytes",
self.total_removed_stored_bytes.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"recycle_stores_write_elapsed",
self.recycle_stores_write_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"scan_storages_elasped",
self.scan_storages_elasped.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"purge_accounts_index_elapsed",
self.purge_accounts_index_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"handle_reclaims_elapsed",
self.handle_reclaims_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
);
}
}
}
#[derive(Debug)]
struct FlushStats {
slot: Slot,
num_flushed: usize,
num_purged: usize,
total_size: u64,
}
#[derive(Debug, Default)]
struct LatestAccountsIndexRootsStats {
roots_len: AtomicUsize,
uncleaned_roots_len: AtomicUsize,
previous_uncleaned_roots_len: AtomicUsize,
roots_range: AtomicU64,
rooted_cleaned_count: AtomicUsize,
unrooted_cleaned_count: AtomicUsize,
}
impl LatestAccountsIndexRootsStats {
fn update(&self, accounts_index_roots_stats: &AccountsIndexRootsStats) {
self.roots_len
.store(accounts_index_roots_stats.roots_len, Ordering::Relaxed);
self.uncleaned_roots_len.store(
accounts_index_roots_stats.uncleaned_roots_len,
Ordering::Relaxed,
);
self.previous_uncleaned_roots_len.store(
accounts_index_roots_stats.previous_uncleaned_roots_len,
Ordering::Relaxed,
);
self.roots_range
.store(accounts_index_roots_stats.roots_range, Ordering::Relaxed);
self.rooted_cleaned_count.fetch_add(
accounts_index_roots_stats.rooted_cleaned_count,
Ordering::Relaxed,
);
self.unrooted_cleaned_count.fetch_add(
accounts_index_roots_stats.unrooted_cleaned_count,
Ordering::Relaxed,
);
}
fn report(&self) {
datapoint_info!(
"accounts_index_roots_len",
(
"roots_len",
self.roots_len.load(Ordering::Relaxed) as i64,
i64
),
(
"uncleaned_roots_len",
self.uncleaned_roots_len.load(Ordering::Relaxed) as i64,
i64
),
(
"previous_uncleaned_roots_len",
self.previous_uncleaned_roots_len.load(Ordering::Relaxed) as i64,
i64
),
(
"roots_range_width",
self.roots_range.load(Ordering::Relaxed) as i64,
i64
),
(
"unrooted_cleaned_count",
self.unrooted_cleaned_count.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"rooted_cleaned_count",
self.rooted_cleaned_count.swap(0, Ordering::Relaxed) as i64,
i64
),
);
// Don't need to reset since this tracks the latest updates, not a cumulative total
}
}
#[derive(Debug, Default)]
struct CleanAccountsStats {
purge_stats: PurgeStats,
latest_accounts_index_roots_stats: LatestAccountsIndexRootsStats,
}
impl CleanAccountsStats {
fn report(&self) {
self.purge_stats.report("clean_purge_slots_stats", None);
self.latest_accounts_index_roots_stats.report();
}
}
#[derive(Debug, Default)]
struct ShrinkStats {
last_report: AtomicU64,
num_slots_shrunk: AtomicUsize,
storage_read_elapsed: AtomicU64,
index_read_elapsed: AtomicU64,
find_alive_elapsed: AtomicU64,
create_and_insert_store_elapsed: AtomicU64,
store_accounts_elapsed: AtomicU64,
update_index_elapsed: AtomicU64,
handle_reclaims_elapsed: AtomicU64,
write_storage_elapsed: AtomicU64,
rewrite_elapsed: AtomicU64,
drop_storage_entries_elapsed: AtomicU64,
recycle_stores_write_elapsed: AtomicU64,
accounts_removed: AtomicUsize,
bytes_removed: AtomicU64,
bytes_written: AtomicU64,
skipped_shrink: AtomicU64,
}
impl ShrinkStats {
fn report(&self) {
let last = self.last_report.load(Ordering::Relaxed);
let now = solana_sdk::timing::timestamp();
        // 'last' is initialized to 0 by ::default(),
        // so the first 'report' call would otherwise always log.
        // Instead, the first call now initializes 'last_report' to now.
let is_first_call = last == 0;
let should_report = now.saturating_sub(last) > 1000
&& self
.last_report
.compare_exchange(last, now, Ordering::Relaxed, Ordering::Relaxed)
== Ok(last);
if !is_first_call && should_report {
datapoint_info!(
"shrink_stats",
(
"num_slots_shrunk",
self.num_slots_shrunk.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"storage_read_elapsed",
self.storage_read_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"index_read_elapsed",
self.index_read_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"find_alive_elapsed",
self.find_alive_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"create_and_insert_store_elapsed",
self.create_and_insert_store_elapsed
.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"store_accounts_elapsed",
self.store_accounts_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"update_index_elapsed",
self.update_index_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"handle_reclaims_elapsed",
self.handle_reclaims_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"write_storage_elapsed",
self.write_storage_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"rewrite_elapsed",
self.rewrite_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"drop_storage_entries_elapsed",
self.drop_storage_entries_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"recycle_stores_write_time",
self.recycle_stores_write_elapsed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"accounts_removed",
self.accounts_removed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"bytes_removed",
self.bytes_removed.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"bytes_written",
self.bytes_written.swap(0, Ordering::Relaxed) as i64,
i64
),
(
"skipped_shrink",
self.skipped_shrink.swap(0, Ordering::Relaxed) as i64,
i64
),
);
}
}
}
pub fn make_min_priority_thread_pool() -> ThreadPool {
// Use lower thread count to reduce priority.
let num_threads = std::cmp::max(2, num_cpus::get() / 4);
rayon::ThreadPoolBuilder::new()
.thread_name(|i| format!("solana-cleanup-accounts-{}", i))
.num_threads(num_threads)
.build()
.unwrap()
}
#[cfg(all(test, RUSTC_WITH_SPECIALIZATION))]
impl solana_frozen_abi::abi_example::AbiExample for AccountsDb {
fn example() -> Self {
let accounts_db = AccountsDb::new_single();
let key = Pubkey::default();
let some_data_len = 5;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
accounts_db.store_uncached(some_slot, &[(&key, &account)]);
accounts_db.add_root(0);
accounts_db
}
}
impl<'a> ReadableAccount for StoredAccountMeta<'a> {
fn lamports(&self) -> u64 {
self.account_meta.lamports
}
fn data(&self) -> &[u8] {
self.data
}
fn owner(&self) -> &Pubkey {
&self.account_meta.owner
}
fn executable(&self) -> bool {
self.account_meta.executable
}
fn rent_epoch(&self) -> Epoch {
self.account_meta.rent_epoch
}
}
impl Default for AccountsDb {
fn default() -> Self {
let num_threads = get_thread_count();
const MAX_READ_ONLY_CACHE_DATA_SIZE: usize = 200_000_000;
let mut bank_hashes = HashMap::new();
bank_hashes.insert(0, BankHashInfo::default());
AccountsDb {
accounts_index: AccountsIndex::default(),
storage: AccountStorage::default(),
accounts_cache: AccountsCache::default(),
sender_bg_hasher: None,
read_only_accounts_cache: ReadOnlyAccountsCache::new(MAX_READ_ONLY_CACHE_DATA_SIZE),
recycle_stores: RwLock::new(RecycleStores::default()),
uncleaned_pubkeys: DashMap::new(),
next_id: AtomicUsize::new(0),
shrink_candidate_slots_v1: Mutex::new(Vec::new()),
shrink_candidate_slots: Mutex::new(HashMap::new()),
write_version: AtomicU64::new(0),
paths: vec![],
shrink_paths: RwLock::new(None),
temp_paths: None,
file_size: DEFAULT_FILE_SIZE,
thread_pool: rayon::ThreadPoolBuilder::new()
.num_threads(num_threads)
.thread_name(|i| format!("solana-db-accounts-{}", i))
.build()
.unwrap(),
thread_pool_clean: make_min_priority_thread_pool(),
min_num_stores: num_threads,
bank_hashes: RwLock::new(bank_hashes),
frozen_accounts: HashMap::new(),
external_purge_slots_stats: PurgeStats::default(),
clean_accounts_stats: CleanAccountsStats::default(),
shrink_stats: ShrinkStats::default(),
stats: AccountsStats::default(),
cluster_type: None,
account_indexes: AccountSecondaryIndexes::default(),
caching_enabled: false,
#[cfg(test)]
load_delay: u64::default(),
#[cfg(test)]
load_limit: AtomicU64::default(),
is_bank_drop_callback_enabled: AtomicBool::default(),
remove_unrooted_slots_synchronization: RemoveUnrootedSlotsSynchronization::default(),
shrink_ratio: AccountShrinkThreshold::default(),
dirty_stores: DashMap::default(),
}
}
}
type GenerateIndexAccountsMap<'a> =
HashMap<Pubkey, (StoredMetaWriteVersion, AppendVecId, StoredAccountMeta<'a>)>;
impl AccountsDb {
pub fn new(paths: Vec<PathBuf>, cluster_type: &ClusterType) -> Self {
AccountsDb::new_with_config(
paths,
cluster_type,
AccountSecondaryIndexes::default(),
false,
AccountShrinkThreshold::default(),
)
}
pub fn new_with_config(
paths: Vec<PathBuf>,
cluster_type: &ClusterType,
account_indexes: AccountSecondaryIndexes,
caching_enabled: bool,
shrink_ratio: AccountShrinkThreshold,
) -> Self {
let mut new = if !paths.is_empty() {
Self {
paths,
temp_paths: None,
cluster_type: Some(*cluster_type),
account_indexes,
caching_enabled,
shrink_ratio,
..Self::default()
}
} else {
// Create a temporary set of accounts directories, used primarily
// for testing
let (temp_dirs, paths) = get_temp_accounts_paths(DEFAULT_NUM_DIRS).unwrap();
Self {
paths,
temp_paths: Some(temp_dirs),
cluster_type: Some(*cluster_type),
account_indexes,
caching_enabled,
shrink_ratio,
..Self::default()
}
};
new.start_background_hasher();
{
for path in new.paths.iter() {
std::fs::create_dir_all(path).expect("Create directory failed.");
}
}
new
}
pub fn set_shrink_paths(&self, paths: Vec<PathBuf>) {
assert!(!paths.is_empty());
let mut shrink_paths = self.shrink_paths.write().unwrap();
for path in &paths {
std::fs::create_dir_all(path).expect("Create directory failed.");
}
*shrink_paths = Some(paths);
}
pub fn file_size(&self) -> u64 {
self.file_size
}
pub fn new_single() -> Self {
AccountsDb {
min_num_stores: 0,
..AccountsDb::new(Vec::new(), &ClusterType::Development)
}
}
fn new_storage_entry(&self, slot: Slot, path: &Path, size: u64) -> AccountStorageEntry {
AccountStorageEntry::new(
path,
slot,
self.next_id.fetch_add(1, Ordering::Relaxed),
size,
)
}
pub fn expected_cluster_type(&self) -> ClusterType {
self.cluster_type
.expect("Cluster type must be set at initialization")
}
/// Reclaim older states of accounts older than max_clean_root for AccountsDb bloat mitigation
fn clean_accounts_older_than_root(
&self,
purges: Vec<Pubkey>,
max_clean_root: Option<Slot>,
) -> ReclaimResult {
if purges.is_empty() {
return ReclaimResult::default();
}
        // This number isn't carefully chosen; it was just guessed so that
        // the hot loop will be on the order of ~Xms.
const INDEX_CLEAN_BULK_COUNT: usize = 4096;
let mut clean_rooted = Measure::start("clean_old_root-ms");
let reclaim_vecs = purges
.par_chunks(INDEX_CLEAN_BULK_COUNT)
.map(|pubkeys: &[Pubkey]| {
let mut reclaims = Vec::new();
for pubkey in pubkeys {
self.accounts_index
.clean_rooted_entries(pubkey, &mut reclaims, max_clean_root);
}
reclaims
});
let reclaims: Vec<_> = reclaim_vecs.flatten().collect();
clean_rooted.stop();
inc_new_counter_info!("clean-old-root-par-clean-ms", clean_rooted.as_ms() as usize);
let mut measure = Measure::start("clean_old_root_reclaims");
// Don't reset from clean, since the pubkeys in those stores may need to be unref'ed
// and those stores may be used for background hashing.
let reset_accounts = false;
let mut reclaim_result = ReclaimResult::default();
self.handle_reclaims(
&reclaims,
None,
Some(&self.clean_accounts_stats.purge_stats),
Some(&mut reclaim_result),
reset_accounts,
);
measure.stop();
debug!("{} {}", clean_rooted, measure);
inc_new_counter_info!("clean-old-root-reclaim-ms", measure.as_ms() as usize);
reclaim_result
}
fn do_reset_uncleaned_roots(&self, max_clean_root: Option<Slot>) {
self.accounts_index.reset_uncleaned_roots(max_clean_root);
}
fn calc_delete_dependencies(
purges: &HashMap<Pubkey, (SlotList<AccountInfo>, u64)>,
store_counts: &mut HashMap<AppendVecId, (usize, HashSet<Pubkey>)>,
) {
        // Another pass to check whether there are some filtered accounts for which
        // the criteria of "delete every appendvec that contains them" cannot be met;
        // if so, increment the storage counts of those appendvecs so they are kept.
let mut already_counted = HashSet::new();
for (pubkey, (account_infos, ref_count_from_storage)) in purges.iter() {
let no_delete = if account_infos.len() as u64 != *ref_count_from_storage {
debug!(
"calc_delete_dependencies(),
pubkey: {},
account_infos: {:?},
account_infos_len: {},
ref_count_from_storage: {}",
pubkey,
account_infos,
account_infos.len(),
ref_count_from_storage,
);
true
} else {
let mut no_delete = false;
for (_slot, account_info) in account_infos {
debug!(
"calc_delete_dependencies()
storage id: {},
count len: {}",
account_info.store_id,
store_counts.get(&account_info.store_id).unwrap().0,
);
if store_counts.get(&account_info.store_id).unwrap().0 != 0 {
no_delete = true;
break;
}
}
no_delete
};
if no_delete {
let mut pending_store_ids: HashSet<usize> = HashSet::new();
for (_bank_id, account_info) in account_infos {
if !already_counted.contains(&account_info.store_id) {
pending_store_ids.insert(account_info.store_id);
}
}
while !pending_store_ids.is_empty() {
let id = pending_store_ids.iter().next().cloned().unwrap();
pending_store_ids.remove(&id);
if already_counted.contains(&id) {
continue;
}
store_counts.get_mut(&id).unwrap().0 += 1;
already_counted.insert(id);
let affected_pubkeys = &store_counts.get(&id).unwrap().1;
for key in affected_pubkeys {
for (_slot, account_info) in &purges.get(key).unwrap().0 {
if !already_counted.contains(&account_info.store_id) {
pending_store_ids.insert(account_info.store_id);
}
}
}
}
}
}
}
fn background_hasher(receiver: Receiver<CachedAccount>) {
loop {
let result = receiver.recv();
match result {
Ok(account) => {
                    // If we hold the only ref, this account no longer needs to be hashed;
                    // we ignore it and it will disappear.
                    if Arc::strong_count(&account) > 1 {
                        // This will calculate the hash and store it inside the account
                        // if it hasn't been calculated yet.
                        let _ = (*account).hash();
};
}
Err(_) => {
break;
}
}
}
}
fn start_background_hasher(&mut self) {
let (sender, receiver) = unbounded();
Builder::new()
.name("solana-db-store-hasher-accounts".to_string())
.spawn(move || {
Self::background_hasher(receiver);
})
.unwrap();
self.sender_bg_hasher = Some(sender);
}
fn purge_keys_exact<'a, C: 'a>(
&'a self,
pubkey_to_slot_set: impl Iterator<Item = &'a (Pubkey, C)>,
) -> Vec<(u64, AccountInfo)>
where
C: Contains<'a, Slot>,
{
let mut reclaims = Vec::new();
let mut dead_keys = Vec::new();
for (pubkey, slots_set) in pubkey_to_slot_set {
let is_empty = self
.accounts_index
.purge_exact(pubkey, slots_set, &mut reclaims);
if is_empty {
dead_keys.push(pubkey);
}
}
self.accounts_index
.handle_dead_keys(&dead_keys, &self.account_indexes);
reclaims
}
fn max_clean_root(&self, proposed_clean_root: Option<Slot>) -> Option<Slot> {
match (
self.accounts_index.min_ongoing_scan_root(),
proposed_clean_root,
) {
(None, None) => None,
(Some(min_scan_root), None) => Some(min_scan_root),
(None, Some(proposed_clean_root)) => Some(proposed_clean_root),
(Some(min_scan_root), Some(proposed_clean_root)) => {
Some(std::cmp::min(min_scan_root, proposed_clean_root))
}
}
}
/// Collect all the uncleaned slots, up to a max slot
///
/// Search through the uncleaned Pubkeys and return all the slots, up to a maximum slot.
fn collect_uncleaned_slots_up_to_slot(&self, max_slot: Slot) -> Vec<Slot> {
self.uncleaned_pubkeys
.iter()
.filter_map(|entry| {
let slot = *entry.key();
(slot <= max_slot).then(|| slot)
})
.collect()
}
/// Remove `slots` from `uncleaned_pubkeys` and collect all pubkeys
///
/// For each slot in the list of uncleaned slots, remove it from the `uncleaned_pubkeys` Map
/// and collect all the pubkeys to return.
fn remove_uncleaned_slots_and_collect_pubkeys(
&self,
uncleaned_slots: Vec<Slot>,
) -> Vec<Vec<Pubkey>> {
uncleaned_slots
.into_iter()
.filter_map(|uncleaned_slot| {
self.uncleaned_pubkeys
.remove(&uncleaned_slot)
.map(|(_removed_slot, removed_pubkeys)| removed_pubkeys)
})
.collect()
}
/// Remove uncleaned slots, up to a maximum slot, and return the collected pubkeys
///
fn remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(
&self,
max_slot: Slot,
) -> Vec<Vec<Pubkey>> {
let uncleaned_slots = self.collect_uncleaned_slots_up_to_slot(max_slot);
self.remove_uncleaned_slots_and_collect_pubkeys(uncleaned_slots)
}
// Construct a vec of pubkeys for cleaning from:
// uncleaned_pubkeys - the delta set of updated pubkeys in rooted slots from the last clean
// dirty_stores - set of stores which had accounts removed or recently rooted
fn construct_candidate_clean_keys(
&self,
max_clean_root: Option<Slot>,
timings: &mut CleanKeyTimings,
) -> Vec<Pubkey> {
let mut dirty_store_processing_time = Measure::start("dirty_store_processing");
let max_slot = max_clean_root.unwrap_or_else(|| self.accounts_index.max_root());
let mut dirty_stores = Vec::with_capacity(self.dirty_stores.len());
self.dirty_stores.retain(|(slot, _store_id), store| {
if *slot > max_slot {
true
} else {
dirty_stores.push((*slot, store.clone()));
false
}
});
let dirty_stores_len = dirty_stores.len();
let pubkeys = DashSet::new();
for (_slot, store) in dirty_stores {
for account in store.accounts.accounts(0) {
pubkeys.insert(account.meta.pubkey);
}
}
trace!(
"dirty_stores.len: {} pubkeys.len: {}",
dirty_stores_len,
pubkeys.len()
);
timings.dirty_pubkeys_count = pubkeys.len() as u64;
dirty_store_processing_time.stop();
timings.dirty_store_processing_us += dirty_store_processing_time.as_us();
let mut collect_delta_keys = Measure::start("key_create");
let delta_keys = self.remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(max_slot);
collect_delta_keys.stop();
timings.collect_delta_keys_us += collect_delta_keys.as_us();
let mut delta_insert = Measure::start("delta_insert");
self.thread_pool_clean.install(|| {
delta_keys.par_iter().for_each(|keys| {
for key in keys {
pubkeys.insert(*key);
}
});
});
delta_insert.stop();
timings.delta_insert_us += delta_insert.as_us();
timings.delta_key_count = pubkeys.len() as u64;
let mut hashset_to_vec = Measure::start("flat_map");
let pubkeys: Vec<Pubkey> = pubkeys.into_iter().collect();
hashset_to_vec.stop();
timings.hashset_to_vec_us += hashset_to_vec.as_us();
pubkeys
}
// Purge zero lamport accounts and older rooted account states as garbage
// collection
// Only remove those accounts where the entire rooted history of the account
// can be purged because there are no live append vecs in the ancestors
pub fn clean_accounts(&self, max_clean_root: Option<Slot>, is_startup: bool) {
let max_clean_root = self.max_clean_root(max_clean_root);
        // Hold a lock to prevent slot shrinking from running, because shrinking might modify
        // some rooted slot storages, and that cannot happen while we're cleaning accounts,
        // since cleaning also modifies those same rooted slot storages!
let mut candidates_v1 = self.shrink_candidate_slots_v1.lock().unwrap();
self.report_store_stats();
let mut key_timings = CleanKeyTimings::default();
let pubkeys = self.construct_candidate_clean_keys(max_clean_root, &mut key_timings);
let total_keys_count = pubkeys.len();
let mut accounts_scan = Measure::start("accounts_scan");
// parallel scan the index.
let (mut purges_zero_lamports, purges_old_accounts) = {
let do_clean_scan = || {
pubkeys
.par_chunks(4096)
.map(|pubkeys: &[Pubkey]| {
let mut purges_zero_lamports = HashMap::new();
let mut purges_old_accounts = Vec::new();
for pubkey in pubkeys {
match self.accounts_index.get(pubkey, None, max_clean_root) {
AccountIndexGetResult::Found(locked_entry, index) => {
let slot_list = locked_entry.slot_list();
let (slot, account_info) = &slot_list[index];
if account_info.lamports == 0 {
purges_zero_lamports.insert(
*pubkey,
self.accounts_index
.roots_and_ref_count(&locked_entry, max_clean_root),
);
}
// Release the lock
let slot = *slot;
drop(locked_entry);
if self.accounts_index.is_uncleaned_root(slot) {
                                // Assertion enforced by `accounts_index.get()`: the latest slot
                                // will not be greater than the given `max_clean_root`
if let Some(max_clean_root) = max_clean_root {
assert!(slot <= max_clean_root);
}
purges_old_accounts.push(*pubkey);
}
}
AccountIndexGetResult::NotFoundOnFork => {
// This pubkey is in the index but not in a root slot, so clean
// it up by adding it to the to-be-purged list.
//
// Also, this pubkey must have been touched by some slot since
// it was in the dirty list, so we assume that the slot it was
// touched in must be unrooted.
purges_old_accounts.push(*pubkey);
}
AccountIndexGetResult::Missing(_lock) => {}
};
}
(purges_zero_lamports, purges_old_accounts)
})
.reduce(
|| (HashMap::new(), Vec::new()),
|mut m1, m2| {
// Collapse down the hashmaps/vecs into one.
m1.0.extend(m2.0);
m1.1.extend(m2.1);
m1
},
)
};
if is_startup {
do_clean_scan()
} else {
self.thread_pool_clean.install(do_clean_scan)
}
};
accounts_scan.stop();
let mut clean_old_rooted = Measure::start("clean_old_roots");
let (purged_account_slots, removed_accounts) =
self.clean_accounts_older_than_root(purges_old_accounts, max_clean_root);
if self.caching_enabled {
self.do_reset_uncleaned_roots(max_clean_root);
} else {
self.do_reset_uncleaned_roots_v1(&mut candidates_v1, max_clean_root);
}
clean_old_rooted.stop();
let mut store_counts_time = Measure::start("store_counts");
// Calculate store counts as if everything was purged
// Then purge if we can
let mut store_counts: HashMap<AppendVecId, (usize, HashSet<Pubkey>)> = HashMap::new();
for (key, (account_infos, ref_count)) in purges_zero_lamports.iter_mut() {
if purged_account_slots.contains_key(key) {
*ref_count = self.accounts_index.ref_count_from_storage(key);
}
account_infos.retain(|(slot, account_info)| {
let was_slot_purged = purged_account_slots
.get(key)
.map(|slots_removed| slots_removed.contains(slot))
.unwrap_or(false);
if was_slot_purged {
// No need to look up the slot storage below if the entire
// slot was purged
return false;
}
// Check if this update in `slot` to the account with `key` was reclaimed earlier by
// `clean_accounts_older_than_root()`
let was_reclaimed = removed_accounts
.get(&account_info.store_id)
.map(|store_removed| store_removed.contains(&account_info.offset))
.unwrap_or(false);
if was_reclaimed {
return false;
}
if let Some(store_count) = store_counts.get_mut(&account_info.store_id) {
store_count.0 -= 1;
store_count.1.insert(*key);
} else {
let mut key_set = HashSet::new();
key_set.insert(*key);
let count = self
.storage
.slot_store_count(*slot, account_info.store_id)
.unwrap()
- 1;
debug!(
"store_counts, inserting slot: {}, store id: {}, count: {}",
slot, account_info.store_id, count
);
store_counts.insert(account_info.store_id, (count, key_set));
}
true
});
}
store_counts_time.stop();
let mut calc_deps_time = Measure::start("calc_deps");
Self::calc_delete_dependencies(&purges_zero_lamports, &mut store_counts);
calc_deps_time.stop();
// Only keep purges_zero_lamports where the entire history of the account in the root set
// can be purged. All AppendVecs for those updates are dead.
let mut purge_filter = Measure::start("purge_filter");
purges_zero_lamports.retain(|_pubkey, (account_infos, _ref_count)| {
for (_slot, account_info) in account_infos.iter() {
if store_counts.get(&account_info.store_id).unwrap().0 != 0 {
return false;
}
}
true
});
purge_filter.stop();
let mut reclaims_time = Measure::start("reclaims");
// Recalculate reclaims with new purge set
let pubkey_to_slot_set: Vec<_> = purges_zero_lamports
.into_iter()
.map(|(key, (slots_list, _ref_count))| {
(
key,
slots_list
.into_iter()
.map(|(slot, _)| slot)
.collect::<HashSet<Slot>>(),
)
})
.collect();
let reclaims = self.purge_keys_exact(pubkey_to_slot_set.iter());
// Don't reset from clean, since the pubkeys in those stores may need to be unref'ed
// and those stores may be used for background hashing.
let reset_accounts = false;
let mut reclaim_result = ReclaimResult::default();
let reclaim_result = Some(&mut reclaim_result);
self.handle_reclaims(
&reclaims,
None,
Some(&self.clean_accounts_stats.purge_stats),
reclaim_result,
reset_accounts,
);
reclaims_time.stop();
self.clean_accounts_stats.report();
datapoint_info!(
"clean_accounts",
(
"collect_delta_keys_us",
key_timings.collect_delta_keys_us,
i64
),
(
"dirty_store_processing_us",
key_timings.dirty_store_processing_us,
i64
),
("accounts_scan", accounts_scan.as_us() as i64, i64),
("clean_old_rooted", clean_old_rooted.as_us() as i64, i64),
("store_counts", store_counts_time.as_us() as i64, i64),
("purge_filter", purge_filter.as_us() as i64, i64),
("calc_deps", calc_deps_time.as_us() as i64, i64),
("reclaims", reclaims_time.as_us() as i64, i64),
("delta_key_count", key_timings.delta_key_count, i64),
("dirty_pubkeys_count", key_timings.dirty_pubkeys_count, i64),
("total_keys_count", total_keys_count, i64),
);
}
/// Removes the accounts in the input `reclaims` from the tracked "count" of
/// their corresponding storage entries. Note this does not actually free
/// the memory from the storage entries until all the storage entries for
/// a given slot `S` are empty, at which point `process_dead_slots` will
/// remove all the storage entries for `S`.
///
/// # Arguments
/// * `reclaims` - The accounts to remove from storage entries' "count". Note here
/// that we should not remove cache entries, only entries for accounts actually
/// stored in a storage entry.
///
/// * `expected_single_dead_slot` - A correctness assertion. If this is equal to `Some(S)`,
/// then the function will check that the only slot being cleaned up in `reclaims`
/// is the slot == `S`. This is true for instance when `handle_reclaims` is called
/// from store or slot shrinking, as those should only touch the slot they are
/// currently storing to or shrinking.
///
/// * `purge_stats` - The stats used to track performance of purging dead slots. This
    /// also serves as a correctness assertion. If `purge_stats.is_none()`, this implies
/// there can be no dead slots that happen as a result of this call, and the function
/// will check that no slots are cleaned up/removed via `process_dead_slots`. For instance,
    /// on store, no slots should be cleaned up, but the background clean_accounts pass
    /// purges accounts from old rooted slots, so outdated slots may be removed there.
///
    /// * `reclaim_result` - Information about accounts that were removed from storage; it does
    ///    not include accounts that were removed from the cache
///
    /// * `reset_accounts` - Reset the append_vec store when the store is dead (count==0).
    ///    From the clean and shrink paths it should be false, since there may be an in-progress
    ///    hash operation and the stores may hold accounts that need to be unref'ed.
fn handle_reclaims(
&self,
reclaims: SlotSlice<AccountInfo>,
expected_single_dead_slot: Option<Slot>,
// TODO: coalesce `purge_stats` and `reclaim_result` together into one option, as they
// are both either Some or None
purge_stats: Option<&PurgeStats>,
reclaim_result: Option<&mut ReclaimResult>,
reset_accounts: bool,
) {
if reclaims.is_empty() {
return;
}
let (purged_account_slots, reclaimed_offsets) =
if let Some((ref mut x, ref mut y)) = reclaim_result {
(Some(x), Some(y))
} else {
(None, None)
};
let dead_slots = self.remove_dead_accounts(
reclaims,
expected_single_dead_slot,
reclaimed_offsets,
reset_accounts,
);
if purge_stats.is_none() {
assert!(dead_slots.is_empty());
} else if let Some(expected_single_dead_slot) = expected_single_dead_slot {
assert!(dead_slots.len() <= 1);
if dead_slots.len() == 1 {
assert!(dead_slots.contains(&expected_single_dead_slot));
}
}
if let Some(purge_stats) = purge_stats {
self.process_dead_slots(&dead_slots, purged_account_slots, purge_stats);
}
}
// Must be kept private!, does sensitive cleanup that should only be called from
// supported pipelines in AccountsDb
fn process_dead_slots(
&self,
dead_slots: &HashSet<Slot>,
purged_account_slots: Option<&mut AccountSlots>,
purge_stats: &PurgeStats,
) {
if dead_slots.is_empty() {
return;
}
let mut clean_dead_slots = Measure::start("reclaims::clean_dead_slots");
self.clean_stored_dead_slots(dead_slots, purged_account_slots);
clean_dead_slots.stop();
let mut purge_removed_slots = Measure::start("reclaims::purge_removed_slots");
self.purge_dead_slots_from_storage(dead_slots.iter(), purge_stats);
purge_removed_slots.stop();
// If the slot is dead, remove the need to shrink the storages as
// the storage entries will be purged.
{
let mut list = self.shrink_candidate_slots.lock().unwrap();
for slot in dead_slots {
list.remove(slot);
}
}
debug!(
"process_dead_slots({}): {} {} {:?}",
dead_slots.len(),
clean_dead_slots,
purge_removed_slots,
dead_slots,
);
}
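/// Reads every account out of the given storage entries for `slot`, keeps only the
/// accounts that are still alive in the accounts index, rewrites them into a single new
/// (possibly recycled) store, and retires the now-dead stores. Returns the number of
/// alive accounts written to the shrunken store (or 0 if shrinking was skipped).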
fn do_shrink_slot_stores<'a, I>(&'a self, slot: Slot, stores: I, _is_startup: bool) -> usize
where
I: Iterator<Item = &'a Arc<AccountStorageEntry>>,
{
struct FoundStoredAccount<'a> {
pub account: StoredAccountMeta<'a>,
pub store_id: AppendVecId,
pub account_size: usize,
}
debug!("do_shrink_slot_stores: slot: {}", slot);
let mut stored_accounts: HashMap<Pubkey, FoundStoredAccount> = HashMap::new();
let mut original_bytes = 0;
let mut num_stores = 0;
for store in stores {
let mut start = 0;
original_bytes += store.total_bytes();
while let Some((account, next)) = store.accounts.get_account(start) {
let new_entry = FoundStoredAccount {
account,
store_id: store.append_vec_id(),
account_size: next - start,
};
match stored_accounts.entry(new_entry.account.meta.pubkey) {
Entry::Occupied(mut occupied_entry) => {
if new_entry.account.meta.write_version
> occupied_entry.get().account.meta.write_version
{
occupied_entry.insert(new_entry);
}
}
Entry::Vacant(vacant_entry) => {
vacant_entry.insert(new_entry);
}
}
start = next;
}
num_stores += 1;
}
let mut index_read_elapsed = Measure::start("index_read_elapsed");
let mut alive_total = 0;
let mut alive_accounts: Vec<_> = Vec::with_capacity(stored_accounts.len());
let mut unrefed_pubkeys = vec![];
for (pubkey, stored_account) in &stored_accounts {
let lookup = self.accounts_index.get_account_read_entry(pubkey);
if let Some(locked_entry) = lookup {
let is_alive = locked_entry.slot_list().iter().any(|(_slot, i)| {
i.store_id == stored_account.store_id
&& i.offset == stored_account.account.offset
});
if !is_alive {
// This pubkey was found in the storage, but this particular version of the
// account (this store_id/offset) no longer exists in the index. It would have
// had a ref to the storage from the initial store, but it will not exist in the
// re-written slot. Unref it to keep the index consistent with rewriting the
// storage entries.
unrefed_pubkeys.push(pubkey);
locked_entry.unref()
} else {
alive_accounts.push((pubkey, stored_account));
alive_total += stored_account.account_size;
}
}
}
index_read_elapsed.stop();
let aligned_total: u64 = Self::page_align(alive_total as u64);
// This shouldn't happen if alive_bytes/approx_stored_count are accurate
if Self::should_not_shrink(aligned_total, original_bytes, num_stores) {
self.shrink_stats
.skipped_shrink
.fetch_add(1, Ordering::Relaxed);
for pubkey in unrefed_pubkeys {
if let Some(locked_entry) = self.accounts_index.get_account_read_entry(pubkey) {
locked_entry.addref();
}
}
return 0;
}
let total_starting_accounts = stored_accounts.len();
let total_accounts_after_shrink = alive_accounts.len();
debug!(
"shrinking: slot: {}, accounts: ({} => {}) bytes: ({} ; aligned to: {}) original: {}",
slot,
total_starting_accounts,
total_accounts_after_shrink,
alive_total,
aligned_total,
original_bytes,
);
let mut rewrite_elapsed = Measure::start("rewrite_elapsed");
let mut dead_storages = vec![];
let mut find_alive_elapsed = 0;
let mut create_and_insert_store_elapsed = 0;
let mut write_storage_elapsed = 0;
let mut store_accounts_timing = StoreAccountsTiming::default();
if aligned_total > 0 {
let mut start = Measure::start("find_alive_elapsed");
let mut accounts = Vec::with_capacity(alive_accounts.len());
let mut hashes = Vec::with_capacity(alive_accounts.len());
let mut write_versions = Vec::with_capacity(alive_accounts.len());
for (pubkey, alive_account) in alive_accounts {
accounts.push((pubkey, &alive_account.account));
hashes.push(alive_account.account.hash);
write_versions.push(alive_account.account.meta.write_version);
}
start.stop();
find_alive_elapsed = start.as_us();
let mut start = Measure::start("create_and_insert_store_elapsed");
let shrunken_store = if let Some(new_store) =
self.try_recycle_and_insert_store(slot, aligned_total, aligned_total + 1024)
{
new_store
} else {
let maybe_shrink_paths = self.shrink_paths.read().unwrap();
if let Some(ref shrink_paths) = *maybe_shrink_paths {
self.create_and_insert_store_with_paths(
slot,
aligned_total,
"shrink-w-path",
shrink_paths,
)
} else {
self.create_and_insert_store(slot, aligned_total, "shrink")
}
};
start.stop();
create_and_insert_store_elapsed = start.as_us();
// Here we write back alive_accounts. That should effectively be an atomic operation
// without the use of wide locks in this whole function, because we're
// mutating rooted slots; there should be no other writers to them.
store_accounts_timing = self.store_accounts_frozen(
slot,
&accounts,
Some(&hashes),
Some(Box::new(move |_, _| shrunken_store.clone())),
Some(Box::new(write_versions.into_iter())),
);
// `store_accounts_frozen()` above may have purged accounts from some
// other storage entries (the ones that were just overwritten by this
// new storage entry). This means some of those stores might have caused
// this slot to be re-added to `self.shrink_candidate_slots`, so remove
// the slot from that list here.
self.shrink_candidate_slots.lock().unwrap().remove(&slot);
// Purge old, overwritten storage entries
let mut start = Measure::start("write_storage_elapsed");
if let Some(slot_stores) = self.storage.get_slot_stores(slot) {
slot_stores.write().unwrap().retain(|_key, store| {
if store.count() == 0 {
self.dirty_stores
.insert((slot, store.append_vec_id()), store.clone());
dead_storages.push(store.clone());
false
} else {
true
}
});
}
start.stop();
write_storage_elapsed = start.as_us();
}
rewrite_elapsed.stop();
let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_time");
let mut recycle_stores = self.recycle_stores.write().unwrap();
recycle_stores_write_elapsed.stop();
let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed");
if recycle_stores.entry_count() < MAX_RECYCLE_STORES {
recycle_stores.add_entries(dead_storages);
drop(recycle_stores);
} else {
self.stats
.dropped_stores
.fetch_add(dead_storages.len() as u64, Ordering::Relaxed);
drop(recycle_stores);
drop(dead_storages);
}
drop_storage_entries_elapsed.stop();
self.shrink_stats
.num_slots_shrunk
.fetch_add(1, Ordering::Relaxed);
self.shrink_stats
.index_read_elapsed
.fetch_add(index_read_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats
.find_alive_elapsed
.fetch_add(find_alive_elapsed, Ordering::Relaxed);
self.shrink_stats
.create_and_insert_store_elapsed
.fetch_add(create_and_insert_store_elapsed, Ordering::Relaxed);
self.shrink_stats.store_accounts_elapsed.fetch_add(
store_accounts_timing.store_accounts_elapsed,
Ordering::Relaxed,
);
self.shrink_stats.update_index_elapsed.fetch_add(
store_accounts_timing.update_index_elapsed,
Ordering::Relaxed,
);
self.shrink_stats.handle_reclaims_elapsed.fetch_add(
store_accounts_timing.handle_reclaims_elapsed,
Ordering::Relaxed,
);
self.shrink_stats
.write_storage_elapsed
.fetch_add(write_storage_elapsed, Ordering::Relaxed);
self.shrink_stats
.rewrite_elapsed
.fetch_add(rewrite_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats
.drop_storage_entries_elapsed
.fetch_add(drop_storage_entries_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats
.recycle_stores_write_elapsed
.fetch_add(recycle_stores_write_elapsed.as_us(), Ordering::Relaxed);
self.shrink_stats.accounts_removed.fetch_add(
total_starting_accounts - total_accounts_after_shrink,
Ordering::Relaxed,
);
self.shrink_stats.bytes_removed.fetch_add(
original_bytes.saturating_sub(aligned_total),
Ordering::Relaxed,
);
self.shrink_stats
.bytes_written
.fetch_add(aligned_total, Ordering::Relaxed);
self.shrink_stats.report();
total_accounts_after_shrink
}
// Reads all accounts in the given slot's AppendVecs, filters down to only the alive
// accounts, then creates a minimal AppendVec containing just those alive accounts.
fn shrink_slot_forced(&self, slot: Slot, is_startup: bool) -> usize {
debug!("shrink_slot_forced: slot: {}", slot);
if let Some(stores_lock) = self.storage.get_slot_stores(slot) {
let stores: Vec<Arc<AccountStorageEntry>> =
stores_lock.read().unwrap().values().cloned().collect();
if !Self::is_shrinking_productive(slot, &stores) {
return 0;
}
self.do_shrink_slot_stores(slot, stores.iter(), is_startup)
} else {
0
}
}
fn all_slots_in_storage(&self) -> Vec<Slot> {
self.storage.all_slots()
}
fn all_root_slots_in_index(&self) -> Vec<Slot> {
self.accounts_index.all_roots()
}
/// Given the input `ShrinkCandidates`, this function sorts the stores by their alive ratio
/// in increasing order with the most sparse entries in the front. It will then simulate the
/// shrinking by working on the most sparse entries first and if the overall alive ratio is
/// achieved, it will stop and return the filtered-down candidates.
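///
/// Illustrative sketch only (not compiled); `candidates` and the chosen ratio are
/// assumptions for demonstration:
///
/// ```ignore
/// // Keep selecting the sparsest stores for shrinking until the projected overall
/// // alive ratio (total_alive_bytes / total_bytes) rises above shrink_ratio.
/// let shrink_ratio = 0.8;
/// let selected = self.select_candidates_by_total_usage(&candidates, shrink_ratio);
/// for (slot, stores) in &selected {
///     // `stores` are the storage entries chosen for shrinking in `slot`
/// }
/// ```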
fn select_candidates_by_total_usage(
&self,
shrink_slots: &ShrinkCandidates,
shrink_ratio: f64,
) -> ShrinkCandidates {
struct StoreUsageInfo {
slot: Slot,
alive_ratio: f64,
store: Arc<AccountStorageEntry>,
}
let mut measure = Measure::start("select_top_sparse_storage_entries-ms");
let mut store_usage: Vec<StoreUsageInfo> = Vec::with_capacity(shrink_slots.len());
let mut total_alive_bytes: u64 = 0;
let mut candidates_count: usize = 0;
let mut total_bytes: u64 = 0;
for (slot, slot_shrink_candidates) in shrink_slots {
candidates_count += slot_shrink_candidates.len();
for store in slot_shrink_candidates.values() {
total_alive_bytes += Self::page_align(store.alive_bytes() as u64);
total_bytes += store.total_bytes();
let alive_ratio = Self::page_align(store.alive_bytes() as u64) as f64
/ store.total_bytes() as f64;
store_usage.push(StoreUsageInfo {
slot: *slot,
alive_ratio,
store: store.clone(),
});
}
}
store_usage.sort_by(|a, b| {
a.alive_ratio
.partial_cmp(&b.alive_ratio)
.unwrap_or(std::cmp::Ordering::Equal)
});
// Work from the beginning of store_usage (the most sparse entries) and determine when we can
// stop shrinking while still achieving the overall alive-ratio goal.
let mut shrink_slots: ShrinkCandidates = HashMap::new();
for usage in &store_usage {
let alive_ratio = (total_alive_bytes as f64) / (total_bytes as f64);
if alive_ratio > shrink_ratio {
// we have reached our goal, stop
debug!(
"Shrinking goal can be achieved at slot {:?}, total_alive_bytes: {:?} \
total_bytes: {:?}, alive_ratio: {:}, shrink_ratio: {:?}",
usage.slot, total_alive_bytes, total_bytes, alive_ratio, shrink_ratio
);
break;
}
let store = &usage.store;
let current_store_size = store.total_bytes();
let after_shrink_size = Self::page_align(store.alive_bytes() as u64);
let bytes_saved = current_store_size.saturating_sub(after_shrink_size);
total_bytes -= bytes_saved;
shrink_slots
.entry(usage.slot)
.or_default()
.insert(store.append_vec_id(), store.clone());
}
measure.stop();
inc_new_counter_info!(
"select_top_sparse_storage_entries-ms",
measure.as_ms() as usize
);
inc_new_counter_info!("select_top_sparse_storage_entries-seeds", candidates_count);
shrink_slots
}
pub fn shrink_candidate_slots(&self) -> usize {
let shrink_candidates_slots =
std::mem::take(&mut *self.shrink_candidate_slots.lock().unwrap());
let shrink_slots = {
if let AccountShrinkThreshold::TotalSpace { shrink_ratio } = self.shrink_ratio {
self.select_candidates_by_total_usage(&shrink_candidates_slots, shrink_ratio)
} else {
shrink_candidates_slots
}
};
let mut measure_shrink_all_candidates = Measure::start("shrink_all_candidate_slots-ms");
let num_candidates = shrink_slots.len();
let mut shrink_candidates_count: usize = 0;
for (slot, slot_shrink_candidates) in shrink_slots {
shrink_candidates_count += slot_shrink_candidates.len();
let mut measure = Measure::start("shrink_candidate_slots-ms");
self.do_shrink_slot_stores(slot, slot_shrink_candidates.values(), false);
measure.stop();
inc_new_counter_info!("shrink_candidate_slots-ms", measure.as_ms() as usize);
}
measure_shrink_all_candidates.stop();
inc_new_counter_info!(
"shrink_all_candidate_slots-ms",
measure_shrink_all_candidates.as_ms() as usize
);
inc_new_counter_info!("shrink_all_candidate_slots-count", shrink_candidates_count);
num_candidates
}
pub fn shrink_all_slots(&self, is_startup: bool) {
if is_startup && self.caching_enabled {
let slots = self.all_slots_in_storage();
let chunk_size = std::cmp::max(slots.len() / 8, 1); // approximately 400k slots in a snapshot
slots.par_chunks(chunk_size).for_each(|slots| {
for slot in slots {
self.shrink_slot_forced(*slot, is_startup);
}
});
} else {
for slot in self.all_slots_in_storage() {
if self.caching_enabled {
self.shrink_slot_forced(slot, false);
} else {
self.do_shrink_slot_forced_v1(slot);
}
}
}
}
pub fn scan_accounts<F, A>(
&self,
ancestors: &Ancestors,
bank_id: BankId,
scan_func: F,
) -> ScanResult<A>
where
F: Fn(&mut A, Option<(&Pubkey, AccountSharedData, Slot)>),
A: Default,
{
let mut collector = A::default();
// This can error out if the slots being scanned over are aborted
self.accounts_index
.scan_accounts(ancestors, bank_id, |pubkey, (account_info, slot)| {
let account_slot = self
.get_account_accessor(slot, pubkey, account_info.store_id, account_info.offset)
.get_loaded_account()
.map(|loaded_account| (pubkey, loaded_account.take_account(), slot));
scan_func(&mut collector, account_slot)
})?;
Ok(collector)
}
pub fn unchecked_scan_accounts<F, A>(
&self,
metric_name: &'static str,
ancestors: &Ancestors,
scan_func: F,
) -> A
where
F: Fn(&mut A, (&Pubkey, LoadedAccount, Slot)),
A: Default,
{
let mut collector = A::default();
self.accounts_index.unchecked_scan_accounts(
metric_name,
ancestors,
|pubkey, (account_info, slot)| {
if let Some(loaded_account) = self
.get_account_accessor(slot, pubkey, account_info.store_id, account_info.offset)
.get_loaded_account()
{
scan_func(&mut collector, (pubkey, loaded_account, slot));
}
},
);
collector
}
pub fn range_scan_accounts<F, A, R>(
&self,
metric_name: &'static str,
ancestors: &Ancestors,
range: R,
scan_func: F,
) -> A
where
F: Fn(&mut A, Option<(&Pubkey, AccountSharedData, Slot)>),
A: Default,
R: RangeBounds<Pubkey>,
{
let mut collector = A::default();
self.accounts_index.range_scan_accounts(
metric_name,
ancestors,
range,
|pubkey, (account_info, slot)| {
// unlike other scan fns, this is called from Bank::collect_rent_eagerly(),
// which is on-consensus processing in the banking/replaying stage.
// This requires infallible and consistent account loading.
// So, we unwrap Option<LoadedAccount> from get_loaded_account() here.
// This is safe because this closure is invoked with the account_info
// while the index entry is locked (ultimately in AccountsIndex::do_scan_accounts()),
// meaning no other subsystem can invalidate the account_info before making its
// changes to the index entry.
// For details, see the comment in retry_to_get_account_accessor()
let account_slot = self
.get_account_accessor(slot, pubkey, account_info.store_id, account_info.offset)
.get_loaded_account()
.map(|loaded_account| (pubkey, loaded_account.take_account(), slot))
.unwrap();
scan_func(&mut collector, Some(account_slot))
},
);
collector
}
pub fn index_scan_accounts<F, A>(
&self,
ancestors: &Ancestors,
bank_id: BankId,
index_key: IndexKey,
scan_func: F,
) -> ScanResult<(A, bool)>
where
F: Fn(&mut A, Option<(&Pubkey, AccountSharedData, Slot)>),
A: Default,
{
let key = match &index_key {
IndexKey::ProgramId(key) => key,
IndexKey::SplTokenMint(key) => key,
IndexKey::SplTokenOwner(key) => key,
};
if !self.account_indexes.include_key(key) {
// the requested key was not indexed in the secondary index, so do a normal scan
let used_index = false;
let scan_result = self.scan_accounts(ancestors, bank_id, scan_func)?;
return Ok((scan_result, used_index));
}
let mut collector = A::default();
self.accounts_index.index_scan_accounts(
ancestors,
bank_id,
index_key,
|pubkey, (account_info, slot)| {
let account_slot = self
.get_account_accessor(slot, pubkey, account_info.store_id, account_info.offset)
.get_loaded_account()
.map(|loaded_account| (pubkey, loaded_account.take_account(), slot));
scan_func(&mut collector, account_slot)
},
)?;
let used_index = true;
Ok((collector, used_index))
}
/// Scan a specific slot through all the account storage in parallel
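///
/// Illustrative sketch only (not compiled), mirroring the usage in `purge_slot_storage`
/// below; the accumulator type is an assumption for demonstration:
///
/// ```ignore
/// // Collect every pubkey stored in `slot`, whether the slot is still in the
/// // write cache (first closure) or already flushed to storage (second closure).
/// let result: ScanStorageResult<Pubkey, Arc<Mutex<HashSet<Pubkey>>>> =
///     self.scan_account_storage(
///         slot,
///         |loaded_account| Some(*loaded_account.pubkey()),
///         |accum, loaded_account| {
///             accum.lock().unwrap().insert(*loaded_account.pubkey());
///         },
///     );
/// ```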
pub fn scan_account_storage<R, B>(
&self,
slot: Slot,
cache_map_func: impl Fn(LoadedAccount) -> Option<R> + Sync,
storage_scan_func: impl Fn(&B, LoadedAccount) + Sync,
) -> ScanStorageResult<R, B>
where
R: Send,
B: Send + Default + Sync,
{
if let Some(slot_cache) = self.accounts_cache.slot_cache(slot) {
// If we see the slot in the cache, then all the account information
// is in this cached slot
if slot_cache.len() > SCAN_SLOT_PAR_ITER_THRESHOLD {
ScanStorageResult::Cached(self.thread_pool.install(|| {
slot_cache
.par_iter()
.filter_map(|cached_account| {
cache_map_func(LoadedAccount::Cached((
*cached_account.key(),
Cow::Borrowed(cached_account.value()),
)))
})
.collect()
}))
} else {
ScanStorageResult::Cached(
slot_cache
.iter()
.filter_map(|cached_account| {
cache_map_func(LoadedAccount::Cached((
*cached_account.key(),
Cow::Borrowed(cached_account.value()),
)))
})
.collect(),
)
}
} else {
let retval = B::default();
// If the slot is not in the cache, then all the account information must have
// been flushed. This is guaranteed because we only remove the rooted slot from
// the cache *after* we've finished flushing in `flush_slot_cache`.
let storage_maps: Vec<Arc<AccountStorageEntry>> = self
.storage
.get_slot_storage_entries(slot)
.unwrap_or_default();
self.thread_pool.install(|| {
storage_maps
.par_iter()
.flat_map(|storage| storage.all_accounts())
.for_each(|account| storage_scan_func(&retval, LoadedAccount::Stored(account)));
});
ScanStorageResult::Stored(retval)
}
}
pub fn set_hash(&self, slot: Slot, parent_slot: Slot) {
let mut bank_hashes = self.bank_hashes.write().unwrap();
if bank_hashes.get(&slot).is_some() {
error!(
"set_hash: already exists; multiple forks with shared slot {} as child (parent: {})!?",
slot, parent_slot,
);
return;
}
let new_hash_info = BankHashInfo {
hash: Hash::default(),
snapshot_hash: Hash::default(),
stats: BankHashStats::default(),
};
bank_hashes.insert(slot, new_hash_info);
}
pub fn load(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
load_hint: LoadHint,
) -> Option<(AccountSharedData, Slot)> {
self.do_load(ancestors, pubkey, None, load_hint)
}
pub fn load_with_fixed_root(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
) -> Option<(AccountSharedData, Slot)> {
self.load(ancestors, pubkey, LoadHint::FixedMaxRoot)
}
pub fn load_without_fixed_root(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
) -> Option<(AccountSharedData, Slot)> {
self.load(ancestors, pubkey, LoadHint::Unspecified)
}
fn read_index_for_accessor_or_load_slow<'a>(
&'a self,
ancestors: &Ancestors,
pubkey: &'a Pubkey,
max_root: Option<Slot>,
clone_in_lock: bool,
) -> Option<(Slot, AppendVecId, usize, Option<LoadedAccountAccessor<'a>>)> {
let (lock, index) = match self.accounts_index.get(pubkey, Some(ancestors), max_root) {
AccountIndexGetResult::Found(lock, index) => (lock, index),
// we bail out pretty early for missing.
AccountIndexGetResult::NotFoundOnFork => {
return None;
}
AccountIndexGetResult::Missing(_) => {
return None;
}
};
let slot_list = lock.slot_list();
let (
slot,
AccountInfo {
store_id, offset, ..
},
) = slot_list[index];
let some_from_slow_path = if clone_in_lock {
// The fast path must have failed, so take the slower approach
// of copying the potentially large Account::data inside the lock.
// Calling check_and_get_loaded_account is safe as long as we're guaranteed to hold
// the lock for the duration, and there can be no purge thanks to the alive ancestors
// held by our caller.
Some(self.get_account_accessor(slot, pubkey, store_id, offset))
} else {
None
};
Some((slot, store_id, offset, some_from_slow_path))
// `lock` is dropped here rather quickly when clone_in_lock = false,
// so the entry could be raced for mutation by other subsystems
// before we actually provision the account data for the caller's use.
// This is traded for less contention and better performance, at the cost of a fair
// amount of delicate handling in retry_to_get_account_accessor() below ;)
// you're warned!
}
fn retry_to_get_account_accessor<'a>(
&'a self,
mut slot: Slot,
mut store_id: usize,
mut offset: usize,
ancestors: &'a Ancestors,
pubkey: &'a Pubkey,
max_root: Option<Slot>,
load_hint: LoadHint,
) -> Option<(LoadedAccountAccessor<'a>, Slot)> {
// Happy drawing time! :)
//
// Reader | Accessed data source for cached/stored
// -------------------------------------+----------------------------------
// R1 read_index_for_accessor_or_load_slow()| cached/stored: index
// | |
// <(store_id, offset, ..)> |
// V |
// R2 retry_to_get_account_accessor()/ | cached: map of caches & entry for (slot, pubkey)
// get_account_accessor() | stored: map of stores
// | |
// <Accessor> |
// V |
// R3 check_and_get_loaded_account()/ | cached: N/A (note: basically noop unwrap)
// get_loaded_account() | stored: store's entry for slot
// | |
// <LoadedAccount> |
// V |
// R4 take_account() | cached/stored: entry of cache/storage for (slot, pubkey)
// | |
// <AccountSharedData> |
// V |
// Account!! V
//
// Flusher | Accessed data source for cached/stored
// -------------------------------------+----------------------------------
// F1 flush_slot_cache() | N/A
// | |
// V |
// F2 store_accounts_frozen()/ | map of stores (creates new entry)
// write_accounts_to_storage() |
// | |
// V |
// F3 store_accounts_frozen()/ | index
// update_index() | (replaces existing store_id, offset in caches)
// | |
// V |
// F4 accounts_cache.remove_slot() | map of caches (removes old entry)
// V
//
// Remarks for flusher: So, for any reading operations, it's a race condition where F4 happens
// between R1 and R2. In that case, retrying from R1 is safe because F3 must have
// already occurred.
//
// Shrinker | Accessed data source for stored
// -------------------------------------+----------------------------------
// S1 do_shrink_slot_stores() | N/A
// | |
// V |
// S2 store_accounts_frozen()/ | map of stores (creates new entry)
// write_accounts_to_storage() |
// | |
// V |
// S3 store_accounts_frozen()/ | index
// update_index() | (replaces existing store_id, offset in stores)
// | |
// V |
// S4 do_shrink_slot_stores()/ | map of stores (removes old entry)
// dead_storages
//
// Remarks for shrinker: So, for any reading operations, it's a race condition
// where S4 happens between R1 and R2. In that case, retrying from R1 is safe because S3 must have
// already occurred, and S3 atomically replaced the index entry accordingly.
//
// Cleaner | Accessed data source for stored
// -------------------------------------+----------------------------------
// C1 clean_accounts() | N/A
// | |
// V |
// C2 clean_accounts()/ | index
// purge_keys_exact() | (removes existing store_id, offset for stores)
// | |
// V |
// C3 clean_accounts()/ | map of stores (removes old entry)
// handle_reclaims() |
//
// Remarks for cleaner: So, for any reading operations, it's a race condition
// where C3 happens between R1 and R2. In that case, retrying from R1 is safe;
// None would simply be returned while bailing out at R1.
//
// Purger | Accessed data source for cached/stored
// ---------------------------------------+----------------------------------
// P1 purge_slot() | N/A
// | |
// V |
// P2 purge_slots_from_cache_and_store() | map of caches/stores (removes old entry)
// | |
// V |
// P3 purge_slots_from_cache_and_store()/ | index
// purge_slot_cache()/ |
// purge_slot_cache_pubkeys() | (removes existing store_id, offset for cache)
// purge_slot_storage()/ |
// purge_keys_exact() | (removes accounts index entries)
// handle_reclaims() | (removes storage entries)
// OR |
// clean_accounts()/ |
// clean_accounts_older_than_root()| (removes existing store_id, offset for stores)
// V
//
// Remarks for purger: So, for any reading operations, it's a race condition
// where P2 happens between R1 and R2. In that case, retrying from R1 is safe,
// though we may bail at the index read retry if P3 hasn't run yet.
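//
// In short: every failure the loop below can observe is explained by one of the writer
// pipelines above racing in between R1 and R2, so the remedy is always the same: re-read
// the index (R1) and retry R2, bounded by `load_limit` acceptable failures when the
// load hint is `Unspecified`.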
#[cfg(test)]
{
// Give some time for cache flushing to occur here for unit tests
sleep(Duration::from_millis(self.load_delay));
}
// Failsafe for potential race conditions with other subsystems
let mut num_acceptable_failed_iterations = 0;
loop {
let account_accessor = self.get_account_accessor(slot, pubkey, store_id, offset);
match account_accessor {
LoadedAccountAccessor::Cached(Some(_)) | LoadedAccountAccessor::Stored(Some(_)) => {
// Great! There was no race, just return :) This is the most usual situation
return Some((account_accessor, slot));
}
LoadedAccountAccessor::Cached(None) => {
num_acceptable_failed_iterations += 1;
// Cache was flushed in between checking the index and retrieving from the cache,
// so retry. This works because in accounts cache flush, an account is written to
// storage *before* it is removed from the cache
match load_hint {
LoadHint::FixedMaxRoot => {
// it's impossible for this to fail for transaction loads from
// replaying/banking more than once.
// This is because:
// 1) For a slot `X` that's being replayed, there is only one
// latest ancestor containing the latest update for the account, and this
// ancestor can only be flushed once.
// 2) The root cannot move while replaying, so the index cannot continually
// find more up to date entries than the current `slot`
assert!(num_acceptable_failed_iterations <= 1);
}
LoadHint::Unspecified => {
// Because newer root can be added to the index (= not fixed),
// multiple flush race conditions can be observed under very rare
// condition, at least theoretically
}
}
}
LoadedAccountAccessor::Stored(None) => {
match load_hint {
LoadHint::FixedMaxRoot => {
// When running replay on the validator, or banking stage on the leader,
// it should be very rare that the storage entry doesn't exist if the
// entry in the accounts index is the latest version of this account.
//
// There are only a few places where the storage entry may not exist
// after reading the index:
// 1) Shrink has removed the old storage entry and rewritten to
// a newer storage entry
// 2) The `pubkey` asked for in this function is a zero-lamport account,
// and the storage entry holding this account qualified for zero-lamport clean.
//
// In both these cases, it should be safe to retry and recheck the accounts
// index indefinitely, without incrementing num_acceptable_failed_iterations.
// That's because if the root is fixed, there is only a bounded number
// of pending cleans/shrinks (depending on how far behind the AccountsBackgroundService
// is), so termination to the desired condition is guaranteed.
//
// Also note that in both cases, if we do find the storage entry,
// we can guarantee that the storage entry is safe to read from because
// we grabbed a reference to the storage entry while it was still in the
// storage map. This means even if the storage entry is removed from the storage
// map after we grabbed the storage entry, the recycler should not reset the
// storage entry until we drop the reference to the storage entry.
//
// (Intentionally no code in this arm; we simply fall through and retry.)
}
LoadHint::Unspecified => {
// RPC get_account() may have fetched an old root from the index that was
// either:
// 1) Cleaned up by clean_accounts(), so the accounts index has been updated
// and the storage entries have been removed.
// 2) Dropped by purge_slots() because the slot was on a minor fork, which
// removes the slots' storage entries but doesn't purge from the accounts index
// (account index cleanup is left to clean for stored slots). Note that
// this generally cannot occur in the wild, because the RPC
// should hold the slot's bank, preventing it from being purged to
// begin with.
num_acceptable_failed_iterations += 1;
}
}
}
}
#[cfg(not(test))]
let load_limit = ABSURD_CONSECUTIVE_FAILED_ITERATIONS;
#[cfg(test)]
let load_limit = self.load_limit.load(Ordering::Relaxed);
let fallback_to_slow_path = if num_acceptable_failed_iterations >= load_limit {
// The latest version of the account existed in the index, but could not be
// fetched from storage. This means a race occurred between this function and clean
// accounts/purge_slots
let message = format!(
"do_load() failed to get key: {} from storage, latest attempt was for \
slot: {}, storage_entry: {} offset: {}, load_hint: {:?}",
pubkey, slot, store_id, offset, load_hint,
);
datapoint_warn!("accounts_db-do_load_warn", ("warn", message, String));
true
} else {
false
};
// Because reading from the cache/storage failed, retry from the index read
let (new_slot, new_store_id, new_offset, maybe_account_accessor) = self
.read_index_for_accessor_or_load_slow(
ancestors,
pubkey,
max_root,
fallback_to_slow_path,
)?;
// Note the subtle `?` on the previous line; we bail out early if the key is missing.
if new_slot == slot && new_store_id == store_id {
// Considering that we failed to get the accessor above, and further that
// the index still returned the same (slot, store_id) tuple, the offset must be the
// same too.
assert!(new_offset == offset);
// If the entry was missing from the cache, that means it must have been flushed,
// and the accounts index is always updated before cache flush, so store_id must
// not indicate being cached at this point.
assert!(new_store_id != CACHE_VIRTUAL_STORAGE_ID);
// If this is not a cache entry, then this was a minor fork slot
// that had its storage entries cleaned up by purge_slots() but hasn't been
// cleaned yet. That means this must be rpc access and not replay/banking at the
// very least. Note that purge shouldn't occur even for RPC, as the caller must hold
// all of the ancestor slots.
assert!(load_hint == LoadHint::Unspecified);
// Everything above having been assert!()-ed, panic!() here because this is an error
// condition after all....
// That reasoning is based on the fact that every code path reaching this fn,
// retry_to_get_account_accessor(), must keep the Arc<Bank> (and all of its
// ancestors) alive over this fn invocation, guaranteeing that the slot cannot be
// purged in the first place.
// For details, see the comment in AccountIndex::do_checked_scan_accounts(),
// which refers back here.
panic!(
"Bad index entry detected ({}, {}, {}, {}, {:?})",
pubkey, slot, store_id, offset, load_hint
);
} else if fallback_to_slow_path {
// The bad-index-entry check above must be performed first to retain the same
// behavior.
return Some((
maybe_account_accessor.expect("must be some if clone_in_lock=true"),
new_slot,
));
}
slot = new_slot;
store_id = new_store_id;
offset = new_offset;
}
}
fn do_load(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
max_root: Option<Slot>,
load_hint: LoadHint,
) -> Option<(AccountSharedData, Slot)> {
#[cfg(not(test))]
assert!(max_root.is_none());
let (slot, store_id, offset, _maybe_account_accesor) =
self.read_index_for_accessor_or_load_slow(ancestors, pubkey, max_root, false)?;
// Note the subtle `?` on the previous line; we bail out early if the key is missing.
if self.caching_enabled && store_id != CACHE_VIRTUAL_STORAGE_ID {
let result = self.read_only_accounts_cache.load(pubkey, slot);
if let Some(account) = result {
return Some((account, slot));
}
}
let (mut account_accessor, slot) = self.retry_to_get_account_accessor(
slot, store_id, offset, ancestors, pubkey, max_root, load_hint,
)?;
let loaded_account = account_accessor.check_and_get_loaded_account();
let is_cached = loaded_account.is_cached();
let account = loaded_account.take_account();
if self.caching_enabled && !is_cached {
/*
We add this account to the read-only cache for account 'A', and future loads of 'A' from the
read-only cache are safe/reflect 'A''s latest state on this fork.
This safety holds if, during replay of slot 'S', we only ever read 'A' from the write cache,
not the read-only cache, after 'A' has been updated in replay of slot 'S'.
Assume for contradiction this is not true, and we read 'A' from the read-only cache *after* it had been updated in 'S'.
This means an entry '(S, A)' was added to the read-only cache after 'A' had been updated in 'S'.
Now when '(S, A)' was being added to the read-only cache, it must have been true that 'is_cached == false',
which means '(S, A)' does not exist in the write cache yet.
However, by the assumption for contradiction above, 'A' has already been updated in 'S', which means '(S, A)'
must exist in the write cache, which is a contradiction.
*/
self.read_only_accounts_cache.store(pubkey, slot, &account);
}
Some((account, slot))
}
pub fn load_account_hash(
&self,
ancestors: &Ancestors,
pubkey: &Pubkey,
max_root: Option<Slot>,
load_hint: LoadHint,
) -> Option<Hash> {
let (slot, store_id, offset, _maybe_account_accesor) =
self.read_index_for_accessor_or_load_slow(ancestors, pubkey, max_root, false)?;
// Note the subtle `?` on the previous line; we bail out early if the key is missing.
let (mut account_accessor, _) = self.retry_to_get_account_accessor(
slot, store_id, offset, ancestors, pubkey, max_root, load_hint,
)?;
let loaded_account = account_accessor.check_and_get_loaded_account();
Some(loaded_account.loaded_hash())
}
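/// Resolves an index entry `(slot, store_id, offset)` into an accessor over either the
/// write cache (when `store_id == CACHE_VIRTUAL_STORAGE_ID`) or the backing storage
/// entry. The inner `Option` may be `None` if the cache entry has since been flushed or
/// the storage entry removed; callers such as `retry_to_get_account_accessor()` handle
/// that race.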
fn get_account_accessor<'a>(
&'a self,
slot: Slot,
pubkey: &'a Pubkey,
store_id: usize,
offset: usize,
) -> LoadedAccountAccessor<'a> {
if store_id == CACHE_VIRTUAL_STORAGE_ID {
let maybe_cached_account = self
.accounts_cache
.load(slot, pubkey)
.map(|cached_account| (*pubkey, Cow::Owned(cached_account)));
LoadedAccountAccessor::Cached(maybe_cached_account)
} else {
let maybe_storage_entry = self
.storage
.get_account_storage_entry(slot, store_id)
.map(|account_storage_entry| (account_storage_entry, offset));
LoadedAccountAccessor::Stored(maybe_storage_entry)
}
}
fn try_recycle_and_insert_store(
&self,
slot: Slot,
min_size: u64,
max_size: u64,
) -> Option<Arc<AccountStorageEntry>> {
let store = self.try_recycle_store(slot, min_size, max_size)?;
self.insert_store(slot, store.clone());
Some(store)
}
fn try_recycle_store(
&self,
slot: Slot,
min_size: u64,
max_size: u64,
) -> Option<Arc<AccountStorageEntry>> {
let mut max = 0;
let mut min = std::u64::MAX;
let mut avail = 0;
let mut recycle_stores = self.recycle_stores.write().unwrap();
for (i, (_recycled_time, store)) in recycle_stores.iter().enumerate() {
if Arc::strong_count(store) == 1 {
max = std::cmp::max(store.accounts.capacity(), max);
min = std::cmp::min(store.accounts.capacity(), min);
avail += 1;
if store.accounts.capacity() >= min_size && store.accounts.capacity() < max_size {
let ret = recycle_stores.remove_entry(i);
drop(recycle_stores);
let old_id = ret.append_vec_id();
ret.recycle(slot, self.next_id.fetch_add(1, Ordering::Relaxed));
debug!(
"recycling store: {} {:?} old_id: {}",
ret.append_vec_id(),
ret.get_path(),
old_id
);
return Some(ret);
}
}
}
debug!(
"no recycle stores max: {} min: {} len: {} looking: {}, {} avail: {}",
max,
min,
recycle_stores.entry_count(),
min_size,
max_size,
avail,
);
None
}
fn find_storage_candidate(&self, slot: Slot, size: usize) -> Arc<AccountStorageEntry> {
let mut create_extra = false;
let mut get_slot_stores = Measure::start("get_slot_stores");
let slot_stores_lock = self.storage.get_slot_stores(slot);
get_slot_stores.stop();
self.stats
.store_get_slot_store
.fetch_add(get_slot_stores.as_us(), Ordering::Relaxed);
let mut find_existing = Measure::start("find_existing");
if let Some(slot_stores_lock) = slot_stores_lock {
let slot_stores = slot_stores_lock.read().unwrap();
if !slot_stores.is_empty() {
if slot_stores.len() <= self.min_num_stores {
let mut total_accounts = 0;
for store in slot_stores.values() {
total_accounts += store.count();
}
// Create more stores so that when scanning the storage all CPUs have work
if (total_accounts / 16) >= slot_stores.len() {
create_extra = true;
}
}
// pick an available store at random by iterating from a random point
let to_skip = thread_rng().gen_range(0, slot_stores.len());
for (i, store) in slot_stores.values().cycle().skip(to_skip).enumerate() {
if store.try_available() {
let ret = store.clone();
drop(slot_stores);
if create_extra {
if self
.try_recycle_and_insert_store(slot, size as u64, std::u64::MAX)
.is_none()
{
self.stats
.create_store_count
.fetch_add(1, Ordering::Relaxed);
self.create_and_insert_store(slot, self.file_size, "store extra");
} else {
self.stats
.recycle_store_count
.fetch_add(1, Ordering::Relaxed);
}
}
find_existing.stop();
self.stats
.store_find_existing
.fetch_add(find_existing.as_us(), Ordering::Relaxed);
return ret;
}
// looked at every store, bail...
if i == slot_stores.len() {
break;
}
}
}
}
find_existing.stop();
self.stats
.store_find_existing
.fetch_add(find_existing.as_us(), Ordering::Relaxed);
let store = if let Some(store) = self.try_recycle_store(slot, size as u64, std::u64::MAX) {
self.stats
.recycle_store_count
.fetch_add(1, Ordering::Relaxed);
store
} else {
self.stats
.create_store_count
.fetch_add(1, Ordering::Relaxed);
self.create_store(slot, self.file_size, "store", &self.paths)
};
// try_available is like taking a lock on the store,
// preventing other threads from using it.
// It must succeed here and happen before insert,
// otherwise another thread could also grab it from the index.
assert!(store.try_available());
self.insert_store(slot, store.clone());
store
}
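/// Rounds `size` up to the next multiple of `PAGE_SIZE`. The mask trick below is only
/// correct when `PAGE_SIZE` is a power of two (e.g., with a 4 KiB page size,
/// `page_align(1) == 4096` and `page_align(4097) == 8192`).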
fn page_align(size: u64) -> u64 {
(size + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1)
}
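/// Returns true if any existing storage entry for `slot` is still `Available` and has
/// more than `size` bytes of spare capacity. Note the `unwrap()` below assumes the slot
/// already has at least one storage entry.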
fn has_space_available(&self, slot: Slot, size: u64) -> bool {
let slot_storage = self.storage.get_slot_stores(slot).unwrap();
let slot_storage_r = slot_storage.read().unwrap();
for (_id, store) in slot_storage_r.iter() {
if store.status() == AccountStorageStatus::Available
&& (store.accounts.capacity() - store.accounts.len() as u64) > size
{
return true;
}
}
false
}
fn create_store(
&self,
slot: Slot,
size: u64,
from: &str,
paths: &[PathBuf],
) -> Arc<AccountStorageEntry> {
let path_index = thread_rng().gen_range(0, paths.len());
let store = Arc::new(self.new_storage_entry(
slot,
Path::new(&paths[path_index]),
Self::page_align(size),
));
if store.append_vec_id() == CACHE_VIRTUAL_STORAGE_ID {
panic!("We've run out of storage ids!");
}
debug!(
"creating store: {} slot: {} len: {} size: {} from: {} path: {:?}",
store.append_vec_id(),
slot,
store.accounts.len(),
store.accounts.capacity(),
from,
store.accounts.get_path()
);
store
}
fn create_and_insert_store(
&self,
slot: Slot,
size: u64,
from: &str,
) -> Arc<AccountStorageEntry> {
self.create_and_insert_store_with_paths(slot, size, from, &self.paths)
}
fn create_and_insert_store_with_paths(
&self,
slot: Slot,
size: u64,
from: &str,
paths: &[PathBuf],
) -> Arc<AccountStorageEntry> {
let store = self.create_store(slot, size, from, paths);
let store_for_index = store.clone();
self.insert_store(slot, store_for_index);
store
}
fn insert_store(&self, slot: Slot, store: Arc<AccountStorageEntry>) {
let slot_storages: SlotStores = self.storage.get_slot_stores(slot).unwrap_or_else(||
// DashMap entry.or_insert() returns a RefMut, essentially a write lock,
// which is dropped after this block ends, minimizing time held by the lock.
// However, we still want to persist the reference to the `SlotStores` behind
// the lock, hence we clone it out (`SlotStores` is an Arc, so it is cheap to clone).
self.storage
.0
.entry(slot)
.or_insert(Arc::new(RwLock::new(HashMap::new())))
.clone());
assert!(slot_storages
.write()
.unwrap()
.insert(store.append_vec_id(), store)
.is_none());
}
pub fn create_drop_bank_callback(
&self,
pruned_banks_sender: DroppedSlotsSender,
) -> SendDroppedBankCallback {
self.is_bank_drop_callback_enabled
.store(true, Ordering::SeqCst);
SendDroppedBankCallback::new(pruned_banks_sender)
}
/// This should only be called after `Bank::drop()` runs in bank.rs. See the BANK_DROP_SAFETY
/// comment below for more explanation.
/// `is_from_abs` is true if the caller is the AccountsBackgroundService
pub fn purge_slot(&self, slot: Slot, bank_id: BankId, is_from_abs: bool) {
if self.is_bank_drop_callback_enabled.load(Ordering::SeqCst) && !is_from_abs {
panic!("bad drop callpath detected; Bank::drop() must run serially with other logic in ABS like clean_accounts()")
}
// BANK_DROP_SAFETY: Because this function only runs once the bank is dropped,
// we know that there are no longer any ongoing scans on this bank, because scans require
// and hold a reference to the bank at the tip of the fork they're scanning. Hence it's
// safe to remove this bank_id from the `removed_bank_ids` list at this point.
if self
.accounts_index
.removed_bank_ids
.lock()
.unwrap()
.remove(&bank_id)
{
// If this slot was already cleaned up, no need to do any further cleans
return;
}
self.purge_slots(std::iter::once(&slot));
}
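/// Moves the removed slots' storage entries into the recycle pool until
/// `MAX_RECYCLE_STORES` is reached, counts the remainder as dropped stores, and returns
/// the time (in microseconds) spent acquiring the recycle-store write lock.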
fn recycle_slot_stores(
&self,
total_removed_storage_entries: usize,
slot_stores: &[SlotStores],
) -> u64 {
let mut recycled_count = 0;
let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_elapsed");
let mut recycle_stores = self.recycle_stores.write().unwrap();
recycle_stores_write_elapsed.stop();
for slot_entries in slot_stores {
let entry = slot_entries.read().unwrap();
for (_store_id, stores) in entry.iter() {
if recycle_stores.entry_count() > MAX_RECYCLE_STORES {
let dropped_count = total_removed_storage_entries - recycled_count;
self.stats
.dropped_stores
.fetch_add(dropped_count as u64, Ordering::Relaxed);
return recycle_stores_write_elapsed.as_us();
}
recycle_stores.add_entry(stores.clone());
recycled_count += 1;
}
}
recycle_stores_write_elapsed.as_us()
}
/// Purges every slot in `removed_slots` from both the cache and storage. This includes
/// entries in the accounts index, cache entries, and any backing storage entries.
fn purge_slots_from_cache_and_store<'a>(
&self,
removed_slots: impl Iterator<Item = &'a Slot>,
purge_stats: &PurgeStats,
) {
let mut remove_cache_elapsed_across_slots = 0;
let mut num_cached_slots_removed = 0;
let mut total_removed_cached_bytes = 0;
for remove_slot in removed_slots {
// This function is only currently safe with respect to `flush_slot_cache()` because
// both functions run serially in AccountsBackgroundService.
let mut remove_cache_elapsed = Measure::start("remove_cache_elapsed");
// Note: we cannot remove this slot from the slot cache until we've removed its
// entries from the accounts index first. This is because `scan_accounts()` relies on
// holding the index lock, finding the index entry, and then looking up the entry
// in the cache. If it fails to find that entry, it will panic in `get_loaded_account()`
if let Some(slot_cache) = self.accounts_cache.slot_cache(*remove_slot) {
// If the slot is still in the cache, remove the backing storages for
// the slot and from the Accounts Index
num_cached_slots_removed += 1;
total_removed_cached_bytes += slot_cache.total_bytes();
self.purge_slot_cache(*remove_slot, slot_cache);
remove_cache_elapsed.stop();
remove_cache_elapsed_across_slots += remove_cache_elapsed.as_us();
// Nobody else should have removed the slot cache entry yet
assert!(self.accounts_cache.remove_slot(*remove_slot).is_some());
} else {
self.purge_slot_storage(*remove_slot, purge_stats);
}
// It should not be possible for a slot to be in neither the cache nor storage. Even in
// a slot with only ticks, `Bank::new_from_parent()` immediately stores some sysvars
// on bank creation.
}
purge_stats
.remove_cache_elapsed
.fetch_add(remove_cache_elapsed_across_slots, Ordering::Relaxed);
purge_stats
.num_cached_slots_removed
.fetch_add(num_cached_slots_removed, Ordering::Relaxed);
purge_stats
.total_removed_cached_bytes
.fetch_add(total_removed_cached_bytes, Ordering::Relaxed);
}
/// Purge the backing storage entries for the given slot, does not purge from
/// the cache!
fn purge_dead_slots_from_storage<'a>(
&'a self,
removed_slots: impl Iterator<Item = &'a Slot> + Clone,
purge_stats: &PurgeStats,
) {
// Check all slots `removed_slots` are no longer "relevant" roots.
// Note that the slots here could have been rooted slots, but if they're passed here
// for removal it means:
// 1) All updates in that old root have been outdated by updates in newer roots
// 2) Those slots/roots should have already been purged from the accounts index root
// tracking metadata via `accounts_index.clean_dead_slot()`.
let mut safety_checks_elapsed = Measure::start("safety_checks_elapsed");
assert!(self
.accounts_index
.get_rooted_from_list(removed_slots.clone())
.is_empty());
safety_checks_elapsed.stop();
purge_stats
.safety_checks_elapsed
.fetch_add(safety_checks_elapsed.as_us(), Ordering::Relaxed);
let mut total_removed_storage_entries = 0;
let mut total_removed_stored_bytes = 0;
let mut all_removed_slot_storages = vec![];
let mut remove_storage_entries_elapsed = Measure::start("remove_storage_entries_elapsed");
for remove_slot in removed_slots {
// Remove the storage entries and collect some metrics
if let Some((_, slot_storages_to_be_removed)) = self.storage.0.remove(remove_slot) {
{
let r_slot_removed_storages = slot_storages_to_be_removed.read().unwrap();
total_removed_storage_entries += r_slot_removed_storages.len();
total_removed_stored_bytes += r_slot_removed_storages
.values()
.map(|i| i.accounts.capacity())
.sum::<u64>();
}
all_removed_slot_storages.push(slot_storages_to_be_removed.clone());
}
}
remove_storage_entries_elapsed.stop();
let num_stored_slots_removed = all_removed_slot_storages.len();
let recycle_stores_write_elapsed =
self.recycle_slot_stores(total_removed_storage_entries, &all_removed_slot_storages);
let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed");
// Backing mmaps for removed storages entries explicitly dropped here outside
// of any locks
drop(all_removed_slot_storages);
drop_storage_entries_elapsed.stop();
purge_stats
.remove_storage_entries_elapsed
.fetch_add(remove_storage_entries_elapsed.as_us(), Ordering::Relaxed);
purge_stats
.drop_storage_entries_elapsed
.fetch_add(drop_storage_entries_elapsed.as_us(), Ordering::Relaxed);
purge_stats
.num_stored_slots_removed
.fetch_add(num_stored_slots_removed, Ordering::Relaxed);
purge_stats
.total_removed_storage_entries
.fetch_add(total_removed_storage_entries, Ordering::Relaxed);
purge_stats
.total_removed_stored_bytes
.fetch_add(total_removed_stored_bytes, Ordering::Relaxed);
purge_stats
.recycle_stores_write_elapsed
.fetch_add(recycle_stores_write_elapsed, Ordering::Relaxed);
}
fn purge_slot_cache(&self, purged_slot: Slot, slot_cache: SlotCache) {
let mut purged_slot_pubkeys: HashSet<(Slot, Pubkey)> = HashSet::new();
let pubkey_to_slot_set: Vec<(Pubkey, Slot)> = slot_cache
.iter()
.map(|account| {
purged_slot_pubkeys.insert((purged_slot, *account.key()));
(*account.key(), purged_slot)
})
.collect();
self.purge_slot_cache_pubkeys(purged_slot, purged_slot_pubkeys, pubkey_to_slot_set, true);
}
fn purge_slot_cache_pubkeys(
&self,
purged_slot: Slot,
purged_slot_pubkeys: HashSet<(Slot, Pubkey)>,
pubkey_to_slot_set: Vec<(Pubkey, Slot)>,
is_dead: bool,
) {
// Slot purged from cache should not exist in the backing store
assert!(self.storage.get_slot_stores(purged_slot).is_none());
let num_purged_keys = pubkey_to_slot_set.len();
let reclaims = self.purge_keys_exact(pubkey_to_slot_set.iter());
assert_eq!(reclaims.len(), num_purged_keys);
if is_dead {
self.remove_dead_slots_metadata(
std::iter::once(&purged_slot),
purged_slot_pubkeys,
None,
);
}
}
fn purge_slot_storage(&self, remove_slot: Slot, purge_stats: &PurgeStats) {
// Because AccountsBackgroundService synchronously flushes from the accounts cache
// and handles all Bank::drop() (the cleanup function that leads to this
// function call), we don't need to worry about an overlapping cache flush
// with this function call. This means that if we get into this case, we can be
// confident that the entire state for this slot has already been flushed to
// storage.
let mut scan_storages_elasped = Measure::start("scan_storages_elasped");
type ScanResult = ScanStorageResult<Pubkey, Arc<Mutex<HashSet<(Pubkey, Slot)>>>>;
let scan_result: ScanResult = self.scan_account_storage(
remove_slot,
|loaded_account: LoadedAccount| Some(*loaded_account.pubkey()),
|accum: &Arc<Mutex<HashSet<(Pubkey, Slot)>>>, loaded_account: LoadedAccount| {
accum
.lock()
.unwrap()
.insert((*loaded_account.pubkey(), remove_slot));
},
);
scan_storages_elasped.stop();
purge_stats
.scan_storages_elasped
.fetch_add(scan_storages_elasped.as_us(), Ordering::Relaxed);
let mut purge_accounts_index_elapsed = Measure::start("purge_accounts_index_elapsed");
let reclaims;
match scan_result {
ScanStorageResult::Cached(_) => {
panic!("Should not see cached keys in this `else` branch, since we checked this slot did not exist in the cache above");
}
ScanStorageResult::Stored(stored_keys) => {
// Purge this slot from the accounts index
reclaims = self.purge_keys_exact(stored_keys.lock().unwrap().iter());
}
}
purge_accounts_index_elapsed.stop();
purge_stats
.purge_accounts_index_elapsed
.fetch_add(purge_accounts_index_elapsed.as_us(), Ordering::Relaxed);
// `handle_reclaims()` should remove all the account index entries and
// storage entries
let mut handle_reclaims_elapsed = Measure::start("handle_reclaims_elapsed");
// Slot should be dead after removing all its account entries
let expected_dead_slot = Some(remove_slot);
self.handle_reclaims(
&reclaims,
expected_dead_slot,
Some(purge_stats),
Some(&mut ReclaimResult::default()),
false,
);
handle_reclaims_elapsed.stop();
purge_stats
.handle_reclaims_elapsed
.fetch_add(handle_reclaims_elapsed.as_us(), Ordering::Relaxed);
// After handling the reclaimed entries, this slot's
// storage entries should be purged from self.storage
assert!(self.storage.get_slot_stores(remove_slot).is_none());
}
#[allow(clippy::needless_collect)]
fn purge_slots<'a>(&self, slots: impl Iterator<Item = &'a Slot>) {
// `add_root()` should be called first
let mut safety_checks_elapsed = Measure::start("safety_checks_elapsed");
let non_roots = slots
// Only safe to check when there are duplicate versions of a slot
// because ReplayStage will not make new roots before dumping the
// duplicate slots first. Thus we will not be in a case where we
// root slot `S` and then try to dump some other version of slot `S`;
// the dumping has to finish first.
//
// Also note roots are never removed via `remove_unrooted_slot()`, so
// it's safe to filter them out here as they won't need deletion from
// self.accounts_index.removed_bank_ids in `purge_slots_from_cache_and_store()`.
.filter(|slot| !self.accounts_index.is_root(**slot));
safety_checks_elapsed.stop();
self.external_purge_slots_stats
.safety_checks_elapsed
.fetch_add(safety_checks_elapsed.as_us(), Ordering::Relaxed);
self.purge_slots_from_cache_and_store(non_roots, &self.external_purge_slots_stats);
self.external_purge_slots_stats
.report("external_purge_slots_stats", Some(1000));
}
pub fn remove_unrooted_slots(&self, remove_slots: &[(Slot, BankId)]) {
let rooted_slots = self
.accounts_index
.get_rooted_from_list(remove_slots.iter().map(|(slot, _)| slot));
assert!(
rooted_slots.is_empty(),
"Trying to remove accounts for rooted slots {:?}",
rooted_slots
);
let RemoveUnrootedSlotsSynchronization {
slots_under_contention,
signal,
} = &self.remove_unrooted_slots_synchronization;
{
// Slots that are currently being flushed by flush_slot_cache()
let mut currently_contended_slots = slots_under_contention.lock().unwrap();
// Slots that are currently being flushed by flush_slot_cache() AND
// we want to remove in this function
let mut remaining_contended_flush_slots: Vec<Slot> = remove_slots
.iter()
.filter_map(|(remove_slot, _)| {
let is_being_flushed = currently_contended_slots.contains(remove_slot);
if !is_being_flushed {
// Reserve the slots that we want to purge that aren't currently
// being flushed to prevent cache from flushing those slots in
// the future.
//
// Note that the single replay thread has to remove a specific slot `N`
// before another version of the same slot can be replayed. This means
// multiple threads should not call `remove_unrooted_slots()` simultaneously
// with the same slot.
currently_contended_slots.insert(*remove_slot);
}
// If the cache is currently flushing this slot, add it to the list
Some(remove_slot).filter(|_| is_being_flushed)
})
.cloned()
.collect();
// Wait for cache flushes to finish
loop {
if !remaining_contended_flush_slots.is_empty() {
// Wait for the signal that the cache has finished flushing a slot
//
// Don't wait if the remaining_contended_flush_slots is empty, otherwise
// we may never get a signal since there's no cache flush thread to
// do the signaling
currently_contended_slots = signal.wait(currently_contended_slots).unwrap();
} else {
// There are no slots being flushed to wait on, so it's safe to continue
// to purging the slots we want to purge!
break;
}
// For each slot whose cache flush has finished, mark that we're about to start
// purging it by reserving it in `currently_contended_slots`.
remaining_contended_flush_slots.retain(|flush_slot| {
let is_being_flushed = currently_contended_slots.contains(flush_slot);
if !is_being_flushed {
// Mark that we're about to delete this slot now
currently_contended_slots.insert(*flush_slot);
}
is_being_flushed
});
}
}
// Mark that these slots are about to be purged so that new attempts to scan these
// banks fail, and any ongoing scans over these slots will detect that they should abort
// their results.
{
let mut locked_removed_bank_ids = self.accounts_index.removed_bank_ids.lock().unwrap();
for (_slot, remove_bank_id) in remove_slots.iter() {
locked_removed_bank_ids.insert(*remove_bank_id);
}
}
let remove_unrooted_purge_stats = PurgeStats::default();
self.purge_slots_from_cache_and_store(
remove_slots.iter().map(|(slot, _)| slot),
&remove_unrooted_purge_stats,
);
remove_unrooted_purge_stats.report("remove_unrooted_slots_purge_slots_stats", Some(0));
let mut currently_contended_slots = slots_under_contention.lock().unwrap();
for (remove_slot, _) in remove_slots {
assert!(currently_contended_slots.remove(remove_slot));
}
}
pub fn hash_stored_account(slot: Slot, account: &StoredAccountMeta) -> Hash {
Self::hash_account_data(
slot,
account.account_meta.lamports,
&account.account_meta.owner,
account.account_meta.executable,
account.account_meta.rent_epoch,
account.data,
&account.meta.pubkey,
)
}
pub fn hash_account<T: ReadableAccount>(slot: Slot, account: &T, pubkey: &Pubkey) -> Hash {
Self::hash_account_data(
slot,
account.lamports(),
account.owner(),
account.executable(),
account.rent_epoch(),
account.data(),
pubkey,
)
}
fn hash_frozen_account_data(account: &AccountSharedData) -> Hash {
let mut hasher = Hasher::default();
hasher.hash(account.data());
hasher.hash(account.owner().as_ref());
if account.executable() {
hasher.hash(&[1u8; 1]);
} else {
hasher.hash(&[0u8; 1]);
}
hasher.result()
}
fn hash_account_data(
slot: Slot,
lamports: u64,
owner: &Pubkey,
executable: bool,
rent_epoch: Epoch,
data: &[u8],
pubkey: &Pubkey,
) -> Hash {
if lamports == 0 {
return Hash::default();
}
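// Zero-lamport accounts hash to the default value above. For everything else, the hash
// is the blake3 digest of the fields in exactly the order appended below, so this
// ordering must stay consistent with everywhere account hashes are recomputed or verified.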
let mut hasher = blake3::Hasher::new();
hasher.update(&lamports.to_le_bytes());
hasher.update(&slot.to_le_bytes());
hasher.update(&rent_epoch.to_le_bytes());
hasher.update(data);
if executable {
hasher.update(&[1u8; 1]);
} else {
hasher.update(&[0u8; 1]);
}
hasher.update(owner.as_ref());
hasher.update(pubkey.as_ref());
Hash(<[u8; solana_sdk::hash::HASH_BYTES]>::try_from(hasher.finalize().as_slice()).unwrap())
}
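/// Atomically reserves `count` consecutive write versions and returns the first version
/// in the reserved range (i.e., the value of `write_version` before the add).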
fn bulk_assign_write_version(&self, count: usize) -> StoredMetaWriteVersion {
self.write_version
.fetch_add(count as StoredMetaWriteVersion, Ordering::Relaxed)
}
fn write_accounts_to_storage<F: FnMut(Slot, usize) -> Arc<AccountStorageEntry>>(
&self,
slot: Slot,
hashes: &[impl Borrow<Hash>],
mut storage_finder: F,
accounts_and_meta_to_store: &[(StoredMeta, Option<&impl ReadableAccount>)],
) -> Vec<AccountInfo> {
assert_eq!(hashes.len(), accounts_and_meta_to_store.len());
let mut infos: Vec<AccountInfo> = Vec::with_capacity(accounts_and_meta_to_store.len());
let mut total_append_accounts_us = 0;
let mut total_storage_find_us = 0;
while infos.len() < accounts_and_meta_to_store.len() {
let mut storage_find = Measure::start("storage_finder");
let data_len = accounts_and_meta_to_store[infos.len()]
.1
.map(|account| account.data().len())
.unwrap_or_default();
let storage = storage_finder(slot, data_len + STORE_META_OVERHEAD);
storage_find.stop();
total_storage_find_us += storage_find.as_us();
let mut append_accounts = Measure::start("append_accounts");
let rvs = storage.accounts.append_accounts(
&accounts_and_meta_to_store[infos.len()..],
&hashes[infos.len()..],
);
assert!(!rvs.is_empty());
append_accounts.stop();
total_append_accounts_us += append_accounts.as_us();
if rvs.len() == 1 {
storage.set_status(AccountStorageStatus::Full);
// See if an account overflows the append vecs in the slot.
let data_len = (data_len + STORE_META_OVERHEAD) as u64;
if !self.has_space_available(slot, data_len) {
let special_store_size = std::cmp::max(data_len * 2, self.file_size);
if self
.try_recycle_and_insert_store(slot, special_store_size, std::u64::MAX)
.is_none()
{
self.stats
.create_store_count
.fetch_add(1, Ordering::Relaxed);
self.create_and_insert_store(slot, special_store_size, "large create");
} else {
self.stats
.recycle_store_count
.fetch_add(1, Ordering::Relaxed);
}
}
continue;
}
for (offsets, (_, account)) in rvs
.windows(2)
.zip(&accounts_and_meta_to_store[infos.len()..])
{
let stored_size = offsets[1] - offsets[0];
storage.add_account(stored_size);
infos.push(AccountInfo {
store_id: storage.append_vec_id(),
offset: offsets[0],
stored_size,
lamports: account
.map(|account| account.lamports())
.unwrap_or_default(),
});
}
// restore the state to available
storage.set_status(AccountStorageStatus::Available);
}
self.stats
.store_append_accounts
.fetch_add(total_append_accounts_us, Ordering::Relaxed);
self.stats
.store_find_store
.fetch_add(total_storage_find_us, Ordering::Relaxed);
infos
}
pub fn mark_slot_frozen(&self, slot: Slot) {
if let Some(slot_cache) = self.accounts_cache.slot_cache(slot) {
slot_cache.mark_slot_frozen();
slot_cache.report_slot_store_metrics();
}
self.accounts_cache.report_size();
}
pub fn expire_old_recycle_stores(&self) {
let mut recycle_stores_write_elapsed = Measure::start("recycle_stores_write_time");
let recycle_stores = self.recycle_stores.write().unwrap().expire_old_entries();
recycle_stores_write_elapsed.stop();
let mut drop_storage_entries_elapsed = Measure::start("drop_storage_entries_elapsed");
drop(recycle_stores);
drop_storage_entries_elapsed.stop();
self.clean_accounts_stats
.purge_stats
.drop_storage_entries_elapsed
.fetch_add(drop_storage_entries_elapsed.as_us(), Ordering::Relaxed);
self.clean_accounts_stats
.purge_stats
.recycle_stores_write_elapsed
.fetch_add(recycle_stores_write_elapsed.as_us(), Ordering::Relaxed);
}
// `force_flush` flushes all the cached roots `<= requested_flush_root`. It also then
// flushes:
// 1) Any remaining roots if there are > MAX_CACHE_SLOTS remaining slots in the cache,
// 2) If there are still > MAX_CACHE_SLOTS remaining slots in the cache, the excess
// unrooted slots
pub fn flush_accounts_cache(&self, force_flush: bool, requested_flush_root: Option<Slot>) {
#[cfg(not(test))]
assert!(requested_flush_root.is_some());
if !force_flush && self.accounts_cache.num_slots() <= MAX_CACHE_SLOTS {
return;
}
// Flush only the roots <= requested_flush_root, so that snapshotting has all
// the relevant roots in storage.
let mut flush_roots_elapsed = Measure::start("flush_roots_elapsed");
let mut account_bytes_saved = 0;
let mut num_accounts_saved = 0;
// Note even if force_flush is false, we will still flush all roots <= the
// given `requested_flush_root`, even if some of the later roots cannot be used for
// cleaning due to an ongoing scan
let (total_new_cleaned_roots, num_cleaned_roots_flushed) = self
.flush_rooted_accounts_cache(
requested_flush_root,
Some((&mut account_bytes_saved, &mut num_accounts_saved)),
);
flush_roots_elapsed.stop();
// Note we don't purge unrooted slots here because there may be ongoing scans/references
// for those slots; let the Bank::drop() implementation do cleanup instead on dead
// banks.
// If there are > MAX_CACHE_SLOTS, then flush the excess ones to storage
let (total_new_excess_roots, num_excess_roots_flushed) =
if self.accounts_cache.num_slots() > MAX_CACHE_SLOTS {
// Start by flushing the roots
//
// Cannot do any cleaning on roots past `requested_flush_root` because future
// snapshots may need updates from those later slots, hence we pass `None`
// for `should_clean`.
self.flush_rooted_accounts_cache(None, None)
} else {
(0, 0)
};
let old_slots = self.accounts_cache.find_older_frozen_slots(MAX_CACHE_SLOTS);
let excess_slot_count = old_slots.len();
let mut unflushable_unrooted_slot_count = 0;
let max_flushed_root = self.accounts_cache.fetch_max_flush_root();
let old_slot_flush_stats: Vec<_> = old_slots
.into_iter()
.filter_map(|old_slot| {
// Don't flush slots that are known to be unrooted
if old_slot > max_flushed_root {
Some(self.flush_slot_cache(old_slot, None::<&mut fn(&_, &_) -> bool>))
} else {
unflushable_unrooted_slot_count += 1;
None
}
})
.collect();
info!(
"req_flush_root: {:?} old_slot_flushes: {:?}",
requested_flush_root, old_slot_flush_stats
);
datapoint_info!(
"accounts_db-flush_accounts_cache",
("total_new_cleaned_roots", total_new_cleaned_roots, i64),
("num_cleaned_roots_flushed", num_cleaned_roots_flushed, i64),
("total_new_excess_roots", total_new_excess_roots, i64),
("num_excess_roots_flushed", num_excess_roots_flushed, i64),
("excess_slot_count", excess_slot_count, i64),
(
"unflushable_unrooted_slot_count",
unflushable_unrooted_slot_count,
i64
),
(
"flush_roots_elapsed",
flush_roots_elapsed.as_us() as i64,
i64
),
("account_bytes_saved", account_bytes_saved, i64),
("num_accounts_saved", num_accounts_saved, i64),
);
// Flush a random slot out after every force flush to catch any inconsistencies
// between cache and written state (i.e. should cause a hash mismatch between validators
// that flush and don't flush if such a bug exists).
let num_slots_remaining = self.accounts_cache.num_slots();
if force_flush && num_slots_remaining >= FLUSH_CACHE_RANDOM_THRESHOLD {
// Don't flush slots that are known to be unrooted
let mut frozen_slots = self.accounts_cache.find_older_frozen_slots(0);
frozen_slots.retain(|s| *s > max_flushed_root);
// Remove a random index 0 <= i < `frozen_slots.len()`
let rand_slot = frozen_slots.choose(&mut thread_rng());
if let Some(rand_slot) = rand_slot {
let random_flush_stats =
self.flush_slot_cache(*rand_slot, None::<&mut fn(&_, &_) -> bool>);
info!(
"Flushed random slot: num_remaining: {} {:?}",
num_slots_remaining, random_flush_stats,
);
}
}
}
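/// Flushes cached roots up to `requested_flush_root`, iterating from highest to lowest.
/// When `should_clean` is `Some`, accounts already written by a later flushed root are
/// skipped and the byte/account savings are accumulated into the provided counters.
/// Returns `(num_new_roots, num_roots_flushed)`: the number of cached roots cleared
/// (all of which are added to the uncleaned roots set) and the number whose slot cache
/// was actually flushed here.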
fn flush_rooted_accounts_cache(
&self,
requested_flush_root: Option<Slot>,
should_clean: Option<(&mut usize, &mut usize)>,
) -> (usize, usize) {
let max_clean_root = should_clean.as_ref().and_then(|_| {
// If there is a long running scan going on, this could prevent any cleaning
// based on updates from slots > `max_clean_root`.
self.max_clean_root(requested_flush_root)
});
// Use HashMap because HashSet doesn't provide the Entry API
let mut written_accounts = HashMap::new();
// If `should_clean` is None, then `should_flush_f` is also None, which will cause
// `flush_slot_cache` to flush all accounts to storage without cleaning any accounts.
let mut should_flush_f = should_clean.map(|(account_bytes_saved, num_accounts_saved)| {
move |&pubkey: &Pubkey, account: &AccountSharedData| {
use std::collections::hash_map::Entry::{Occupied, Vacant};
let should_flush = match written_accounts.entry(pubkey) {
Vacant(vacant_entry) => {
vacant_entry.insert(());
true
}
Occupied(_occupied_entry) => {
*account_bytes_saved += account.data().len();
*num_accounts_saved += 1;
// If a later root already wrote this account, no point
// in flushing it
false
}
};
should_flush
}
});
// Always flush up to `requested_flush_root`, which is necessary for things like snapshotting.
let cached_roots: BTreeSet<Slot> = self.accounts_cache.clear_roots(requested_flush_root);
// Iterate from highest to lowest so that we don't need to flush earlier
// outdated updates in earlier roots
let mut num_roots_flushed = 0;
for &root in cached_roots.iter().rev() {
let should_flush_f = if let Some(max_clean_root) = max_clean_root {
if root > max_clean_root {
// Only if the root is greater than the `max_clean_root` do we
// have to prevent cleaning, otherwise, just default to `should_flush_f`
// for any slots <= `max_clean_root`
None
} else {
should_flush_f.as_mut()
}
} else {
should_flush_f.as_mut()
};
if self.flush_slot_cache(root, should_flush_f).is_some() {
num_roots_flushed += 1;
}
// Regardless of whether this slot was *just* flushed from the cache by the above
// `flush_slot_cache()`, we should update the `max_flush_root`.
// This is because some rooted slots may be flushed to storage *before* they are marked as root.
// This can occur for instance when:
// 1) The cache is overwhelmed, so we flushed some yet-to-be-rooted frozen slots
// 2) Random evictions
// These slots may then *later* be marked as root, so we still need to handle updating the
// `max_flush_root` in the accounts cache.
self.accounts_cache.set_max_flush_root(root);
}
// Only add to the uncleaned roots set *after* we've flushed the previous roots,
// so that clean will actually be able to clean the slots.
let num_new_roots = cached_roots.len();
self.accounts_index.add_uncleaned_roots(cached_roots);
(num_new_roots, num_roots_flushed)
}
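/// Flushes a single slot's cache to storage. Each cached account is either written out
/// (when `should_flush_f` returns true or is absent) or purged from the index as if it
/// had never been stored. For a non-dead slot, all flushed accounts land in a single
/// page-aligned AppendVec before the slot is removed from the cache, so readers observe
/// an atomic switch from cache to storage.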
fn do_flush_slot_cache(
&self,
slot: Slot,
slot_cache: &SlotCache,
mut should_flush_f: Option<&mut impl FnMut(&Pubkey, &AccountSharedData) -> bool>,
) -> FlushStats {
let mut num_purged = 0;
let mut total_size = 0;
let mut num_flushed = 0;
let iter_items: Vec<_> = slot_cache.iter().collect();
let mut purged_slot_pubkeys: HashSet<(Slot, Pubkey)> = HashSet::new();
let mut pubkey_to_slot_set: Vec<(Pubkey, Slot)> = vec![];
let (accounts, hashes): (Vec<(&Pubkey, &AccountSharedData)>, Vec<Hash>) = iter_items
.iter()
.filter_map(|iter_item| {
let key = iter_item.key();
let account = &iter_item.value().account;
let should_flush = should_flush_f
.as_mut()
.map(|should_flush_f| should_flush_f(key, account))
.unwrap_or(true);
if should_flush {
let hash = iter_item.value().hash();
total_size += (account.data().len() + STORE_META_OVERHEAD) as u64;
num_flushed += 1;
Some(((key, account), hash))
} else {
// If we don't flush, we have to remove the entry from the
// index, since it's equivalent to purging
purged_slot_pubkeys.insert((slot, *key));
pubkey_to_slot_set.push((*key, slot));
num_purged += 1;
None
}
})
.unzip();
let is_dead_slot = accounts.is_empty();
// Remove the account index entries from earlier roots that are outdated by later roots.
// Safe because queries to the index will be reading updates from later roots.
self.purge_slot_cache_pubkeys(slot, purged_slot_pubkeys, pubkey_to_slot_set, is_dead_slot);
if !is_dead_slot {
let aligned_total_size = Self::page_align(total_size);
// This ensures that all updates are written to an AppendVec, before any
// updates to the index happen, so anybody that sees a real entry in the index,
// will be able to find the account in storage
let flushed_store =
self.create_and_insert_store(slot, aligned_total_size, "flush_slot_cache");
self.store_accounts_frozen(
slot,
&accounts,
Some(&hashes),
Some(Box::new(move |_, _| flushed_store.clone())),
None,
);
// If the above sizing function is correct, just one AppendVec is enough to hold
// all the data for the slot
assert_eq!(
self.storage
.get_slot_stores(slot)
.unwrap()
.read()
.unwrap()
.len(),
1
);
}
// Remove this slot from the cache, which, to AccountsDb's new readers, should look like an
// atomic switch from the cache to storage.
// There is a race condition for existing readers that happen to read exactly while we are
// flushing. That case is handled by retry_to_get_account_accessor().
assert!(self.accounts_cache.remove_slot(slot).is_some());
FlushStats {
slot,
num_flushed,
num_purged,
total_size,
}
}
/// `should_flush_f` is an optional closure that determines whether a given
/// account should be flushed. Passing `None` will by default flush all
/// accounts
fn flush_slot_cache(
&self,
slot: Slot,
should_flush_f: Option<&mut impl FnMut(&Pubkey, &AccountSharedData) -> bool>,
) -> Option<FlushStats> {
let is_being_purged = {
let mut slots_under_contention = self
.remove_unrooted_slots_synchronization
.slots_under_contention
.lock()
.unwrap();
// If we're purging this slot, don't flush it here
if slots_under_contention.contains(&slot) {
true
} else {
slots_under_contention.insert(slot);
false
}
};
if !is_being_purged {
let flush_stats = self.accounts_cache.slot_cache(slot).map(|slot_cache| {
#[cfg(test)]
{
// Give some time for cache flushing to occur here for unit tests
sleep(Duration::from_millis(self.load_delay));
}
// Since we added the slot to `slots_under_contention` AND this slot
// still exists in the cache, we know the slot cannot be removed
// by any other threads past this point. We are now responsible for
// flushing this slot.
self.do_flush_slot_cache(slot, &slot_cache, should_flush_f)
});
// Nobody else should have been purging this slot, so it should not have been removed
// from `self.remove_unrooted_slots_synchronization`.
assert!(self
.remove_unrooted_slots_synchronization
.slots_under_contention
.lock()
.unwrap()
.remove(&slot));
// Signal to any threads blocked on `remove_unrooted_slots(slot)` that we have finished
// flushing
self.remove_unrooted_slots_synchronization
.signal
.notify_all();
flush_stats
} else {
None
}
}
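/// Stores the given accounts into the accounts cache for `slot` instead of an AppendVec.
/// Cached entries get virtual store ids/offsets, and each stored account is forwarded to
/// the background hasher (when one is configured) so its hash can be computed lazily.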
fn write_accounts_to_cache(
&self,
slot: Slot,
hashes: Option<&[impl Borrow<Hash>]>,
accounts_and_meta_to_store: &[(StoredMeta, Option<&impl ReadableAccount>)],
) -> Vec<AccountInfo> {
let len = accounts_and_meta_to_store.len();
let hashes = hashes.map(|hashes| {
assert_eq!(hashes.len(), len);
hashes
});
accounts_and_meta_to_store
.iter()
.enumerate()
.map(|(i, (meta, account))| {
let hash = hashes.map(|hashes| hashes[i].borrow());
let account = account
.map(|account| account.to_account_shared_data())
.unwrap_or_default();
let account_info = AccountInfo {
store_id: CACHE_VIRTUAL_STORAGE_ID,
offset: CACHE_VIRTUAL_OFFSET,
stored_size: CACHE_VIRTUAL_STORED_SIZE,
lamports: account.lamports(),
};
let cached_account = self.accounts_cache.store(slot, &meta.pubkey, account, hash);
// hash this account in the background
match &self.sender_bg_hasher {
Some(ref sender) => {
let _ = sender.send(cached_account);
}
None => (),
};
account_info
})
.collect()
}
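/// Builds `StoredMeta` for each account (zero-lamport accounts are stored as
/// default/empty accounts) and routes the batch either to the write cache (when caching
/// is enabled and this is a cached store) or to append-vec storage, computing any
/// missing account hashes on the fly for the storage path.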
fn store_accounts_to<
F: FnMut(Slot, usize) -> Arc<AccountStorageEntry>,
P: Iterator<Item = u64>,
>(
&self,
slot: Slot,
accounts: &[(&Pubkey, &impl ReadableAccount)],
hashes: Option<&[impl Borrow<Hash>]>,
storage_finder: F,
mut write_version_producer: P,
is_cached_store: bool,
) -> Vec<AccountInfo> {
let mut calc_stored_meta_time = Measure::start("calc_stored_meta");
let accounts_and_meta_to_store: Vec<_> = accounts
.iter()
.map(|(pubkey, account)| {
self.read_only_accounts_cache.remove(pubkey, slot);
// this is the source of Some(Account) or None.
// Some(Account) = store 'Account'
// None = store a default/empty account with 0 lamports
let (account, data_len) = if account.lamports() == 0 {
(None, 0)
} else {
(Some(*account), account.data().len() as u64)
};
let meta = StoredMeta {
write_version: write_version_producer.next().unwrap(),
pubkey: **pubkey,
data_len,
};
(meta, account)
})
.collect();
calc_stored_meta_time.stop();
self.stats
.calc_stored_meta
.fetch_add(calc_stored_meta_time.as_us(), Ordering::Relaxed);
if self.caching_enabled && is_cached_store {
self.write_accounts_to_cache(slot, hashes, &accounts_and_meta_to_store)
} else {
match hashes {
Some(hashes) => self.write_accounts_to_storage(
slot,
hashes,
storage_finder,
&accounts_and_meta_to_store,
),
None => {
// hash any accounts where we were lazy in calculating the hash
let mut hash_time = Measure::start("hash_accounts");
let mut stats = BankHashStats::default();
let len = accounts_and_meta_to_store.len();
let mut hashes = Vec::with_capacity(len);
for account in accounts {
stats.update(account.1);
let hash = Self::hash_account(slot, account.1, account.0);
hashes.push(hash);
}
hash_time.stop();
self.stats
.store_hash_accounts
.fetch_add(hash_time.as_us(), Ordering::Relaxed);
self.write_accounts_to_storage(
slot,
&hashes,
storage_finder,
&accounts_and_meta_to_store,
)
}
}
}
}
fn report_store_stats(&self) {
let mut total_count = 0;
let mut min = std::usize::MAX;
let mut min_slot = 0;
let mut max = 0;
let mut max_slot = 0;
let mut newest_slot = 0;
let mut oldest_slot = std::u64::MAX;
for iter_item in self.storage.0.iter() {
let slot = iter_item.key();
let slot_stores = iter_item.value().read().unwrap();
total_count += slot_stores.len();
if slot_stores.len() < min {
min = slot_stores.len();
min_slot = *slot;
}
if slot_stores.len() > max {
max = slot_stores.len();
max_slot = *slot;
}
if *slot > newest_slot {
newest_slot = *slot;
}
if *slot < oldest_slot {
oldest_slot = *slot;
}
}
info!("total_stores: {}, newest_slot: {}, oldest_slot: {}, max_slot: {} (num={}), min_slot: {} (num={})",
total_count, newest_slot, oldest_slot, max_slot, max, min_slot, min);
datapoint_info!(
"accounts_db-stores",
("total_count", total_count, i64),
(
"recycle_count",
self.recycle_stores.read().unwrap().entry_count() as u64,
i64
),
);
datapoint_info!(
"accounts_db-perf-stats",
(
"delta_hash_num",
self.stats.delta_hash_num.swap(0, Ordering::Relaxed),
i64
),
(
"delta_hash_scan_us",
self.stats
.delta_hash_scan_time_total_us
.swap(0, Ordering::Relaxed),
i64
),
(
"delta_hash_accumulate_us",
self.stats
.delta_hash_accumulate_time_total_us
.swap(0, Ordering::Relaxed),
i64
),
);
}
pub fn checked_iterative_sum_for_capitalization(total_cap: u64, new_cap: u64) -> u64 {
let new_total = total_cap as u128 + new_cap as u128;
AccountsHash::checked_cast_for_capitalization(new_total)
}
pub fn checked_sum_for_capitalization<T: Iterator<Item = u64>>(balances: T) -> u64 {
AccountsHash::checked_cast_for_capitalization(balances.map(|b| b as u128).sum::<u128>())
}
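/// Index-based accounts-hash calculation: walks every pubkey in the accounts index,
/// loads the newest version visible from `ancestors`/`slot`, and feeds the per-account
/// hashes to `AccountsHash::calculate_hash` while summing lamports for capitalization.
/// When `check_hash` is set, each stored hash is recomputed and compared, and any
/// mismatch fails the whole calculation.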
fn calculate_accounts_hash(
&self,
slot: Slot,
ancestors: &Ancestors,
check_hash: bool,
) -> Result<(Hash, u64), BankHashVerificationError> {
use BankHashVerificationError::*;
let mut collect = Measure::start("collect");
let keys: Vec<_> = self
.accounts_index
.account_maps
.iter()
.map(|btree| btree.read().unwrap().keys().cloned().collect::<Vec<_>>())
.flatten()
.collect();
collect.stop();
let mut scan = Measure::start("scan");
let mismatch_found = AtomicU64::new(0);
// Pick a chunk size big enough to allow us to produce output vectors that are smaller than the overall size.
// We'll also accumulate the lamports within each chunk and fewer chunks results in less contention to accumulate the sum.
let chunks = crate::accounts_hash::MERKLE_FANOUT.pow(4);
let total_lamports = Mutex::<u64>::new(0);
let get_hashes = || {
keys.par_chunks(chunks)
.map(|pubkeys| {
let mut sum = 0u128;
let result: Vec<Hash> = pubkeys
.iter()
.filter_map(|pubkey| {
if let AccountIndexGetResult::Found(lock, index) =
self.accounts_index.get(pubkey, Some(ancestors), Some(slot))
{
let (slot, account_info) = &lock.slot_list()[index];
if account_info.lamports != 0 {
// Because we're keeping the `lock` here, there is no need
// to use retry_to_get_account_accessor().
// In other words, the flusher/shrinker/cleaner is blocked from
// causing any Accessor(None) situation.
// Anyway, this race condition concern is currently a moot
// point because calculate_accounts_hash() should not
// currently race with clean/shrink, as the full hash
// is synchronous with clean/shrink in
// AccountsBackgroundService.
self.get_account_accessor(
*slot,
pubkey,
account_info.store_id,
account_info.offset,
)
.get_loaded_account()
.and_then(
|loaded_account| {
let loaded_hash = loaded_account.loaded_hash();
let balance = account_info.lamports;
if check_hash {
let computed_hash =
loaded_account.compute_hash(*slot, pubkey);
if computed_hash != loaded_hash {
info!("hash mismatch found: computed: {}, loaded: {}, pubkey: {}", computed_hash, loaded_hash, pubkey);
mismatch_found
.fetch_add(1, Ordering::Relaxed);
return None;
}
}
sum += balance as u128;
Some(loaded_hash)
},
)
} else {
None
}
} else {
None
}
})
.collect();
let mut total = total_lamports.lock().unwrap();
*total =
AccountsHash::checked_cast_for_capitalization(*total as u128 + sum);
result
}).collect()
};
let hashes: Vec<Vec<Hash>> = if check_hash {
get_hashes()
} else {
self.thread_pool_clean.install(get_hashes)
};
if mismatch_found.load(Ordering::Relaxed) > 0 {
warn!(
"{} mismatched account hash(es) found",
mismatch_found.load(Ordering::Relaxed)
);
return Err(MismatchedAccountHash);
}
scan.stop();
let total_lamports = *total_lamports.lock().unwrap();
let mut hash_time = Measure::start("hash");
let (accumulated_hash, hash_total) = AccountsHash::calculate_hash(hashes);
hash_time.stop();
datapoint_info!(
"update_accounts_hash",
("accounts_scan", scan.as_us(), i64),
("hash", hash_time.as_us(), i64),
("hash_total", hash_total, i64),
("collect", collect.as_us(), i64),
);
Ok((accumulated_hash, total_lamports))
}
pub fn get_accounts_hash(&self, slot: Slot) -> Hash {
let bank_hashes = self.bank_hashes.read().unwrap();
let bank_hash_info = bank_hashes.get(&slot).unwrap();
bank_hash_info.snapshot_hash
}
pub fn update_accounts_hash(&self, slot: Slot, ancestors: &Ancestors) -> (Hash, u64) {
self.update_accounts_hash_with_index_option(true, false, slot, ancestors, None, false)
}
pub fn update_accounts_hash_test(&self, slot: Slot, ancestors: &Ancestors) -> (Hash, u64) {
self.update_accounts_hash_with_index_option(true, true, slot, ancestors, None, false)
}
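/// When a slot has multiple storage entries, accounts must be visited in write_version
/// order across all of them. This performs an N-way merge: it keeps one iterator per
/// storage, repeatedly picks the entry with the smallest write_version, and feeds it to
/// `scan_func` until every iterator is exhausted.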
fn scan_multiple_account_storages_one_slot<F, B>(
storages: &[Arc<AccountStorageEntry>],
scan_func: &F,
slot: Slot,
retval: &mut B,
) where
F: Fn(LoadedAccount, &mut B, Slot) + Send + Sync,
B: Send + Default,
{
// we have to call the scan_func in order of write_version within a slot if there are multiple storages per slot
let mut len = storages.len();
let mut progress = Vec::with_capacity(len);
let mut current = Vec::with_capacity(len);
for storage in storages {
let accounts = storage.accounts.accounts(0);
let mut iterator: std::vec::IntoIter<StoredAccountMeta<'_>> = accounts.into_iter();
if let Some(item) = iterator
.next()
.map(|stored_account| (stored_account.meta.write_version, Some(stored_account)))
{
current.push(item);
progress.push(iterator);
}
}
while !progress.is_empty() {
let mut min = current[0].0;
let mut min_index = 0;
for (i, (item, _)) in current.iter().enumerate().take(len).skip(1) {
if item < &min {
min_index = i;
min = *item;
}
}
let mut account = (0, None);
std::mem::swap(&mut account, &mut current[min_index]);
scan_func(LoadedAccount::Stored(account.1.unwrap()), retval, slot);
let next = progress[min_index]
.next()
.map(|stored_account| (stored_account.meta.write_version, Some(stored_account)));
match next {
Some(item) => {
current[min_index] = item;
}
None => {
current.remove(min_index);
progress.remove(min_index);
len -= 1;
}
}
}
}
/// Scan through all the account storage in parallel
fn scan_account_storage_no_bank<F, F2, B, C>(
accounts_cache_and_ancestors: Option<(
&AccountsCache,
&Ancestors,
&AccountInfoAccountsIndex,
)>,
snapshot_storages: &SortedStorages,
scan_func: F,
after_func: F2,
) -> Vec<C>
where
F: Fn(LoadedAccount, &mut B, Slot) + Send + Sync,
F2: Fn(B) -> C + Send + Sync,
B: Send + Default,
C: Send + Default,
{
// Without chunks, we end up with 1 output vec for each outer snapshot storage.
// This results in too many vectors to be efficient.
const MAX_ITEMS_PER_CHUNK: Slot = 5_000;
let chunks = 1 + (snapshot_storages.range_width() as Slot / MAX_ITEMS_PER_CHUNK);
(0..chunks)
.into_par_iter()
.map(|chunk| {
let mut retval = B::default();
let start = snapshot_storages.range().start + chunk * MAX_ITEMS_PER_CHUNK;
let end = std::cmp::min(start + MAX_ITEMS_PER_CHUNK, snapshot_storages.range().end);
for slot in start..end {
let sub_storages = snapshot_storages.get(slot);
let mut valid_slot = false;
if let Some(sub_storages) = sub_storages {
valid_slot = true;
Self::scan_multiple_account_storages_one_slot(
sub_storages,
&scan_func,
slot,
&mut retval,
);
}
if let Some((cache, ancestors, accounts_index)) = accounts_cache_and_ancestors {
if let Some(slot_cache) = cache.slot_cache(slot) {
if valid_slot
|| ancestors.contains_key(&slot)
|| accounts_index.is_root(slot)
{
let keys = slot_cache.get_all_pubkeys();
for key in keys {
if let Some(cached_account) = slot_cache.get_cloned(&key) {
let mut accessor = LoadedAccountAccessor::Cached(Some((
key,
Cow::Owned(cached_account),
)));
let account = accessor.get_loaded_account().unwrap();
scan_func(account, &mut retval, slot);
};
}
}
}
}
}
after_func(retval)
})
.collect()
}
fn calculate_accounts_hash_helper(
&self,
use_index: bool,
slot: Slot,
ancestors: &Ancestors,
check_hash: bool,
can_cached_slot_be_unflushed: bool,
) -> Result<(Hash, u64), BankHashVerificationError> {
if !use_index {
let accounts_cache_and_ancestors = if can_cached_slot_be_unflushed {
Some((&self.accounts_cache, ancestors, &self.accounts_index))
} else {
None
};
let mut collect_time = Measure::start("collect");
let (combined_maps, slots) = self.get_snapshot_storages(slot, Some(ancestors));
collect_time.stop();
let mut sort_time = Measure::start("sort_storages");
let min_root = self.accounts_index.min_root();
let storages = SortedStorages::new_with_slots(
combined_maps.iter().zip(slots.iter()),
min_root,
Some(slot),
);
sort_time.stop();
let timings = HashStats {
collect_snapshots_us: collect_time.as_us(),
storage_sort_us: sort_time.as_us(),
..HashStats::default()
};
Self::calculate_accounts_hash_without_index(
&storages,
Some(&self.thread_pool_clean),
timings,
check_hash,
accounts_cache_and_ancestors,
)
} else {
self.calculate_accounts_hash(slot, ancestors, check_hash)
}
}
fn calculate_accounts_hash_helper_with_verify(
&self,
use_index: bool,
debug_verify: bool,
slot: Slot,
ancestors: &Ancestors,
expected_capitalization: Option<u64>,
can_cached_slot_be_unflushed: bool,
check_hash: bool,
) -> Result<(Hash, u64), BankHashVerificationError> {
let (hash, total_lamports) = self.calculate_accounts_hash_helper(
use_index,
slot,
ancestors,
check_hash,
can_cached_slot_be_unflushed,
)?;
if debug_verify {
// calculate the other way (store or non-store) and verify results match.
let (hash_other, total_lamports_other) = self.calculate_accounts_hash_helper(
!use_index,
slot,
ancestors,
check_hash,
can_cached_slot_be_unflushed,
)?;
let success = hash == hash_other
&& total_lamports == total_lamports_other
&& total_lamports == expected_capitalization.unwrap_or(total_lamports);
assert!(success, "update_accounts_hash_with_index_option mismatch. hashes: {}, {}; lamports: {}, {}; expected lamports: {:?}, using index: {}, slot: {}", hash, hash_other, total_lamports, total_lamports_other, expected_capitalization, use_index, slot);
}
Ok((hash, total_lamports))
}
pub fn update_accounts_hash_with_index_option(
&self,
use_index: bool,
debug_verify: bool,
slot: Slot,
ancestors: &Ancestors,
expected_capitalization: Option<u64>,
can_cached_slot_be_unflushed: bool,
) -> (Hash, u64) {
let check_hash = false;
let (hash, total_lamports) = self
.calculate_accounts_hash_helper_with_verify(
use_index,
debug_verify,
slot,
ancestors,
expected_capitalization,
can_cached_slot_be_unflushed,
check_hash,
)
.unwrap(); // unwrap here will never fail since check_hash = false
let mut bank_hashes = self.bank_hashes.write().unwrap();
let mut bank_hash_info = bank_hashes.get_mut(&slot).unwrap();
bank_hash_info.snapshot_hash = hash;
(hash, total_lamports)
}
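/// Scans the given storages (and, optionally, unflushed cache slots) and bins every
/// loaded account by pubkey into `bins` buckets, keeping only the bins inside
/// `bin_range` (shifted so the first bin of interest sits at index 0). Zero-lamport
/// accounts are recorded with a sentinel balance so later stages can distinguish them,
/// and each chunk's result is sorted via `sort_slot_storage_scan` before being returned.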
fn scan_snapshot_stores_with_cache(
storage: &SortedStorages,
mut stats: &mut crate::accounts_hash::HashStats,
bins: usize,
bin_range: &Range<usize>,
check_hash: bool,
accounts_cache_and_ancestors: Option<(
&AccountsCache,
&Ancestors,
&AccountInfoAccountsIndex,
)>,
) -> Result<Vec<Vec<Vec<CalculateHashIntermediate>>>, BankHashVerificationError> {
let bin_calculator = PubkeyBinCalculator16::new(bins);
assert!(bin_range.start < bins && bin_range.end <= bins && bin_range.start < bin_range.end);
let mut time = Measure::start("scan all accounts");
stats.num_snapshot_storage = storage.slot_count();
let mismatch_found = AtomicU64::new(0);
let range = bin_range.end - bin_range.start;
let sort_time = AtomicU64::new(0);
let result: Vec<Vec<Vec<CalculateHashIntermediate>>> = Self::scan_account_storage_no_bank(
accounts_cache_and_ancestors,
storage,
|loaded_account: LoadedAccount,
accum: &mut Vec<Vec<CalculateHashIntermediate>>,
slot: Slot| {
let pubkey = loaded_account.pubkey();
let mut pubkey_to_bin_index = bin_calculator.bin_from_pubkey(pubkey);
if !bin_range.contains(&pubkey_to_bin_index) {
return;
}
// when we are scanning with bin ranges, we don't need to use exact bin numbers. Subtract so that the first bin we care about is at index 0.
pubkey_to_bin_index -= bin_range.start;
let raw_lamports = loaded_account.lamports();
let zero_raw_lamports = raw_lamports == 0;
let balance = if zero_raw_lamports {
crate::accounts_hash::ZERO_RAW_LAMPORTS_SENTINEL
} else {
raw_lamports
};
let source_item = CalculateHashIntermediate::new_without_slot(
loaded_account.loaded_hash(),
balance,
*pubkey,
);
if check_hash {
let computed_hash = loaded_account.compute_hash(slot, pubkey);
if computed_hash != source_item.hash {
info!(
"hash mismatch found: computed: {}, loaded: {}, pubkey: {}",
computed_hash, source_item.hash, pubkey
);
mismatch_found.fetch_add(1, Ordering::Relaxed);
}
}
let max = accum.len();
if max == 0 {
accum.extend(vec![Vec::new(); range]);
}
accum[pubkey_to_bin_index].push(source_item);
},
|x| {
let (result, timing) = Self::sort_slot_storage_scan(x);
sort_time.fetch_add(timing, Ordering::Relaxed);
result
},
);
stats.sort_time_total_us += sort_time.load(Ordering::Relaxed);
if check_hash && mismatch_found.load(Ordering::Relaxed) > 0 {
warn!(
"{} mismatched account hash(es) found",
mismatch_found.load(Ordering::Relaxed)
);
return Err(BankHashVerificationError::MismatchedAccountHash);
}
time.stop();
stats.scan_time_total_us += time.as_us();
Ok(result)
}
fn sort_slot_storage_scan(
accum: Vec<Vec<CalculateHashIntermediate>>,
) -> (Vec<Vec<CalculateHashIntermediate>>, u64) {
let time = AtomicU64::new(0);
(
accum
.into_par_iter()
.map(|mut items| {
let mut sort_time = Measure::start("sort");
{
// sort_by vs unstable because slot and write_version are already in order
items.sort_by(AccountsHash::compare_two_hash_entries);
}
sort_time.stop();
time.fetch_add(sort_time.as_us(), Ordering::Relaxed);
items
})
.collect(),
time.load(Ordering::Relaxed),
)
}
// modeled after get_accounts_delta_hash
// intended to be faster than calculate_accounts_hash
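// The scan is split into `num_scan_passes` passes over disjoint pubkey-bin ranges; each
// pass bins and sorts the accounts it sees and folds its partial result into
// `previous_pass`, which (per the note below) cuts peak dynamic memory roughly in half
// when two passes are used.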
pub fn calculate_accounts_hash_without_index(
storages: &SortedStorages,
thread_pool: Option<&ThreadPool>,
mut stats: HashStats,
check_hash: bool,
accounts_cache_and_ancestors: Option<(
&AccountsCache,
&Ancestors,
&AccountInfoAccountsIndex,
)>,
) -> Result<(Hash, u64), BankHashVerificationError> {
let mut scan_and_hash = move || {
// When calculating hashes, it is helpful to break the pubkeys found into bins based on the pubkey value.
// More bins means smaller vectors to sort, copy, etc.
const PUBKEY_BINS_FOR_CALCULATING_HASHES: usize = 65536;
// # of passes should be a function of the total # of accounts that are active.
// higher passes = slower total time, lower dynamic memory usage
// lower passes = faster total time, higher dynamic memory usage
// passes=2 cuts dynamic memory usage in approximately half.
let num_scan_passes: usize = 2;
let bins_per_pass = PUBKEY_BINS_FOR_CALCULATING_HASHES / num_scan_passes;
assert_eq!(
bins_per_pass * num_scan_passes,
PUBKEY_BINS_FOR_CALCULATING_HASHES
); // evenly divisible
let mut previous_pass = PreviousPass::default();
let mut final_result = (Hash::default(), 0);
for pass in 0..num_scan_passes {
let bounds = Range {
start: pass * bins_per_pass,
end: (pass + 1) * bins_per_pass,
};
let result = Self::scan_snapshot_stores_with_cache(
storages,
&mut stats,
PUBKEY_BINS_FOR_CALCULATING_HASHES,
&bounds,
check_hash,
accounts_cache_and_ancestors,
)?;
let (hash, lamports, for_next_pass) = AccountsHash::rest_of_hash_calculation(
result,
&mut stats,
pass == num_scan_passes - 1,
previous_pass,
bins_per_pass,
);
previous_pass = for_next_pass;
final_result = (hash, lamports);
}
Ok(final_result)
};
if let Some(thread_pool) = thread_pool {
thread_pool.install(scan_and_hash)
} else {
scan_and_hash()
}
}
pub fn verify_bank_hash_and_lamports(
&self,
slot: Slot,
ancestors: &Ancestors,
total_lamports: u64,
test_hash_calculation: bool,
) -> Result<(), BankHashVerificationError> {
use BankHashVerificationError::*;
let use_index = false;
let check_hash = true;
let can_cached_slot_be_unflushed = false;
let (calculated_hash, calculated_lamports) = self
.calculate_accounts_hash_helper_with_verify(
use_index,
test_hash_calculation,
slot,
ancestors,
None,
can_cached_slot_be_unflushed,
check_hash,
)?;
if calculated_lamports != total_lamports {
warn!(
"Mismatched total lamports: {} calculated: {}",
total_lamports, calculated_lamports
);
return Err(MismatchedTotalLamports(calculated_lamports, total_lamports));
}
let bank_hashes = self.bank_hashes.read().unwrap();
if let Some(found_hash_info) = bank_hashes.get(&slot) {
if calculated_hash == found_hash_info.snapshot_hash {
Ok(())
} else {
warn!(
"mismatched bank hash for slot {}: {} (calculated) != {} (expected)",
slot, calculated_hash, found_hash_info.snapshot_hash
);
Err(MismatchedBankHash)
}
} else {
Err(MissingBankHash)
}
}
/// Perform the scan for pubkeys that were written to in a slot
fn do_scan_slot_for_dirty_pubkeys(
&self,
slot: Slot,
) -> ScanStorageResult<Pubkey, DashSet<Pubkey>> {
self.scan_account_storage(
slot,
|loaded_account: LoadedAccount| Some(*loaded_account.pubkey()),
|accum: &DashSet<Pubkey>, loaded_account: LoadedAccount| {
accum.insert(*loaded_account.pubkey());
},
)
}
/// Reduce the scan result of dirty pubkeys after calling `scan_account_storage()` into a
/// single vec of Pubkeys.
fn do_reduce_scan_slot_for_dirty_pubkeys(
scan_result: ScanStorageResult<Pubkey, DashSet<Pubkey>>,
) -> Vec<Pubkey> {
match scan_result {
ScanStorageResult::Cached(cached_result) => cached_result,
ScanStorageResult::Stored(stored_result) => {
stored_result.into_iter().collect::<Vec<_>>()
}
}
}
/// Scan a slot for dirty pubkeys
fn scan_slot_for_dirty_pubkeys(&self, slot: Slot) -> Vec<Pubkey> {
let dirty_pubkeys = self.do_scan_slot_for_dirty_pubkeys(slot);
Self::do_reduce_scan_slot_for_dirty_pubkeys(dirty_pubkeys)
}
/// Scan a slot in the account storage for dirty pubkeys and insert them into the list of
/// uncleaned pubkeys
///
/// This function is called in Bank::drop() when the bank is _not_ frozen, so that its pubkeys
/// are considered for cleanup.
pub fn scan_slot_and_insert_dirty_pubkeys_into_uncleaned_pubkeys(&self, slot: Slot) {
let dirty_pubkeys = self.scan_slot_for_dirty_pubkeys(slot);
self.uncleaned_pubkeys.insert(slot, dirty_pubkeys);
}
pub fn get_accounts_delta_hash(&self, slot: Slot) -> Hash {
let mut scan = Measure::start("scan");
let scan_result: ScanStorageResult<(Pubkey, Hash), DashMapVersionHash> = self
.scan_account_storage(
slot,
|loaded_account: LoadedAccount| {
// Cache only has one version per key, don't need to worry about versioning
Some((*loaded_account.pubkey(), loaded_account.loaded_hash()))
},
|accum: &DashMap<Pubkey, (u64, Hash)>, loaded_account: LoadedAccount| {
let loaded_write_version = loaded_account.write_version();
let loaded_hash = loaded_account.loaded_hash();
let should_insert =
if let Some(existing_entry) = accum.get(loaded_account.pubkey()) {
loaded_write_version > existing_entry.value().version()
} else {
true
};
if should_insert {
// Insertion was detected as necessary; grab the write lock to commit the write.
match accum.entry(*loaded_account.pubkey()) {
// Double check in case another thread interleaved a write between the read + write.
Occupied(mut occupied_entry) => {
if loaded_write_version > occupied_entry.get().version() {
occupied_entry.insert((loaded_write_version, loaded_hash));
}
}
Vacant(vacant_entry) => {
vacant_entry.insert((loaded_write_version, loaded_hash));
}
}
}
},
);
scan.stop();
let mut accumulate = Measure::start("accumulate");
let hashes: Vec<_> = match scan_result {
ScanStorageResult::Cached(cached_result) => cached_result,
ScanStorageResult::Stored(stored_result) => stored_result
.into_iter()
.map(|(pubkey, (_latest_write_version, hash))| (pubkey, hash))
.collect(),
};
let dirty_keys = hashes.iter().map(|(pubkey, _hash)| *pubkey).collect();
let ret = AccountsHash::accumulate_account_hashes(hashes);
accumulate.stop();
let mut uncleaned_time = Measure::start("uncleaned_index");
self.uncleaned_pubkeys.insert(slot, dirty_keys);
uncleaned_time.stop();
self.stats
.store_uncleaned_update
.fetch_add(uncleaned_time.as_us(), Ordering::Relaxed);
self.stats
.delta_hash_scan_time_total_us
.fetch_add(scan.as_us(), Ordering::Relaxed);
self.stats
.delta_hash_accumulate_time_total_us
.fetch_add(accumulate.as_us(), Ordering::Relaxed);
self.stats.delta_hash_num.fetch_add(1, Ordering::Relaxed);
ret
}
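/// Inserts/updates index entries for the freshly stored accounts at `slot` and returns
/// the reclaimed (superseded) entries, i.e. older versions of the same pubkey in this
/// slot that can now be removed from their backing storage.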
fn update_index(
&self,
slot: Slot,
infos: Vec<AccountInfo>,
accounts: &[(&Pubkey, &impl ReadableAccount)],
) -> SlotList<AccountInfo> {
let mut reclaims = SlotList::<AccountInfo>::with_capacity(infos.len() * 2);
for (info, pubkey_account) in infos.into_iter().zip(accounts.iter()) {
let pubkey = pubkey_account.0;
self.accounts_index.upsert(
slot,
pubkey,
pubkey_account.1.owner(),
pubkey_account.1.data(),
&self.account_indexes,
info,
&mut reclaims,
);
}
reclaims
}
fn should_not_shrink(aligned_bytes: u64, total_bytes: u64, num_stores: usize) -> bool {
aligned_bytes + PAGE_SIZE > total_bytes && num_stores == 1
}
fn is_shrinking_productive(slot: Slot, stores: &[Arc<AccountStorageEntry>]) -> bool {
let mut alive_count = 0;
let mut stored_count = 0;
let mut alive_bytes = 0;
let mut total_bytes = 0;
for store in stores {
alive_count += store.count();
stored_count += store.approx_stored_count();
alive_bytes += store.alive_bytes();
total_bytes += store.total_bytes();
}
let aligned_bytes = Self::page_align(alive_bytes as u64);
if Self::should_not_shrink(aligned_bytes, total_bytes, stores.len()) {
trace!(
"shrink_slot_forced ({}, {}): not able to shrink at all: alive/stored: ({} / {}) ({}b / {}b) save: {}",
slot,
stores.len(),
alive_count,
stored_count,
aligned_bytes,
total_bytes,
total_bytes.saturating_sub(aligned_bytes),
);
return false;
}
true
}
fn is_candidate_for_shrink(&self, store: &Arc<AccountStorageEntry>) -> bool {
match self.shrink_ratio {
AccountShrinkThreshold::TotalSpace { shrink_ratio: _ } => {
Self::page_align(store.alive_bytes() as u64) < store.total_bytes()
}
AccountShrinkThreshold::IndividalStore { shrink_ratio } => {
(Self::page_align(store.alive_bytes() as u64) as f64 / store.total_bytes() as f64)
< shrink_ratio
}
}
}
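/// Processes `reclaims` by decrementing the live-account counts of their storage
/// entries. Stores that drop to zero accounts mark their slot as a dead-slot candidate;
/// stores that merely shrink become shrink candidates (when caching is enabled).
/// Returns the set of slots whose storages contain no live accounts at all.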
fn remove_dead_accounts(
&self,
reclaims: SlotSlice<AccountInfo>,
expected_slot: Option<Slot>,
mut reclaimed_offsets: Option<&mut AppendVecOffsets>,
reset_accounts: bool,
) -> HashSet<Slot> {
let mut dead_slots = HashSet::new();
let mut new_shrink_candidates: ShrinkCandidates = HashMap::new();
for (slot, account_info) in reclaims {
// No cached accounts should make it here
assert_ne!(account_info.store_id, CACHE_VIRTUAL_STORAGE_ID);
if let Some(ref mut reclaimed_offsets) = reclaimed_offsets {
reclaimed_offsets
.entry(account_info.store_id)
.or_default()
.insert(account_info.offset);
}
if let Some(expected_slot) = expected_slot {
assert_eq!(*slot, expected_slot);
}
if let Some(store) = self
.storage
.get_account_storage_entry(*slot, account_info.store_id)
{
assert_eq!(
*slot, store.slot(),
"AccountDB::accounts_index corrupted. Storage pointed to: {}, expected: {}, should only point to one slot",
store.slot(), *slot
);
let count = store.remove_account(account_info.stored_size, reset_accounts);
if count == 0 {
self.dirty_stores
.insert((*slot, store.append_vec_id()), store.clone());
dead_slots.insert(*slot);
} else if self.caching_enabled
&& Self::is_shrinking_productive(*slot, &[store.clone()])
&& self.is_candidate_for_shrink(&store)
{
// Checking that this single storage entry is ready for shrinking,
// should be a sufficient indication that the slot is ready to be shrunk
// because slots should only have one storage entry, namely the one that was
// created by `flush_slot_cache()`.
{
new_shrink_candidates
.entry(*slot)
.or_default()
.insert(store.append_vec_id(), store);
}
}
}
}
if self.caching_enabled {
{
let mut shrink_candidate_slots = self.shrink_candidate_slots.lock().unwrap();
for (slot, slot_shrink_candidates) in new_shrink_candidates {
for (store_id, store) in slot_shrink_candidates {
// count could be == 0 if multiple accounts are removed
// at once
if store.count() != 0 {
debug!(
"adding: {} {} to shrink candidates: count: {}/{} bytes: {}/{}",
store_id,
slot,
store.approx_stored_count(),
store.count(),
store.alive_bytes(),
store.total_bytes()
);
shrink_candidate_slots
.entry(slot)
.or_default()
.insert(store_id, store);
}
}
}
}
}
dead_slots.retain(|slot| {
if let Some(slot_stores) = self.storage.get_slot_stores(*slot) {
for x in slot_stores.read().unwrap().values() {
if x.count() != 0 {
return false;
}
}
}
true
});
dead_slots
}
fn remove_dead_slots_metadata<'a>(
&'a self,
dead_slots_iter: impl Iterator<Item = &'a Slot> + Clone,
purged_slot_pubkeys: HashSet<(Slot, Pubkey)>,
// Should only be `Some` for non-cached slots
purged_stored_account_slots: Option<&mut AccountSlots>,
) {
self.clean_dead_slots_from_accounts_index(
dead_slots_iter.clone(),
purged_slot_pubkeys,
purged_stored_account_slots,
);
{
let mut bank_hashes = self.bank_hashes.write().unwrap();
for slot in dead_slots_iter {
bank_hashes.remove(slot);
}
}
}
fn clean_dead_slots_from_accounts_index<'a>(
&'a self,
dead_slots_iter: impl Iterator<Item = &'a Slot> + Clone,
purged_slot_pubkeys: HashSet<(Slot, Pubkey)>,
// Should only be `Some` for non-cached slots
purged_stored_account_slots: Option<&mut AccountSlots>,
) {
if let Some(purged_stored_account_slots) = purged_stored_account_slots {
for (slot, pubkey) in purged_slot_pubkeys {
purged_stored_account_slots
.entry(pubkey)
.or_default()
.insert(slot);
self.accounts_index.unref_from_storage(&pubkey);
}
}
let mut accounts_index_root_stats = AccountsIndexRootsStats::default();
let mut rooted_cleaned_count = 0;
let mut unrooted_cleaned_count = 0;
let dead_slots: Vec<_> = dead_slots_iter
.map(|slot| {
if let Some(latest) = self.accounts_index.clean_dead_slot(*slot) {
rooted_cleaned_count += 1;
accounts_index_root_stats = latest;
} else {
unrooted_cleaned_count += 1;
}
*slot
})
.collect();
info!("remove_dead_slots_metadata: slots {:?}", dead_slots);
accounts_index_root_stats.rooted_cleaned_count += rooted_cleaned_count;
accounts_index_root_stats.unrooted_cleaned_count += unrooted_cleaned_count;
self.clean_accounts_stats
.latest_accounts_index_roots_stats
.update(&accounts_index_root_stats);
}
fn clean_stored_dead_slots(
&self,
dead_slots: &HashSet<Slot>,
purged_account_slots: Option<&mut AccountSlots>,
) {
let mut measure = Measure::start("clean_stored_dead_slots-ms");
let mut stores: Vec<Arc<AccountStorageEntry>> = vec![];
for slot in dead_slots.iter() {
if let Some(slot_storage) = self.storage.get_slot_stores(*slot) {
for store in slot_storage.read().unwrap().values() {
stores.push(store.clone());
}
}
}
let purged_slot_pubkeys: HashSet<(Slot, Pubkey)> = {
self.thread_pool_clean.install(|| {
stores
.into_par_iter()
.map(|store| {
let accounts = store.all_accounts();
accounts
.into_iter()
.map(|account| (store.slot(), account.meta.pubkey))
.collect::<HashSet<(Slot, Pubkey)>>()
})
.reduce(HashSet::new, |mut reduced, store_pubkeys| {
reduced.extend(store_pubkeys);
reduced
})
})
};
self.remove_dead_slots_metadata(
dead_slots.iter(),
purged_slot_pubkeys,
purged_account_slots,
);
measure.stop();
inc_new_counter_info!("clean_stored_dead_slots-ms", measure.as_ms() as usize);
}
pub(crate) fn freeze_accounts(&mut self, ancestors: &Ancestors, account_pubkeys: &[Pubkey]) {
for account_pubkey in account_pubkeys {
if let Some((account, _slot)) = self.load_without_fixed_root(ancestors, account_pubkey)
{
let frozen_account_info = FrozenAccountInfo {
hash: Self::hash_frozen_account_data(&account),
lamports: account.lamports(),
};
warn!(
"Account {} is now frozen at lamports={}, hash={}",
account_pubkey, frozen_account_info.lamports, frozen_account_info.hash
);
self.frozen_accounts
.insert(*account_pubkey, frozen_account_info);
} else {
panic!(
"Unable to freeze an account that does not exist: {}",
account_pubkey
);
}
}
}
/// Cause a panic if frozen accounts would be affected by data in `accounts`
fn assert_frozen_accounts(&self, accounts: &[(&Pubkey, &AccountSharedData)]) {
if self.frozen_accounts.is_empty() {
return;
}
for (account_pubkey, account) in accounts.iter() {
if let Some(frozen_account_info) = self.frozen_accounts.get(*account_pubkey) {
if account.lamports() < frozen_account_info.lamports {
FROZEN_ACCOUNT_PANIC.store(true, Ordering::Relaxed);
panic!(
"Frozen account {} modified. Lamports decreased from {} to {}",
account_pubkey,
frozen_account_info.lamports,
account.lamports(),
)
}
let hash = Self::hash_frozen_account_data(account);
if hash != frozen_account_info.hash {
FROZEN_ACCOUNT_PANIC.store(true, Ordering::Relaxed);
panic!(
"Frozen account {} modified. Hash changed from {} to {}",
account_pubkey, frozen_account_info.hash, hash,
)
}
}
}
}
pub fn store_cached(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)]) {
self.store(slot, accounts, self.caching_enabled);
}
/// Store the account update.
pub fn store_uncached(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)]) {
self.store(slot, accounts, false);
}
fn store(&self, slot: Slot, accounts: &[(&Pubkey, &AccountSharedData)], is_cached_store: bool) {
// If all transactions in a batch are errored,
// it's possible to get a store with no accounts.
if accounts.is_empty() {
return;
}
self.assert_frozen_accounts(accounts);
let mut stats = BankHashStats::default();
let mut total_data = 0;
accounts.iter().for_each(|(_pubkey, account)| {
total_data += account.data().len();
stats.update(*account);
});
self.stats
.store_total_data
.fetch_add(total_data as u64, Ordering::Relaxed);
let mut bank_hashes = self.bank_hashes.write().unwrap();
let slot_info = bank_hashes
.entry(slot)
.or_insert_with(BankHashInfo::default);
slot_info.stats.merge(&stats);
// we use default hashes for now since the same account may be stored to the cache multiple times
self.store_accounts_unfrozen(slot, accounts, None, is_cached_store);
self.report_store_timings();
}
fn report_store_timings(&self) {
let last = self.stats.last_store_report.load(Ordering::Relaxed);
let now = solana_sdk::timing::timestamp();
if now.saturating_sub(last) > 1000
&& self.stats.last_store_report.compare_exchange(
last,
now,
Ordering::Relaxed,
Ordering::Relaxed,
) == Ok(last)
{
let (read_only_cache_hits, read_only_cache_misses) =
self.read_only_accounts_cache.get_and_reset_stats();
datapoint_info!(
"accounts_db_store_timings",
(
"hash_accounts",
self.stats.store_hash_accounts.swap(0, Ordering::Relaxed),
i64
),
(
"store_accounts",
self.stats.store_accounts.swap(0, Ordering::Relaxed),
i64
),
(
"update_index",
self.stats.store_update_index.swap(0, Ordering::Relaxed),
i64
),
(
"handle_reclaims",
self.stats.store_handle_reclaims.swap(0, Ordering::Relaxed),
i64
),
(
"append_accounts",
self.stats.store_append_accounts.swap(0, Ordering::Relaxed),
i64
),
(
"find_storage",
self.stats.store_find_store.swap(0, Ordering::Relaxed),
i64
),
(
"num_accounts",
self.stats.store_num_accounts.swap(0, Ordering::Relaxed),
i64
),
(
"total_data",
self.stats.store_total_data.swap(0, Ordering::Relaxed),
i64
),
(
"read_only_accounts_cache_entries",
self.read_only_accounts_cache.cache_len(),
i64
),
(
"read_only_accounts_cache_data_size",
self.read_only_accounts_cache.data_size(),
i64
),
("read_only_accounts_cache_hits", read_only_cache_hits, i64),
(
"read_only_accounts_cache_misses",
read_only_cache_misses,
i64
),
(
"calc_stored_meta_us",
self.stats.calc_stored_meta.swap(0, Ordering::Relaxed),
i64
),
);
let recycle_stores = self.recycle_stores.read().unwrap();
datapoint_info!(
"accounts_db_store_timings2",
(
"recycle_store_count",
self.stats.recycle_store_count.swap(0, Ordering::Relaxed),
i64
),
(
"current_recycle_store_count",
recycle_stores.entry_count(),
i64
),
(
"current_recycle_store_bytes",
recycle_stores.total_bytes(),
i64
),
(
"create_store_count",
self.stats.create_store_count.swap(0, Ordering::Relaxed),
i64
),
(
"store_get_slot_store",
self.stats.store_get_slot_store.swap(0, Ordering::Relaxed),
i64
),
(
"store_find_existing",
self.stats.store_find_existing.swap(0, Ordering::Relaxed),
i64
),
(
"dropped_stores",
self.stats.dropped_stores.swap(0, Ordering::Relaxed),
i64
),
);
}
}
fn store_accounts_unfrozen(
&self,
slot: Slot,
accounts: &[(&Pubkey, &AccountSharedData)],
hashes: Option<&[&Hash]>,
is_cached_store: bool,
) {
// This path comes from a store to a non-frozen slot.
// If a store is dead here, then a newer update for
// each pubkey in the store must exist in another
// store in the slot. Thus it is safe to reset the store and
// re-use it for a future store op. The pubkey ref counts should still
// hold just 1 ref from this slot.
let reset_accounts = true;
self.store_accounts_custom(
slot,
accounts,
hashes,
None::<StorageFinder>,
None::<Box<dyn Iterator<Item = u64>>>,
is_cached_store,
reset_accounts,
);
}
fn store_accounts_frozen<'a>(
&'a self,
slot: Slot,
accounts: &[(&Pubkey, &impl ReadableAccount)],
hashes: Option<&[impl Borrow<Hash>]>,
storage_finder: Option<StorageFinder<'a>>,
write_version_producer: Option<Box<dyn Iterator<Item = StoredMetaWriteVersion>>>,
) -> StoreAccountsTiming {
// stores on a frozen slot should not reset
// the append vec so that hashing could happen on the store
// and accounts in the append_vec can be unrefed correctly
let reset_accounts = false;
let is_cached_store = false;
self.store_accounts_custom(
slot,
accounts,
hashes,
storage_finder,
write_version_producer,
is_cached_store,
reset_accounts,
)
}
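/// Common store path used by both frozen and unfrozen stores: writes the accounts (to
/// cache or storage), updates the accounts index, filters out cache-only reclaims, and
/// handles reclaims for this single slot. Returns the timing of each of the three phases.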
fn store_accounts_custom<'a>(
&'a self,
slot: Slot,
accounts: &[(&Pubkey, &impl ReadableAccount)],
hashes: Option<&[impl Borrow<Hash>]>,
storage_finder: Option<StorageFinder<'a>>,
write_version_producer: Option<Box<dyn Iterator<Item = u64>>>,
is_cached_store: bool,
reset_accounts: bool,
) -> StoreAccountsTiming {
let storage_finder: StorageFinder<'a> = storage_finder
.unwrap_or_else(|| Box::new(move |slot, size| self.find_storage_candidate(slot, size)));
let write_version_producer: Box<dyn Iterator<Item = u64>> = write_version_producer
.unwrap_or_else(|| {
let mut current_version = self.bulk_assign_write_version(accounts.len());
Box::new(std::iter::from_fn(move || {
let ret = current_version;
current_version += 1;
Some(ret)
}))
});
self.stats
.store_num_accounts
.fetch_add(accounts.len() as u64, Ordering::Relaxed);
let mut store_accounts_time = Measure::start("store_accounts");
let infos = self.store_accounts_to(
slot,
accounts,
hashes,
storage_finder,
write_version_producer,
is_cached_store,
);
store_accounts_time.stop();
self.stats
.store_accounts
.fetch_add(store_accounts_time.as_us(), Ordering::Relaxed);
let mut update_index_time = Measure::start("update_index");
// If the cache was flushed, then because `update_index` occurs
// after the accounts are stored by the above `store_accounts_to`
// call, all reads after this point
// will know not to check the cache anymore.
let mut reclaims = self.update_index(slot, infos, accounts);
// For each updated account, `reclaims` should only have at most one
// item (if the account was previously updated in this slot).
// filter out the cached reclaims as those don't actually map
// to anything that needs to be cleaned in the backing storage
// entries
if self.caching_enabled {
reclaims.retain(|(_, r)| r.store_id != CACHE_VIRTUAL_STORAGE_ID);
if is_cached_store {
assert!(reclaims.is_empty());
}
}
update_index_time.stop();
self.stats
.store_update_index
.fetch_add(update_index_time.as_us(), Ordering::Relaxed);
// A store for a single slot should:
// 1) Only make "reclaims" for the same slot
// 2) Should not cause any slots to be removed from the storage
// database because
// a) this slot has at least one account (the one being stored),
// b) From 1) we know no other slots are included in the "reclaims"
//
// From 1) and 2) we guarantee passing `no_purge_stats` == None, which is
// equivalent to asserting there will be no dead slots, is safe.
let no_purge_stats = None;
let mut handle_reclaims_time = Measure::start("handle_reclaims");
self.handle_reclaims(&reclaims, Some(slot), no_purge_stats, None, reset_accounts);
handle_reclaims_time.stop();
self.stats
.store_handle_reclaims
.fetch_add(handle_reclaims_time.as_us(), Ordering::Relaxed);
StoreAccountsTiming {
store_accounts_elapsed: store_accounts_time.as_us(),
update_index_elapsed: update_index_time.as_us(),
handle_reclaims_elapsed: handle_reclaims_time.as_us(),
}
}
pub fn add_root(&self, slot: Slot) {
self.accounts_index.add_root(slot, self.caching_enabled);
if self.caching_enabled {
self.accounts_cache.add_root(slot);
}
if let Some(slot_stores) = self.storage.get_slot_stores(slot) {
for (store_id, store) in slot_stores.read().unwrap().iter() {
self.dirty_stores.insert((slot, *store_id), store.clone());
}
}
}
pub fn get_snapshot_storages(
&self,
snapshot_slot: Slot,
ancestors: Option<&Ancestors>,
) -> (SnapshotStorages, Vec<Slot>) {
let mut m = Measure::start("get slots");
let slots = self
.storage
.0
.iter()
.map(|k| *k.key() as Slot)
.collect::<Vec<_>>();
m.stop();
let mut m2 = Measure::start("filter");
let chunk_size = 5_000;
let wide = self.thread_pool_clean.install(|| {
slots
.par_chunks(chunk_size)
.map(|slots| {
slots
.iter()
.filter_map(|slot| {
if *slot <= snapshot_slot
&& (self.accounts_index.is_root(*slot)
|| ancestors
.map(|ancestors| ancestors.contains_key(slot))
.unwrap_or_default())
{
self.storage.0.get(slot).map_or_else(
|| None,
|item| {
let storages = item
.value()
.read()
.unwrap()
.values()
.filter(|x| x.has_accounts())
.cloned()
.collect::<Vec<_>>();
if !storages.is_empty() {
Some((storages, *slot))
} else {
None
}
},
)
} else {
None
}
})
.collect::<Vec<(SnapshotStorage, Slot)>>()
})
.collect::<Vec<_>>()
});
m2.stop();
let mut m3 = Measure::start("flatten");
// some slots we found above may not have been a root or met the slot # constraint.
// So the resulting 'slots' vector we return will be a subset of the raw keys we got initially.
let mut slots = Vec::with_capacity(slots.len());
let result = wide
.into_iter()
.flatten()
.map(|(storage, slot)| {
slots.push(slot);
storage
})
.collect::<Vec<_>>();
m3.stop();
debug!(
"hash_total: get slots: {}, filter: {}, flatten: {}",
m.as_us(),
m2.as_us(),
m3.as_us()
);
(result, slots)
}
fn process_storage_slot(
storage_maps: &[Arc<AccountStorageEntry>],
) -> GenerateIndexAccountsMap<'_> {
let num_accounts = storage_maps
.iter()
.map(|storage| storage.approx_stored_count())
.sum();
let mut accounts_map = GenerateIndexAccountsMap::with_capacity(num_accounts);
storage_maps.iter().for_each(|storage| {
let accounts = storage.all_accounts();
accounts.into_iter().for_each(|stored_account| {
let this_version = stored_account.meta.write_version;
match accounts_map.entry(stored_account.meta.pubkey) {
std::collections::hash_map::Entry::Vacant(entry) => {
entry.insert((this_version, storage.append_vec_id(), stored_account));
}
std::collections::hash_map::Entry::Occupied(mut entry) => {
let occupied_version = entry.get().0;
if occupied_version < this_version {
entry.insert((this_version, storage.append_vec_id(), stored_account));
} else {
assert!(occupied_version != this_version);
}
}
}
})
});
accounts_map
}
fn generate_index_for_slot<'a>(
&self,
accounts_map: GenerateIndexAccountsMap<'a>,
slot: &Slot,
) -> u64 {
if accounts_map.is_empty() {
return 0;
}
let secondary = !self.account_indexes.is_empty();
let len = accounts_map.len();
let items = accounts_map
.into_iter()
.map(|(pubkey, (_, store_id, stored_account))| {
if secondary {
self.accounts_index.update_secondary_indexes(
&pubkey,
&stored_account.account_meta.owner,
stored_account.data,
&self.account_indexes,
);
}
(
pubkey,
AccountInfo {
store_id,
offset: stored_account.offset,
stored_size: stored_account.stored_size,
lamports: stored_account.account_meta.lamports,
},
)
});
let (dirty_pubkeys, insert_us) = self
.accounts_index
.insert_new_if_missing_into_primary_index(*slot, len, items);
// dirty_pubkeys will contain a pubkey if an item has multiple rooted entries for
// a given pubkey. If there is just a single item, there is no cleaning to
// be done on that pubkey. Use only those pubkeys with multiple updates.
if !dirty_pubkeys.is_empty() {
self.uncleaned_pubkeys.insert(*slot, dirty_pubkeys);
}
insert_us
}
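/// Builds the accounts index from on-disk storages at startup. Pass 0 scans each slot's
/// storages, keeps the highest write_version per pubkey, inserts the entries into the
/// index, and finally adds each slot as a root; the optional second pass (when `verify`
/// is true) re-reads every slot and asserts that the index holds exactly the expected
/// entry for each stored account.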
#[allow(clippy::needless_collect)]
pub fn generate_index(&self, limit_load_slot_count_from_snapshot: Option<usize>, verify: bool) {
let mut slots = self.storage.all_slots();
#[allow(clippy::stable_sort_primitive)]
slots.sort();
if let Some(limit) = limit_load_slot_count_from_snapshot {
slots.truncate(limit); // get rid of the newer slots and keep just the older
}
// pass == 0 always runs and generates the index
// pass == 1 only runs if verify == true.
// verify checks that all the expected items are in the accounts index and measures how long it takes to look them all up
let passes = if verify { 2 } else { 1 };
for pass in 0..passes {
let total_processed_slots_across_all_threads = AtomicU64::new(0);
let outer_slots_len = slots.len();
let chunk_size = (outer_slots_len / 7) + 1; // approximately 400k slots in a snapshot
let mut index_time = Measure::start("index");
let insertion_time_us = AtomicU64::new(0);
let scan_time: u64 = slots
.par_chunks(chunk_size)
.map(|slots| {
let mut log_status = MultiThreadProgress::new(
&total_processed_slots_across_all_threads,
2,
outer_slots_len as u64,
);
let mut scan_time_sum = 0;
for (index, slot) in slots.iter().enumerate() {
let mut scan_time = Measure::start("scan");
log_status.report(index as u64);
let storage_maps: Vec<Arc<AccountStorageEntry>> = self
.storage
.get_slot_storage_entries(*slot)
.unwrap_or_default();
let accounts_map = Self::process_storage_slot(&storage_maps);
scan_time.stop();
scan_time_sum += scan_time.as_us();
let insert_us = if pass == 0 {
// generate index
self.generate_index_for_slot(accounts_map, slot)
} else {
// verify index matches expected and measure the time to get all items
assert!(verify);
let mut lookup_time = Measure::start("lookup_time");
for account in accounts_map.into_iter() {
let (key, account_info) = account;
let lock = self.accounts_index.get_account_maps_read_lock(&key);
let x = lock.get(&key).unwrap();
let sl = x.slot_list.read().unwrap();
let mut count = 0;
for (slot2, account_info2) in sl.iter() {
if slot2 == slot {
count += 1;
let ai = AccountInfo {
store_id: account_info.1,
offset: account_info.2.offset,
stored_size: account_info.2.stored_size,
lamports: account_info.2.account_meta.lamports,
};
assert_eq!(&ai, account_info2);
}
}
assert_eq!(1, count);
}
lookup_time.stop();
lookup_time.as_us()
};
insertion_time_us.fetch_add(insert_us, Ordering::Relaxed);
}
scan_time_sum
})
.sum();
index_time.stop();
let mut min_bin_size = usize::MAX;
let mut max_bin_size = usize::MIN;
let total_items = self
.accounts_index
.account_maps
.iter()
.map(|map_bin| {
let len = map_bin.read().unwrap().len();
min_bin_size = std::cmp::min(min_bin_size, len);
max_bin_size = std::cmp::max(max_bin_size, len);
len
})
.sum();
let mut timings = GenerateIndexTimings {
scan_time,
index_time: index_time.as_us(),
insertion_time_us: insertion_time_us.load(Ordering::Relaxed),
min_bin_size,
max_bin_size,
total_items,
..GenerateIndexTimings::default()
};
if pass == 0 {
// Need to add these last, otherwise older updates will be cleaned
for slot in &slots {
self.accounts_index.add_root(*slot, false);
}
self.initialize_storage_count_and_alive_bytes(&mut timings);
}
timings.report();
}
}
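// Walk every entry in the accounts index and accumulate, per store id, the
// total stored size and the number of alive accounts.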
fn calculate_storage_count_and_alive_bytes(
&self,
timings: &mut GenerateIndexTimings,
) -> HashMap<usize, (usize, usize)> {
// look at every account in the account index and calculate for each storage: stored_size and count
let mut storage_size_accounts_map_time = Measure::start("storage_size_accounts_map");
let mut maps = self
.accounts_index
.account_maps
.par_iter()
.map(|bin_map| {
let mut stored_sizes_and_counts = HashMap::new();
bin_map.read().unwrap().values().for_each(|entry| {
entry
.slot_list
.read()
.unwrap()
.iter()
.for_each(|(_slot, account_entry)| {
let storage_entry_meta = stored_sizes_and_counts
.entry(account_entry.store_id)
.or_insert((0, 0));
storage_entry_meta.0 += account_entry.stored_size;
storage_entry_meta.1 += 1;
})
});
stored_sizes_and_counts
})
.collect::<Vec<_>>();
storage_size_accounts_map_time.stop();
timings.storage_size_accounts_map_us = storage_size_accounts_map_time.as_us();
// flatten/merge the HashMaps from the parallel iteration above
let mut storage_size_accounts_map_flatten_time =
Measure::start("storage_size_accounts_map_flatten_time");
let mut stored_sizes_and_counts = maps.pop().unwrap_or_default();
for map in maps {
for (store_id, meta) in map.into_iter() {
let storage_entry_meta = stored_sizes_and_counts.entry(store_id).or_insert((0, 0));
storage_entry_meta.0 += meta.0;
storage_entry_meta.1 += meta.1;
}
}
storage_size_accounts_map_flatten_time.stop();
timings.storage_size_accounts_map_flatten_us =
storage_size_accounts_map_flatten_time.as_us();
stored_sizes_and_counts
}
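// Apply the per-store (stored_size, count) totals computed above to each
// AccountStorageEntry, setting its count and alive_bytes; stores absent from
// the map have their count cleared to zero.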
fn set_storage_count_and_alive_bytes(
&self,
stored_sizes_and_counts: HashMap<usize, (usize, usize)>,
timings: &mut GenerateIndexTimings,
) {
// store count and size for each storage
let mut storage_size_storages_time = Measure::start("storage_size_storages");
for slot_stores in self.storage.0.iter() {
for (id, store) in slot_stores.value().read().unwrap().iter() {
// Should be default at this point
assert_eq!(store.alive_bytes(), 0);
if let Some((stored_size, count)) = stored_sizes_and_counts.get(id) {
trace!("id: {} setting count: {} cur: {}", id, count, store.count(),);
store.count_and_status.write().unwrap().0 = *count;
store.alive_bytes.store(*stored_size, Ordering::SeqCst);
} else {
trace!("id: {} clearing count", id);
store.count_and_status.write().unwrap().0 = 0;
}
}
}
storage_size_storages_time.stop();
timings.storage_size_storages_us = storage_size_storages_time.as_us();
}
fn initialize_storage_count_and_alive_bytes(&self, timings: &mut GenerateIndexTimings) {
let stored_sizes_and_counts = self.calculate_storage_count_and_alive_bytes(timings);
self.set_storage_count_and_alive_bytes(stored_sizes_and_counts, timings);
}
pub(crate) fn print_accounts_stats(&self, label: &str) {
self.print_index(label);
self.print_count_and_status(label);
info!("recycle_stores:");
let recycle_stores = self.recycle_stores.read().unwrap();
for (recycled_time, entry) in recycle_stores.iter() {
info!(
" slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {} (recycled: {:?})",
entry.slot(),
entry.append_vec_id(),
*entry.count_and_status.read().unwrap(),
entry.approx_store_count.load(Ordering::Relaxed),
entry.accounts.len(),
entry.accounts.capacity(),
recycled_time,
);
}
}
fn print_index(&self, label: &str) {
let mut roots: Vec<_> = self.accounts_index.all_roots();
#[allow(clippy::stable_sort_primitive)]
roots.sort();
info!("{}: accounts_index roots: {:?}", label, roots,);
self.accounts_index.account_maps.iter().for_each(|i| {
for (pubkey, account_entry) in i.read().unwrap().iter() {
info!(" key: {} ref_count: {}", pubkey, account_entry.ref_count(),);
info!(
" slots: {:?}",
*account_entry.slot_list.read().unwrap()
);
}
});
}
fn print_count_and_status(&self, label: &str) {
let mut slots: Vec<_> = self.storage.all_slots();
#[allow(clippy::stable_sort_primitive)]
slots.sort();
info!("{}: count_and status for {} slots:", label, slots.len());
for slot in &slots {
let slot_stores = self.storage.get_slot_stores(*slot).unwrap();
let r_slot_stores = slot_stores.read().unwrap();
let mut ids: Vec<_> = r_slot_stores.keys().cloned().collect();
#[allow(clippy::stable_sort_primitive)]
ids.sort();
for id in &ids {
let entry = r_slot_stores.get(id).unwrap();
info!(
" slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {}",
slot,
id,
*entry.count_and_status.read().unwrap(),
entry.approx_store_count.load(Ordering::Relaxed),
entry.accounts.len(),
entry.accounts.capacity(),
);
}
}
}
}
#[cfg(test)]
impl AccountsDb {
pub fn new_sized(paths: Vec<PathBuf>, file_size: u64) -> Self {
AccountsDb {
file_size,
..AccountsDb::new(paths, &ClusterType::Development)
}
}
pub fn new_sized_no_extra_stores(paths: Vec<PathBuf>, file_size: u64) -> Self {
AccountsDb {
file_size,
min_num_stores: 0,
..AccountsDb::new(paths, &ClusterType::Development)
}
}
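// Test helper: return the append vec id of the store holding `pubkey`'s entry
// as seen from `slot`.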
pub fn get_append_vec_id(&self, pubkey: &Pubkey, slot: Slot) -> Option<AppendVecId> {
let ancestors = vec![(slot, 1)].into_iter().collect();
let result = self.accounts_index.get(pubkey, Some(&ancestors), None);
result.map(|(list, index)| list.slot_list()[index].1.store_id)
}
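// Test helper: sum the alive account counts across all stores for `slot`.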
pub fn alive_account_count_in_slot(&self, slot: Slot) -> usize {
self.storage
.get_slot_stores(slot)
.map(|storages| storages.read().unwrap().values().map(|s| s.count()).sum())
.unwrap_or(0)
}
}
/// Legacy shrink functions to support the non-cached path.
/// Can be deleted once the cached path is the only path.
impl AccountsDb {
// Reads all accounts in given slot's AppendVecs and filter only to alive,
// then create a minimum AppendVec filled with the alive.
// v1 path shrinks all stores in the slot
//
// Requires all stores in the slot to be re-written otherwise the accounts_index
// store ref count could become incorrect.
fn do_shrink_slot_v1(&self, slot: Slot, forced: bool) -> usize {
trace!("shrink_stale_slot: slot: {}", slot);
if let Some(stores_lock) = self.storage.get_slot_stores(slot) {
let stores: Vec<_> = stores_lock.read().unwrap().values().cloned().collect();
let mut alive_count = 0;
let mut stored_count = 0;
let mut written_bytes = 0;
let mut total_bytes = 0;
for store in &stores {
alive_count += store.count();
stored_count += store.approx_stored_count();
written_bytes += store.written_bytes();
total_bytes += store.total_bytes();
}
if alive_count == stored_count && stores.len() == 1 {
trace!(
"shrink_stale_slot ({}): not able to shrink at all: alive/stored: {} / {} {}",
slot,
alive_count,
stored_count,
if forced { " (forced)" } else { "" },
);
return 0;
} else if !forced {
let sparse_by_count = (alive_count as f32 / stored_count as f32) <= 0.8;
let sparse_by_bytes = (written_bytes as f32 / total_bytes as f32) <= 0.8;
let not_sparse = !sparse_by_count && !sparse_by_bytes;
let too_small_to_shrink = total_bytes <= PAGE_SIZE;
if not_sparse || too_small_to_shrink {
return 0;
}
info!(
"shrink_stale_slot ({}): not_sparse: {} count: {}/{} byte: {}/{}",
slot, not_sparse, alive_count, stored_count, written_bytes, total_bytes,
);
}
self.do_shrink_slot_stores(slot, stores.iter(), false)
} else {
0
}
}
fn do_reset_uncleaned_roots_v1(
&self,
candidates: &mut MutexGuard<Vec<Slot>>,
max_clean_root: Option<Slot>,
) {
let previous_roots = self.accounts_index.reset_uncleaned_roots(max_clean_root);
candidates.extend(previous_roots);
}
#[cfg(test)]
fn reset_uncleaned_roots_v1(&self) {
self.do_reset_uncleaned_roots_v1(&mut self.shrink_candidate_slots_v1.lock().unwrap(), None);
}
fn do_shrink_stale_slot_v1(&self, slot: Slot) -> usize {
self.do_shrink_slot_v1(slot, false)
}
fn do_shrink_slot_forced_v1(&self, slot: Slot) {
self.do_shrink_slot_v1(slot, true);
}
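// Shrink stale candidate slots one at a time until roughly 100ms have elapsed
// or about 10% of the roots have been processed. Returns the total number of
// shrunken accounts, or 0 as soon as no further candidate slot is available.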
fn shrink_stale_slot_v1(&self, candidates: &mut MutexGuard<Vec<Slot>>) -> usize {
let mut shrunken_account_total = 0;
let mut shrunk_slot_count = 0;
let start = Instant::now();
let num_roots = self.accounts_index.num_roots();
loop {
if let Some(slot) = self.do_next_shrink_slot_v1(candidates) {
shrunken_account_total += self.do_shrink_stale_slot_v1(slot);
} else {
return 0;
}
if start.elapsed().as_millis() > 100 || shrunk_slot_count > num_roots / 10 {
debug!(
"do_shrink_stale_slot_v1: {} {} {}us",
shrunk_slot_count,
candidates.len(),
start.elapsed().as_micros()
);
break;
}
shrunk_slot_count += 1;
}
shrunken_account_total
}
// Infinitely returns rooted slots in cyclic order
fn do_next_shrink_slot_v1(&self, candidates: &mut MutexGuard<Vec<Slot>>) -> Option<Slot> {
// At this point, a lock (= candidates) is ensured to be held to keep
// do_reset_uncleaned_roots() (in clean_accounts()) from updating candidates.
// Also, candidates in the lock may be swapped here if it's empty.
let next = candidates.pop();
if next.is_some() {
next
} else {
let mut new_all_slots = self.all_root_slots_in_index();
let next = new_all_slots.pop();
// refresh candidates for later calls!
**candidates = new_all_slots;
next
}
}
#[cfg(test)]
fn next_shrink_slot_v1(&self) -> Option<Slot> {
let mut candidates = self.shrink_candidate_slots_v1.lock().unwrap();
self.do_next_shrink_slot_v1(&mut candidates)
}
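// Try to shrink one batch of stale slots on the v1 (non-cached) path. If the
// candidate list is currently locked by clean_accounts(), skip this round and
// let the background thread retry later.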
pub fn process_stale_slot_v1(&self) -> usize {
let mut measure = Measure::start("stale_slot_shrink-ms");
let candidates = self.shrink_candidate_slots_v1.try_lock();
if candidates.is_err() {
// skip and return immediately if locked by clean_accounts();
// the calling background thread will just retry later.
return 0;
}
// hold this lock as long as this shrinking process is running to avoid conflicts
// with clean_accounts().
let mut candidates = candidates.unwrap();
let count = self.shrink_stale_slot_v1(&mut candidates);
measure.stop();
inc_new_counter_info!("stale_slot_shrink-ms", measure.as_ms() as usize);
count
}
#[cfg(test)]
fn shrink_all_stale_slots_v1(&self) {
for slot in self.all_slots_in_storage() {
self.do_shrink_stale_slot_v1(slot);
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::{
accounts_hash::MERKLE_FANOUT,
accounts_index::RefCount,
accounts_index::{tests::*, AccountSecondaryIndexesIncludeExclude},
append_vec::{test_utils::TempFile, AccountMeta},
inline_spl_token_v2_0,
};
use assert_matches::assert_matches;
use rand::{thread_rng, Rng};
use solana_sdk::{
account::{accounts_equal, Account, AccountSharedData, ReadableAccount, WritableAccount},
hash::HASH_BYTES,
pubkey::PUBKEY_BYTES,
};
use std::{
iter::FromIterator,
str::FromStr,
thread::{self, sleep, Builder, JoinHandle},
time::Duration,
};
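// Test helper: build an Ancestors set covering slots 0..end_slot, simulating a
// linear (non-forking) chain.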
fn linear_ancestors(end_slot: u64) -> Ancestors {
let mut ancestors: Ancestors = vec![(0, 0)].into_iter().collect();
for i in 1..end_slot {
ancestors.insert(i, (i - 1) as usize);
}
ancestors
}
fn empty_storages<'a>() -> SortedStorages<'a> {
SortedStorages::new(&[])
}
impl AccountsDb {
fn scan_snapshot_stores(
storage: &SortedStorages,
stats: &mut crate::accounts_hash::HashStats,
bins: usize,
bin_range: &Range<usize>,
check_hash: bool,
) -> Result<Vec<Vec<Vec<CalculateHashIntermediate>>>, BankHashVerificationError> {
Self::scan_snapshot_stores_with_cache(storage, stats, bins, bin_range, check_hash, None)
}
}
#[test]
#[should_panic(
expected = "bin_range.start < bins && bin_range.end <= bins &&\\n bin_range.start < bin_range.end"
)]
fn test_accountsdb_scan_snapshot_stores_illegal_range_start() {
let mut stats = HashStats::default();
let bounds = Range { start: 2, end: 2 };
AccountsDb::scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds, false).unwrap();
}
#[test]
#[should_panic(
expected = "bin_range.start < bins && bin_range.end <= bins &&\\n bin_range.start < bin_range.end"
)]
fn test_accountsdb_scan_snapshot_stores_illegal_range_end() {
let mut stats = HashStats::default();
let bounds = Range { start: 1, end: 3 };
AccountsDb::scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds, false).unwrap();
}
#[test]
#[should_panic(
expected = "bin_range.start < bins && bin_range.end <= bins &&\\n bin_range.start < bin_range.end"
)]
fn test_accountsdb_scan_snapshot_stores_illegal_range_inverse() {
let mut stats = HashStats::default();
let bounds = Range { start: 1, end: 0 };
AccountsDb::scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds, false).unwrap();
}
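// Test helper: store four accounts with known pubkeys and lamports at `slot`,
// root the slot, and return the resulting snapshot storages along with the
// expected CalculateHashIntermediate values.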
fn sample_storages_and_account_in_slot(
slot: Slot,
) -> (SnapshotStorages, Vec<CalculateHashIntermediate>) {
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey0 = Pubkey::new(&[0u8; 32]);
let pubkey127 = Pubkey::new(&[0x7fu8; 32]);
let pubkey128 = Pubkey::new(&[0x80u8; 32]);
let pubkey255 = Pubkey::new(&[0xffu8; 32]);
let mut raw_expected = vec![
CalculateHashIntermediate::new_without_slot(Hash::default(), 1, pubkey0),
CalculateHashIntermediate::new_without_slot(Hash::default(), 128, pubkey127),
CalculateHashIntermediate::new_without_slot(Hash::default(), 129, pubkey128),
CalculateHashIntermediate::new_without_slot(Hash::default(), 256, pubkey255),
];
let expected_hashes = vec![
Hash::from_str("5K3NW73xFHwgTWVe4LyCg4QfQda8f88uZj2ypDx2kmmH").unwrap(),
Hash::from_str("84ozw83MZ8oeSF4hRAg7SeW1Tqs9LMXagX1BrDRjtZEx").unwrap(),
Hash::from_str("5XqtnEJ41CG2JWNp7MAg9nxkRUAnyjLxfsKsdrLxQUbC").unwrap(),
Hash::from_str("DpvwJcznzwULYh19Zu5CuAA4AT6WTBe4H6n15prATmqj").unwrap(),
];
let mut raw_accounts = Vec::default();
for i in 0..raw_expected.len() {
raw_accounts.push(AccountSharedData::new(
raw_expected[i].lamports,
1,
AccountSharedData::default().owner(),
));
let hash = AccountsDb::hash_account(slot, &raw_accounts[i], &raw_expected[i].pubkey);
if slot == 1 {
assert_eq!(hash, expected_hashes[i]);
}
raw_expected[i].hash = hash;
}
let to_store = raw_accounts
.iter()
.zip(raw_expected.iter())
.map(|(account, intermediate)| (&intermediate.pubkey, account))
.collect::<Vec<_>>();
accounts.store_uncached(slot, &to_store[..]);
accounts.add_root(slot);
let (storages, slots) = accounts.get_snapshot_storages(slot, None);
assert_eq!(storages.len(), slots.len());
storages
.iter()
.zip(slots.iter())
.for_each(|(storages, slot)| {
for storage in storages {
assert_eq!(&storage.slot(), slot);
}
});
(storages, raw_expected)
}
fn sample_storages_and_accounts() -> (SnapshotStorages, Vec<CalculateHashIntermediate>) {
sample_storages_and_account_in_slot(1)
}
fn get_storage_refs(input: &[SnapshotStorage]) -> SortedStorages {
SortedStorages::new(input)
}
#[test]
fn test_accountsdb_scan_snapshot_stores() {
solana_logger::setup();
let (storages, raw_expected) = sample_storages_and_accounts();
let bins = 1;
let mut stats = HashStats::default();
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: 0,
end: bins,
},
false,
)
.unwrap();
assert_eq!(result, vec![vec![raw_expected.clone()]]);
let bins = 2;
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: 0,
end: bins,
},
false,
)
.unwrap();
let mut expected = vec![Vec::new(); bins];
expected[0].push(raw_expected[0].clone());
expected[0].push(raw_expected[1].clone());
expected[bins - 1].push(raw_expected[2].clone());
expected[bins - 1].push(raw_expected[3].clone());
assert_eq!(result, vec![expected]);
let bins = 4;
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: 0,
end: bins,
},
false,
)
.unwrap();
let mut expected = vec![Vec::new(); bins];
expected[0].push(raw_expected[0].clone());
expected[1].push(raw_expected[1].clone());
expected[2].push(raw_expected[2].clone());
expected[bins - 1].push(raw_expected[3].clone());
assert_eq!(result, vec![expected]);
let bins = 256;
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: 0,
end: bins,
},
false,
)
.unwrap();
let mut expected = vec![Vec::new(); bins];
expected[0].push(raw_expected[0].clone());
expected[127].push(raw_expected[1].clone());
expected[128].push(raw_expected[2].clone());
expected[bins - 1].push(raw_expected.last().unwrap().clone());
assert_eq!(result, vec![expected]);
}
#[test]
fn test_accountsdb_scan_snapshot_stores_2nd_chunk() {
// enough stores to get to 2nd chunk
let bins = 1;
const MAX_ITEMS_PER_CHUNK: usize = 5_000;
let slot = MAX_ITEMS_PER_CHUNK as Slot;
let (storages, raw_expected) = sample_storages_and_account_in_slot(slot);
let storage_data = vec![(&storages[0], slot)];
let sorted_storages =
SortedStorages::new_debug(&storage_data[..], 0, MAX_ITEMS_PER_CHUNK + 1);
let mut stats = HashStats::default();
let result = AccountsDb::scan_snapshot_stores(
&sorted_storages,
&mut stats,
bins,
&Range {
start: 0,
end: bins,
},
false,
)
.unwrap();
assert_eq!(result.len(), 2); // 2 chunks
assert_eq!(result[0].len(), 0); // nothing found in first slots
assert_eq!(result[1].len(), bins);
assert_eq!(result[1], vec![raw_expected]);
}
#[test]
fn test_accountsdb_scan_snapshot_stores_binning() {
let mut stats = HashStats::default();
let (storages, raw_expected) = sample_storages_and_accounts();
// just the first bin of 2
let bins = 2;
let half_bins = bins / 2;
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: 0,
end: half_bins,
},
false,
)
.unwrap();
let mut expected = vec![Vec::new(); half_bins];
expected[0].push(raw_expected[0].clone());
expected[0].push(raw_expected[1].clone());
assert_eq!(result, vec![expected]);
// just the second bin of 2
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: 1,
end: bins,
},
false,
)
.unwrap();
let mut expected = vec![Vec::new(); half_bins];
let starting_bin_index = 0;
expected[starting_bin_index].push(raw_expected[2].clone());
expected[starting_bin_index].push(raw_expected[3].clone());
assert_eq!(result, vec![expected]);
// 1 bin at a time of 4
let bins = 4;
for (bin, expected_item) in raw_expected.iter().enumerate().take(bins) {
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: bin,
end: bin + 1,
},
false,
)
.unwrap();
let mut expected = vec![Vec::new(); 1];
expected[0].push(expected_item.clone());
assert_eq!(result, vec![expected]);
}
let bins = 256;
let bin_locations = vec![0, 127, 128, 255];
for bin in 0..bins {
let result = AccountsDb::scan_snapshot_stores(
&get_storage_refs(&storages),
&mut stats,
bins,
&Range {
start: bin,
end: bin + 1,
},
false,
)
.unwrap();
let mut expected = vec![];
if let Some(index) = bin_locations.iter().position(|&r| r == bin) {
expected = vec![Vec::new(); 1];
expected[0].push(raw_expected[index].clone());
}
assert_eq!(result, vec![expected]);
}
}
#[test]
fn test_accountsdb_scan_snapshot_stores_binning_2nd_chunk() {
// enough stores to get to 2nd chunk
// range is for only 1 bin out of 256.
let bins = 256;
const MAX_ITEMS_PER_CHUNK: usize = 5_000;
let slot = MAX_ITEMS_PER_CHUNK as Slot;
let (storages, raw_expected) = sample_storages_and_account_in_slot(slot);
let storage_data = vec![(&storages[0], slot)];
let sorted_storages =
SortedStorages::new_debug(&storage_data[..], 0, MAX_ITEMS_PER_CHUNK + 1);
let mut stats = HashStats::default();
let result = AccountsDb::scan_snapshot_stores(
&sorted_storages,
&mut stats,
bins,
&Range {
start: 127,
end: 128,
},
false,
)
.unwrap();
assert_eq!(result.len(), 2); // 2 chunks
assert_eq!(result[0].len(), 0); // nothing found in first slots
let mut expected = vec![Vec::new(); 1];
expected[0].push(raw_expected[1].clone());
assert_eq!(result[1].len(), 1);
assert_eq!(result[1], expected);
}
#[test]
fn test_accountsdb_calculate_accounts_hash_without_index_simple() {
solana_logger::setup();
let (storages, _size, _slot_expected) = sample_storage();
let result = AccountsDb::calculate_accounts_hash_without_index(
&get_storage_refs(&storages),
None,
HashStats::default(),
false,
None,
)
.unwrap();
let expected_hash = Hash::from_str("GKot5hBsd81kMupNCXHaqbhv3huEbxAFMLnpcX2hniwn").unwrap();
assert_eq!(result, (expected_hash, 0));
}
#[test]
fn test_accountsdb_calculate_accounts_hash_without_index() {
solana_logger::setup();
let (storages, raw_expected) = sample_storages_and_accounts();
let expected_hash =
AccountsHash::compute_merkle_root_loop(raw_expected.clone(), MERKLE_FANOUT, |item| {
item.hash
});
let sum = raw_expected.iter().map(|item| item.lamports).sum();
let result = AccountsDb::calculate_accounts_hash_without_index(
&get_storage_refs(&storages),
None,
HashStats::default(),
false,
None,
)
.unwrap();
assert_eq!(result, (expected_hash, sum));
}
fn sample_storage() -> (SnapshotStorages, usize, Slot) {
let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let slot_expected: Slot = 0;
let size: usize = 123;
let data = AccountStorageEntry::new(&paths[0], slot_expected, 0, size as u64);
let arc = Arc::new(data);
let storages = vec![vec![arc]];
(storages, size, slot_expected)
}
#[test]
fn test_accountsdb_scan_account_storage_no_bank() {
solana_logger::setup();
let expected = 1;
let tf = crate::append_vec::test_utils::get_append_vec_path(
"test_accountsdb_scan_account_storage_no_bank",
);
let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let slot_expected: Slot = 0;
let size: usize = 123;
let mut data = AccountStorageEntry::new(&paths[0], slot_expected, 0, size as u64);
let av = AppendVec::new(&tf.path, true, 1024 * 1024);
data.accounts = av;
let arc = Arc::new(data);
let storages = vec![vec![arc]];
let pubkey = solana_sdk::pubkey::new_rand();
let acc = AccountSharedData::new(1, 48, AccountSharedData::default().owner());
let sm = StoredMeta {
data_len: 1,
pubkey,
write_version: 1,
};
storages[0][0]
.accounts
.append_accounts(&[(sm, Some(&acc))], &[&Hash::default()]);
let calls = AtomicU64::new(0);
let result = AccountsDb::scan_account_storage_no_bank(
None,
&get_storage_refs(&storages),
|loaded_account: LoadedAccount, accum: &mut Vec<u64>, slot: Slot| {
calls.fetch_add(1, Ordering::Relaxed);
assert_eq!(loaded_account.pubkey(), &pubkey);
assert_eq!(slot_expected, slot);
accum.push(expected);
},
|a| a,
);
assert_eq!(calls.load(Ordering::Relaxed), 1);
assert_eq!(result, vec![vec![expected]]);
}
#[test]
fn test_accountsdb_scan_account_storage_no_bank_one_slot() {
solana_logger::setup();
let expected = 1;
let tf = crate::append_vec::test_utils::get_append_vec_path(
"test_accountsdb_scan_account_storage_no_bank",
);
let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let slot_expected: Slot = 0;
let size: usize = 123;
let mut data = AccountStorageEntry::new(&paths[0], slot_expected, 0, size as u64);
let av = AppendVec::new(&tf.path, true, 1024 * 1024);
data.accounts = av;
let arc = Arc::new(data);
let storages = vec![vec![arc]];
let pubkey = solana_sdk::pubkey::new_rand();
let acc = AccountSharedData::new(1, 48, AccountSharedData::default().owner());
let sm = StoredMeta {
data_len: 1,
pubkey,
write_version: 1,
};
storages[0][0]
.accounts
.append_accounts(&[(sm, Some(&acc))], &[&Hash::default()]);
let calls = AtomicU64::new(0);
let mut accum = Vec::new();
let scan_func = |loaded_account: LoadedAccount, accum: &mut Vec<u64>, slot: Slot| {
calls.fetch_add(1, Ordering::Relaxed);
assert_eq!(loaded_account.pubkey(), &pubkey);
assert_eq!(slot_expected, slot);
accum.push(expected);
};
AccountsDb::scan_multiple_account_storages_one_slot(
&storages[0],
&scan_func,
slot_expected,
&mut accum,
);
assert_eq!(calls.load(Ordering::Relaxed), 1);
assert_eq!(accum, vec![expected]);
}
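// Test helper: build snapshot storages containing a single append vec with one
// account for `pubkey`, written at the given `write_version` and `slot`.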
fn sample_storage_with_entries(
tf: &TempFile,
write_version: StoredMetaWriteVersion,
slot: Slot,
pubkey: &Pubkey,
) -> SnapshotStorages {
let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let size: usize = 123;
let mut data = AccountStorageEntry::new(&paths[0], slot, 0, size as u64);
let av = AppendVec::new(&tf.path, true, 1024 * 1024);
data.accounts = av;
let arc = Arc::new(data);
let storages = vec![vec![arc]];
let acc = AccountSharedData::new(1, 48, AccountSharedData::default().owner());
let sm = StoredMeta {
data_len: 1,
pubkey: *pubkey,
write_version,
};
storages[0][0]
.accounts
.append_accounts(&[(sm, Some(&acc))], &[&Hash::default()]);
storages
}
#[test]
fn test_accountsdb_scan_multiple_account_storage_no_bank_one_slot() {
solana_logger::setup();
let slot_expected: Slot = 0;
let tf = crate::append_vec::test_utils::get_append_vec_path(
"test_accountsdb_scan_account_storage_no_bank",
);
let write_version1 = 0;
let write_version2 = 1;
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
for swap in [false, true].iter() {
let mut storages = [
sample_storage_with_entries(&tf, write_version1, slot_expected, &pubkey1)
.remove(0)
.remove(0),
sample_storage_with_entries(&tf, write_version2, slot_expected, &pubkey2)
.remove(0)
.remove(0),
];
if *swap {
storages[..].swap(0, 1);
}
let calls = AtomicU64::new(0);
let scan_func = |loaded_account: LoadedAccount, accum: &mut Vec<u64>, slot: Slot| {
calls.fetch_add(1, Ordering::Relaxed);
let write_version = loaded_account.write_version();
let first = loaded_account.pubkey() == &pubkey1 && write_version == write_version1;
assert!(
first || loaded_account.pubkey() == &pubkey2 && write_version == write_version2
);
assert_eq!(slot_expected, slot);
if first {
assert!(accum.is_empty());
} else {
assert_eq!(accum.len(), 1);
}
accum.push(write_version);
};
let mut accum = Vec::new();
AccountsDb::scan_multiple_account_storages_one_slot(
&storages,
&scan_func,
slot_expected,
&mut accum,
);
assert_eq!(calls.load(Ordering::Relaxed), storages.len() as u64);
assert_eq!(accum, vec![write_version1, write_version2]);
}
}
#[test]
fn test_accountsdb_add_root() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
db.store_uncached(0, &[(&key, &account0)]);
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &key),
Some((account0, 0))
);
}
#[test]
fn test_accountsdb_latest_ancestor() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
db.store_uncached(0, &[(&key, &account0)]);
let account1 = AccountSharedData::new(0, 0, &key);
db.store_uncached(1, &[(&key, &account1)]);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(
&db.load_without_fixed_root(&ancestors, &key).unwrap().0,
&account1
);
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
assert_eq!(
&db.load_without_fixed_root(&ancestors, &key).unwrap().0,
&account1
);
let accounts: Vec<AccountSharedData> = db.unchecked_scan_accounts(
"",
&ancestors,
|accounts: &mut Vec<AccountSharedData>, option| {
accounts.push(option.1.take_account());
},
);
assert_eq!(accounts, vec![account1]);
}
#[test]
fn test_accountsdb_latest_ancestor_with_root() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
db.store_uncached(0, &[(&key, &account0)]);
let account1 = AccountSharedData::new(0, 0, &key);
db.store_uncached(1, &[(&key, &account1)]);
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(
&db.load_without_fixed_root(&ancestors, &key).unwrap().0,
&account1
);
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
assert_eq!(
&db.load_without_fixed_root(&ancestors, &key).unwrap().0,
&account1
);
}
#[test]
fn test_accountsdb_root_one_slot() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
// store value 1 in the "root", i.e. db zero
db.store_uncached(0, &[(&key, &account0)]);
// now we have:
//
// root0 -> key.lamports==1
// / \
// / \
// key.lamports==0 <- slot1 \
// slot2 -> key.lamports==1
// (via root0)
// store value 0 in one child
let account1 = AccountSharedData::new(0, 0, &key);
db.store_uncached(1, &[(&key, &account1)]);
// masking accounts is done at the Accounts level; at the AccountsDb level we
// see the original account (but could also accept "None", which is implemented
// at the Accounts level)
let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
assert_eq!(
&db.load_without_fixed_root(&ancestors, &key).unwrap().0,
&account1
);
// we should see 1 token in slot 2
let ancestors = vec![(0, 0), (2, 2)].into_iter().collect();
assert_eq!(
&db.load_without_fixed_root(&ancestors, &key).unwrap().0,
&account0
);
db.add_root(0);
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &key),
Some((account1, 1))
);
let ancestors = vec![(2, 2)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &key),
Some((account0, 0))
); // original value
}
#[test]
fn test_accountsdb_add_root_many() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 100, 0, 0);
for _ in 1..100 {
let idx = thread_rng().gen_range(0, 99);
let ancestors = vec![(0, 0)].into_iter().collect();
let account = db
.load_without_fixed_root(&ancestors, &pubkeys[idx])
.unwrap();
let default_account = AccountSharedData::from(Account {
lamports: (idx + 1) as u64,
..Account::default()
});
assert_eq!((default_account, 0), account);
}
db.add_root(0);
// check that all the accounts appear with a new root
for _ in 1..100 {
let idx = thread_rng().gen_range(0, 99);
let ancestors = vec![(0, 0)].into_iter().collect();
let account0 = db
.load_without_fixed_root(&ancestors, &pubkeys[idx])
.unwrap();
let ancestors = vec![(1, 1)].into_iter().collect();
let account1 = db
.load_without_fixed_root(&ancestors, &pubkeys[idx])
.unwrap();
let default_account = AccountSharedData::from(Account {
lamports: (idx + 1) as u64,
..Account::default()
});
assert_eq!(&default_account, &account0.0);
assert_eq!(&default_account, &account1.0);
}
}
#[test]
fn test_accountsdb_count_stores() {
solana_logger::setup();
let db = AccountsDb::new_single();
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 2, DEFAULT_FILE_SIZE as usize / 3, 0);
assert!(check_storage(&db, 0, 2));
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 3, &pubkey);
db.store_uncached(1, &[(&pubkey, &account)]);
db.store_uncached(1, &[(&pubkeys[0], &account)]);
{
let slot_0_stores = &db.storage.get_slot_stores(0).unwrap();
let slot_1_stores = &db.storage.get_slot_stores(1).unwrap();
let r_slot_0_stores = slot_0_stores.read().unwrap();
let r_slot_1_stores = slot_1_stores.read().unwrap();
assert_eq!(r_slot_0_stores.len(), 1);
assert_eq!(r_slot_1_stores.len(), 1);
assert_eq!(r_slot_0_stores.get(&0).unwrap().count(), 2);
assert_eq!(r_slot_1_stores[&1].count(), 2);
assert_eq!(r_slot_0_stores.get(&0).unwrap().approx_stored_count(), 2);
assert_eq!(r_slot_1_stores[&1].approx_stored_count(), 2);
}
// adding root doesn't change anything
db.get_accounts_delta_hash(1);
db.add_root(1);
{
let slot_0_stores = &db.storage.get_slot_stores(0).unwrap();
let slot_1_stores = &db.storage.get_slot_stores(1).unwrap();
let r_slot_0_stores = slot_0_stores.read().unwrap();
let r_slot_1_stores = slot_1_stores.read().unwrap();
assert_eq!(r_slot_0_stores.len(), 1);
assert_eq!(r_slot_1_stores.len(), 1);
assert_eq!(r_slot_0_stores.get(&0).unwrap().count(), 2);
assert_eq!(r_slot_1_stores[&1].count(), 2);
assert_eq!(r_slot_0_stores.get(&0).unwrap().approx_stored_count(), 2);
assert_eq!(r_slot_1_stores[&1].approx_stored_count(), 2);
}
// overwrite old rooted account version; only the r_slot_0_stores.count() should be
// decremented
db.store_uncached(2, &[(&pubkeys[0], &account)]);
db.clean_accounts(None, false);
{
let slot_0_stores = &db.storage.get_slot_stores(0).unwrap();
let slot_1_stores = &db.storage.get_slot_stores(1).unwrap();
let r_slot_0_stores = slot_0_stores.read().unwrap();
let r_slot_1_stores = slot_1_stores.read().unwrap();
assert_eq!(r_slot_0_stores.len(), 1);
assert_eq!(r_slot_1_stores.len(), 1);
assert_eq!(r_slot_0_stores.get(&0).unwrap().count(), 1);
assert_eq!(r_slot_1_stores[&1].count(), 2);
assert_eq!(r_slot_0_stores.get(&0).unwrap().approx_stored_count(), 2);
assert_eq!(r_slot_1_stores[&1].approx_stored_count(), 2);
}
}
#[test]
fn test_accounts_unsquashed() {
let key = Pubkey::default();
// 1 token in the "root", i.e. db zero
let db0 = AccountsDb::new(Vec::new(), &ClusterType::Development);
let account0 = AccountSharedData::new(1, 0, &key);
db0.store_uncached(0, &[(&key, &account0)]);
// 0 lamports in the child
let account1 = AccountSharedData::new(0, 0, &key);
db0.store_uncached(1, &[(&key, &account1)]);
// masking accounts is done at the Accounts level; at the AccountsDb level we
// see the original account
let ancestors = vec![(0, 0), (1, 1)].into_iter().collect();
assert_eq!(
db0.load_without_fixed_root(&ancestors, &key),
Some((account1, 1))
);
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(
db0.load_without_fixed_root(&ancestors, &key),
Some((account0, 0))
);
}
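// Store an account in an unrooted slot (cached or uncached), purge the slot via
// remove_unrooted_slots(), and verify every trace of the slot is gone before
// storing into the same slot again.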
fn run_test_remove_unrooted_slot(is_cached: bool) {
let unrooted_slot = 9;
let unrooted_bank_id = 9;
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
let ancestors = vec![(unrooted_slot, 1)].into_iter().collect();
if is_cached {
db.store_cached(unrooted_slot, &[(&key, &account0)]);
} else {
db.store_uncached(unrooted_slot, &[(&key, &account0)]);
}
db.bank_hashes
.write()
.unwrap()
.insert(unrooted_slot, BankHashInfo::default());
assert!(db
.accounts_index
.get(&key, Some(&ancestors), None)
.is_some());
assert_load_account(&db, unrooted_slot, key, 1);
// Purge the slot
db.remove_unrooted_slots(&[(unrooted_slot, unrooted_bank_id)]);
assert!(db.load_without_fixed_root(&ancestors, &key).is_none());
assert!(db.bank_hashes.read().unwrap().get(&unrooted_slot).is_none());
assert!(db.accounts_cache.slot_cache(unrooted_slot).is_none());
assert!(db.storage.0.get(&unrooted_slot).is_none());
assert!(db.accounts_index.get_account_read_entry(&key).is_none());
assert!(db
.accounts_index
.get(&key, Some(&ancestors), None)
.is_none());
// Test we can store for the same slot again and get the right information
let account0 = AccountSharedData::new(2, 0, &key);
db.store_uncached(unrooted_slot, &[(&key, &account0)]);
assert_load_account(&db, unrooted_slot, key, 2);
}
#[test]
fn test_remove_unrooted_slot_cached() {
run_test_remove_unrooted_slot(true);
}
#[test]
fn test_remove_unrooted_slot_storage() {
run_test_remove_unrooted_slot(false);
}
#[test]
fn test_remove_unrooted_slot_snapshot() {
solana_logger::setup();
let unrooted_slot = 9;
let unrooted_bank_id = 9;
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let account0 = AccountSharedData::new(1, 0, &key);
db.store_uncached(unrooted_slot, &[(&key, &account0)]);
// Purge the slot
db.remove_unrooted_slots(&[(unrooted_slot, unrooted_bank_id)]);
// Add a new root
let key2 = solana_sdk::pubkey::new_rand();
let new_root = unrooted_slot + 1;
db.store_uncached(new_root, &[(&key2, &account0)]);
db.add_root(new_root);
// Simulate reconstruction from snapshot
let db = reconstruct_accounts_db_via_serialization(&db, new_root);
// Check root account exists
assert_load_account(&db, new_root, key2, 1);
// Check purged account stays gone
let unrooted_slot_ancestors = vec![(unrooted_slot, 1)].into_iter().collect();
assert!(db
.load_without_fixed_root(&unrooted_slot_ancestors, &key)
.is_none());
}
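// Test helper: create `num` system-owned accounts followed by `num_vote`
// vote-program-owned accounts in `slot`, appending each new pubkey to
// `pubkeys`.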
fn create_account(
accounts: &AccountsDb,
pubkeys: &mut Vec<Pubkey>,
slot: Slot,
num: usize,
space: usize,
num_vote: usize,
) {
let ancestors = vec![(slot, 0)].into_iter().collect();
for t in 0..num {
let pubkey = solana_sdk::pubkey::new_rand();
let account =
AccountSharedData::new((t + 1) as u64, space, AccountSharedData::default().owner());
pubkeys.push(pubkey);
assert!(accounts
.load_without_fixed_root(&ancestors, &pubkey)
.is_none());
accounts.store_uncached(slot, &[(&pubkey, &account)]);
}
for t in 0..num_vote {
let pubkey = solana_sdk::pubkey::new_rand();
let account =
AccountSharedData::new((num + t + 1) as u64, space, &solana_vote_program::id());
pubkeys.push(pubkey);
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(accounts
.load_without_fixed_root(&ancestors, &pubkey)
.is_none());
accounts.store_uncached(slot, &[(&pubkey, &account)]);
}
}
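// Test helper: repeatedly pick a random pubkey from the first `range` entries,
// add one lamport to its account, and store the updated account back into
// `slot`.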
fn update_accounts(accounts: &AccountsDb, pubkeys: &[Pubkey], slot: Slot, range: usize) {
for _ in 1..1000 {
let idx = thread_rng().gen_range(0, range);
let ancestors = vec![(slot, 0)].into_iter().collect();
if let Some((mut account, _)) =
accounts.load_without_fixed_root(&ancestors, &pubkeys[idx])
{
account.checked_add_lamports(1).unwrap();
accounts.store_uncached(slot, &[(&pubkeys[idx], &account)]);
if account.lamports() == 0 {
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(accounts
.load_without_fixed_root(&ancestors, &pubkeys[idx])
.is_none());
} else {
let default_account = AccountSharedData::from(Account {
lamports: account.lamports(),
..Account::default()
});
assert_eq!(default_account, account);
}
}
}
}
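// Test helper: assert that `slot` has exactly one store, that the alive
// account count across its stores equals `count`, and that the approximate
// stored count matches the actual number of stored accounts.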
fn check_storage(accounts: &AccountsDb, slot: Slot, count: usize) -> bool {
assert_eq!(
accounts
.storage
.get_slot_stores(slot)
.unwrap()
.read()
.unwrap()
.len(),
1
);
let slot_storages = accounts.storage.get_slot_stores(slot).unwrap();
let mut total_count: usize = 0;
let r_slot_storages = slot_storages.read().unwrap();
for store in r_slot_storages.values() {
assert_eq!(store.status(), AccountStorageStatus::Available);
total_count += store.count();
}
assert_eq!(total_count, count);
let (expected_store_count, actual_store_count): (usize, usize) = (
r_slot_storages
.values()
.map(|s| s.approx_stored_count())
.sum(),
r_slot_storages
.values()
.map(|s| s.all_accounts().len())
.sum(),
);
assert_eq!(expected_store_count, actual_store_count);
total_count == count
}
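// Test helper: load `num` random pubkeys from `slot` and assert each account
// holds `idx + count` lamports.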
fn check_accounts(
accounts: &AccountsDb,
pubkeys: &[Pubkey],
slot: Slot,
num: usize,
count: usize,
) {
let ancestors = vec![(slot, 0)].into_iter().collect();
for _ in 0..num {
let idx = thread_rng().gen_range(0, num);
let account = accounts.load_without_fixed_root(&ancestors, &pubkeys[idx]);
let account1 = Some((
AccountSharedData::new(
(idx + count) as u64,
0,
AccountSharedData::default().owner(),
),
slot,
));
assert_eq!(account, account1);
}
}
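// Test helper: overwrite the first `num` accounts in `slot` with lamport
// balances of `idx + count`.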
#[allow(clippy::needless_range_loop)]
fn modify_accounts(
accounts: &AccountsDb,
pubkeys: &[Pubkey],
slot: Slot,
num: usize,
count: usize,
) {
for idx in 0..num {
let account = AccountSharedData::new(
(idx + count) as u64,
0,
AccountSharedData::default().owner(),
);
accounts.store_uncached(slot, &[(&pubkeys[idx], &account)]);
}
}
#[test]
fn test_account_one() {
let (_accounts_dirs, paths) = get_temp_accounts_paths(1).unwrap();
let db = AccountsDb::new(paths, &ClusterType::Development);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 1, 0, 0);
let ancestors = vec![(0, 0)].into_iter().collect();
let account = db.load_without_fixed_root(&ancestors, &pubkeys[0]).unwrap();
let default_account = AccountSharedData::from(Account {
lamports: 1,
..Account::default()
});
assert_eq!((default_account, 0), account);
}
#[test]
fn test_account_many() {
let (_accounts_dirs, paths) = get_temp_accounts_paths(2).unwrap();
let db = AccountsDb::new(paths, &ClusterType::Development);
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&db, &mut pubkeys, 0, 100, 0, 0);
check_accounts(&db, &pubkeys, 0, 100, 1);
}
#[test]
fn test_account_update() {
let accounts = AccountsDb::new_single();
let mut pubkeys: Vec<Pubkey> = vec![];
create_account(&accounts, &mut pubkeys, 0, 100, 0, 0);
update_accounts(&accounts, &pubkeys, 0, 99);
assert!(check_storage(&accounts, 0, 100));
}
#[test]
fn test_account_grow_many() {
let (_accounts_dir, paths) = get_temp_accounts_paths(2).unwrap();
let size = 4096;
let accounts = AccountsDb::new_sized(paths, size);
let mut keys = vec![];
for i in 0..9 {
let key = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(i + 1, size as usize / 4, &key);
accounts.store_uncached(0, &[(&key, &account)]);
keys.push(key);
}
let ancestors = vec![(0, 0)].into_iter().collect();
for (i, key) in keys.iter().enumerate() {
assert_eq!(
accounts
.load_without_fixed_root(&ancestors, key)
.unwrap()
.0
.lamports(),
(i as u64) + 1
);
}
let mut append_vec_histogram = HashMap::new();
let mut all_storages = vec![];
for slot_storage in accounts.storage.0.iter() {
all_storages.extend(slot_storage.read().unwrap().values().cloned())
}
for storage in all_storages {
*append_vec_histogram.entry(storage.slot()).or_insert(0) += 1;
}
for count in append_vec_histogram.values() {
assert!(*count >= 2);
}
}
#[test]
fn test_account_grow() {
let accounts = AccountsDb::new_single();
let status = [AccountStorageStatus::Available, AccountStorageStatus::Full];
let pubkey1 = solana_sdk::pubkey::new_rand();
let account1 = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey1);
accounts.store_uncached(0, &[(&pubkey1, &account1)]);
{
let stores = &accounts.storage.get_slot_stores(0).unwrap();
let r_stores = stores.read().unwrap();
assert_eq!(r_stores.len(), 1);
assert_eq!(r_stores[&0].count(), 1);
assert_eq!(r_stores[&0].status(), AccountStorageStatus::Available);
}
let pubkey2 = solana_sdk::pubkey::new_rand();
let account2 = AccountSharedData::new(1, DEFAULT_FILE_SIZE as usize / 2, &pubkey2);
accounts.store_uncached(0, &[(&pubkey2, &account2)]);
{
assert_eq!(accounts.storage.0.len(), 1);
let stores = &accounts.storage.get_slot_stores(0).unwrap();
let r_stores = stores.read().unwrap();
assert_eq!(r_stores.len(), 2);
assert_eq!(r_stores[&0].count(), 1);
assert_eq!(r_stores[&0].status(), AccountStorageStatus::Full);
assert_eq!(r_stores[&1].count(), 1);
assert_eq!(r_stores[&1].status(), AccountStorageStatus::Available);
}
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(
accounts
.load_without_fixed_root(&ancestors, &pubkey1)
.unwrap()
.0,
account1
);
assert_eq!(
accounts
.load_without_fixed_root(&ancestors, &pubkey2)
.unwrap()
.0,
account2
);
// lots of stores, but 7 storages should be enough for everything
for _ in 0..25 {
accounts.store_uncached(0, &[(&pubkey1, &account1)]);
{
assert_eq!(accounts.storage.0.len(), 1);
let stores = &accounts.storage.get_slot_stores(0).unwrap();
let r_stores = stores.read().unwrap();
assert!(r_stores.len() <= 7);
assert_eq!(r_stores[&0].status(), status[0]);
}
let ancestors = vec![(0, 0)].into_iter().collect();
assert_eq!(
accounts
.load_without_fixed_root(&ancestors, &pubkey1)
.unwrap()
.0,
account1
);
assert_eq!(
accounts
.load_without_fixed_root(&ancestors, &pubkey2)
.unwrap()
.0,
account2
);
}
}
#[test]
fn test_lazy_gc_slot() {
solana_logger::setup();
//This test is pedantic.
//A slot is purged when a non-root bank is cleaned up. If a slot is behind the root but is
//not itself a root, it means we are retaining dead banks.
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
//store an account
accounts.store_uncached(0, &[(&pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let id = {
let (lock, idx) = accounts
.accounts_index
.get(&pubkey, Some(&ancestors), None)
.unwrap();
lock.slot_list()[idx].1.store_id
};
accounts.get_accounts_delta_hash(0);
accounts.add_root(1);
//slot is still there, since gc is lazy
assert!(accounts
.storage
.get_slot_stores(0)
.unwrap()
.read()
.unwrap()
.get(&id)
.is_some());
//store causes clean
accounts.store_uncached(1, &[(&pubkey, &account)]);
// generate delta state for slot 1, so clean operates on it.
accounts.get_accounts_delta_hash(1);
//slot is gone
accounts.print_accounts_stats("pre-clean");
accounts.clean_accounts(None, false);
assert!(accounts.storage.0.get(&0).is_none());
//new value is there
let ancestors = vec![(1, 1)].into_iter().collect();
assert_eq!(
accounts.load_without_fixed_root(&ancestors, &pubkey),
Some((account, 1))
);
}
impl AccountsDb {
fn all_account_count_in_append_vec(&self, slot: Slot) -> usize {
let slot_storage = self.storage.get_slot_stores(slot);
if let Some(slot_storage) = slot_storage {
let r_slot_storage = slot_storage.read().unwrap();
let count = r_slot_storage
.values()
.map(|store| store.all_accounts().len())
.sum();
let stored_count: usize = r_slot_storage
.values()
.map(|store| store.approx_stored_count())
.sum();
assert_eq!(stored_count, count);
count
} else {
0
}
}
fn ref_count_for_pubkey(&self, pubkey: &Pubkey) -> RefCount {
self.accounts_index.ref_count_from_storage(pubkey)
}
}
#[test]
fn test_clean_zero_lamport_and_dead_slot() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store two accounts
accounts.store_uncached(0, &[(&pubkey1, &account)]);
accounts.store_uncached(0, &[(&pubkey2, &account)]);
// Make sure both accounts are in the same AppendVec in slot 0, which
// will prevent pubkey1 from being cleaned up later even when it's a
// zero-lamport account
let ancestors = vec![(0, 1)].into_iter().collect();
let (slot1, account_info1) = accounts
.accounts_index
.get(&pubkey1, Some(&ancestors), None)
.map(|(account_list1, index1)| account_list1.slot_list()[index1].clone())
.unwrap();
let (slot2, account_info2) = accounts
.accounts_index
.get(&pubkey2, Some(&ancestors), None)
.map(|(account_list2, index2)| account_list2.slot_list()[index2].clone())
.unwrap();
assert_eq!(slot1, 0);
assert_eq!(slot1, slot2);
assert_eq!(account_info1.store_id, account_info2.store_id);
// Update account 1 in slot 1
accounts.store_uncached(1, &[(&pubkey1, &account)]);
// Update account 1 as zero lamports account
accounts.store_uncached(2, &[(&pubkey1, &zero_lamport_account)]);
// Pubkey 1 was the only account in slot 1, and it was updated in slot 2, so
// slot 1 should be purged
accounts.add_root(0);
accounts.add_root(1);
accounts.add_root(2);
// Slot 1 should be removed, slot 0 cannot be removed because it still has
// the latest update for pubkey 2
accounts.clean_accounts(None, false);
assert!(accounts.storage.get_slot_stores(0).is_some());
assert!(accounts.storage.get_slot_stores(1).is_none());
// Slot 1 should be cleaned because all its accounts are
// zero lamports, and are not present in any other slot's
// storage entries
assert_eq!(accounts.alive_account_count_in_slot(1), 0);
}
#[test]
fn test_clean_multiple_zero_lamport_decrements_index_ref_count() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store 2 accounts in slot 0, then update account 1 in two more slots
accounts.store_uncached(0, &[(&pubkey1, &zero_lamport_account)]);
accounts.store_uncached(0, &[(&pubkey2, &zero_lamport_account)]);
accounts.store_uncached(1, &[(&pubkey1, &zero_lamport_account)]);
accounts.store_uncached(2, &[(&pubkey1, &zero_lamport_account)]);
// Root all slots
accounts.add_root(0);
accounts.add_root(1);
accounts.add_root(2);
// Account ref counts should match how many slots they were stored in
// Account 1 = 3 slots; account 2 = 1 slot
assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 3);
assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 1);
accounts.clean_accounts(None, false);
// Slots 0 and 1 should each have been cleaned because all of their
// accounts are zero lamports
assert!(accounts.storage.get_slot_stores(0).is_none());
assert!(accounts.storage.get_slot_stores(1).is_none());
// Slot 2 only has a zero lamport account as well. However, calc_delete_dependencies()
// should exclude slot 2 from the clean due to changes in other slots
assert!(accounts.storage.get_slot_stores(2).is_some());
// Index ref counts should be consistent with the slot stores. Account 1 ref count
// should be 1 since slot 2 is the only alive slot; account 2 should have a ref
// count of 0 due to slot 0 being dead
assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 1);
assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey2), 0);
accounts.clean_accounts(None, false);
// Slot 2 will now be cleaned, which will leave account 1 with a ref count of 0
assert!(accounts.storage.get_slot_stores(2).is_none());
assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0);
}
#[test]
fn test_clean_zero_lamport_and_old_roots() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store a zero-lamport account
accounts.store_uncached(0, &[(&pubkey, &account)]);
accounts.store_uncached(1, &[(&pubkey, &zero_lamport_account)]);
// Simulate rooting the zero-lamport account; it should then be a
// candidate for cleaning
accounts.add_root(0);
accounts.add_root(1);
// Slot 0 should be removed, and
// zero-lamport account should be cleaned
accounts.clean_accounts(None, false);
assert!(accounts.storage.get_slot_stores(0).is_none());
assert!(accounts.storage.get_slot_stores(1).is_none());
// Slot 0 should be cleaned because all its accounts have been
// updated in the rooted slot 1
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
// Slot 1 should be cleaned because all its accounts are
// zero lamports, and are not present in any other slot's
// storage entries
assert_eq!(accounts.alive_account_count_in_slot(1), 0);
// zero lamport account, should no longer exist in accounts index
// because it has been removed
assert!(accounts.accounts_index.get(&pubkey, None, None).is_none());
}
#[test]
fn test_clean_old_with_normal_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
//store an account
accounts.store_uncached(0, &[(&pubkey, &account)]);
accounts.store_uncached(1, &[(&pubkey, &account)]);
// simulate slots are rooted after while
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
//even if rooted, old state isn't cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 1);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
accounts.clean_accounts(None, false);
//now old state is cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
}
#[test]
fn test_clean_old_with_zero_lamport_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let normal_account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
//store an account
accounts.store_uncached(0, &[(&pubkey1, &normal_account)]);
accounts.store_uncached(1, &[(&pubkey1, &zero_account)]);
accounts.store_uncached(0, &[(&pubkey2, &normal_account)]);
accounts.store_uncached(1, &[(&pubkey2, &normal_account)]);
//simulate slots are rooted after while
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
//even if rooted, old state isn't cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 2);
assert_eq!(accounts.alive_account_count_in_slot(1), 2);
accounts.print_accounts_stats("");
accounts.clean_accounts(None, false);
//Old state behind zero-lamport account is cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
assert_eq!(accounts.alive_account_count_in_slot(1), 2);
}
#[test]
fn test_clean_old_with_both_normal_and_zero_lamport_accounts() {
solana_logger::setup();
let mut accounts = AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
spl_token_mint_index_enabled(),
false,
AccountShrinkThreshold::default(),
);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
// Set up account to be added to secondary index
let mint_key = Pubkey::new_unique();
let mut account_data_with_mint =
vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()];
account_data_with_mint[..PUBKEY_BYTES].clone_from_slice(&(mint_key.to_bytes()));
let mut normal_account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
normal_account.set_owner(inline_spl_token_v2_0::id());
normal_account.set_data(account_data_with_mint.clone());
let mut zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
zero_account.set_owner(inline_spl_token_v2_0::id());
zero_account.set_data(account_data_with_mint);
//store an account
accounts.store_uncached(0, &[(&pubkey1, &normal_account)]);
accounts.store_uncached(0, &[(&pubkey1, &normal_account)]);
accounts.store_uncached(1, &[(&pubkey1, &zero_account)]);
accounts.store_uncached(0, &[(&pubkey2, &normal_account)]);
accounts.store_uncached(2, &[(&pubkey2, &normal_account)]);
//simulate slots are rooted after while
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
accounts.get_accounts_delta_hash(2);
accounts.add_root(2);
//even if rooted, old state isn't cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 2);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
assert_eq!(accounts.alive_account_count_in_slot(2), 1);
// Secondary index should still find both pubkeys
let mut found_accounts = HashSet::new();
let index_key = IndexKey::SplTokenMint(mint_key);
let bank_id = 0;
accounts
.accounts_index
.index_scan_accounts(&Ancestors::default(), bank_id, index_key, |key, _| {
found_accounts.insert(*key);
})
.unwrap();
assert_eq!(found_accounts.len(), 2);
assert!(found_accounts.contains(&pubkey1));
assert!(found_accounts.contains(&pubkey2));
{
accounts.account_indexes.keys = Some(AccountSecondaryIndexesIncludeExclude {
exclude: true,
keys: [mint_key].iter().cloned().collect::<HashSet<Pubkey>>(),
});
// Secondary index can't be used - do normal scan: should still find both pubkeys
let found_accounts = accounts
.index_scan_accounts(
&Ancestors::default(),
bank_id,
index_key,
|collection: &mut HashSet<Pubkey>, account| {
collection.insert(*account.unwrap().0);
},
)
.unwrap();
assert!(!found_accounts.1);
assert_eq!(found_accounts.0.len(), 2);
assert!(found_accounts.0.contains(&pubkey1));
assert!(found_accounts.0.contains(&pubkey2));
accounts.account_indexes.keys = None;
// Secondary index can now be used since it isn't marked as excluded
let found_accounts = accounts
.index_scan_accounts(
&Ancestors::default(),
bank_id,
index_key,
|collection: &mut HashSet<Pubkey>, account| {
collection.insert(*account.unwrap().0);
},
)
.unwrap();
assert!(found_accounts.1);
assert_eq!(found_accounts.0.len(), 2);
assert!(found_accounts.0.contains(&pubkey1));
assert!(found_accounts.0.contains(&pubkey2));
accounts.account_indexes.keys = None;
}
accounts.clean_accounts(None, false);
//both zero lamport and normal accounts are cleaned up
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
// The only store to slot 1 was a zero-lamport account, which should
// be purged by the zero-lamport cleaning logic because slot 1 is
// rooted
assert_eq!(accounts.alive_account_count_in_slot(1), 0);
assert_eq!(accounts.alive_account_count_in_slot(2), 1);
// `pubkey1`, a zero lamport account, should no longer exist in accounts index
// because it has been removed by the clean
assert!(accounts.accounts_index.get(&pubkey1, None, None).is_none());
// Secondary index should have purged `pubkey1` as well
let mut found_accounts = vec![];
accounts
.accounts_index
.index_scan_accounts(
&Ancestors::default(),
bank_id,
IndexKey::SplTokenMint(mint_key),
|key, _| found_accounts.push(*key),
)
.unwrap();
assert_eq!(found_accounts, vec![pubkey2]);
}
#[test]
fn test_clean_max_slot_zero_lamport_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// store an account, make it a zero lamport account
// in slot 1
accounts.store_uncached(0, &[(&pubkey, &account)]);
accounts.store_uncached(1, &[(&pubkey, &zero_account)]);
// simulate slots being rooted after a while
accounts.add_root(0);
accounts.add_root(1);
// Only clean up to slot 0; this should not purge slot 0 based on
// updates in the later slot 1
assert_eq!(accounts.alive_account_count_in_slot(0), 1);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
accounts.clean_accounts(Some(0), false);
assert_eq!(accounts.alive_account_count_in_slot(0), 1);
assert_eq!(accounts.alive_account_count_in_slot(1), 1);
assert!(accounts.accounts_index.get(&pubkey, None, None).is_some());
// Now the account can be cleaned up
accounts.clean_accounts(Some(1), false);
assert_eq!(accounts.alive_account_count_in_slot(0), 0);
assert_eq!(accounts.alive_account_count_in_slot(1), 0);
// The zero-lamport account should no longer exist in the accounts index
// because it has been removed by the clean
assert!(accounts.accounts_index.get(&pubkey, None, None).is_none());
}
#[test]
fn test_uncleaned_roots_with_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
//store an account
accounts.store_uncached(0, &[(&pubkey, &account)]);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
// simulate slots being rooted after a while
accounts.add_root(0);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1);
//now uncleaned roots are cleaned up
accounts.clean_accounts(None, false);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
}
#[test]
fn test_uncleaned_roots_with_no_account() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
// simulate slots being rooted after a while
accounts.add_root(0);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 1);
//now uncleaned roots are cleaned up
accounts.clean_accounts(None, false);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
}
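// Illustrative sketch (not part of the original test suite): uncleaned roots
// accumulate one entry per `add_root` call and a single `clean_accounts` pass
// drains them all. Only APIs exercised by the two tests above are used here.
#[test]
fn test_uncleaned_roots_multiple_drained_by_one_clean() {
solana_logger::setup();
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
// root three empty slots; each becomes an uncleaned root
for slot in 0..3 {
accounts.add_root(slot);
}
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 3);
// one clean pass consumes every pending uncleaned root
accounts.clean_accounts(None, false);
assert_eq!(accounts.accounts_index.uncleaned_roots_len(), 0);
}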
#[test]
fn test_accounts_db_serialize1() {
solana_logger::setup();
let accounts = AccountsDb::new_single();
let mut pubkeys: Vec<Pubkey> = vec![];
// Create 100 accounts in slot 0
create_account(&accounts, &mut pubkeys, 0, 100, 0, 0);
accounts.clean_accounts(None, false);
check_accounts(&accounts, &pubkeys, 0, 100, 1);
// do some updates to those accounts and re-check
modify_accounts(&accounts, &pubkeys, 0, 100, 2);
assert!(check_storage(&accounts, 0, 100));
check_accounts(&accounts, &pubkeys, 0, 100, 2);
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
let mut pubkeys1: Vec<Pubkey> = vec![];
// CREATE SLOT 1
let latest_slot = 1;
// Modify the first 10 of the accounts from slot 0 in slot 1
modify_accounts(&accounts, &pubkeys, latest_slot, 10, 3);
// Overwrite account 30 from slot 0 with lamports=0 into slot 1.
// Slot 1 should now have 10 + 1 = 11 accounts
let account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
accounts.store_uncached(latest_slot, &[(&pubkeys[30], &account)]);
// Create 10 new accounts in slot 1, should now have 11 + 10 = 21
// accounts
create_account(&accounts, &mut pubkeys1, latest_slot, 10, 0, 0);
accounts.get_accounts_delta_hash(latest_slot);
accounts.add_root(latest_slot);
assert!(check_storage(&accounts, 1, 21));
// CREATE SLOT 2
let latest_slot = 2;
let mut pubkeys2: Vec<Pubkey> = vec![];
// Modify first 20 of the accounts from slot 0 in slot 2
modify_accounts(&accounts, &pubkeys, latest_slot, 20, 4);
accounts.clean_accounts(None, false);
// Overwrite account 31 from slot 0 with lamports=0 into slot 2.
// Slot 2 should now have 20 + 1 = 21 accounts
let account = AccountSharedData::new(0, 0, AccountSharedData::default().owner());
accounts.store_uncached(latest_slot, &[(&pubkeys[31], &account)]);
// Create 10 new accounts in slot 2. Slot 2 should now have
// 21 + 10 = 31 accounts
create_account(&accounts, &mut pubkeys2, latest_slot, 10, 0, 0);
accounts.get_accounts_delta_hash(latest_slot);
accounts.add_root(latest_slot);
assert!(check_storage(&accounts, 2, 31));
accounts.clean_accounts(None, false);
// The first 20 accounts of slot 0 have been updated in slot 2, as well as
// accounts 30 and 31 (overwritten with zero-lamport accounts in slot 1 and
// slot 2 respectively), so only 78 accounts are left in slot 0's storage entries.
assert!(check_storage(&accounts, 0, 78));
// 10 of the 21 accounts have been modified in slot 2, so only 11
// accounts left in slot 1.
assert!(check_storage(&accounts, 1, 11));
assert!(check_storage(&accounts, 2, 31));
let daccounts = reconstruct_accounts_db_via_serialization(&accounts, latest_slot);
assert_eq!(
daccounts.write_version.load(Ordering::Relaxed),
accounts.write_version.load(Ordering::Relaxed)
);
assert_eq!(
daccounts.next_id.load(Ordering::Relaxed),
accounts.next_id.load(Ordering::Relaxed)
);
// Get the hash for the latest slot, which should be one of the two hashes in the
// bank_hashes map on the deserialized AccountsDb
assert_eq!(daccounts.bank_hashes.read().unwrap().len(), 2);
assert_eq!(
daccounts.bank_hashes.read().unwrap().get(&latest_slot),
accounts.bank_hashes.read().unwrap().get(&latest_slot)
);
daccounts.print_count_and_status("daccounts");
// Don't check the first 35 accounts; accounts[35..] have not been modified after slot 0
check_accounts(&daccounts, &pubkeys[35..], 0, 65, 37);
check_accounts(&daccounts, &pubkeys1, 1, 10, 1);
assert!(check_storage(&daccounts, 0, 100));
assert!(check_storage(&daccounts, 1, 21));
assert!(check_storage(&daccounts, 2, 31));
let ancestors = linear_ancestors(latest_slot);
assert_eq!(
daccounts.update_accounts_hash(latest_slot, &ancestors),
accounts.update_accounts_hash(latest_slot, &ancestors)
);
}
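// Illustrative sketch (not part of the original test suite): a minimal
// serialize/deserialize round trip using only the helpers defined below
// (reconstruct_accounts_db_via_serialization / assert_load_account). The larger
// test above exercises the same path across several slots.
#[test]
fn test_accounts_db_serialize_minimal_roundtrip() {
solana_logger::setup();
let accounts = AccountsDb::new_single();
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
accounts.store_uncached(0, &[(&pubkey, &account)]);
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
// reconstruct from a serialized snapshot of slot 0 and check the account survived
let daccounts = reconstruct_accounts_db_via_serialization(&accounts, 0);
assert_load_account(&daccounts, 0, pubkey, 1);
}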
fn assert_load_account(
accounts: &AccountsDb,
slot: Slot,
pubkey: Pubkey,
expected_lamports: u64,
) {
let ancestors = vec![(slot, 0)].into_iter().collect();
let (account, _slot) = accounts
.load_without_fixed_root(&ancestors, &pubkey)
.unwrap();
assert_eq!(account.lamports(), expected_lamports);
}
fn assert_not_load_account(accounts: &AccountsDb, slot: Slot, pubkey: Pubkey) {
let ancestors = vec![(slot, 0)].into_iter().collect();
assert!(accounts
.load_without_fixed_root(&ancestors, &pubkey)
.is_none());
}
fn reconstruct_accounts_db_via_serialization(accounts: &AccountsDb, slot: Slot) -> AccountsDb {
let daccounts =
crate::serde_snapshot::reconstruct_accounts_db_via_serialization(accounts, slot);
daccounts.print_count_and_status("daccounts");
daccounts
}
fn assert_no_stores(accounts: &AccountsDb, slot: Slot) {
let slot_stores = accounts.storage.get_slot_stores(slot);
let r_slot_stores = slot_stores.as_ref().map(|slot_stores| {
let r_slot_stores = slot_stores.read().unwrap();
info!("{:?}", *r_slot_stores);
r_slot_stores
});
assert!(r_slot_stores.is_none() || r_slot_stores.unwrap().is_empty());
}
#[test]
fn test_accounts_db_purge_keep_live() {
solana_logger::setup();
let some_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let account2 = AccountSharedData::new(some_lamport, no_data, &owner);
let pubkey2 = solana_sdk::pubkey::new_rand();
let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
let accounts = AccountsDb::new_single();
accounts.add_root(0);
// Step A
let mut current_slot = 1;
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
// Store another live account to slot 1 which will prevent any purge
// since the store count will not be zero
accounts.store_uncached(current_slot, &[(&pubkey2, &account2)]);
accounts.add_root(current_slot);
let (slot1, account_info1) = accounts
.accounts_index
.get(&pubkey, None, None)
.map(|(account_list1, index1)| account_list1.slot_list()[index1].clone())
.unwrap();
let (slot2, account_info2) = accounts
.accounts_index
.get(&pubkey2, None, None)
.map(|(account_list2, index2)| account_list2.slot_list()[index2].clone())
.unwrap();
assert_eq!(slot1, current_slot);
assert_eq!(slot1, slot2);
assert_eq!(account_info1.store_id, account_info2.store_id);
// Step B
current_slot += 1;
let zero_lamport_slot = current_slot;
accounts.store_uncached(current_slot, &[(&pubkey, &zero_lamport_account)]);
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
current_slot += 1;
accounts.add_root(current_slot);
accounts.print_accounts_stats("pre_purge");
accounts.clean_accounts(None, false);
accounts.print_accounts_stats("post_purge");
// The earlier entry for pubkey in the accounts index is purged
let (slot_list_len, index_slot) = {
let account_entry = accounts
.accounts_index
.get_account_read_entry(&pubkey)
.unwrap();
let slot_list = account_entry.slot_list();
(slot_list.len(), slot_list[0].0)
};
assert_eq!(slot_list_len, 1);
// Zero lamport entry was not the one purged
assert_eq!(index_slot, zero_lamport_slot);
// The ref count should still be 2 because no slots were purged
assert_eq!(accounts.ref_count_for_pubkey(&pubkey), 2);
// storage for slot 1 had 2 accounts, now has 1 after the entry for pubkey
// was reclaimed
check_storage(&accounts, 1, 1);
// storage for slot 2 had 1 account and still has 1
check_storage(&accounts, 2, 1);
}
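// Illustrative sketch (not part of the original test suite): the index ref count
// grows once per slot that stores a pubkey, not once per store, mirroring the
// ref-count assertions in the test above and in do_full_clean_refcount below.
#[test]
fn test_ref_count_counts_slots_not_stores() {
let accounts = AccountsDb::new_single();
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
// two stores to the same slot only count once towards the ref count
accounts.store_uncached(0, &[(&pubkey, &account)]);
accounts.store_uncached(0, &[(&pubkey, &account)]);
assert_eq!(1, accounts.ref_count_for_pubkey(&pubkey));
// a store to a new slot adds one more reference
accounts.store_uncached(1, &[(&pubkey, &account)]);
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey));
}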
#[test]
fn test_accounts_db_purge1() {
solana_logger::setup();
let some_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
let accounts = AccountsDb::new_single();
accounts.add_root(0);
let mut current_slot = 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.store_uncached(current_slot, &[(&pubkey, &zero_lamport_account)]);
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
// Otherwise slot 2 will not be removed
current_slot += 1;
accounts.set_hash(current_slot, current_slot - 1);
accounts.add_root(current_slot);
accounts.print_accounts_stats("pre_purge");
let ancestors = linear_ancestors(current_slot);
info!("ancestors: {:?}", ancestors);
let hash = accounts.update_accounts_hash_test(current_slot, &ancestors);
accounts.clean_accounts(None, false);
assert_eq!(
accounts.update_accounts_hash_test(current_slot, &ancestors),
hash
);
accounts.print_accounts_stats("post_purge");
// Make sure the index entry for pubkey is cleared
assert!(accounts
.accounts_index
.get_account_read_entry(&pubkey)
.is_none());
// slot 1 & 2 should not have any stores
assert_no_stores(&accounts, 1);
assert_no_stores(&accounts, 2);
}
#[test]
fn test_accounts_db_serialize_zero_and_free() {
solana_logger::setup();
let some_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
let account2 = AccountSharedData::new(some_lamport + 1, no_data, &owner);
let pubkey2 = solana_sdk::pubkey::new_rand();
let filler_account = AccountSharedData::new(some_lamport, no_data, &owner);
let filler_account_pubkey = solana_sdk::pubkey::new_rand();
let accounts = AccountsDb::new_single();
let mut current_slot = 1;
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&pubkey, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &account2)]);
// Store enough accounts such that an additional store for slot 2 is created.
while accounts
.storage
.get_slot_stores(current_slot)
.unwrap()
.read()
.unwrap()
.len()
< 2
{
accounts.store_uncached(current_slot, &[(&filler_account_pubkey, &filler_account)]);
}
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
accounts.print_accounts_stats("accounts");
accounts.clean_accounts(None, false);
accounts.print_accounts_stats("accounts_post_purge");
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.print_accounts_stats("reconstructed");
assert_load_account(&accounts, current_slot, pubkey, zero_lamport);
}
fn with_chained_zero_lamport_accounts<F>(f: F)
where
F: Fn(AccountsDb, Slot) -> AccountsDb,
{
let some_lamport = 223;
let zero_lamport = 0;
let dummy_lamport = 999;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let account2 = AccountSharedData::new(some_lamport + 100_001, no_data, &owner);
let account3 = AccountSharedData::new(some_lamport + 100_002, no_data, &owner);
let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let purged_pubkey1 = solana_sdk::pubkey::new_rand();
let purged_pubkey2 = solana_sdk::pubkey::new_rand();
let dummy_account = AccountSharedData::new(dummy_lamport, no_data, &owner);
let dummy_pubkey = Pubkey::default();
let accounts = AccountsDb::new_single();
let mut current_slot = 1;
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&purged_pubkey2, &account3)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey2, &zero_lamport_account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&dummy_pubkey, &dummy_account)]);
accounts.add_root(current_slot);
accounts.print_accounts_stats("pre_f");
accounts.update_accounts_hash(4, &Ancestors::default());
let accounts = f(accounts, current_slot);
accounts.print_accounts_stats("post_f");
assert_load_account(&accounts, current_slot, pubkey, some_lamport);
assert_load_account(&accounts, current_slot, purged_pubkey1, 0);
assert_load_account(&accounts, current_slot, purged_pubkey2, 0);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
accounts
.verify_bank_hash_and_lamports(4, &Ancestors::default(), 1222, true)
.unwrap();
}
#[test]
fn test_accounts_purge_chained_purge_before_snapshot_restore() {
solana_logger::setup();
with_chained_zero_lamport_accounts(|accounts, current_slot| {
accounts.clean_accounts(None, false);
reconstruct_accounts_db_via_serialization(&accounts, current_slot)
});
}
#[test]
fn test_accounts_purge_chained_purge_after_snapshot_restore() {
solana_logger::setup();
with_chained_zero_lamport_accounts(|accounts, current_slot| {
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.print_accounts_stats("after_reconstruct");
accounts.clean_accounts(None, false);
reconstruct_accounts_db_via_serialization(&accounts, current_slot)
});
}
#[test]
#[ignore]
fn test_store_account_stress() {
let slot = 42;
let num_threads = 2;
let min_file_bytes = std::mem::size_of::<StoredMeta>()
+ std::mem::size_of::<crate::append_vec::AccountMeta>();
let db = Arc::new(AccountsDb::new_sized(Vec::new(), min_file_bytes as u64));
db.add_root(slot);
let thread_hdls: Vec<_> = (0..num_threads)
.map(|_| {
let db = db.clone();
std::thread::Builder::new()
.name("account-writers".to_string())
.spawn(move || {
let pubkey = solana_sdk::pubkey::new_rand();
let mut account = AccountSharedData::new(1, 0, &pubkey);
let mut i = 0;
loop {
let account_bal = thread_rng().gen_range(1, 99);
account.set_lamports(account_bal);
db.store_uncached(slot, &[(&pubkey, &account)]);
let (account, load_slot) = db
.load_without_fixed_root(&Ancestors::default(), &pubkey)
.unwrap_or_else(|| {
panic!("Could not fetch stored account {}, iter {}", pubkey, i)
});
// the only rooted store is in `slot`, so the load must come from there
assert_eq!(load_slot, slot);
assert_eq!(account.lamports(), account_bal);
i += 1;
}
})
.unwrap()
})
.collect();
for t in thread_hdls {
t.join().unwrap();
}
}
#[test]
fn test_accountsdb_scan_accounts() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let key0 = solana_sdk::pubkey::new_rand();
let account0 = AccountSharedData::new(1, 0, &key);
db.store_uncached(0, &[(&key0, &account0)]);
let key1 = solana_sdk::pubkey::new_rand();
let account1 = AccountSharedData::new(2, 0, &key);
db.store_uncached(1, &[(&key1, &account1)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let accounts: Vec<AccountSharedData> = db.unchecked_scan_accounts(
"",
&ancestors,
|accounts: &mut Vec<AccountSharedData>, option| {
accounts.push(option.1.take_account());
},
);
assert_eq!(accounts, vec![account0]);
let ancestors = vec![(1, 1), (0, 0)].into_iter().collect();
let accounts: Vec<AccountSharedData> = db.unchecked_scan_accounts(
"",
&ancestors,
|accounts: &mut Vec<AccountSharedData>, option| {
accounts.push(option.1.take_account());
},
);
assert_eq!(accounts.len(), 2);
}
#[test]
fn test_cleanup_key_not_removed() {
solana_logger::setup();
let db = AccountsDb::new_single();
let key = Pubkey::default();
let key0 = solana_sdk::pubkey::new_rand();
let account0 = AccountSharedData::new(1, 0, &key);
db.store_uncached(0, &[(&key0, &account0)]);
let key1 = solana_sdk::pubkey::new_rand();
let account1 = AccountSharedData::new(2, 0, &key);
db.store_uncached(1, &[(&key1, &account1)]);
db.print_accounts_stats("pre");
let slots: HashSet<Slot> = vec![1].into_iter().collect();
let purge_keys = vec![(key1, slots)];
db.purge_keys_exact(purge_keys.iter());
let account2 = AccountSharedData::new(3, 0, &key);
db.store_uncached(2, &[(&key1, &account2)]);
db.print_accounts_stats("post");
let ancestors = vec![(2, 0)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &key1)
.unwrap()
.0
.lamports(),
3
);
}
#[test]
fn test_store_large_account() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let data_len = DEFAULT_FILE_SIZE as usize + 7;
let account = AccountSharedData::new(1, data_len, &key);
db.store_uncached(0, &[(&key, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let ret = db.load_without_fixed_root(&ancestors, &key).unwrap();
assert_eq!(ret.0.data().len(), data_len);
}
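// Illustrative sketch (not part of the original test suite): a store followed by
// a load in the same (un-rooted) slot round-trips lamports, data length, and the
// slot the account was stored in, using the same APIs as the test above.
#[test]
fn test_store_load_roundtrip_lamports_and_data_len() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(42, 16, &key);
db.store_uncached(0, &[(&key, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
let (loaded, load_slot) = db.load_without_fixed_root(&ancestors, &key).unwrap();
assert_eq!(load_slot, 0);
assert_eq!(loaded.lamports(), 42);
assert_eq!(loaded.data().len(), 16);
}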
#[test]
fn test_hash_frozen_account_data() {
let account = AccountSharedData::new(1, 42, &Pubkey::default());
let hash = AccountsDb::hash_frozen_account_data(&account);
assert_ne!(hash, Hash::default()); // Better not be the default Hash
// Lamports changes do not affect the hash
let mut account_modified = account.clone();
account_modified.checked_sub_lamports(1).unwrap();
assert_eq!(
hash,
AccountsDb::hash_frozen_account_data(&account_modified)
);
// Rent epoch changes do not affect the hash
let mut account_modified = account.clone();
account_modified.set_rent_epoch(account_modified.rent_epoch() + 1);
assert_eq!(
hash,
AccountsDb::hash_frozen_account_data(&account_modified)
);
// Account data may not be modified
let mut account_modified = account.clone();
account_modified.data_as_mut_slice()[0] = 42;
assert_ne!(
hash,
AccountsDb::hash_frozen_account_data(&account_modified)
);
// Owner may not be modified
let mut account_modified = account.clone();
account_modified
.set_owner(Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap());
assert_ne!(
hash,
AccountsDb::hash_frozen_account_data(&account_modified)
);
// Executable may not be modified
let mut account_modified = account;
account_modified.set_executable(true);
assert_ne!(
hash,
AccountsDb::hash_frozen_account_data(&account_modified)
);
}
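// Illustrative sketch (not part of the original test suite): per the test above,
// the frozen-account hash covers data/owner/executable but not lamports or rent
// epoch, so accounts that agree on the covered fields hash identically.
#[test]
fn test_hash_frozen_account_data_is_deterministic() {
let owner = Pubkey::default();
let account_a = AccountSharedData::new(1, 42, &owner);
let account_b = AccountSharedData::new(1, 42, &owner);
assert_eq!(
AccountsDb::hash_frozen_account_data(&account_a),
AccountsDb::hash_frozen_account_data(&account_b)
);
// differing lamports do not change the frozen hash
let account_c = AccountSharedData::new(1_000_000, 42, &owner);
assert_eq!(
AccountsDb::hash_frozen_account_data(&account_a),
AccountsDb::hash_frozen_account_data(&account_c)
);
}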
#[test]
fn test_frozen_account_lamport_increase() {
let frozen_pubkey =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let mut account = AccountSharedData::new(1, 42, &frozen_pubkey);
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
db.freeze_accounts(&ancestors, &[frozen_pubkey]);
// Store with no account changes is ok
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
// Store with an increase in lamports is ok
account.set_lamports(2);
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
// Store with a decrease that does not go below the frozen amount of lamports is tolerated
account.set_lamports(1);
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
// A store of any value at or above the frozen value of '1' across different slots is also ok
account.set_lamports(3);
db.store_uncached(1, &[(&frozen_pubkey, &account)]);
account.set_lamports(2);
db.store_uncached(2, &[(&frozen_pubkey, &account)]);
account.set_lamports(1);
db.store_uncached(3, &[(&frozen_pubkey, &account)]);
}
#[test]
#[should_panic(
expected = "Frozen account My11111111111111111111111111111111111111111 modified. Lamports decreased from 1 to 0"
)]
fn test_frozen_account_lamport_decrease() {
let frozen_pubkey =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let mut account = AccountSharedData::new(1, 42, &frozen_pubkey);
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
db.freeze_accounts(&ancestors, &[frozen_pubkey]);
// Store with a decrease below the frozen amount of lamports is not ok
account.checked_sub_lamports(1).unwrap();
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
}
#[test]
#[should_panic(
expected = "Unable to freeze an account that does not exist: My11111111111111111111111111111111111111111"
)]
fn test_frozen_account_nonexistent() {
let frozen_pubkey =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let ancestors = vec![(0, 0)].into_iter().collect();
db.freeze_accounts(&ancestors, &[frozen_pubkey]);
}
#[test]
#[should_panic(
expected = "Frozen account My11111111111111111111111111111111111111111 modified. Hash changed from 8wHcxDkjiwdrkPAsDnmNrF1UDGJFAtZzPQBSVweY3yRA to JdscGYB1uczVssmYuJusDD1Bfe6wpNeeho8XjcH8inN"
)]
fn test_frozen_account_data_modified() {
let frozen_pubkey =
Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let mut account = AccountSharedData::new(1, 42, &frozen_pubkey);
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
let ancestors = vec![(0, 0)].into_iter().collect();
db.freeze_accounts(&ancestors, &[frozen_pubkey]);
account.data_as_mut_slice()[0] = 42;
db.store_uncached(0, &[(&frozen_pubkey, &account)]);
}
#[test]
fn test_stored_readable_account() {
let lamports = 1;
let owner = Pubkey::new_unique();
let executable = true;
let rent_epoch = 2;
let meta = StoredMeta {
write_version: 5,
pubkey: Pubkey::new_unique(),
data_len: 7,
};
let account_meta = AccountMeta {
lamports,
owner,
executable,
rent_epoch,
};
let data = Vec::new();
let account = Account {
lamports,
owner,
executable,
rent_epoch,
data: data.clone(),
};
let offset = 99;
let stored_size = 101;
let hash = Hash::new_unique();
let stored_account = StoredAccountMeta {
meta: &meta,
account_meta: &account_meta,
data: &data,
offset,
stored_size,
hash: &hash,
};
assert!(accounts_equal(&account, &stored_account));
}
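// Illustrative sketch (not part of the original test suite): the same
// construction as above, but with a lamports mismatch, shows that
// accounts_equal distinguishes differing readable fields.
#[test]
fn test_stored_readable_account_lamports_mismatch() {
let owner = Pubkey::new_unique();
let meta = StoredMeta {
write_version: 1,
pubkey: Pubkey::new_unique(),
data_len: 0,
};
let account_meta = AccountMeta {
lamports: 1,
owner,
executable: false,
rent_epoch: 0,
};
let data = Vec::new();
let hash = Hash::default();
let stored_account = StoredAccountMeta {
meta: &meta,
account_meta: &account_meta,
data: &data,
offset: 0,
stored_size: 0,
hash: &hash,
};
// identical owner/data/flags but different lamports => not equal
let account = Account {
lamports: 2,
owner,
executable: false,
rent_epoch: 0,
data: data.clone(),
};
assert!(!accounts_equal(&account, &stored_account));
}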
#[test]
fn test_hash_stored_account() {
// This test uses an UNSAFE trick to detect most additions and deletions of
// account fields that are made without updating the hashing code
const ACCOUNT_DATA_LEN: usize = 3;
// the type of InputTuple elements must not contain references;
// they should be simple scalars or data blobs
type InputTuple = (
Slot,
StoredMeta,
AccountMeta,
[u8; ACCOUNT_DATA_LEN],
usize, // for StoredAccountMeta::offset
Hash,
);
const INPUT_LEN: usize = std::mem::size_of::<InputTuple>();
type InputBlob = [u8; INPUT_LEN];
let mut blob: InputBlob = [0u8; INPUT_LEN];
// spray memory with decreasing counts so that the data layout can be detected.
for (i, byte) in blob.iter_mut().enumerate() {
*byte = (INPUT_LEN - i) as u8;
}
//UNSAFE: forcibly cast the special byte pattern to actual account fields.
let (slot, meta, account_meta, data, offset, hash): InputTuple =
unsafe { std::mem::transmute::<InputBlob, InputTuple>(blob) };
let stored_account = StoredAccountMeta {
meta: &meta,
account_meta: &account_meta,
data: &data,
offset,
stored_size: CACHE_VIRTUAL_STORED_SIZE,
hash: &hash,
};
let account = stored_account.clone_account();
let expected_account_hash = if cfg!(debug_assertions) {
Hash::from_str("4StuvYHFd7xuShVXB94uHHvpqGMCaacdZnYB74QQkPA1").unwrap()
} else {
Hash::from_str("33ruy7m3Xto7irYfsBSN74aAzQwCQxsfoZxXuZy2Rra3").unwrap()
};
assert_eq!(
AccountsDb::hash_stored_account(slot, &stored_account),
expected_account_hash,
"StoredAccountMeta's data layout might be changed; update hashing if needed."
);
assert_eq!(
AccountsDb::hash_account(slot, &account, &stored_account.meta.pubkey),
expected_account_hash,
"Account-based hashing must be consistent with StoredAccountMeta-based one."
);
}
#[test]
fn test_bank_hash_stats() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let some_data_len = 5;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.store_uncached(some_slot, &[(&key, &account)]);
let mut account = db.load_without_fixed_root(&ancestors, &key).unwrap().0;
account.checked_sub_lamports(1).unwrap();
account.set_executable(true);
db.store_uncached(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
let bank_hashes = db.bank_hashes.read().unwrap();
let bank_hash = bank_hashes.get(&some_slot).unwrap();
assert_eq!(bank_hash.stats.num_updated_accounts, 1);
assert_eq!(bank_hash.stats.num_removed_accounts, 1);
assert_eq!(bank_hash.stats.num_lamports_stored, 1);
assert_eq!(bank_hash.stats.total_data_len, 2 * some_data_len as u64);
assert_eq!(bank_hash.stats.num_executable_accounts, 1);
}
#[test]
fn test_calculate_accounts_hash_check_hash_mismatch() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
// put wrong hash value in store so we get a mismatch
db.store_accounts_unfrozen(
some_slot,
&[(&key, &account)],
Some(&[&Hash::default()]),
false,
);
db.add_root(some_slot);
let check_hash = true;
assert!(db
.calculate_accounts_hash_helper(false, some_slot, &ancestors, check_hash, false)
.is_err());
assert!(db
.calculate_accounts_hash_helper(true, some_slot, &ancestors, check_hash, false)
.is_err());
}
#[test]
fn test_calculate_accounts_hash_check_hash() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.store_uncached(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
let check_hash = true;
assert_eq!(
db.calculate_accounts_hash_helper(false, some_slot, &ancestors, check_hash, false)
.unwrap(),
db.calculate_accounts_hash_helper(true, some_slot, &ancestors, check_hash, false)
.unwrap(),
);
}
#[test]
fn test_verify_bank_hash() {
use BankHashVerificationError::*;
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.store_uncached(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
db.update_accounts_hash_test(some_slot, &ancestors);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Ok(_)
);
db.bank_hashes.write().unwrap().remove(&some_slot).unwrap();
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Err(MissingBankHash)
);
let some_bank_hash = Hash::new(&[0xca; HASH_BYTES]);
let bank_hash_info = BankHashInfo {
hash: some_bank_hash,
snapshot_hash: Hash::new(&[0xca; HASH_BYTES]),
stats: BankHashStats::default(),
};
db.bank_hashes
.write()
.unwrap()
.insert(some_slot, bank_hash_info);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Err(MismatchedBankHash)
);
}
#[test]
fn test_verify_bank_capitalization() {
use BankHashVerificationError::*;
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = solana_sdk::pubkey::new_rand();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.store_uncached(some_slot, &[(&key, &account)]);
db.add_root(some_slot);
db.update_accounts_hash_test(some_slot, &ancestors);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Ok(_)
);
let native_account_pubkey = solana_sdk::pubkey::new_rand();
db.store_uncached(
some_slot,
&[(
&native_account_pubkey,
&solana_sdk::native_loader::create_loadable_account_for_test("foo"),
)],
);
db.update_accounts_hash_test(some_slot, &ancestors);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 2, true),
Ok(_)
);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 10, true),
Err(MismatchedTotalLamports(expected, actual)) if expected == 2 && actual == 10
);
}
#[test]
fn test_verify_bank_hash_no_account() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let some_slot: Slot = 0;
let ancestors = vec![(some_slot, 0)].into_iter().collect();
db.bank_hashes
.write()
.unwrap()
.insert(some_slot, BankHashInfo::default());
db.add_root(some_slot);
db.update_accounts_hash_test(some_slot, &ancestors);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 0, true),
Ok(_)
);
}
#[test]
fn test_verify_bank_hash_bad_account_hash() {
use BankHashVerificationError::*;
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let some_data_len = 0;
let some_slot: Slot = 0;
let account = AccountSharedData::new(1, some_data_len, &key);
let ancestors = vec![(some_slot, 0)].into_iter().collect();
let accounts = &[(&key, &account)];
// update AccountsDb's bank hash
{
let mut bank_hashes = db.bank_hashes.write().unwrap();
bank_hashes
.entry(some_slot)
.or_insert_with(BankHashInfo::default);
}
// provide bogus account hashes
let some_hash = Hash::new(&[0xca; HASH_BYTES]);
db.store_accounts_unfrozen(some_slot, accounts, Some(&[&some_hash]), false);
db.add_root(some_slot);
assert_matches!(
db.verify_bank_hash_and_lamports(some_slot, &ancestors, 1, true),
Err(MismatchedAccountHash)
);
}
#[test]
fn test_storage_finder() {
solana_logger::setup();
let db = AccountsDb::new_sized(Vec::new(), 16 * 1024);
let key = solana_sdk::pubkey::new_rand();
let lamports = 100;
let data_len = 8190;
let account = AccountSharedData::new(lamports, data_len, &solana_sdk::pubkey::new_rand());
// pre-populate with a smaller empty store
db.create_and_insert_store(1, 8192, "test_storage_finder");
db.store_uncached(1, &[(&key, &account)]);
}
#[test]
fn test_get_snapshot_storages_empty() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
assert!(db.get_snapshot_storages(0, None).0.is_empty());
}
#[test]
fn test_get_snapshot_storages_only_older_than_or_equal_to_snapshot_slot() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = AccountSharedData::new(1, 0, &key);
let before_slot = 0;
let base_slot = before_slot + 1;
let after_slot = base_slot + 1;
db.add_root(base_slot);
db.store_uncached(base_slot, &[(&key, &account)]);
assert!(db.get_snapshot_storages(before_slot, None).0.is_empty());
assert_eq!(1, db.get_snapshot_storages(base_slot, None).0.len());
assert_eq!(1, db.get_snapshot_storages(after_slot, None).0.len());
}
#[test]
fn test_get_snapshot_storages_only_non_empty() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = AccountSharedData::new(1, 0, &key);
let base_slot = 0;
let after_slot = base_slot + 1;
db.store_uncached(base_slot, &[(&key, &account)]);
db.storage
.get_slot_stores(base_slot)
.unwrap()
.write()
.unwrap()
.clear();
db.add_root(base_slot);
assert!(db.get_snapshot_storages(after_slot, None).0.is_empty());
db.store_uncached(base_slot, &[(&key, &account)]);
assert_eq!(1, db.get_snapshot_storages(after_slot, None).0.len());
}
#[test]
fn test_get_snapshot_storages_only_roots() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = AccountSharedData::new(1, 0, &key);
let base_slot = 0;
let after_slot = base_slot + 1;
db.store_uncached(base_slot, &[(&key, &account)]);
assert!(db.get_snapshot_storages(after_slot, None).0.is_empty());
db.add_root(base_slot);
assert_eq!(1, db.get_snapshot_storages(after_slot, None).0.len());
}
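// Illustrative sketch (not part of the original test suite): every rooted,
// non-empty slot at or below the requested slot contributes one entry to the
// snapshot storages, extending the single-slot cases above.
#[test]
fn test_get_snapshot_storages_multiple_rooted_slots() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = AccountSharedData::new(1, 0, &key);
for slot in 0..3 {
db.store_uncached(slot, &[(&key, &account)]);
db.add_root(slot);
}
// slots 0, 1 and 2 are all rooted and non-empty
assert_eq!(3, db.get_snapshot_storages(2, None).0.len());
}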
#[test]
fn test_get_snapshot_storages_exclude_empty() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let key = Pubkey::default();
let account = AccountSharedData::new(1, 0, &key);
let base_slot = 0;
let after_slot = base_slot + 1;
db.store_uncached(base_slot, &[(&key, &account)]);
db.add_root(base_slot);
assert_eq!(1, db.get_snapshot_storages(after_slot, None).0.len());
db.storage
.get_slot_stores(0)
.unwrap()
.read()
.unwrap()
.values()
.next()
.unwrap()
.remove_account(0, true);
assert!(db.get_snapshot_storages(after_slot, None).0.is_empty());
}
#[test]
#[should_panic(expected = "double remove of account in slot: 0/store: 0!!")]
fn test_storage_remove_account_double_remove() {
let accounts = AccountsDb::new(Vec::new(), &ClusterType::Development);
let pubkey = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
accounts.store_uncached(0, &[(&pubkey, &account)]);
let storage_entry = accounts
.storage
.get_slot_stores(0)
.unwrap()
.read()
.unwrap()
.values()
.next()
.unwrap()
.clone();
storage_entry.remove_account(0, true);
storage_entry.remove_account(0, true);
}
#[test]
fn test_accounts_purge_long_chained_after_snapshot_restore() {
solana_logger::setup();
let old_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(old_lamport, no_data, &owner);
let account2 = AccountSharedData::new(old_lamport + 100_001, no_data, &owner);
let account3 = AccountSharedData::new(old_lamport + 100_002, no_data, &owner);
let dummy_account = AccountSharedData::new(99_999_999, no_data, &owner);
let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
let pubkey = solana_sdk::pubkey::new_rand();
let dummy_pubkey = solana_sdk::pubkey::new_rand();
let purged_pubkey1 = solana_sdk::pubkey::new_rand();
let purged_pubkey2 = solana_sdk::pubkey::new_rand();
let mut current_slot = 0;
let accounts = AccountsDb::new_single();
// create intermediate updates to purged_pubkey1 so that
// generate_index must add the slots as roots all at once at the end
current_slot += 1;
accounts.store_uncached(current_slot, &[(&pubkey, &account)]);
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &account2)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey1, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&purged_pubkey2, &account3)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&purged_pubkey2, &zero_lamport_account)]);
accounts.add_root(current_slot);
current_slot += 1;
accounts.store_uncached(current_slot, &[(&dummy_pubkey, &dummy_account)]);
accounts.add_root(current_slot);
accounts.print_count_and_status("before reconstruct");
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.print_count_and_status("before purge zero");
accounts.clean_accounts(None, false);
accounts.print_count_and_status("after purge zero");
assert_load_account(&accounts, current_slot, pubkey, old_lamport);
assert_load_account(&accounts, current_slot, purged_pubkey1, 0);
assert_load_account(&accounts, current_slot, purged_pubkey2, 0);
}
fn do_full_clean_refcount(store1_first: bool, store_size: u64) {
let pubkey1 = Pubkey::from_str("My11111111111111111111111111111111111111111").unwrap();
let pubkey2 = Pubkey::from_str("My22211111111111111111111111111111111111111").unwrap();
let pubkey3 = Pubkey::from_str("My33311111111111111111111111111111111111111").unwrap();
let old_lamport = 223;
let zero_lamport = 0;
let dummy_lamport = 999_999;
// size data so only 1 fits in a 4k store
let data_size = 2200;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(old_lamport, data_size, &owner);
let account2 = AccountSharedData::new(old_lamport + 100_001, data_size, &owner);
let account3 = AccountSharedData::new(old_lamport + 100_002, data_size, &owner);
let account4 = AccountSharedData::new(dummy_lamport, data_size, &owner);
let zero_lamport_account = AccountSharedData::new(zero_lamport, data_size, &owner);
let mut current_slot = 0;
let accounts = AccountsDb::new_sized_no_extra_stores(Vec::new(), store_size);
// A: Initialize AccountsDb with pubkey1 and pubkey2
current_slot += 1;
if store1_first {
accounts.store_uncached(current_slot, &[(&pubkey1, &account)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &account)]);
} else {
accounts.store_uncached(current_slot, &[(&pubkey2, &account)]);
accounts.store_uncached(current_slot, &[(&pubkey1, &account)]);
}
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
info!("post A");
accounts.print_accounts_stats("Post-A");
// B: Test multiple updates to pubkey1 in a single slot/storage
current_slot += 1;
assert_eq!(0, accounts.alive_account_count_in_slot(current_slot));
assert_eq!(1, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &account2)]);
accounts.store_uncached(current_slot, &[(&pubkey1, &account2)]);
assert_eq!(1, accounts.alive_account_count_in_slot(current_slot));
// Stores to same pubkey, same slot only count once towards the
// ref count
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.print_accounts_stats("Post-B pre-clean");
accounts.clean_accounts(None, false);
info!("post B");
accounts.print_accounts_stats("Post-B");
// C: more updates to trigger clean of previous updates
current_slot += 1;
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &account3)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &account3)]);
accounts.store_uncached(current_slot, &[(&pubkey3, &account4)]);
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
info!("post C");
accounts.print_accounts_stats("Post-C");
// D: Make all keys 0-lamport, cleans all keys
current_slot += 1;
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &zero_lamport_account)]);
accounts.store_uncached(current_slot, &[(&pubkey3, &zero_lamport_account)]);
let snapshot_stores = accounts.get_snapshot_storages(current_slot, None).0;
let total_accounts: usize = snapshot_stores
.iter()
.flatten()
.map(|s| s.all_accounts().len())
.sum();
assert!(!snapshot_stores.is_empty());
assert!(total_accounts > 0);
info!("post D");
accounts.print_accounts_stats("Post-D");
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.clean_accounts(None, false);
accounts.print_accounts_stats("Post-D clean");
let total_accounts_post_clean: usize = snapshot_stores
.iter()
.flatten()
.map(|s| s.all_accounts().len())
.sum();
assert_eq!(total_accounts, total_accounts_post_clean);
// should clean all 3 pubkeys
assert_eq!(accounts.ref_count_for_pubkey(&pubkey1), 0);
assert_eq!(accounts.ref_count_for_pubkey(&pubkey2), 0);
assert_eq!(accounts.ref_count_for_pubkey(&pubkey3), 0);
}
#[test]
fn test_full_clean_refcount() {
solana_logger::setup();
// Set up 3 scenarios which try to differentiate between pubkey1 being in an
// available slot or a full slot, which would cause different reset behavior
// when pubkey1 is cleaned and therefore cause the ref count to be incorrect,
// preventing removal of that key.
//
// do stores with a 4mb size so only 1 store is created per slot
do_full_clean_refcount(false, 4 * 1024 * 1024);
// do stores with a 4k size; store1_first == false stores pubkey1 second
do_full_clean_refcount(false, 4096);
// do stores with a 4k size; store1_first == true stores pubkey1 first
do_full_clean_refcount(true, 4096);
}
#[test]
fn test_accounts_clean_after_snapshot_restore_then_old_revives() {
solana_logger::setup();
let old_lamport = 223;
let zero_lamport = 0;
let no_data = 0;
let dummy_lamport = 999_999;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(old_lamport, no_data, &owner);
let account2 = AccountSharedData::new(old_lamport + 100_001, no_data, &owner);
let account3 = AccountSharedData::new(old_lamport + 100_002, no_data, &owner);
let dummy_account = AccountSharedData::new(dummy_lamport, no_data, &owner);
let zero_lamport_account = AccountSharedData::new(zero_lamport, no_data, &owner);
let pubkey1 = solana_sdk::pubkey::new_rand();
let pubkey2 = solana_sdk::pubkey::new_rand();
let dummy_pubkey = solana_sdk::pubkey::new_rand();
let mut current_slot = 0;
let accounts = AccountsDb::new_single();
// A: Initialize AccountsDb with pubkey1 and pubkey2
current_slot += 1;
accounts.store_uncached(current_slot, &[(&pubkey1, &account)]);
accounts.store_uncached(current_slot, &[(&pubkey2, &account)]);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// B: Test multiple updates to pubkey1 in a single slot/storage
current_slot += 1;
assert_eq!(0, accounts.alive_account_count_in_slot(current_slot));
assert_eq!(1, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &account2)]);
accounts.store_uncached(current_slot, &[(&pubkey1, &account2)]);
assert_eq!(1, accounts.alive_account_count_in_slot(current_slot));
// Stores to same pubkey, same slot only count once towards the
// ref count
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// C: Yet another update to trigger lazy clean of step A
current_slot += 1;
assert_eq!(2, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &account3)]);
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// D: Make pubkey1 0-lamport; also triggers clean of step B
current_slot += 1;
assert_eq!(3, accounts.ref_count_for_pubkey(&pubkey1));
accounts.store_uncached(current_slot, &[(&pubkey1, &zero_lamport_account)]);
accounts.clean_accounts(None, false);
assert_eq!(
// Removed one reference from the dead slot (reference only counted once
// even though there were two stores to the pubkey in that slot)
3, /* == 3 - 1 + 1 */
accounts.ref_count_for_pubkey(&pubkey1)
);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// E: Avoid missing bank hash error
current_slot += 1;
accounts.store_uncached(current_slot, &[(&dummy_pubkey, &dummy_account)]);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
assert_load_account(&accounts, current_slot, pubkey1, zero_lamport);
assert_load_account(&accounts, current_slot, pubkey2, old_lamport);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
// At this point, there are no index entries for steps A and B.
// If the stores from steps C and D were purged, a snapshot restore would
// revive pubkey1 with its state from step A.
// The ref counts prevent that from happening.
accounts.clean_accounts(None, false);
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts.clean_accounts(None, false);
info!("pubkey: {}", pubkey1);
accounts.print_accounts_stats("pre_clean");
assert_load_account(&accounts, current_slot, pubkey1, zero_lamport);
assert_load_account(&accounts, current_slot, pubkey2, old_lamport);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
// F: Finally, make Step A cleanable
current_slot += 1;
accounts.store_uncached(current_slot, &[(&pubkey2, &account)]);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
// Do clean
accounts.clean_accounts(None, false);
// 2nd clean needed to clean up pubkey1
accounts.clean_accounts(None, false);
// Ensure pubkey1 is finally cleaned from the index, while pubkey2 and
// dummy_pubkey remain loadable
assert_not_load_account(&accounts, current_slot, pubkey1);
assert_load_account(&accounts, current_slot, pubkey2, old_lamport);
assert_load_account(&accounts, current_slot, dummy_pubkey, dummy_lamport);
}
#[test]
fn test_clean_stored_dead_slots_empty() {
let accounts = AccountsDb::new_single();
let mut dead_slots = HashSet::new();
dead_slots.insert(10);
accounts.clean_stored_dead_slots(&dead_slots, None);
}
#[test]
fn test_shrink_all_slots_none() {
for startup in &[false, true] {
let accounts = AccountsDb::new_single();
for _ in 0..10 {
accounts.shrink_candidate_slots();
}
accounts.shrink_all_slots(*startup);
}
}
#[test]
fn test_shrink_next_slots() {
let mut accounts = AccountsDb::new_single();
accounts.caching_enabled = false;
let mut current_slot = 7;
assert_eq!(
vec![None, None, None],
(0..3)
.map(|_| accounts.next_shrink_slot_v1())
.collect::<Vec<_>>()
);
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
assert_eq!(
vec![Some(7), Some(7), Some(7)],
(0..3)
.map(|_| accounts.next_shrink_slot_v1())
.collect::<Vec<_>>()
);
current_slot += 1;
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
let slots = (0..6)
.map(|_| accounts.next_shrink_slot_v1())
.collect::<Vec<_>>();
// Because the origin of this data is HashMap (not BTreeMap), key order is arbitrary per cycle.
assert!(
vec![Some(7), Some(8), Some(7), Some(8), Some(7), Some(8)] == slots
|| vec![Some(8), Some(7), Some(8), Some(7), Some(8), Some(7)] == slots
);
}
#[test]
fn test_shrink_reset_uncleaned_roots() {
let mut accounts = AccountsDb::new_single();
accounts.caching_enabled = false;
accounts.reset_uncleaned_roots_v1();
assert_eq!(
*accounts.shrink_candidate_slots_v1.lock().unwrap(),
vec![] as Vec<Slot>
);
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
accounts.get_accounts_delta_hash(2);
accounts.add_root(2);
accounts.reset_uncleaned_roots_v1();
let actual_slots = accounts.shrink_candidate_slots_v1.lock().unwrap().clone();
assert_eq!(actual_slots, vec![] as Vec<Slot>);
accounts.reset_uncleaned_roots_v1();
let mut actual_slots = accounts.shrink_candidate_slots_v1.lock().unwrap().clone();
actual_slots.sort_unstable();
assert_eq!(actual_slots, vec![0, 1, 2]);
accounts.accounts_index.clear_roots();
let mut actual_slots = (0..5)
.map(|_| accounts.next_shrink_slot_v1())
.collect::<Vec<_>>();
actual_slots.sort();
assert_eq!(actual_slots, vec![None, None, Some(0), Some(1), Some(2)],);
}
#[test]
fn test_shrink_stale_slots_processed() {
solana_logger::setup();
for startup in &[false, true] {
let accounts = AccountsDb::new_single();
let pubkey_count = 100;
let pubkeys: Vec<_> = (0..pubkey_count)
.map(|_| solana_sdk::pubkey::new_rand())
.collect();
let some_lamport = 223;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let mut current_slot = 0;
current_slot += 1;
for pubkey in &pubkeys {
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
}
let shrink_slot = current_slot;
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
current_slot += 1;
let pubkey_count_after_shrink = 10;
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
for pubkey in updated_pubkeys {
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
}
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.clean_accounts(None, false);
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
accounts.shrink_all_slots(*startup);
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
let no_ancestors = Ancestors::default();
accounts.update_accounts_hash(current_slot, &no_ancestors);
accounts
.verify_bank_hash_and_lamports(current_slot, &no_ancestors, 22300, true)
.unwrap();
let accounts = reconstruct_accounts_db_via_serialization(&accounts, current_slot);
accounts
.verify_bank_hash_and_lamports(current_slot, &no_ancestors, 22300, true)
.unwrap();
// repeating should be a no-op
accounts.shrink_all_slots(*startup);
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
}
}
#[test]
fn test_shrink_candidate_slots() {
solana_logger::setup();
let accounts = AccountsDb::new_single();
let pubkey_count = 30000;
let pubkeys: Vec<_> = (0..pubkey_count)
.map(|_| solana_sdk::pubkey::new_rand())
.collect();
let some_lamport = 223;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let mut current_slot = 0;
current_slot += 1;
for pubkey in &pubkeys {
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
}
let shrink_slot = current_slot;
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
current_slot += 1;
let pubkey_count_after_shrink = 25000;
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
for pubkey in updated_pubkeys {
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
}
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.clean_accounts(None, false);
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
// Only try to shrink candidate slots; nothing happens because the alive
// ratio (25000 / 30000) is not small enough to trigger a shrink
accounts.shrink_candidate_slots();
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
// Now, do full-shrink.
accounts.shrink_all_slots(false);
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
}
#[test]
fn test_select_candidates_by_total_usage() {
solana_logger::setup();
// case 1: no candidates
let accounts = AccountsDb::new_single();
let mut candidates: ShrinkCandidates = HashMap::new();
let output_candidates =
accounts.select_candidates_by_total_usage(&candidates, DEFAULT_ACCOUNTS_SHRINK_RATIO);
assert_eq!(0, output_candidates.len());
// case 2: two candidates, only one selected
let dummy_path = Path::new("");
let dummy_slot = 12;
let dummy_size = 2 * PAGE_SIZE;
let dummy_id1 = 22;
let entry1 = Arc::new(AccountStorageEntry::new(
dummy_path, dummy_slot, dummy_id1, dummy_size,
));
entry1.alive_bytes.store(8000, Ordering::Relaxed);
candidates
.entry(dummy_slot)
.or_default()
.insert(entry1.append_vec_id(), entry1.clone());
let dummy_id2 = 44;
let entry2 = Arc::new(AccountStorageEntry::new(
dummy_path, dummy_slot, dummy_id2, dummy_size,
));
entry2.alive_bytes.store(3000, Ordering::Relaxed);
candidates
.entry(dummy_slot)
.or_default()
.insert(entry2.append_vec_id(), entry2.clone());
let output_candidates =
accounts.select_candidates_by_total_usage(&candidates, DEFAULT_ACCOUNTS_SHRINK_RATIO);
assert_eq!(1, output_candidates.len());
assert_eq!(1, output_candidates[&dummy_slot].len());
assert!(output_candidates[&dummy_slot].contains(&entry2.append_vec_id()));
// case 3: two candidates, both are selected
candidates.clear();
let dummy_size = 4 * PAGE_SIZE;
let dummy_id1 = 22;
let entry1 = Arc::new(AccountStorageEntry::new(
dummy_path, dummy_slot, dummy_id1, dummy_size,
));
entry1.alive_bytes.store(3500, Ordering::Relaxed);
candidates
.entry(dummy_slot)
.or_default()
.insert(entry1.append_vec_id(), entry1.clone());
let dummy_id2 = 44;
let dummy_slot2 = 44;
let entry2 = Arc::new(AccountStorageEntry::new(
dummy_path,
dummy_slot2,
dummy_id2,
dummy_size,
));
entry2.alive_bytes.store(3000, Ordering::Relaxed);
candidates
.entry(dummy_slot2)
.or_default()
.insert(entry2.append_vec_id(), entry2.clone());
let output_candidates =
accounts.select_candidates_by_total_usage(&candidates, DEFAULT_ACCOUNTS_SHRINK_RATIO);
assert_eq!(2, output_candidates.len());
assert_eq!(1, output_candidates[&dummy_slot].len());
assert_eq!(1, output_candidates[&dummy_slot2].len());
assert!(output_candidates[&dummy_slot].contains(&entry1.append_vec_id()));
assert!(output_candidates[&dummy_slot2].contains(&entry2.append_vec_id()));
}
#[test]
fn test_shrink_stale_slots_skipped() {
solana_logger::setup();
let mut accounts = AccountsDb::new_single();
accounts.caching_enabled = false;
let pubkey_count = 30000;
let pubkeys: Vec<_> = (0..pubkey_count)
.map(|_| solana_sdk::pubkey::new_rand())
.collect();
let some_lamport = 223;
let no_data = 0;
let owner = *AccountSharedData::default().owner();
let account = AccountSharedData::new(some_lamport, no_data, &owner);
let mut current_slot = 0;
current_slot += 1;
for pubkey in &pubkeys {
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
}
let shrink_slot = current_slot;
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
current_slot += 1;
let pubkey_count_after_shrink = 25000;
let updated_pubkeys = &pubkeys[0..pubkey_count - pubkey_count_after_shrink];
for pubkey in updated_pubkeys {
accounts.store_uncached(current_slot, &[(pubkey, &account)]);
}
accounts.get_accounts_delta_hash(current_slot);
accounts.add_root(current_slot);
accounts.clean_accounts(None, false);
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
// Only try to shrink stale slots; nothing is shrunk here.
accounts.shrink_all_stale_slots_v1();
assert_eq!(
pubkey_count,
accounts.all_account_count_in_append_vec(shrink_slot)
);
// Now, do full-shrink.
accounts.shrink_all_slots(false);
assert_eq!(
pubkey_count_after_shrink,
accounts.all_account_count_in_append_vec(shrink_slot)
);
}
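// Builds an index with three keys spread across store ids 0..=3 and checks that
// calc_delete_dependencies() keeps the counts for stores 0, 1, and 2 at or above 1.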
#[test]
fn test_delete_dependencies() {
solana_logger::setup();
let accounts_index = AccountsIndex::default();
let key0 = Pubkey::new_from_array([0u8; 32]);
let key1 = Pubkey::new_from_array([1u8; 32]);
let key2 = Pubkey::new_from_array([2u8; 32]);
let info0 = AccountInfo {
store_id: 0,
offset: 0,
stored_size: 0,
lamports: 0,
};
let info1 = AccountInfo {
store_id: 1,
offset: 0,
stored_size: 0,
lamports: 0,
};
let info2 = AccountInfo {
store_id: 2,
offset: 0,
stored_size: 0,
lamports: 0,
};
let info3 = AccountInfo {
store_id: 3,
offset: 0,
stored_size: 0,
lamports: 0,
};
let mut reclaims = vec![];
accounts_index.upsert(
0,
&key0,
&Pubkey::default(),
&[],
&AccountSecondaryIndexes::default(),
info0,
&mut reclaims,
);
accounts_index.upsert(
1,
&key0,
&Pubkey::default(),
&[],
&AccountSecondaryIndexes::default(),
info1.clone(),
&mut reclaims,
);
accounts_index.upsert(
1,
&key1,
&Pubkey::default(),
&[],
&AccountSecondaryIndexes::default(),
info1,
&mut reclaims,
);
accounts_index.upsert(
2,
&key1,
&Pubkey::default(),
&[],
&AccountSecondaryIndexes::default(),
info2.clone(),
&mut reclaims,
);
accounts_index.upsert(
2,
&key2,
&Pubkey::default(),
&[],
&AccountSecondaryIndexes::default(),
info2,
&mut reclaims,
);
accounts_index.upsert(
3,
&key2,
&Pubkey::default(),
&[],
&AccountSecondaryIndexes::default(),
info3,
&mut reclaims,
);
accounts_index.add_root(0, false);
accounts_index.add_root(1, false);
accounts_index.add_root(2, false);
accounts_index.add_root(3, false);
let mut purges = HashMap::new();
let (key0_entry, _) = accounts_index.get(&key0, None, None).unwrap();
purges.insert(key0, accounts_index.roots_and_ref_count(&key0_entry, None));
let (key1_entry, _) = accounts_index.get(&key1, None, None).unwrap();
purges.insert(key1, accounts_index.roots_and_ref_count(&key1_entry, None));
let (key2_entry, _) = accounts_index.get(&key2, None, None).unwrap();
purges.insert(key2, accounts_index.roots_and_ref_count(&key2_entry, None));
for (key, (list, ref_count)) in &purges {
info!(" purge {} ref_count {} =>", key, ref_count);
for x in list {
info!(" {:?}", x);
}
}
let mut store_counts = HashMap::new();
store_counts.insert(0, (0, HashSet::from_iter(vec![key0])));
store_counts.insert(1, (0, HashSet::from_iter(vec![key0, key1])));
store_counts.insert(2, (0, HashSet::from_iter(vec![key1, key2])));
store_counts.insert(3, (1, HashSet::from_iter(vec![key2])));
AccountsDb::calc_delete_dependencies(&purges, &mut store_counts);
let mut stores: Vec<_> = store_counts.keys().cloned().collect();
stores.sort_unstable();
for store in &stores {
info!(
"store: {:?} : {:?}",
store,
store_counts.get(store).unwrap()
);
}
for x in 0..3 {
assert!(store_counts[&x].0 >= 1);
}
}
#[test]
fn test_account_balance_for_capitalization_sysvar() {
let normal_sysvar = solana_sdk::account::create_account_for_test(
&solana_sdk::slot_history::SlotHistory::default(),
);
assert_eq!(normal_sysvar.lamports(), 1);
}
#[test]
fn test_account_balance_for_capitalization_native_program() {
let normal_native_program =
solana_sdk::native_loader::create_loadable_account_for_test("foo");
assert_eq!(normal_native_program.lamports(), 1);
}
#[test]
fn test_checked_sum_for_capitalization_normal() {
assert_eq!(
AccountsDb::checked_sum_for_capitalization(vec![1, 2].into_iter()),
3
);
}
#[test]
#[should_panic(expected = "overflow is detected while summing capitalization")]
fn test_checked_sum_for_capitalization_overflow() {
assert_eq!(
AccountsDb::checked_sum_for_capitalization(vec![1, u64::max_value()].into_iter()),
3
);
}
#[test]
fn test_store_overhead() {
solana_logger::setup();
let accounts = AccountsDb::new_single();
let account = AccountSharedData::default();
let pubkey = solana_sdk::pubkey::new_rand();
accounts.store_uncached(0, &[(&pubkey, &account)]);
let slot_stores = accounts.storage.get_slot_stores(0).unwrap();
let mut total_len = 0;
for (_id, store) in slot_stores.read().unwrap().iter() {
total_len += store.accounts.len();
}
info!("total: {}", total_len);
assert!(total_len < STORE_META_OVERHEAD);
}
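// After clean -> shrink -> clean, every entry for pubkey1 is gone and its ref count drops to zero.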
#[test]
fn test_store_clean_after_shrink() {
solana_logger::setup();
let accounts = AccountsDb::new_with_config(
vec![],
&ClusterType::Development,
AccountSecondaryIndexes::default(),
true,
AccountShrinkThreshold::default(),
);
let account = AccountSharedData::new(1, 16 * 4096, &Pubkey::default());
let pubkey1 = solana_sdk::pubkey::new_rand();
accounts.store_cached(0, &[(&pubkey1, &account)]);
let pubkey2 = solana_sdk::pubkey::new_rand();
accounts.store_cached(0, &[(&pubkey2, &account)]);
let zero_account = AccountSharedData::new(0, 1, &Pubkey::default());
accounts.store_cached(1, &[(&pubkey1, &zero_account)]);
// Add root 0 and flush separately
accounts.get_accounts_delta_hash(0);
accounts.add_root(0);
accounts.flush_accounts_cache(true, None);
// clear out the dirty keys
accounts.clean_accounts(None, false);
// flush 1
accounts.get_accounts_delta_hash(1);
accounts.add_root(1);
accounts.flush_accounts_cache(true, None);
accounts.print_accounts_stats("pre-clean");
// clean to remove pubkey1 from 0,
// shrink to shrink pubkey1 from 0
// then another clean to remove pubkey1 from slot 1
accounts.clean_accounts(None, false);
accounts.shrink_candidate_slots();
accounts.clean_accounts(None, false);
accounts.print_accounts_stats("post-clean");
assert_eq!(accounts.accounts_index.ref_count_from_storage(&pubkey1), 0);
}
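// Shrinking and cleaning should leave recycled append vecs behind, which later stores reuse;
// the reused accounts must still load correctly.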
#[test]
fn test_store_reuse() {
solana_logger::setup();
let accounts = AccountsDb::new_sized(vec![], 4096);
let size = 100;
let num_accounts: usize = 100;
let mut keys = Vec::new();
for i in 0..num_accounts {
let account = AccountSharedData::new((i + 1) as u64, size, &Pubkey::default());
let pubkey = solana_sdk::pubkey::new_rand();
accounts.store_uncached(0, &[(&pubkey, &account)]);
keys.push(pubkey);
}
accounts.add_root(0);
for (i, key) in keys[1..].iter().enumerate() {
let account =
AccountSharedData::new((1 + i + num_accounts) as u64, size, &Pubkey::default());
accounts.store_uncached(1, &[(key, &account)]);
}
accounts.add_root(1);
accounts.clean_accounts(None, false);
accounts.shrink_all_slots(false);
// Clean again to flush the dirty stores
// and allow them to be recycled in the next step
accounts.clean_accounts(None, false);
accounts.print_accounts_stats("post-shrink");
let num_stores = accounts.recycle_stores.read().unwrap().entry_count();
assert!(num_stores > 0);
let mut account_refs = Vec::new();
let num_to_store = 20;
for (i, key) in keys[..num_to_store].iter().enumerate() {
let account = AccountSharedData::new(
(1 + i + 2 * num_accounts) as u64,
i + 20,
&Pubkey::default(),
);
accounts.store_uncached(2, &[(key, &account)]);
account_refs.push(account);
}
assert!(accounts.recycle_stores.read().unwrap().entry_count() < num_stores);
accounts.print_accounts_stats("post-store");
let mut ancestors = Ancestors::default();
ancestors.insert(1, 0);
ancestors.insert(2, 1);
for (key, account_ref) in keys[..num_to_store].iter().zip(account_refs) {
assert_eq!(
accounts.load_without_fixed_root(&ancestors, key).unwrap().0,
account_ref
);
}
}
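// Cleaning with max_clean_root = 0 must not purge the zero-lamport account rooted in slot 1.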
#[test]
fn test_zero_lamport_new_root_not_cleaned() {
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let account_key = Pubkey::new_unique();
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store zero lamport account into slots 0 and 1, root both slots
db.store_uncached(0, &[(&account_key, &zero_lamport_account)]);
db.store_uncached(1, &[(&account_key, &zero_lamport_account)]);
db.get_accounts_delta_hash(0);
db.add_root(0);
db.get_accounts_delta_hash(1);
db.add_root(1);
// Only clean zero lamport accounts up to slot 0
db.clean_accounts(Some(0), false);
// Should still be able to find zero lamport account in slot 1
assert_eq!(
db.load_without_fixed_root(&Ancestors::default(), &account_key),
Some((zero_lamport_account, 1))
);
}
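// A cached store is only visible through matching ancestors until its slot is rooted.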
#[test]
fn test_store_load_cached() {
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
let slot = 0;
db.store_cached(slot, &[(&key, &account0)]);
// Load with no ancestors and no root will return nothing
assert!(db
.load_without_fixed_root(&Ancestors::default(), &key)
.is_none());
// Load with ancestors not equal to `slot` will return nothing
let ancestors = vec![(slot + 1, 1)].into_iter().collect();
assert!(db.load_without_fixed_root(&ancestors, &key).is_none());
// Load with ancestors equal to `slot` will return the account
let ancestors = vec![(slot, 1)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &key),
Some((account0.clone(), slot))
);
// Adding root will return the account even without ancestors
db.add_root(slot);
assert_eq!(
db.load_without_fixed_root(&Ancestors::default(), &key),
Some((account0, slot))
);
}
#[test]
fn test_store_flush_load_cached() {
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let key = Pubkey::default();
let account0 = AccountSharedData::new(1, 0, &key);
let slot = 0;
db.store_cached(slot, &[(&key, &account0)]);
db.mark_slot_frozen(slot);
// No root was added yet, requires an ancestor to find
// the account
db.flush_accounts_cache(true, None);
let ancestors = vec![(slot, 1)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &key),
Some((account0.clone(), slot))
);
// Add root then flush
db.add_root(slot);
db.flush_accounts_cache(true, None);
assert_eq!(
db.load_without_fixed_root(&Ancestors::default(), &key),
Some((account0, slot))
);
}
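// A forced cache flush writes the rooted slots to storage while the unrooted slot stays cached.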
#[test]
fn test_flush_accounts_cache() {
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let account0 = AccountSharedData::new(1, 0, &Pubkey::default());
let unrooted_slot = 4;
let root5 = 5;
let root6 = 6;
let unrooted_key = solana_sdk::pubkey::new_rand();
let key5 = solana_sdk::pubkey::new_rand();
let key6 = solana_sdk::pubkey::new_rand();
db.store_cached(unrooted_slot, &[(&unrooted_key, &account0)]);
db.store_cached(root5, &[(&key5, &account0)]);
db.store_cached(root6, &[(&key6, &account0)]);
for slot in &[unrooted_slot, root5, root6] {
db.mark_slot_frozen(*slot);
}
db.add_root(root5);
db.add_root(root6);
// Unrooted slot should be able to be fetched before the flush
let ancestors = vec![(unrooted_slot, 1)].into_iter().collect();
assert_eq!(
db.load_without_fixed_root(&ancestors, &unrooted_key),
Some((account0.clone(), unrooted_slot))
);
db.flush_accounts_cache(true, None);
// After the flush, the unrooted slot is still in the cache
assert!(db
.load_without_fixed_root(&ancestors, &unrooted_key)
.is_some());
assert!(db
.accounts_index
.get_account_read_entry(&unrooted_key)
.is_some());
assert_eq!(db.accounts_cache.num_slots(), 1);
assert!(db.accounts_cache.slot_cache(unrooted_slot).is_some());
assert_eq!(
db.load_without_fixed_root(&Ancestors::default(), &key5),
Some((account0.clone(), root5))
);
assert_eq!(
db.load_without_fixed_root(&Ancestors::default(), &key6),
Some((account0, root6))
);
}
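// Exercises the non-forced flush for various mixes of rooted and unrooted slot counts
// around MAX_CACHE_SLOTS.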
#[test]
fn test_flush_accounts_cache_if_needed() {
run_test_flush_accounts_cache_if_needed(0, 2 * MAX_CACHE_SLOTS);
run_test_flush_accounts_cache_if_needed(2 * MAX_CACHE_SLOTS, 0);
run_test_flush_accounts_cache_if_needed(MAX_CACHE_SLOTS - 1, 0);
run_test_flush_accounts_cache_if_needed(0, MAX_CACHE_SLOTS - 1);
run_test_flush_accounts_cache_if_needed(MAX_CACHE_SLOTS, 0);
run_test_flush_accounts_cache_if_needed(0, MAX_CACHE_SLOTS);
run_test_flush_accounts_cache_if_needed(2 * MAX_CACHE_SLOTS, 2 * MAX_CACHE_SLOTS);
run_test_flush_accounts_cache_if_needed(MAX_CACHE_SLOTS - 1, MAX_CACHE_SLOTS - 1);
run_test_flush_accounts_cache_if_needed(MAX_CACHE_SLOTS, MAX_CACHE_SLOTS);
}
fn run_test_flush_accounts_cache_if_needed(num_roots: usize, num_unrooted: usize) {
let mut db = AccountsDb::new(Vec::new(), &ClusterType::Development);
db.caching_enabled = true;
let account0 = AccountSharedData::new(1, 0, &Pubkey::default());
let mut keys = vec![];
let num_slots = 2 * MAX_CACHE_SLOTS;
for i in 0..num_roots + num_unrooted {
let key = Pubkey::new_unique();
db.store_cached(i as Slot, &[(&key, &account0)]);
keys.push(key);
db.mark_slot_frozen(i as Slot);
if i < num_roots {
db.add_root(i as Slot);
}
}
db.flush_accounts_cache(false, None);
let total_slots = num_roots + num_unrooted;
// If there's <= the max size, then nothing will be flushed from the cache
if total_slots <= MAX_CACHE_SLOTS {
assert_eq!(db.accounts_cache.num_slots(), total_slots);
} else {
// Otherwise, all the roots are flushed, and only at most MAX_CACHE_SLOTS
// of the unrooted slots are kept in the cache
let expected_size = std::cmp::min(num_unrooted, MAX_CACHE_SLOTS);
if expected_size > 0 {
for unrooted_slot in total_slots - expected_size..total_slots {
assert!(db
.accounts_cache
.slot_cache(unrooted_slot as Slot)
.is_some());
}
}
}
// Should still be able to fetch all the accounts after flush
for (slot, key) in (0..num_slots as Slot).zip(keys) {
let ancestors = if slot < num_roots as Slot {
Ancestors::default()
} else {
vec![(slot, 1)].into_iter().collect()
};
assert_eq!(
db.load_without_fixed_root(&ancestors, &key),
Some((account0.clone(), slot))
);
}
}
fn slot_stores(db: &AccountsDb, slot: Slot) -> Vec<Arc<AccountStorageEntry>> {
db.storage
.get_slot_storage_entries(slot)
.unwrap_or_default()
}
#[test]
fn test_read_only_accounts_cache() {
let caching_enabled = true;
let db = Arc::new(AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
));
let account_key = Pubkey::new_unique();
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
db.store_cached(0, &[(&account_key, &zero_lamport_account)]);
db.store_cached(1, &[(&account_key, &slot1_account)]);
db.add_root(0);
db.add_root(1);
db.clean_accounts(None, false);
db.flush_accounts_cache(true, None);
db.clean_accounts(None, false);
db.add_root(2);
assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
let account = db
.load_with_fixed_root(&Ancestors::default(), &account_key)
.map(|(account, _)| account)
.unwrap();
assert_eq!(account.lamports(), 1);
assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
let account = db
.load_with_fixed_root(&Ancestors::default(), &account_key)
.map(|(account, _)| account)
.unwrap();
assert_eq!(account.lamports(), 1);
assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
db.store_cached(2, &[(&account_key, &zero_lamport_account)]);
assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
let account = db
.load_with_fixed_root(&Ancestors::default(), &account_key)
.map(|(account, _)| account)
.unwrap();
assert_eq!(account.lamports(), 0);
assert_eq!(db.read_only_accounts_cache.cache_len(), 1);
}
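// Clean is a no-op while entries live only in the cache; after the flush, the
// zero-lamport entry in slot 0 is removed.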
#[test]
fn test_flush_cache_clean() {
let caching_enabled = true;
let db = Arc::new(AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
));
let account_key = Pubkey::new_unique();
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
db.store_cached(0, &[(&account_key, &zero_lamport_account)]);
db.store_cached(1, &[(&account_key, &slot1_account)]);
db.add_root(0);
db.add_root(1);
// Clean should not remove anything yet as nothing has been flushed
db.clean_accounts(None, false);
let account = db
.do_load(
&Ancestors::default(),
&account_key,
Some(0),
LoadHint::Unspecified,
)
.unwrap();
assert_eq!(account.0.lamports(), 0);
// since this item is in the cache, it should not be in the read only cache
assert_eq!(db.read_only_accounts_cache.cache_len(), 0);
// Flush, then clean again. Should not need another root to initiate the cleaning
// because `accounts_index.uncleaned_roots` should be correct
db.flush_accounts_cache(true, None);
db.clean_accounts(None, false);
assert!(db
.do_load(
&Ancestors::default(),
&account_key,
Some(0),
LoadHint::Unspecified
)
.is_none());
}
#[test]
fn test_flush_cache_dont_clean_zero_lamport_account() {
let caching_enabled = true;
let db = Arc::new(AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
));
let zero_lamport_account_key = Pubkey::new_unique();
let other_account_key = Pubkey::new_unique();
let original_lamports = 1;
let slot0_account =
AccountSharedData::new(original_lamports, 1, AccountSharedData::default().owner());
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
// Store into slot 0, and then flush the slot to storage
db.store_cached(0, &[(&zero_lamport_account_key, &slot0_account)]);
// The second key keeps the non-zero lamport account entry for slot 0 alive,
// preventing clean of the zero_lamport_account in slot 1.
db.store_cached(0, &[(&other_account_key, &slot0_account)]);
db.add_root(0);
db.flush_accounts_cache(true, None);
assert!(!db.storage.get_slot_storage_entries(0).unwrap().is_empty());
// Store into slot 1, a dummy slot that will be dead and purged before flush
db.store_cached(1, &[(&zero_lamport_account_key, &zero_lamport_account)]);
// Store into slot 2, which makes all updates from slot 1 outdated.
// This means slot 1 is a dead slot. Later, slot 1 will be cleaned/purged
// before it even reaches storage, but this purge of slot 1 should not affect
// the refcount of `zero_lamport_account_key` because cached keys do not bump
// the refcount in the index. This means clean should *not* remove
// `zero_lamport_account_key` from slot 2
db.store_cached(2, &[(&zero_lamport_account_key, &zero_lamport_account)]);
db.add_root(1);
db.add_root(2);
// Flush, then clean. Should not need another root to initiate the cleaning
// because `accounts_index.uncleaned_roots` should be correct
db.flush_accounts_cache(true, None);
db.clean_accounts(None, false);
// The `zero_lamport_account_key` is still alive in slot 1, so refcount for the
// pubkey should be 2
assert_eq!(
db.accounts_index
.ref_count_from_storage(&zero_lamport_account_key),
2
);
assert_eq!(
db.accounts_index.ref_count_from_storage(&other_account_key),
1
);
// The zero-lamport account in slot 2 should not be purged yet, because the
// entry in slot 1 is blocking cleanup of the zero-lamport account.
let max_root = None;
// Fine to simulate a transaction load since we are not doing any out of band
// removals, only using clean_accounts
let load_hint = LoadHint::FixedMaxRoot;
assert_eq!(
db.do_load(
&Ancestors::default(),
&zero_lamport_account_key,
max_root,
load_hint
)
.unwrap()
.0
.lamports(),
0
);
}
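// Helper that runs scan_accounts() on a background thread and stalls on `stall_key`
// until the test signals it to exit.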
struct ScanTracker {
t_scan: JoinHandle<()>,
exit: Arc<AtomicBool>,
}
impl ScanTracker {
fn exit(self) -> thread::Result<()> {
self.exit.store(true, Ordering::Relaxed);
self.t_scan.join()
}
}
fn setup_scan(
db: Arc<AccountsDb>,
scan_ancestors: Arc<Ancestors>,
bank_id: BankId,
stall_key: Pubkey,
) -> ScanTracker {
let exit = Arc::new(AtomicBool::new(false));
let exit_ = exit.clone();
let ready = Arc::new(AtomicBool::new(false));
let ready_ = ready.clone();
let t_scan = Builder::new()
.name("scan".to_string())
.spawn(move || {
db.scan_accounts(
&scan_ancestors,
bank_id,
|_collector: &mut Vec<(Pubkey, AccountSharedData)>, maybe_account| {
ready_.store(true, Ordering::Relaxed);
if let Some((pubkey, _, _)) = maybe_account {
if *pubkey == stall_key {
loop {
if exit_.load(Ordering::Relaxed) {
break;
} else {
sleep(Duration::from_millis(10));
}
}
}
}
},
)
.unwrap();
})
.unwrap();
// Wait for scan to start
while !ready.load(Ordering::Relaxed) {
sleep(Duration::from_millis(10));
}
ScanTracker { t_scan, exit }
}
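// An ongoing scan keeps unrooted slot 1 readable through flushes and cleans; the slot
// only disappears once its bank is dropped via purge_slot().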
#[test]
fn test_scan_flush_accounts_cache_then_clean_drop() {
let caching_enabled = true;
let db = Arc::new(AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
));
let account_key = Pubkey::new_unique();
let account_key2 = Pubkey::new_unique();
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
let slot1_account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
let slot2_account = AccountSharedData::new(2, 1, AccountSharedData::default().owner());
/*
Store an account for `account_key` into slots 0, 1, 2 (zero-lamport only in slot 0), where
root slots are 0 and 2, and slot 1 is unrooted.
0 (root)
/ \
1 2 (root)
*/
db.store_cached(0, &[(&account_key, &zero_lamport_account)]);
db.store_cached(1, &[(&account_key, &slot1_account)]);
// Fodder for the scan so that the lock on `account_key` is not held
db.store_cached(1, &[(&account_key2, &slot1_account)]);
db.store_cached(2, &[(&account_key, &slot2_account)]);
db.get_accounts_delta_hash(0);
let max_scan_root = 0;
db.add_root(max_scan_root);
let scan_ancestors: Arc<Ancestors> = Arc::new(vec![(0, 1), (1, 1)].into_iter().collect());
let bank_id = 0;
let scan_tracker = setup_scan(db.clone(), scan_ancestors.clone(), bank_id, account_key2);
// Add a new root 2
let new_root = 2;
db.get_accounts_delta_hash(new_root);
db.add_root(new_root);
// Check that the scan is properly set up
assert_eq!(
db.accounts_index.min_ongoing_scan_root().unwrap(),
max_scan_root
);
// If we specify a requested_flush_root == 2, then `slot 2 <= max_flush_slot` will
// be flushed even though `slot 2 > max_scan_root`. The unrooted slot 1 should
// remain in the cache
db.flush_accounts_cache(true, Some(new_root));
assert_eq!(db.accounts_cache.num_slots(), 1);
assert!(db.accounts_cache.slot_cache(1).is_some());
// Because of the ongoing scan, intra-cache cleaning should not clean the entry for
// `account_key` from slot 0, even though it was updated in slot 2
let account = db
.do_load(
&Ancestors::default(),
&account_key,
Some(0),
LoadHint::Unspecified,
)
.unwrap();
assert_eq!(account.0.lamports(), zero_lamport_account.lamports());
// Run clean, unrooted slot 1 should not be purged, and still readable from the cache,
// because we're still doing a scan on it.
db.clean_accounts(None, false);
let account = db
.do_load(
&scan_ancestors,
&account_key,
Some(max_scan_root),
LoadHint::Unspecified,
)
.unwrap();
assert_eq!(account.0.lamports(), slot1_account.lamports());
// When the scan is over, clean should not panic and should not purge something
// still in the cache.
scan_tracker.exit().unwrap();
db.clean_accounts(None, false);
let account = db
.do_load(
&scan_ancestors,
&account_key,
Some(max_scan_root),
LoadHint::Unspecified,
)
.unwrap();
assert_eq!(account.0.lamports(), slot1_account.lamports());
// Simulate dropping the bank, which finally removes the slot from the cache
let bank_id = 1;
db.purge_slot(1, bank_id, false);
assert!(db
.do_load(
&scan_ancestors,
&account_key,
Some(max_scan_root),
LoadHint::Unspecified
)
.is_none());
}
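// remove_dead_accounts() must decrement a store's alive_bytes by exactly the stored size
// of each removed account.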
#[test]
fn test_alive_bytes() {
let caching_enabled = true;
let accounts_db = AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
);
let slot: Slot = 0;
let num_keys = 10;
for data_size in 0..num_keys {
let account = AccountSharedData::new(1, data_size, &Pubkey::default());
accounts_db.store_cached(slot, &[(&Pubkey::new_unique(), &account)]);
}
accounts_db.add_root(slot);
accounts_db.flush_accounts_cache(true, None);
let mut storage_maps: Vec<Arc<AccountStorageEntry>> = accounts_db
.storage
.get_slot_storage_entries(slot)
.unwrap_or_default();
// Flushing cache should only create one storage entry
assert_eq!(storage_maps.len(), 1);
let storage0 = storage_maps.pop().unwrap();
let accounts = storage0.all_accounts();
for account in accounts {
let before_size = storage0.alive_bytes.load(Ordering::Relaxed);
let account_info = accounts_db
.accounts_index
.get_account_read_entry(&account.meta.pubkey)
.map(|locked_entry| {
// Should only be one entry per key, since every key was only stored to slot 0
locked_entry.slot_list()[0].clone()
})
.unwrap();
let removed_data_size = account_info.1.stored_size;
// Fetching the account from storage should return the same
// stored size as in the index.
assert_eq!(removed_data_size, account.stored_size);
assert_eq!(account_info.0, slot);
let reclaims = vec![account_info];
accounts_db.remove_dead_accounts(&reclaims, None, None, true);
let after_size = storage0.alive_bytes.load(Ordering::Relaxed);
assert_eq!(before_size, after_size + account.stored_size);
}
}
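// Builds a cached AccountsDb with `num_slots` rooted slots, each storing a suffix of `keys`,
// optionally starting a stalled scan at `scan_slot`.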
fn setup_accounts_db_cache_clean(
num_slots: usize,
scan_slot: Option<Slot>,
) -> (Arc<AccountsDb>, Vec<Pubkey>, Vec<Slot>, Option<ScanTracker>) {
let caching_enabled = true;
let accounts_db = Arc::new(AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
));
let slots: Vec<_> = (0..num_slots as Slot).into_iter().collect();
let stall_slot = num_slots as Slot;
let scan_stall_key = Pubkey::new_unique();
let keys: Vec<Pubkey> = std::iter::repeat_with(Pubkey::new_unique)
.take(num_slots)
.collect();
if scan_slot.is_some() {
accounts_db.store_cached(
// Store it in a slot that isn't returned in `slots`
stall_slot,
&[(
&scan_stall_key,
&AccountSharedData::new(1, 0, &Pubkey::default()),
)],
);
}
// Store some subset of the keys in slots 0..num_slots
let mut scan_tracker = None;
for slot in &slots {
for key in &keys[*slot as usize..] {
accounts_db.store_cached(
*slot,
&[(key, &AccountSharedData::new(1, 0, &Pubkey::default()))],
);
}
accounts_db.add_root(*slot as Slot);
if Some(*slot) == scan_slot {
let ancestors = Arc::new(vec![(stall_slot, 1), (*slot, 1)].into_iter().collect());
let bank_id = 0;
scan_tracker = Some(setup_scan(
accounts_db.clone(),
ancestors,
bank_id,
scan_stall_key,
));
assert_eq!(
accounts_db.accounts_index.min_ongoing_scan_root().unwrap(),
*slot
);
}
}
accounts_db.accounts_cache.remove_slot(stall_slot);
// If there's <= MAX_CACHE_SLOTS, no slots should be flushed
if accounts_db.accounts_cache.num_slots() <= MAX_CACHE_SLOTS {
accounts_db.flush_accounts_cache(false, None);
assert_eq!(accounts_db.accounts_cache.num_slots(), num_slots);
}
(accounts_db, keys, slots, scan_tracker)
}
#[test]
fn test_accounts_db_cache_clean_dead_slots() {
let num_slots = 10;
let (accounts_db, keys, mut slots, _) = setup_accounts_db_cache_clean(num_slots, None);
let last_dead_slot = (num_slots - 1) as Slot;
assert_eq!(*slots.last().unwrap(), last_dead_slot);
let alive_slot = last_dead_slot as Slot + 1;
slots.push(alive_slot);
for key in &keys {
// Store a slot that overwrites all previous keys, rendering all previous keys dead
accounts_db.store_cached(
alive_slot,
&[(key, &AccountSharedData::new(1, 0, &Pubkey::default()))],
);
accounts_db.add_root(alive_slot);
}
// Before the flush, we can find entries in the database for slots < alive_slot if we specify
// a smaller max root
for key in &keys {
assert!(accounts_db
.do_load(
&Ancestors::default(),
key,
Some(last_dead_slot),
LoadHint::Unspecified
)
.is_some());
}
// If no `max_clean_root` is specified, cleaning should purge all flushed slots
accounts_db.flush_accounts_cache(true, None);
assert_eq!(accounts_db.accounts_cache.num_slots(), 0);
let mut uncleaned_roots = accounts_db
.accounts_index
.clear_uncleaned_roots(None)
.into_iter()
.collect::<Vec<_>>();
uncleaned_roots.sort_unstable();
assert_eq!(uncleaned_roots, slots);
assert_eq!(
accounts_db.accounts_cache.fetch_max_flush_root(),
alive_slot,
);
// Specifying a max_root < alive_slot should not return any more entries,
// as those have been purged from the accounts index for the dead slots.
for key in &keys {
assert!(accounts_db
.do_load(
&Ancestors::default(),
key,
Some(last_dead_slot),
LoadHint::Unspecified
)
.is_none());
}
// Each slot should only have one entry in the storage, since all other accounts were
// cleaned due to later updates
for slot in &slots {
if let ScanStorageResult::Stored(slot_accounts) = accounts_db.scan_account_storage(
*slot as Slot,
|_| Some(0),
|slot_accounts: &DashSet<Pubkey>, loaded_account: LoadedAccount| {
slot_accounts.insert(*loaded_account.pubkey());
},
) {
if *slot == alive_slot {
assert_eq!(slot_accounts.len(), keys.len());
} else {
assert!(slot_accounts.is_empty());
}
} else {
panic!("Expected slot to be in storage, not cache");
}
}
}
#[test]
fn test_accounts_db_cache_clean() {
let (accounts_db, keys, slots, _) = setup_accounts_db_cache_clean(10, None);
// If no `max_clean_root` is specified, cleaning should purge all flushed slots
accounts_db.flush_accounts_cache(true, None);
assert_eq!(accounts_db.accounts_cache.num_slots(), 0);
let mut uncleaned_roots = accounts_db
.accounts_index
.clear_uncleaned_roots(None)
.into_iter()
.collect::<Vec<_>>();
uncleaned_roots.sort_unstable();
assert_eq!(uncleaned_roots, slots);
assert_eq!(
accounts_db.accounts_cache.fetch_max_flush_root(),
*slots.last().unwrap()
);
// Each slot should only have one entry in the storage, since all other accounts were
// cleaned due to later updates
for slot in &slots {
if let ScanStorageResult::Stored(slot_account) = accounts_db.scan_account_storage(
*slot as Slot,
|_| Some(0),
|slot_account: &Arc<RwLock<Pubkey>>, loaded_account: LoadedAccount| {
*slot_account.write().unwrap() = *loaded_account.pubkey();
},
) {
assert_eq!(*slot_account.read().unwrap(), keys[*slot as usize]);
} else {
panic!("Everything should have been flushed")
}
}
}
fn run_test_accounts_db_cache_clean_max_root(
num_slots: usize,
requested_flush_root: Slot,
scan_root: Option<Slot>,
) {
assert!(requested_flush_root < (num_slots as Slot));
let (accounts_db, keys, slots, scan_tracker) =
setup_accounts_db_cache_clean(num_slots, scan_root);
let is_cache_at_limit = num_slots - requested_flush_root as usize - 1 > MAX_CACHE_SLOTS;
// If:
// 1) `requested_flush_root` is specified,
// 2) not at the cache limit, i.e. `is_cache_at_limit == false`, then
// `flush_accounts_cache()` should clean and flush only slots <= requested_flush_root,
accounts_db.flush_accounts_cache(true, Some(requested_flush_root));
if !is_cache_at_limit {
// Should flush all slots between 0..=requested_flush_root
assert_eq!(
accounts_db.accounts_cache.num_slots(),
slots.len() - requested_flush_root as usize - 1
);
} else {
// Otherwise, if we are at the cache limit, all roots will be flushed
assert_eq!(accounts_db.accounts_cache.num_slots(), 0,);
}
let mut uncleaned_roots = accounts_db
.accounts_index
.clear_uncleaned_roots(None)
.into_iter()
.collect::<Vec<_>>();
uncleaned_roots.sort_unstable();
let expected_max_flushed_root = if !is_cache_at_limit {
// Should flush all slots between 0..=requested_flush_root
requested_flush_root
} else {
// Otherwise, if we are at the cache limit, all roots will be flushed
num_slots as Slot - 1
};
assert_eq!(
uncleaned_roots,
slots[0..=expected_max_flushed_root as usize].to_vec()
);
assert_eq!(
accounts_db.accounts_cache.fetch_max_flush_root(),
expected_max_flushed_root,
);
for slot in &slots {
let slot_accounts = accounts_db.scan_account_storage(
*slot as Slot,
|loaded_account: LoadedAccount| {
if is_cache_at_limit {
panic!(
"When cache is at limit, all roots should have been flushed to storage"
);
}
// All slots <= requested_flush_root should have been flushed, regardless
// of ongoing scans
assert!(*slot > requested_flush_root);
Some(*loaded_account.pubkey())
},
|slot_accounts: &DashSet<Pubkey>, loaded_account: LoadedAccount| {
slot_accounts.insert(*loaded_account.pubkey());
if !is_cache_at_limit {
// Only true when the limit hasn't been reached and there are still
// slots left in the cache
assert!(*slot <= requested_flush_root);
}
},
);
let slot_accounts = match slot_accounts {
ScanStorageResult::Cached(slot_accounts) => {
slot_accounts.into_iter().collect::<HashSet<Pubkey>>()
}
ScanStorageResult::Stored(slot_accounts) => {
slot_accounts.into_iter().collect::<HashSet<Pubkey>>()
}
};
let expected_accounts =
if *slot >= requested_flush_root || *slot >= scan_root.unwrap_or(Slot::MAX) {
// 1) If slot > `requested_flush_root`, then either:
// a) If `is_cache_at_limit == false`, still in the cache
// b) If `is_cache_at_limit == true`, they were not cleaned before being flushed to storage.
//
// In both cases all the *original* updates at index `slot` were uncleaned and thus
// should be discoverable by this scan.
//
// 2) If slot == `requested_flush_root`, the slot was not cleaned before being flushed to storage,
// so it also contains all the original updates.
//
// 3) If *slot >= scan_root, then we should not clean it either
keys[*slot as usize..]
.iter()
.cloned()
.collect::<HashSet<Pubkey>>()
} else {
// Slots less than `requested_flush_root` and `scan_root` were cleaned in the cache before being flushed
// to storage, should only contain one account
std::iter::once(keys[*slot as usize])
.into_iter()
.collect::<HashSet<Pubkey>>()
};
assert_eq!(slot_accounts, expected_accounts);
}
if let Some(scan_tracker) = scan_tracker {
scan_tracker.exit().unwrap();
}
}
#[test]
fn test_accounts_db_cache_clean_max_root() {
let requested_flush_root = 5;
run_test_accounts_db_cache_clean_max_root(10, requested_flush_root, None);
}
#[test]
fn test_accounts_db_cache_clean_max_root_with_scan() {
let requested_flush_root = 5;
run_test_accounts_db_cache_clean_max_root(
10,
requested_flush_root,
Some(requested_flush_root - 1),
);
run_test_accounts_db_cache_clean_max_root(
10,
requested_flush_root,
Some(requested_flush_root + 1),
);
}
#[test]
fn test_accounts_db_cache_clean_max_root_with_cache_limit_hit() {
let requested_flush_root = 5;
// Test that if there are > MAX_CACHE_SLOTS in the cache after flush, then more roots
// will be flushed
run_test_accounts_db_cache_clean_max_root(
MAX_CACHE_SLOTS + requested_flush_root as usize + 2,
requested_flush_root,
None,
);
}
#[test]
fn test_accounts_db_cache_clean_max_root_with_cache_limit_hit_and_scan() {
let requested_flush_root = 5;
// Test that if there are > MAX_CACHE_SLOTS in the cache after flush, then more roots
// will be flushed
run_test_accounts_db_cache_clean_max_root(
MAX_CACHE_SLOTS + requested_flush_root as usize + 2,
requested_flush_root,
Some(requested_flush_root - 1),
);
run_test_accounts_db_cache_clean_max_root(
MAX_CACHE_SLOTS + requested_flush_root as usize + 2,
requested_flush_root,
Some(requested_flush_root + 1),
);
}
fn run_flush_rooted_accounts_cache(should_clean: bool) {
let num_slots = 10;
let (accounts_db, keys, slots, _) = setup_accounts_db_cache_clean(num_slots, None);
let mut cleaned_bytes = 0;
let mut cleaned_accounts = 0;
let should_clean_tracker = if should_clean {
Some((&mut cleaned_bytes, &mut cleaned_accounts))
} else {
None
};
// If no cleaning is specified, then flush everything
accounts_db.flush_rooted_accounts_cache(None, should_clean_tracker);
for slot in &slots {
let slot_accounts = if let ScanStorageResult::Stored(slot_accounts) = accounts_db
.scan_account_storage(
*slot as Slot,
|_| Some(0),
|slot_account: &DashSet<Pubkey>, loaded_account: LoadedAccount| {
slot_account.insert(*loaded_account.pubkey());
},
) {
slot_accounts.into_iter().collect::<HashSet<Pubkey>>()
} else {
panic!("All roots should have been flushed to storage");
};
if !should_clean || slot == slots.last().unwrap() {
// The slot was not cleaned before being flushed to storage,
// so it also contains all the original updates.
assert_eq!(
slot_accounts,
keys[*slot as usize..]
.iter()
.cloned()
.collect::<HashSet<Pubkey>>()
);
} else {
// If clean was specified, only the latest slot should have all the updates.
// All these other slots have been cleaned before flush
assert_eq!(
slot_accounts,
std::iter::once(keys[*slot as usize])
.into_iter()
.collect::<HashSet<Pubkey>>()
);
}
}
}
#[test]
fn test_flush_rooted_accounts_cache_with_clean() {
run_flush_rooted_accounts_cache(true);
}
#[test]
fn test_flush_rooted_accounts_cache_without_clean() {
run_flush_rooted_accounts_cache(false);
}
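// Shrinking slot 0 drops the outdated entry for account_key1; after slot 0 dies and is
// cleaned, the key's ref count ends up at 1.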
fn run_test_shrink_unref(do_intra_cache_clean: bool) {
// Enable caching so that we use the straightforward implementation
// of shrink that will shrink all candidate slots
let caching_enabled = true;
let db = AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
);
let account_key1 = Pubkey::new_unique();
let account_key2 = Pubkey::new_unique();
let account1 = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
// Store into slot 0
db.store_cached(0, &[(&account_key1, &account1)]);
db.store_cached(0, &[(&account_key2, &account1)]);
db.add_root(0);
if !do_intra_cache_clean {
// If we don't want the cache doing purges before flush,
// then we cannot flush multiple roots at once, otherwise the later
// roots will clean the earlier roots before they are stored.
// Thus flush the roots individually
db.flush_accounts_cache(true, None);
// Add an additional ref within the same slot to pubkey 1
db.store_uncached(0, &[(&account_key1, &account1)]);
}
// Make account_key1 in slot 0 outdated by updating in rooted slot 1
db.store_cached(1, &[(&account_key1, &account1)]);
db.add_root(1);
// Flushes all roots
db.flush_accounts_cache(true, None);
db.get_accounts_delta_hash(0);
db.get_accounts_delta_hash(1);
// Clean to remove outdated entry from slot 0
db.clean_accounts(Some(1), false);
// Shrink Slot 0
let mut slot0_stores = db.storage.get_slot_storage_entries(0).unwrap();
assert_eq!(slot0_stores.len(), 1);
let slot0_store = slot0_stores.pop().unwrap();
{
let mut shrink_candidate_slots = db.shrink_candidate_slots.lock().unwrap();
shrink_candidate_slots
.entry(0)
.or_default()
.insert(slot0_store.append_vec_id(), slot0_store);
}
db.shrink_candidate_slots();
// Make slot 0 dead by updating the remaining key
db.store_cached(2, &[(&account_key2, &account1)]);
db.add_root(2);
// Flushes all roots
db.flush_accounts_cache(true, None);
// Should be one store before clean for slot 0
assert_eq!(db.storage.get_slot_storage_entries(0).unwrap().len(), 1);
db.get_accounts_delta_hash(2);
db.clean_accounts(Some(2), false);
// No stores should exist for slot 0 after clean
assert!(db.storage.get_slot_storage_entries(0).is_none());
// Ref count for `account_key1` (account removed earlier by shrink)
// should be 1, since it was only stored in slot 0 and 1, and slot 0
// is now dead
assert_eq!(db.accounts_index.ref_count_from_storage(&account_key1), 1);
}
#[test]
fn test_shrink_unref() {
run_test_shrink_unref(false)
}
#[test]
fn test_shrink_unref_with_intra_slot_cleaning() {
run_test_shrink_unref(true)
}
#[test]
fn test_partial_clean() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let account_key1 = Pubkey::new_unique();
let account_key2 = Pubkey::new_unique();
let account1 = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
let account2 = AccountSharedData::new(2, 0, AccountSharedData::default().owner());
let account3 = AccountSharedData::new(3, 0, AccountSharedData::default().owner());
let account4 = AccountSharedData::new(4, 0, AccountSharedData::default().owner());
// Store accounts into slots 0 and 1
db.store_uncached(0, &[(&account_key1, &account1)]);
db.store_uncached(0, &[(&account_key2, &account1)]);
db.store_uncached(1, &[(&account_key1, &account2)]);
db.get_accounts_delta_hash(0);
db.get_accounts_delta_hash(1);
db.print_accounts_stats("pre-clean1");
// Clean accounts - no accounts should be cleaned, since there are no rooted slots
//
// Check that the uncleaned_pubkeys are not prematurely removed, so that when the
// slots are rooted and can actually be cleaned, the delta keys are still there.
db.clean_accounts(None, false);
db.print_accounts_stats("post-clean1");
// Check stores > 0
assert!(!slot_stores(&db, 0).is_empty());
assert!(!slot_stores(&db, 1).is_empty());
// root slot 0
db.add_root(0);
// store into slot 2
db.store_uncached(2, &[(&account_key2, &account3)]);
db.store_uncached(2, &[(&account_key1, &account3)]);
db.get_accounts_delta_hash(2);
db.clean_accounts(None, false);
db.print_accounts_stats("post-clean2");
// root slots 1
db.add_root(1);
db.clean_accounts(None, false);
db.print_accounts_stats("post-clean3");
db.store_uncached(3, &[(&account_key2, &account4)]);
db.get_accounts_delta_hash(3);
db.add_root(3);
// Check that we can clean where max_root=3 and slot=2 is not rooted
db.clean_accounts(None, false);
assert!(db.uncleaned_pubkeys.is_empty());
db.print_accounts_stats("post-clean4");
assert!(slot_stores(&db, 0).is_empty());
assert!(!slot_stores(&db, 1).is_empty());
}
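// Entries sitting in RecycleStores longer than EXPIRATION_TTL_SECONDS are expired;
// fresh entries are kept.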
#[test]
fn test_recycle_stores_expiration() {
solana_logger::setup();
let dummy_path = Path::new("");
let dummy_slot = 12;
let dummy_size = 1000;
let dummy_id1 = 22;
let entry1 = Arc::new(AccountStorageEntry::new(
dummy_path, dummy_slot, dummy_id1, dummy_size,
));
let dummy_id2 = 44;
let entry2 = Arc::new(AccountStorageEntry::new(
dummy_path, dummy_slot, dummy_id2, dummy_size,
));
let mut recycle_stores = RecycleStores::default();
recycle_stores.add_entry(entry1);
recycle_stores.add_entry(entry2);
assert_eq!(recycle_stores.entry_count(), 2);
// no expiration for newly added entries
let expired = recycle_stores.expire_old_entries();
assert_eq!(
expired
.iter()
.map(|e| e.append_vec_id())
.collect::<Vec<_>>(),
Vec::<AppendVecId>::new()
);
assert_eq!(
recycle_stores
.iter()
.map(|(_, e)| e.append_vec_id())
.collect::<Vec<_>>(),
vec![dummy_id1, dummy_id2]
);
assert_eq!(recycle_stores.entry_count(), 2);
assert_eq!(recycle_stores.total_bytes(), dummy_size * 2);
// expiration for only too old entries
recycle_stores.entries[0].0 =
Instant::now() - Duration::from_secs(EXPIRATION_TTL_SECONDS + 1);
let expired = recycle_stores.expire_old_entries();
assert_eq!(
expired
.iter()
.map(|e| e.append_vec_id())
.collect::<Vec<_>>(),
vec![dummy_id1]
);
assert_eq!(
recycle_stores
.iter()
.map(|(_, e)| e.append_vec_id())
.collect::<Vec<_>>(),
vec![dummy_id2]
);
assert_eq!(recycle_stores.entry_count(), 1);
assert_eq!(recycle_stores.total_bytes(), dummy_size);
}
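// Shared constants and helpers for the load/flush/shrink race tests below.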
const RACY_SLEEP_MS: u64 = 10;
const RACE_TIME: u64 = 5;
fn start_load_thread(
with_retry: bool,
ancestors: Ancestors,
db: Arc<AccountsDb>,
exit: Arc<AtomicBool>,
pubkey: Arc<Pubkey>,
expected_lamports: impl Fn(&(AccountSharedData, Slot)) -> u64 + Send + 'static,
) -> JoinHandle<()> {
let load_hint = if with_retry {
LoadHint::FixedMaxRoot
} else {
LoadHint::Unspecified
};
std::thread::Builder::new()
.name("account-do-load".to_string())
.spawn(move || {
loop {
if exit.load(Ordering::Relaxed) {
return;
}
// Meddle with load_limit to cover all branches of the implementation.
// There should be absolutely no behavioral difference; the slow branch triggered by
// load_limit should only affect performance.
// Ordering::Relaxed is ok because there are no data dependencies; the modified field is
// a completely free-standing cfg(test) control-flow knob.
db.load_limit
.store(thread_rng().gen_range(0, 10) as u64, Ordering::Relaxed);
// Load should never be unable to find this key
let loaded_account = db.do_load(&ancestors, &pubkey, None, load_hint).unwrap();
// slot + 1 == account.lamports because of the account-cache-flush thread
assert_eq!(
loaded_account.0.lamports(),
expected_lamports(&loaded_account)
);
}
})
.unwrap()
}
fn do_test_load_account_and_cache_flush_race(with_retry: bool) {
solana_logger::setup();
let caching_enabled = true;
let mut db = AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
);
db.load_delay = RACY_SLEEP_MS;
let db = Arc::new(db);
let pubkey = Arc::new(Pubkey::new_unique());
let exit = Arc::new(AtomicBool::new(false));
db.store_cached(
0,
&[(
&pubkey,
&AccountSharedData::new(1, 0, AccountSharedData::default().owner()),
)],
);
db.add_root(0);
db.flush_accounts_cache(true, None);
let t_flush_accounts_cache = {
let db = db.clone();
let exit = exit.clone();
let pubkey = pubkey.clone();
let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
std::thread::Builder::new()
.name("account-cache-flush".to_string())
.spawn(move || {
let mut slot = 1;
loop {
if exit.load(Ordering::Relaxed) {
return;
}
account.set_lamports(slot + 1);
db.store_cached(slot, &[(&pubkey, &account)]);
db.add_root(slot);
sleep(Duration::from_millis(RACY_SLEEP_MS));
db.flush_accounts_cache(true, None);
slot += 1;
}
})
.unwrap()
};
let t_do_load = start_load_thread(
with_retry,
Ancestors::default(),
db,
exit.clone(),
pubkey,
|(_, slot)| slot + 1,
);
sleep(Duration::from_secs(RACE_TIME));
exit.store(true, Ordering::Relaxed);
t_flush_accounts_cache.join().unwrap();
t_do_load.join().map_err(std::panic::resume_unwind).unwrap()
}
#[test]
fn test_load_account_and_cache_flush_race_with_retry() {
do_test_load_account_and_cache_flush_race(true);
}
#[test]
fn test_load_account_and_cache_flush_race_without_retry() {
do_test_load_account_and_cache_flush_race(false);
}
fn do_test_load_account_and_shrink_race(with_retry: bool) {
let caching_enabled = true;
let mut db = AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
);
db.load_delay = RACY_SLEEP_MS;
let db = Arc::new(db);
let pubkey = Arc::new(Pubkey::new_unique());
let exit = Arc::new(AtomicBool::new(false));
let slot = 1;
// Store an account
let lamports = 42;
let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
account.set_lamports(lamports);
db.store_uncached(slot, &[(&pubkey, &account)]);
// Set the slot as a root so account loads will see the contents of this slot
db.add_root(slot);
let t_shrink_accounts = {
let db = db.clone();
let exit = exit.clone();
std::thread::Builder::new()
.name("account-shrink".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
// Simulate adding shrink candidates from clean_accounts()
let stores = db.storage.get_slot_storage_entries(slot).unwrap();
assert_eq!(stores.len(), 1);
let store = &stores[0];
let store_id = store.append_vec_id();
db.shrink_candidate_slots
.lock()
.unwrap()
.entry(slot)
.or_default()
.insert(store_id, store.clone());
db.shrink_candidate_slots();
})
.unwrap()
};
let t_do_load = start_load_thread(
with_retry,
Ancestors::default(),
db,
exit.clone(),
pubkey,
move |_| lamports,
);
sleep(Duration::from_secs(RACE_TIME));
exit.store(true, Ordering::Relaxed);
t_shrink_accounts.join().unwrap();
t_do_load.join().map_err(std::panic::resume_unwind).unwrap()
}
#[test]
fn test_load_account_and_shrink_race_with_retry() {
do_test_load_account_and_shrink_race(true);
}
#[test]
fn test_load_account_and_shrink_race_without_retry() {
do_test_load_account_and_shrink_race(false);
}
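// Races flush_slot_cache() against remove_unrooted_slots() on the same slot over several trials.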
#[test]
fn test_cache_flush_delayed_remove_unrooted_race() {
let caching_enabled = true;
let mut db = AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
);
db.load_delay = RACY_SLEEP_MS;
let db = Arc::new(db);
let slot = 10;
let bank_id = 10;
let lamports = 42;
let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
account.set_lamports(lamports);
// Start up a thread to flush the accounts cache
let (flush_trial_start_sender, flush_trial_start_receiver) = unbounded();
let (flush_done_sender, flush_done_receiver) = unbounded();
let t_flush_cache = {
let db = db.clone();
std::thread::Builder::new()
.name("account-cache-flush".to_string())
.spawn(move || loop {
// Wait for the signal to start a trial
if flush_trial_start_receiver.recv().is_err() {
return;
}
db.flush_slot_cache(10, None::<&mut fn(&_, &_) -> bool>);
flush_done_sender.send(()).unwrap();
})
.unwrap()
};
// Start up a thread to remove the slot
let (remove_trial_start_sender, remove_trial_start_receiver) = unbounded();
let (remove_done_sender, remove_done_receiver) = unbounded();
let t_remove = {
let db = db.clone();
std::thread::Builder::new()
.name("account-remove".to_string())
.spawn(move || loop {
// Wait for the signal to start a trial
if remove_trial_start_receiver.recv().is_err() {
return;
}
db.remove_unrooted_slots(&[(slot, bank_id)]);
remove_done_sender.send(()).unwrap();
})
.unwrap()
};
let num_trials = 10;
for _ in 0..num_trials {
let pubkey = Pubkey::new_unique();
db.store_cached(slot, &[(&pubkey, &account)]);
// Wait for both threads to finish
flush_trial_start_sender.send(()).unwrap();
remove_trial_start_sender.send(()).unwrap();
let _ = flush_done_receiver.recv();
let _ = remove_done_receiver.recv();
}
drop(flush_trial_start_sender);
drop(remove_trial_start_sender);
t_flush_cache.join().unwrap();
t_remove.join().unwrap();
}
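// Multi-slot variant: races flushing all cached slots against removing a random half of them,
// while another thread issues spurious wakeups on the synchronization signal.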
#[test]
fn test_cache_flush_remove_unrooted_race_multiple_slots() {
let caching_enabled = true;
let db = AccountsDb::new_with_config(
Vec::new(),
&ClusterType::Development,
AccountSecondaryIndexes::default(),
caching_enabled,
AccountShrinkThreshold::default(),
);
let db = Arc::new(db);
let num_cached_slots = 100;
let num_trials = 100;
let (new_trial_start_sender, new_trial_start_receiver) = unbounded();
let (flush_done_sender, flush_done_receiver) = unbounded();
// Start up a thread to flush the accounts cache
let t_flush_cache = {
let db = db.clone();
std::thread::Builder::new()
.name("account-cache-flush".to_string())
.spawn(move || loop {
// Wait for the signal to start a trial
if new_trial_start_receiver.recv().is_err() {
return;
}
for slot in 0..num_cached_slots {
db.flush_slot_cache(slot, None::<&mut fn(&_, &_) -> bool>);
}
flush_done_sender.send(()).unwrap();
})
.unwrap()
};
let exit = Arc::new(AtomicBool::new(false));
let t_spurious_signal = {
let db = db.clone();
let exit = exit.clone();
std::thread::Builder::new()
.name("account-cache-flush".to_string())
.spawn(move || loop {
if exit.load(Ordering::Relaxed) {
return;
}
// Simulate spurious wake-up that can happen, but is too rare to
// otherwise depend on in tests.
db.remove_unrooted_slots_synchronization.signal.notify_all();
})
.unwrap()
};
// Run multiple trials. Has the added benefit of rewriting the same slots after we've
// dumped them in previous trials.
for _ in 0..num_trials {
// Store an account
let lamports = 42;
let mut account = AccountSharedData::new(1, 0, AccountSharedData::default().owner());
account.set_lamports(lamports);
// Pick random 50% of the slots to pass to `remove_unrooted_slots()`
let mut all_slots: Vec<(Slot, BankId)> = (0..num_cached_slots)
.map(|slot| {
let bank_id = slot + 1;
(slot, bank_id)
})
.collect();
all_slots.shuffle(&mut rand::thread_rng());
let slots_to_dump = &all_slots[0..num_cached_slots as usize / 2];
let slots_to_keep = &all_slots[num_cached_slots as usize / 2..];
// Set up a one account per slot across many different slots, track which
// pubkey was stored in each slot.
let slot_to_pubkey_map: HashMap<Slot, Pubkey> = (0..num_cached_slots)
.map(|slot| {
let pubkey = Pubkey::new_unique();
db.store_cached(slot, &[(&pubkey, &account)]);
(slot, pubkey)
})
.collect();
// Signal the flushing thread to start flushing
new_trial_start_sender.send(()).unwrap();
// Here we want to test both:
// 1) Flush thread starts flushing a slot before we try dumping it.
// 2) Flushing thread trying to flush while/after we're trying to dump the slot,
// in which case flush should ignore/move past the slot to be dumped
//
// Hence, we split into chunks so the dumping of each chunk races with the
// flushes. If we dumped everything at once, that would reduce the possibility
// of the flush occurring first, since the dumping logic reserves all the slots it's about
// to dump immediately.
for chunks in slots_to_dump.chunks(slots_to_dump.len() / 2) {
db.remove_unrooted_slots(chunks);
}
// Check that all the slots in `slots_to_dump` were completely removed from the
// cache, storage, and index
for (slot, _) in slots_to_dump {
assert!(db.storage.get_slot_storage_entries(*slot).is_none());
assert!(db.accounts_cache.slot_cache(*slot).is_none());
let account_in_slot = slot_to_pubkey_map[slot];
assert!(db
.accounts_index
.get_account_read_entry(&account_in_slot)
.is_none());
}
// Wait for flush to finish before starting next trial
flush_done_receiver.recv().unwrap();
for (slot, bank_id) in slots_to_keep {
let account_in_slot = slot_to_pubkey_map[slot];
assert!(db
.load(
&Ancestors::from(vec![(*slot, 0)]),
&account_in_slot,
LoadHint::FixedMaxRoot
)
.is_some());
// Clear for next iteration so that `assert!(self.storage.get_slot_stores(purged_slot).is_none());`
// in `purge_slot_pubkeys()` doesn't trigger
db.remove_unrooted_slots(&[(*slot, *bank_id)]);
}
}
exit.store(true, Ordering::Relaxed);
drop(new_trial_start_sender);
t_flush_cache.join().unwrap();
t_spurious_signal.join().unwrap();
}
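// collect_uncleaned_slots_up_to_slot() returns every uncleaned slot at or below the given slot.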
#[test]
fn test_collect_uncleaned_slots_up_to_slot() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let slot1 = 11;
let slot2 = 222;
let slot3 = 3333;
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let pubkey3 = Pubkey::new_unique();
db.uncleaned_pubkeys.insert(slot1, vec![pubkey1]);
db.uncleaned_pubkeys.insert(slot2, vec![pubkey2]);
db.uncleaned_pubkeys.insert(slot3, vec![pubkey3]);
let mut uncleaned_slots1 = db.collect_uncleaned_slots_up_to_slot(slot1);
let mut uncleaned_slots2 = db.collect_uncleaned_slots_up_to_slot(slot2);
let mut uncleaned_slots3 = db.collect_uncleaned_slots_up_to_slot(slot3);
uncleaned_slots1.sort_unstable();
uncleaned_slots2.sort_unstable();
uncleaned_slots3.sort_unstable();
assert_eq!(uncleaned_slots1, [slot1]);
assert_eq!(uncleaned_slots2, [slot1, slot2]);
assert_eq!(uncleaned_slots3, [slot1, slot2, slot3]);
}
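// remove_uncleaned_slots_and_collect_pubkeys() returns only the pubkeys recorded for the
// requested slots, regardless of which slots are rooted.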
#[test]
fn test_remove_uncleaned_slots_and_collect_pubkeys() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let slot1 = 11;
let slot2 = 222;
let slot3 = 3333;
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let pubkey3 = Pubkey::new_unique();
let account1 = AccountSharedData::new(0, 0, &pubkey1);
let account2 = AccountSharedData::new(0, 0, &pubkey2);
let account3 = AccountSharedData::new(0, 0, &pubkey3);
db.store_uncached(slot1, &[(&pubkey1, &account1)]);
db.store_uncached(slot2, &[(&pubkey2, &account2)]);
db.store_uncached(slot3, &[(&pubkey3, &account3)]);
db.add_root(slot1);
// slot 2 is _not_ a root on purpose
db.add_root(slot3);
db.uncleaned_pubkeys.insert(slot1, vec![pubkey1]);
db.uncleaned_pubkeys.insert(slot2, vec![pubkey2]);
db.uncleaned_pubkeys.insert(slot3, vec![pubkey3]);
let uncleaned_pubkeys1 = db
.remove_uncleaned_slots_and_collect_pubkeys(vec![slot1])
.into_iter()
.flatten()
.collect::<Vec<_>>();
let uncleaned_pubkeys2 = db
.remove_uncleaned_slots_and_collect_pubkeys(vec![slot2])
.into_iter()
.flatten()
.collect::<Vec<_>>();
let uncleaned_pubkeys3 = db
.remove_uncleaned_slots_and_collect_pubkeys(vec![slot3])
.into_iter()
.flatten()
.collect::<Vec<_>>();
assert!(uncleaned_pubkeys1.contains(&pubkey1));
assert!(!uncleaned_pubkeys1.contains(&pubkey2));
assert!(!uncleaned_pubkeys1.contains(&pubkey3));
assert!(!uncleaned_pubkeys2.contains(&pubkey1));
assert!(uncleaned_pubkeys2.contains(&pubkey2));
assert!(!uncleaned_pubkeys2.contains(&pubkey3));
assert!(!uncleaned_pubkeys3.contains(&pubkey1));
assert!(!uncleaned_pubkeys3.contains(&pubkey2));
assert!(uncleaned_pubkeys3.contains(&pubkey3));
}
#[test]
fn test_remove_uncleaned_slots_and_collect_pubkeys_up_to_slot() {
solana_logger::setup();
let db = AccountsDb::new(Vec::new(), &ClusterType::Development);
let slot1 = 11;
let slot2 = 222;
let slot3 = 3333;
let pubkey1 = Pubkey::new_unique();
let pubkey2 = Pubkey::new_unique();
let pubkey3 = Pubkey::new_unique();
let account1 = AccountSharedData::new(0, 0, &pubkey1);
let account2 = AccountSharedData::new(0, 0, &pubkey2);
let account3 = AccountSharedData::new(0, 0, &pubkey3);
db.store_uncached(slot1, &[(&pubkey1, &account1)]);
db.store_uncached(slot2, &[(&pubkey2, &account2)]);
db.store_uncached(slot3, &[(&pubkey3, &account3)]);
// slot 1 is _not_ a root on purpose
db.add_root(slot2);
db.add_root(slot3);
db.uncleaned_pubkeys.insert(slot1, vec![pubkey1]);
db.uncleaned_pubkeys.insert(slot2, vec![pubkey2]);
db.uncleaned_pubkeys.insert(slot3, vec![pubkey3]);
let uncleaned_pubkeys = db
.remove_uncleaned_slots_and_collect_pubkeys_up_to_slot(slot3)
.into_iter()
.flatten()
.collect::<Vec<_>>();
assert!(uncleaned_pubkeys.contains(&pubkey1));
assert!(uncleaned_pubkeys.contains(&pubkey2));
assert!(uncleaned_pubkeys.contains(&pubkey3));
}
#[test]
fn test_shrink_productive() {
solana_logger::setup();
let s1 = AccountStorageEntry::new(Path::new("."), 0, 0, 1024);
let stores = vec![Arc::new(s1)];
assert!(!AccountsDb::is_shrinking_productive(0, &stores));
let s1 = AccountStorageEntry::new(Path::new("."), 0, 0, PAGE_SIZE * 4);
let stores = vec![Arc::new(s1)];
stores[0].add_account((3 * PAGE_SIZE as usize) - 1);
stores[0].add_account(10);
stores[0].remove_account(10, false);
assert!(AccountsDb::is_shrinking_productive(0, &stores));
stores[0].add_account(PAGE_SIZE as usize);
assert!(!AccountsDb::is_shrinking_productive(0, &stores));
let s1 = AccountStorageEntry::new(Path::new("."), 0, 0, PAGE_SIZE + 1);
s1.add_account(PAGE_SIZE as usize);
let s2 = AccountStorageEntry::new(Path::new("."), 0, 1, PAGE_SIZE + 1);
s2.add_account(PAGE_SIZE as usize);
let stores = vec![Arc::new(s1), Arc::new(s2)];
assert!(AccountsDb::is_shrinking_productive(0, &stores));
}
#[test]
fn test_is_candidate_for_shrink() {
solana_logger::setup();
let mut accounts = AccountsDb::new_single();
let dummy_path = Path::new("");
let dummy_size = 2 * PAGE_SIZE;
let entry = Arc::new(AccountStorageEntry::new(dummy_path, 0, 1, dummy_size));
match accounts.shrink_ratio {
AccountShrinkThreshold::TotalSpace { shrink_ratio } => {
assert_eq!(
(DEFAULT_ACCOUNTS_SHRINK_RATIO * 100.) as u64,
(shrink_ratio * 100.) as u64
)
}
AccountShrinkThreshold::IndividalStore { shrink_ratio: _ } => {
panic!("Expect the default to be TotalSpace")
}
}
entry.alive_bytes.store(3000, Ordering::Relaxed);
assert!(accounts.is_candidate_for_shrink(&entry));
entry.alive_bytes.store(5000, Ordering::Relaxed);
assert!(!accounts.is_candidate_for_shrink(&entry));
accounts.shrink_ratio = AccountShrinkThreshold::TotalSpace { shrink_ratio: 0.3 };
entry.alive_bytes.store(3000, Ordering::Relaxed);
assert!(accounts.is_candidate_for_shrink(&entry));
accounts.shrink_ratio = AccountShrinkThreshold::IndividalStore { shrink_ratio: 0.3 };
assert!(!accounts.is_candidate_for_shrink(&entry));
}
#[test]
fn test_calculate_storage_count_and_alive_bytes() {
let accounts = AccountsDb::new_single();
let shared_key = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
let slot0 = 0;
accounts.store_uncached(slot0, &[(&shared_key, &account)]);
let result =
accounts.calculate_storage_count_and_alive_bytes(&mut GenerateIndexTimings::default());
assert_eq!(result.len(), 1);
for (k, v) in result.iter() {
assert_eq!((k, v), (&0, &(144, 1)));
}
}
#[test]
fn test_calculate_storage_count_and_alive_bytes_0_accounts() {
let accounts = AccountsDb::new_single();
let result =
accounts.calculate_storage_count_and_alive_bytes(&mut GenerateIndexTimings::default());
assert!(result.is_empty());
}
#[test]
fn test_calculate_storage_count_and_alive_bytes_2_accounts() {
let accounts = AccountsDb::new_single();
let keys = [
solana_sdk::pubkey::Pubkey::new(&[0; 32]),
solana_sdk::pubkey::Pubkey::new(&[255; 32]),
];
// make sure accounts are in 2 different bins
assert!(
crate::accounts_index::get_bin_pubkey(&keys[0])
!= crate::accounts_index::get_bin_pubkey(&keys[1])
);
let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
let account_big = AccountSharedData::new(1, 1000, AccountSharedData::default().owner());
let slot0 = 0;
accounts.store_uncached(slot0, &[(&keys[0], &account)]);
accounts.store_uncached(slot0, &[(&keys[1], &account_big)]);
let result =
accounts.calculate_storage_count_and_alive_bytes(&mut GenerateIndexTimings::default());
assert_eq!(result.len(), 1);
for (k, v) in result.iter() {
assert_eq!((k, v), (&0, &(1280, 2)));
}
}
#[test]
fn test_set_storage_count_and_alive_bytes() {
let accounts = AccountsDb::new_single();
// make sure we have storage 0
let shared_key = solana_sdk::pubkey::new_rand();
let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
let slot0 = 0;
accounts.store_uncached(slot0, &[(&shared_key, &account)]);
// fake out the store count to avoid the assert
for slot_stores in accounts.storage.0.iter() {
for (_id, store) in slot_stores.value().read().unwrap().iter() {
store.alive_bytes.store(0, Ordering::SeqCst);
}
}
// populate based on made up hash data
let mut hashmap = HashMap::default();
hashmap.insert(0, (2, 3));
accounts.set_storage_count_and_alive_bytes(hashmap, &mut GenerateIndexTimings::default());
assert_eq!(accounts.storage.0.len(), 1);
for slot_stores in accounts.storage.0.iter() {
for (id, store) in slot_stores.value().read().unwrap().iter() {
assert_eq!(id, &0);
assert_eq!(store.count_and_status.read().unwrap().0, 3);
assert_eq!(store.alive_bytes.load(Ordering::SeqCst), 2);
}
}
}
#[test]
fn test_purge_alive_unrooted_slots_after_clean() {
let accounts = AccountsDb::new_single();
// Key shared between rooted and nonrooted slot
let shared_key = solana_sdk::pubkey::new_rand();
// Key to keep the storage entry for the unrooted slot alive
let unrooted_key = solana_sdk::pubkey::new_rand();
let slot0 = 0;
let slot1 = 1;
// Store accounts with greater than 0 lamports
let account = AccountSharedData::new(1, 1, AccountSharedData::default().owner());
accounts.store_uncached(slot0, &[(&shared_key, &account)]);
accounts.store_uncached(slot0, &[(&unrooted_key, &account)]);
// Simulate adding dirty pubkeys on bank freeze. Note this is
// not a rooted slot
accounts.get_accounts_delta_hash(slot0);
// On the next *rooted* slot, update the `shared_key` account to zero lamports
let zero_lamport_account =
AccountSharedData::new(0, 0, AccountSharedData::default().owner());
accounts.store_uncached(slot1, &[(&shared_key, &zero_lamport_account)]);
// Simulate adding dirty pubkeys on bank freeze, set root
accounts.get_accounts_delta_hash(slot1);
accounts.add_root(slot1);
// The later rooted zero-lamport update to `shared_key` cannot be cleaned
// because it is kept alive by the unrooted slot.
accounts.clean_accounts(None, false);
assert!(accounts
.accounts_index
.get_account_read_entry(&shared_key)
.is_some());
// Simulate purge_slot() call from AccountsBackgroundService
let is_from_abs = true;
accounts.purge_slot(slot0, 0, is_from_abs);
// Now clean should clean up the remaining key
accounts.clean_accounts(None, false);
assert!(accounts
.accounts_index
.get_account_read_entry(&shared_key)
.is_none());
assert!(accounts.storage.get_slot_storage_entries(slot0).is_none());
}
}
| {
Self {
last_update: Instant::now(),
my_last_report_count: 0,
total_count,
report_delay_secs,
first_caller: false,
ultimate_count,
}
} |
info_messaging.py | '''
Project: Gui Gin Rummy
File name: info_messaging.py
Author: William Hale
Date created: 3/28/2020
'''
# from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .game_canvas import GameCanvas
from typing import List
import rlcard.games.gin_rummy.utils.utils as gin_rummy_utils
from rlcard.games.gin_rummy.utils.thinker import Thinker
from .canvas_item import CardItem
from . import configurations
def blank_info_message_label(game_canvas: 'GameCanvas'):
game_canvas.info_message_label.configure(text="")
def show_activate_menus_message(game_canvas: 'GameCanvas'):
if not game_canvas.query.is_human(player_id=1):
return
if not configurations.IS_SHOW_TIPS:
return
if game_canvas.query.is_going_out_button_visible():
return
lines = [] # type: List[str]
lines.append("The menu items may not drop down.")
lines.append("On an Apple computer, this is a known problem.")
lines.append("A workaround is to hit cmd-tab twice to switch to another application and back to this application.")
info_message = " ".join(lines)
game_canvas.info_message_label.configure(text=info_message)
def show_pick_up_discard_message(player_id: int, game_canvas: 'GameCanvas'):
if not game_canvas.query.is_human(player_id=1):
return
if not configurations.IS_SHOW_TIPS:
return
if player_id == 1 and game_canvas.info_message_label['text'] == "":
hand = game_canvas.getter.get_held_pile_cards(player_id=player_id)
discard_card_item_id = game_canvas.getter.get_top_discard_pile_item_id()
discard_card_item = game_canvas.canvas_item_by_item_id[discard_card_item_id]
if isinstance(discard_card_item, CardItem):
discard_card_id = discard_card_item.card_id
discard_card = gin_rummy_utils.get_card(card_id=discard_card_id)
thinker = Thinker(hand=hand)
meld_piles_with_discard_card = thinker.get_meld_piles_with_discard_card(discard_card=discard_card)
if meld_piles_with_discard_card:
one_meld_pile = meld_piles_with_discard_card[0]
left_held_card = hand[0]
if left_held_card not in one_meld_pile:
lines = ["Tip:"] # type: List[str]
for card in meld_piles_with_discard_card[0]:
if card != discard_card:
message = "Tap {} to select it.".format(card)
lines.append(message)
lines.append("Tap {} to pick it up.".format(discard_card))
lines.append("Tap to the left of the {} to drop the new meld.".format(left_held_card))
info_message = "\n".join(lines)
game_canvas.info_message_label.configure(text=info_message)
def show_arrange_cards_message(player_id: int, game_canvas: 'GameCanvas'):
if not game_canvas.query.is_human(player_id=1):
return
if not configurations.IS_SHOW_TIPS:
return
if game_canvas.query.is_going_out_button_visible():
return
game_round = game_canvas.query.get_game().round
if game_round is None:
return
move_count = len(game_round.move_sheet)
if move_count <= 1 or move_count > 8:
return
if player_id == 1 and game_canvas.info_message_label['text'] == "":
lines = ["Tip:"] # type: List[str]
lines.append("You can arrange cards in your hand.")
lines.append("Select the cards you want to move by tapping them.")
lines.append("Right click the card that you want to drop them on.")
info_message = " ".join(lines)
game_canvas.info_message_label.configure(text=info_message)
def show_hide_tips_message(game_canvas: 'GameCanvas'):
| if not game_canvas.query.is_human(player_id=1):
return
if not configurations.IS_SHOW_TIPS:
return
lines = ["Tip:"] # type: List[str]
lines.append("Uncheck 'show tips' in the preferences to hide tips.")
info_message = " ".join(lines)
game_canvas.info_message_label.configure(text=info_message) |
|
test_sync.rs | use std::sync::Arc;
use std::thread;
use protobuf::CodedInputStream;
use protobuf::Message;
use super::test_sync_pb::*;
// test messages are sync
#[test]
fn test_sync() | {
let m = Arc::new({
let mut r = TestSync::new();
r.set_int32_field(23);
r
});
let threads: Vec<_> = (0..4)
.map(|_| {
let m_copy = m.clone();
thread::spawn(move || {
let bytes = m_copy.write_to_bytes().unwrap();
let mut is = CodedInputStream::from_bytes(&bytes);
let mut read = TestSync::new();
// API is not very convenient here
read.merge_from(&mut is).unwrap();
read.check_initialized().unwrap();
read.get_int32_field()
})
}).collect();
let results = threads
.into_iter()
.map(|t| t.join().unwrap())
.collect::<Vec<_>>();
assert_eq!(&[23, 23, 23, 23], &results[..]);
} |
|
VmProxy.ts | import { util } from '@remix-project/remix-lib'
const { hexListFromBNs, formatMemory } = util
import { helpers } from '@remix-project/remix-lib'
const { normalizeHexAddress } = helpers.ui
import { ConsoleLogs } from '@remix-project/remix-lib'
import { toChecksumAddress, BN, bufferToHex, Address } from 'ethereumjs-util'
import Web3 from 'web3'
import { ethers } from 'ethers'
import { VMContext } from './vm-context'
export class VmProxy {
vmContext: VMContext
web3: Web3
vm
vmTraces
txs
txsReceipt
hhLogs
processingHash
processingAddress
processingIndex
previousDepth
incr
eth
debug
providers
currentProvider
storageCache
lastProcessedStorageTxHash
sha3Preimages
sha3
toHex
toAscii
fromAscii
fromDecimal
fromWei
toWei
toBigNumber
isAddress
utils
txsMapBlock
blocks
latestBlockNumber
constructor (vmContext: VMContext) {
this.vmContext = vmContext
this.web3 = new Web3()
this.vm = null
this.vmTraces = {}
this.txs = {}
this.txsReceipt = {}
this.hhLogs = {}
this.processingHash = null
this.processingAddress = null
this.processingIndex = null
this.previousDepth = 0
this.incr = 0
this.eth = {}
this.debug = {}
this.eth.getCode = (address, cb) => this.getCode(address, cb)
this.eth.getTransaction = (txHash, cb) => this.getTransaction(txHash, cb)
this.eth.getTransactionReceipt = (txHash, cb) => this.getTransactionReceipt(txHash, cb)
this.eth.getTransactionFromBlock = (blockNumber, txIndex, cb) => this.getTransactionFromBlock(blockNumber, txIndex, cb)
this.eth.getBlockNumber = (cb) => this.getBlockNumber(cb)
this.debug.traceTransaction = (txHash, options, cb) => this.traceTransaction(txHash, options, cb)
this.debug.storageRangeAt = (blockNumber, txIndex, address, start, maxLength, cb) => this.storageRangeAt(blockNumber, txIndex, address, start, maxLength, cb)
this.debug.preimage = (hashedKey, cb) => this.preimage(hashedKey, cb)
this.providers = { HttpProvider: function (url) {} }
this.currentProvider = { host: 'vm provider' }
this.storageCache = {}
this.lastProcessedStorageTxHash = {}
this.sha3Preimages = {}
// util
this.sha3 = (...args) => this.web3.utils.sha3.apply(this, args)
this.toHex = (...args) => this.web3.utils.toHex.apply(this, args)
this.toAscii = (...args) => this.web3.utils.toAscii.apply(this, args)
this.fromAscii = (...args) => this.web3.utils.fromAscii.apply(this, args)
this.fromDecimal = (...args) => this.web3.utils.fromDecimal.apply(this, args)
this.fromWei = (...args) => this.web3.utils.fromWei.apply(this, args)
this.toWei = (...args) => this.web3.utils.toWei.apply(this, args)
this.toBigNumber = (...args) => this.web3.utils.toBN.apply(this, args)
this.isAddress = (...args) => this.web3.utils.isAddress.apply(this, args)
this.utils = Web3.utils || []
this.txsMapBlock = {}
this.blocks = {}
this.latestBlockNumber = 0
}
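// Usage sketch (illustrative only; the VMContext wiring that feeds this proxy is
// assumed): the proxy is queried like a web3-style provider, e.g.
//   vmProxy.eth.getCode(address, (error, code) => { /* ... */ })
//   vmProxy.debug.traceTransaction(txHash, {}, (error, trace) => { /* ... */ })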
setVM (vm) {
if (this.vm === vm) return
this.vm = vm
this.vm.on('step', async (data, next) => {
await this.pushTrace(data)
next()
})
this.vm.on('afterTx', async (data, next) => {
await this.txProcessed(data)
next()
})
this.vm.on('beforeTx', async (data, next) => {
await this.txWillProcess(data)
next()
})
}
releaseCurrentHash () {
const ret = this.processingHash
this.processingHash = undefined
return ret
}
async txWillProcess (data) {
this.incr++
this.processingHash = bufferToHex(data.hash())
this.vmTraces[this.processingHash] = {
gas: '0x0',
return: '0x0',
structLogs: []
}
const tx = {}
tx['hash'] = this.processingHash
tx['from'] = toChecksumAddress(data.getSenderAddress().toString())
if (data.to) {
tx['to'] = toChecksumAddress(data.to.toString())
}
this.processingAddress = tx['to']
tx['input'] = bufferToHex(data.data)
tx['gas'] = data.gasLimit.toString(10)
if (data.value) {
tx['value'] = data.value.toString(10)
}
this.txs[this.processingHash] = tx
this.txsReceipt[this.processingHash] = tx
this.storageCache[this.processingHash] = {}
if (data.to) {
try {
const storage = await this.vm.stateManager.dumpStorage(data.to)
this.storageCache[this.processingHash][tx['to']] = storage
this.lastProcessedStorageTxHash[tx['to']] = this.processingHash
} catch (e) {
console.log(e)
}
}
this.processingIndex = 0
}
async txProcessed (data) {
const lastOp = this.vmTraces[this.processingHash].structLogs[this.processingIndex - 1]
if (lastOp) {
lastOp.error = lastOp.op !== 'RETURN' && lastOp.op !== 'STOP' && lastOp.op !== 'DESTRUCT'
}
const gasUsed = '0x' + data.gasUsed.toString(16)
this.vmTraces[this.processingHash].gas = gasUsed
this.txsReceipt[this.processingHash].gasUsed = gasUsed
const logs = []
for (const l in data.execResult.logs) {
const log = data.execResult.logs[l]
const topics = []
if (log[1].length > 0) {
for (const k in log[1]) {
topics.push('0x' + log[1][k].toString('hex'))
}
} else {
topics.push('0x')
}
logs.push({
address: '0x' + log[0].toString('hex'),
data: '0x' + log[2].toString('hex'),
topics: topics,
rawVMResponse: log
})
}
this.txsReceipt[this.processingHash].logs = logs
this.txsReceipt[this.processingHash].transactionHash = this.processingHash
const status = data.execResult.exceptionError ? 0 : 1
this.txsReceipt[this.processingHash].status = `0x${status}`
if (data.createdAddress) {
const address = data.createdAddress.toString()
this.vmTraces[this.processingHash].return = toChecksumAddress(address)
this.txsReceipt[this.processingHash].contractAddress = toChecksumAddress(address)
} else if (data.execResult.returnValue) {
this.vmTraces[this.processingHash].return = bufferToHex(data.execResult.returnValue)
} else {
this.vmTraces[this.processingHash].return = '0x'
}
this.processingIndex = null
this.processingAddress = null
this.previousDepth = 0
}
async pushTrace (data) {
const depth = data.depth + 1 // geth starts the depth from 1
if (!this.processingHash) {
console.log('no tx processing')
return
}
let previousopcode
if (this.vmTraces[this.processingHash] && this.vmTraces[this.processingHash].structLogs[this.processingIndex - 1]) {
previousopcode = this.vmTraces[this.processingHash].structLogs[this.processingIndex - 1]
}
if (this.previousDepth > depth && previousopcode) {
// returning from context, set error if it is not STOP or RETURN
previousopcode.invalidDepthChange = previousopcode.op !== 'RETURN' && previousopcode.op !== 'STOP'
}
const step = {
stack: hexListFromBNs(data.stack),
memory: formatMemory(data.memory),
storage: data.storage,
op: data.opcode.name,
pc: data.pc,
gasCost: data.opcode.fee.toString(),
gas: data.gasLeft.toString(),
depth: depth,
error: data.error === false ? undefined : data.error
}
this.vmTraces[this.processingHash].structLogs.push(step)
// Track hardhat console.log call
if (step.op === 'STATICCALL' && step.stack[step.stack.length - 2] === '0x000000000000000000000000000000000000000000636f6e736f6c652e6c6f67') {
const stackLength = step.stack.length
const payloadStart = parseInt(step.stack[stackLength - 3], 16)
const memory = step.memory.join('')
let payload = memory.substring(payloadStart * 2, memory.length)
const fnselectorStr = payload.substring(0, 8)
const fnselectorStrInHex = '0x' + fnselectorStr
const fnselector = parseInt(fnselectorStrInHex)
const fnArgs = ConsoleLogs[fnselector]
const iface = new ethers.utils.Interface([`function log${fnArgs} view`])
const functionDesc = iface.getFunction(`log${fnArgs}`)
const sigHash = iface.getSighash(`log${fnArgs}`)
if (fnArgs.includes('uint') && sigHash !== fnselectorStrInHex) {
payload = payload.replace(fnselectorStr, sigHash)
} else {
payload = '0x' + payload
}
const consoleArgs = iface.decodeFunctionData(functionDesc, payload)
this.hhLogs[this.processingHash] = this.hhLogs[this.processingHash] ? this.hhLogs[this.processingHash] : []
this.hhLogs[this.processingHash].push(consoleArgs)
}
if (step.op === 'CREATE' || step.op === 'CALL') {
if (step.op === 'CREATE') {
this.processingAddress = '(Contract Creation - Step ' + this.processingIndex + ')'
this.storageCache[this.processingHash][this.processingAddress] = {}
this.lastProcessedStorageTxHash[this.processingAddress] = this.processingHash
} else {
this.processingAddress = normalizeHexAddress(step.stack[step.stack.length - 2])
this.processingAddress = toChecksumAddress(this.processingAddress)
if (!this.storageCache[this.processingHash][this.processingAddress]) {
const account = Address.fromString(this.processingAddress)
try {
const storage = await this.vm.stateManager.dumpStorage(account)
this.storageCache[this.processingHash][this.processingAddress] = storage
this.lastProcessedStorageTxHash[this.processingAddress] = this.processingHash
} catch (e) {
console.log(e)
}
}
}
}
if (previousopcode && previousopcode.op === 'SHA3') {
const preimage = this.getSha3Input(previousopcode.stack, previousopcode.memory)
const imageHash = step.stack[step.stack.length - 1].replace('0x', '')
this.sha3Preimages[imageHash] = {
preimage: preimage
}
}
this.processingIndex++
this.previousDepth = depth
}
getCode (address, cb) {
address = toChecksumAddress(address)
this.vm.stateManager.getContractCode(Address.fromString(address)).then((result) => {
cb(null, bufferToHex(result))
}).catch((error) => {
cb(error)
})
}
setProvider (provider) {}
traceTransaction (txHash, options, cb) { | cb(null, this.vmTraces[txHash])
}
return this.vmTraces[txHash]
}
if (cb) {
cb('unable to retrieve traces ' + txHash, null)
}
}
storageRangeAt (blockNumber, txIndex, address, start, maxLength, cb) {
// we don't use the range params here
address = toChecksumAddress(address)
let txHash
if (txIndex === 'latest') {
txHash = this.lastProcessedStorageTxHash[address]
} else {
const block = this.vmContext.blocks[blockNumber]
txHash = '0x' + block.transactions[txIndex].hash().toString('hex')
}
if (this.storageCache[txHash] && this.storageCache[txHash][address]) {
const storage = this.storageCache[txHash][address]
return cb(null, {
storage: JSON.parse(JSON.stringify(storage)),
nextKey: null
})
}
// Before https://github.com/ethereum/remix-project/pull/1703, it used to throw an error like
// 'unable to retrieve storage ' + txIndex + ' ' + address
cb(null, { storage: {} })
}
getBlockNumber (cb) { cb(null, 'vm provider') }
getTransaction (txHash, cb) {
if (this.txs[txHash]) {
if (cb) {
cb(null, this.txs[txHash])
}
return this.txs[txHash]
}
if (cb) {
cb('unable to retrieve tx ' + txHash, null)
}
}
getTransactionReceipt (txHash, cb) {
// same as getTransaction but also returns the created address
if (this.txsReceipt[txHash]) {
if (cb) {
cb(null, this.txsReceipt[txHash])
}
return this.txsReceipt[txHash]
}
if (cb) {
cb('unable to retrieve txReceipt ' + txHash, null)
}
}
getTransactionFromBlock (blockNumber, txIndex, cb) {
const mes = 'not supposed to be needed by remix in vmmode'
console.log(mes)
if (cb) {
cb(mes, null)
}
}
preimage (hashedKey, cb) {
hashedKey = hashedKey.replace('0x', '')
cb(null, this.sha3Preimages[hashedKey] !== undefined ? this.sha3Preimages[hashedKey].preimage : null)
}
getSha3Input (stack, memory) {
let memoryStart = stack[stack.length - 1]
let memoryLength = stack[stack.length - 2]
const memStartDec = (new BN(memoryStart.replace('0x', ''), 16)).toString(10)
memoryStart = parseInt(memStartDec) * 2
const memLengthDec = (new BN(memoryLength.replace('0x', ''), 16).toString(10))
memoryLength = parseInt(memLengthDec) * 2
let i = Math.floor(memoryStart / 32)
const maxIndex = Math.floor(memoryLength / 32) + i
if (!memory[i]) {
return this.emptyFill(memoryLength)
}
let sha3Input = memory[i].slice(memoryStart - 32 * i)
i++
while (i < maxIndex) {
sha3Input += memory[i] ? memory[i] : this.emptyFill(32)
i++
}
if (sha3Input.length < memoryLength) {
const leftSize = memoryLength - sha3Input.length
sha3Input += memory[i] ? memory[i].slice(0, leftSize) : this.emptyFill(leftSize)
}
return sha3Input
}
emptyFill (size) {
return (new Array(size)).join('0')
}
} | if (this.vmTraces[txHash]) {
if (cb) { |
dvorak.rs | #[crate_id="keyboard-layout-dvorak#1.0"];
#[feature(globs)];
extern mod keyboard_key = "keyboard-key";
pub mod keyboard {
pub mod layout {
pub mod dvorak {
use keyboard_key::keyboard::key::*;
static translateToChar:[char, ..173] = [
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '`', '1', '2', '3', '4', '5', '6', '7',
'8', '9', '0', '[', ']', '\x00', '\x00', '\x00', '\x00', '\x00',
'/', '*', '-', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\'', ',', '.', 'p', 'y',
'f', 'g', 'c', 'r', 'l', '/', '=', '\\', '\x00', '\x00',
'\x00', '7', '8', '9', '+', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', 'a', 'o', 'e',
'u', 'i', 'd', 'h', 't', 'n', 's', '-', '\x00', '4',
'5', '6', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', ';',
'q', 'j', 'k', 'x', 'b', 'm', 'w', 'v', 'z', '\x00',
'\x00', '\x00', '1', '2', '3', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', ' ', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '0', '.'
];
static translateToCharShift:[char, ..173] = [
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '~', '!', '@', '#', '$', '%', '^', '&',
'*', '(', ')', '{', '}', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '"', '<', '>', 'P', 'Y',
'F', 'G', 'C', 'R', 'L', '?', '+', '|', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', 'A', 'O', 'E',
'U', 'I', 'D', 'H', 'T', 'N', 'S', '_', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', ':',
'Q', 'J', 'K', 'X', 'B', 'M', 'W', 'V', 'Z', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00', ' ', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',
'\x00', '\x00', '\x00'
];
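// For example (read off the tables above): key code 68 translates to 'p'
// with no modifiers and to 'P' when shift is held.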
pub fn translate(key: Key, dead: u32) -> u32 |
}
}
}
| {
if (!key.shift() && !key.alt() && !key.control()) {
if (dead != 0) {
// XXX: use unicode combine
translateToChar[key.code] as u32
}
else {
translateToChar[key.code] as u32
}
}
else if (key.shift() && !key.alt() && !key.control()) {
if (dead != 0) {
// XXX: use unicode combine
translateToCharShift[key.code] as u32
}
else {
translateToCharShift[key.code] as u32
}
}
else {
0
}
} |
generic_duplicate_param_use3.rs | // revisions: min_tait full_tait
#![feature(min_type_alias_impl_trait)]
#![cfg_attr(full_tait, feature(type_alias_impl_trait))]
//[full_tait]~^ WARN incomplete |
// test that unused generic parameters are ok
type Two<T, U> = impl Debug;
fn one<T: Debug>(t: T) -> Two<T, T> {
//~^ ERROR non-defining opaque type use in defining scope
t
}
fn two<T: Debug, U>(t: T, _: U) -> Two<T, U> {
t
}
fn three<T, U: Debug>(_: T, u: U) -> Two<T, U> {
u
} |
use std::fmt::Debug;
fn main() {} |
bidir.rs | use crate::traits::{self, EmitResult};
use std::{borrow::Cow, cell::RefCell, collections::VecDeque, rc::Rc};
struct InnerRef<'parent, Tin, Tout> {
inq: &'parent mut VecDeque<Tin>,
outq: &'parent mut VecDeque<Tout>,
}
/// Non-thread-safe, reference-counted,
/// bidirectional event queue,
/// designed for `1:1` communication,
/// thus, it doesn't support multi-casting.
///
/// The first type parameter describes the
/// events which the primary peer receives,
/// the second type parameter describes the
/// events which the secondary peer receives.
#[derive(Clone, Debug)]
pub struct Queue<Tp, Ts>(pub(crate) Rc<RefCell<(VecDeque<Tp>, VecDeque<Ts>)>>);
/// The "other" end of the bidirectional [`Queue`](crate::bidir::Queue)
#[derive(Clone, Debug)]
pub struct Secondary<Tp, Ts>(Queue<Tp, Ts>);
impl<Tp, Ts> Default for Queue<Tp, Ts> {
fn default() -> Self {
Queue(Rc::new(RefCell::new((VecDeque::new(), VecDeque::new()))))
}
}
impl<Tp, Ts> Queue<Tp, Ts> {
#[inline]
pub fn | () -> Self {
Default::default()
}
/// This function returns the "other" end of the bidirectional `Queue`
///
/// NOTE: multiple calls to this method on the same queue
/// return wrapped references to the same [`Secondary`](crate::bidir::Secondary).
#[inline]
pub fn secondary(&self) -> Secondary<Tp, Ts> {
Secondary(Queue(Rc::clone(&self.0)))
}
fn on_queues_mut<F, R>(&self, f: F) -> R
where
F: FnOnce(InnerRef<Tp, Ts>) -> R,
{
let inner = &mut *self.0.borrow_mut();
f(InnerRef { inq: &mut inner.0, outq: &mut inner.1 })
}
}
impl<Tp, Ts> Secondary<Tp, Ts> {
fn on_queues_mut<F, R>(&self, f: F) -> R
where
F: FnOnce(InnerRef<Ts, Tp>) -> R,
{
let inner = &mut *(self.0).0.borrow_mut();
f(InnerRef { inq: &mut inner.1, outq: &mut inner.0 })
}
}
macro_rules! impl_queue_part {
($strucn:ident, $tp1:ident, $tp2:ident, $tin:ident, $tout:ident) => {
impl<$tp1, $tp2> $strucn<$tp1, $tp2> {
/// Function which iterates over the input event queue
/// and optionally schedules items to be put into the
/// outgoing event queue
pub fn bounce<F>(&self, f: F)
where
F: FnMut($tin) -> Option<$tout>,
{
self.on_queues_mut(|x| {
x.outq.extend(std::mem::replace(x.inq, VecDeque::new()).into_iter().flat_map(f))
})
}
/// This function retrieves the newest event from
/// the event queue and drops the rest.
pub fn retrieve_newest(&self) -> Option<$tin> {
self.on_queues_mut(|x| x.inq.drain(..).last())
}
}
impl<$tp1, $tp2> traits::QueueInterfaceCommon for $strucn<$tp1, $tp2> {
type Item = $tout;
#[inline]
fn buffer_is_empty(&self) -> bool {
self.on_queues_mut(|x| x.outq.is_empty())
}
}
impl<$tin, $tout: Clone> traits::Emitter for $strucn<$tp1, $tp2> {
#[inline]
fn emit<'a>(&self, event: Cow<'a, $tout>) -> EmitResult<'a, $tout> {
self.on_queues_mut(|x| x.outq.push_back(event.into_owned()));
EmitResult::Delivered
}
}
impl<$tin: Clone, $tout> traits::Listen for $strucn<$tp1, $tp2> {
type Item = $tin;
#[inline]
fn with<F, R>(&self, f: F) -> R
where
F: FnOnce(&[Self::Item]) -> R,
{
f(&self.peek()[..])
}
#[inline]
fn map<F, R>(&self, f: F) -> Vec<R>
where
F: FnMut(&Self::Item) -> R,
{
self.on_queues_mut(|x| {
std::mem::replace(x.inq, VecDeque::new()).iter().map(f).collect()
})
}
#[inline]
fn peek(&self) -> Vec<Self::Item> {
self.on_queues_mut(|x| {
std::mem::replace(x.inq, VecDeque::new()).into_iter().collect()
})
}
#[inline]
fn with_n<F, R>(&self, n: usize, f: F) -> R
where
F: FnOnce(&[Self::Item]) -> R,
{
f(&self.peek_n(n)[..])
}
#[inline]
fn map_n<F, R>(&self, n: usize, f: F) -> Vec<R>
where
F: FnMut(&Self::Item) -> R,
{
self.on_queues_mut(|x| {
let n = n.min(x.inq.len());
x.inq.drain(0..n).collect::<Vec<_>>().iter().map(f).collect()
})
}
#[inline]
fn peek_n(&self, n: usize) -> Vec<Self::Item> {
self.on_queues_mut(|x| {
let n = n.min(x.inq.len());
x.inq.drain(0..n).collect()
})
}
}
};
}
impl_queue_part!(Queue, Tp, Ts, Tp, Ts);
impl_queue_part!(Secondary, Tp, Ts, Ts, Tp);
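// A minimal usage sketch (mirrors the tests below; assumes the prelude traits
// are in scope, as in the test module): events emitted on the primary end are
// received on the secondary end, and vice versa.
//
//     let primary: Queue<i32, i32> = Queue::new();
//     let secondary = primary.secondary();
//     primary.emit_owned(1);
//     assert_eq!(secondary.peek(), &[1]);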
#[cfg(test)]
mod tests {
use crate::prelude::*;
#[test]
fn test_bidir_evq() {
let primary = super::Queue::new();
let secondary = primary.secondary();
primary.emit_owned(1);
assert_eq!(secondary.peek(), &[1]);
primary.emit_owned(2);
primary.emit_owned(3);
assert_eq!(secondary.peek(), &[2, 3]);
secondary.emit_owned(4);
secondary.emit_owned(5);
secondary.emit_owned(6);
primary.bounce(|x| Some(x + 1));
assert_eq!(secondary.peek(), &[5, 6, 7]);
}
#[test]
fn test_n_bidir_evq() {
let primary = super::Queue::new();
let secondary = primary.secondary();
primary.emit_owned(1);
assert_eq!(secondary.peek(), &[1]);
primary.emit_owned(2);
primary.emit_owned(3);
assert_eq!(secondary.peek_n(1), &[2]);
secondary.emit_owned(4);
secondary.emit_owned(5);
secondary.emit_owned(6);
primary.bounce(|x| Some(x + 1));
assert_eq!(secondary.peek_n(2), &[3, 5]);
assert_eq!(secondary.peek_n(2), &[6, 7]);
}
}
| new |
publisher.rs | /*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
use std::os::unix::io::RawFd;
use async_trait::async_trait;
use containerd_shim_protos::api::Empty;
use containerd_shim_protos::protobuf::Message;
use containerd_shim_protos::shim::events;
use containerd_shim_protos::shim_async::{Client, Events, EventsClient};
use containerd_shim_protos::ttrpc;
use containerd_shim_protos::ttrpc::context::Context;
use containerd_shim_protos::ttrpc::r#async::TtrpcContext;
use crate::asynchronous::util::asyncify;
use crate::error::Result;
use crate::util::{any, connect, timestamp};
/// Async Remote publisher connects to containerd's TTRPC endpoint to publish events from shim.
pub struct RemotePublisher {
client: EventsClient,
}
impl RemotePublisher {
/// Connect to containerd's TTRPC endpoint asynchronously.
///
/// containerd uses `/run/containerd/containerd.sock.ttrpc` by default
pub async fn new(address: impl AsRef<str>) -> Result<RemotePublisher> {
let client = Self::connect(address).await?;
Ok(RemotePublisher {
client: EventsClient::new(client),
})
}
async fn connect(address: impl AsRef<str>) -> Result<Client> {
let addr = address.as_ref().to_string();
let fd = asyncify(move || -> Result<RawFd> {
let fd = connect(addr)?;
Ok(fd)
})
.await?;
// Client::new() takes ownership of the RawFd.
Ok(Client::new(fd))
}
/// Publish a new event.
///
/// Event object can be anything that Protobuf is able to serialize (e.g. implements the `Message` trait).
pub async fn publish(
&self,
ctx: Context,
topic: &str,
namespace: &str,
event: impl Message,
) -> Result<()> {
let mut envelope = events::Envelope::new();
envelope.set_topic(topic.to_owned());
envelope.set_namespace(namespace.to_owned());
envelope.set_timestamp(timestamp()?);
envelope.set_event(any(event)?);
let mut req = events::ForwardRequest::new();
req.set_envelope(envelope);
self.client.forward(ctx, &req).await?;
Ok(())
}
}
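// A minimal usage sketch (mirrors the test below; the socket path, namespace and
// container id are illustrative, not mandated by this crate):
//
//     let publisher = RemotePublisher::new("/run/containerd/containerd.sock.ttrpc").await?;
//     let mut event = TaskOOM::new();
//     event.set_container_id("my-container".to_string());
//     publisher.publish(Context::default(), "/tasks/oom", "default", event).await?;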
#[async_trait]
impl Events for RemotePublisher {
async fn | (
&self,
_ctx: &TtrpcContext,
req: events::ForwardRequest,
) -> ttrpc::Result<Empty> {
self.client.forward(Context::default(), &req).await
}
}
#[cfg(test)]
mod tests {
use std::os::unix::io::AsRawFd;
use std::os::unix::net::UnixListener;
use std::sync::Arc;
use tokio::sync::mpsc::{channel, Sender};
use tokio::sync::Barrier;
use containerd_shim_protos::api::{Empty, ForwardRequest};
use containerd_shim_protos::events::task::TaskOOM;
use containerd_shim_protos::shim_async::create_events;
use containerd_shim_protos::ttrpc::asynchronous::Server;
use super::*;
struct FakeServer {
tx: Sender<i32>,
}
#[async_trait]
impl Events for FakeServer {
async fn forward(&self, _ctx: &TtrpcContext, req: ForwardRequest) -> ttrpc::Result<Empty> {
let env = req.get_envelope();
if env.get_topic() == "/tasks/oom" {
self.tx.send(0).await.unwrap();
} else {
self.tx.send(-1).await.unwrap();
}
Ok(Empty::default())
}
}
#[tokio::test]
async fn test_connect() {
let tmpdir = tempfile::tempdir().unwrap();
let path = format!("{}/socket", tmpdir.as_ref().to_str().unwrap());
let path1 = path.clone();
assert!(RemotePublisher::connect("a".repeat(16384)).await.is_err());
assert!(RemotePublisher::connect(&path).await.is_err());
let (tx, mut rx) = channel(1);
let server = FakeServer { tx };
let barrier = Arc::new(Barrier::new(2));
let barrier2 = barrier.clone();
let server_thread = tokio::spawn(async move {
let listener = UnixListener::bind(&path1).unwrap();
let t = Arc::new(Box::new(server) as Box<dyn Events + Send + Sync>);
let service = create_events(t);
let mut server = Server::new()
.set_domain_unix()
.add_listener(listener.as_raw_fd())
.unwrap()
.register_service(service);
std::mem::forget(listener);
server.start().await.unwrap();
barrier2.wait().await;
barrier2.wait().await;
server.shutdown().await.unwrap();
});
barrier.wait().await;
let client = RemotePublisher::new(&path).await.unwrap();
let mut msg = TaskOOM::new();
msg.set_container_id("test".to_string());
client
.publish(Context::default(), "/tasks/oom", "ns1", msg)
.await
.unwrap();
match rx.recv().await {
Some(0) => {}
_ => {
panic!("the received event is not same as published")
}
}
barrier.wait().await;
server_thread.await.unwrap();
}
}
| forward |
session.rs | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
use bytes::{Bytes, BytesMut};
use futures::future::Future;
use futures::stream::{FuturesUnordered, StreamExt};
use nix::fcntl;
use nix::fcntl::OFlag;
use nix::sys::signal::Signal;
use nix::sys::stat::{self, Mode};
use nix::sys::uio;
use nix::unistd;
use reverie::Pid;
use std::sync::Arc;
use tokio::io::AsyncWrite;
use tokio::io::AsyncWriteExt;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio::sync::MappedMutexGuard;
use tokio::sync::Mutex;
use tokio::sync::MutexGuard;
use crate::trace::ChildOp;
use super::commands::{self, *};
use super::regs::Amd64CoreRegs;
use super::response::*;
use super::Breakpoint;
use super::BreakpointType;
use super::Error;
use super::GdbRequest;
use super::Inferior;
use super::InferiorThreadId;
use super::Packet;
use super::ResumeInferior;
use super::StoppedInferior;
use std::collections::BTreeMap;
type BoxWriter = Box<dyn AsyncWrite + Unpin + Send + Sync + 'static>;
/// Gdb session manager.
/// Receives commands over the tcp stream.
/// Receives requests from the Tracee (new task, reap orphans, etc.).
/// The session ends when the client disconnects from the tcp stream.
/// (gdb) detach semantics?
pub struct Session {
/// No-ACK mode, set by gdb client.
pub no_ack_mode: bool,
/// Stream to send reply to.
pub stream_tx: BoxWriter,
/// buffer used to send data over the tcp stream
pub tx_buf: BytesMut,
/// Gdb remote protocol command notifier.
pub pkt_rx: Option<mpsc::Receiver<Packet>>,
/// buffer used by hostio.
pub bufsiz: usize,
/// Current pid used by vFile (hostio).
pub hostio_pid: Option<Pid>,
/// Inferiors managed by this session.
pub inferiors: Arc<Mutex<BTreeMap<Pid, Inferior>>>,
/// Current thread
pub current: Option<InferiorThreadId>,
/// Channel to report stop event.
// NB: even though we could use a single `gdb_stop_rx` to receive all
// stop events (mpsc), we use a `stop_rx` channel for each `inferior`
// instead. This is because `vCont;p<pid>:-1` could resume multiple
// threads, hence multiple threads could report stop events at the same
// time, causing de-sync issues. This is mitigated by giving each
// inferior its own `stop_rx` channel. As a result,
// `gdb_stop_rx` is moved after the initial gdb attach, once we can create
// the first inferior.
pub gdb_stop_rx: Option<mpsc::Receiver<StoppedInferior>>,
}
struct VcontResumeResult {
/// stop reason
reason: StopReason,
/// A new inferior was created.
new_inferior: Option<Inferior>,
/// ptids that must be removed, because some tasks have exited.
ptid_to_remove: Vec<ThreadId>,
/// Switch to a new task
//
// NB: This is possible when supporting multi-threaded programs. See
// the examples below.
//
// Sending packet: $vCont;c:p2.-1#10...Packet received: T05create:p02.12;06:70d9ffffff7f0000;07:28d9ffffff7f0000;10:1138eef7ff7f0000;thread:p02.02;
// Sending packet: $vCont;c:p2.-1#10...Packet received: T05swbreak:;06:201e5cf5ff7f0000;07:f01d5cf5ff7f0000;10:0e14400000000000;thread:p02.10;
// Sending packet: $qfThreadInfo#bb...Packet received: mp02.02,p02.06,p02.08,p02.0a,p02.0c,p02.0e,p02.10,p02.12,
// Sending packet: $qsThreadInfo#c8...Packet received: l
// [New Thread 2.18]
// [Switching to Thread 2.16]
// Sending packet: $z0,40140e,1#91...Packet received: OK
// Sending packet: $z0,7ffff7fe3340,1#ce...Packet received: OK
//
// Even though gdb (client) said `[Switching to Thread 2.16]`, no packets
// were sent to the server side (such as `Hgp2.16`), hence the server side
// was completely unaware of the switching. Presumably gdb (client)
// assumed *any* thread in the same process group can read/write memory,
// but that is not necessarily true for us because we use different
// channels to communicate between gdbstub <-> reverie. As a result
// we switch current to thread `switch_to`, to simulate gdb's (client)
// (mis)behavior.
switch_to: Option<InferiorThreadId>,
}
enum HandleVcontResume {
/// vCont resume not handled. This is possible because vCont can encode
/// multiple actions; only the left-most action is used if it matches
/// a given ptid.
NotHandled,
/// vCont matches a `ptid`.
Handled(VcontResumeResult),
}
impl Session {
/// Create a new session from root task.
pub fn new(
stream_tx: BoxWriter,
pkt_rx: mpsc::Receiver<Packet>,
gdb_stop_rx: mpsc::Receiver<StoppedInferior>,
) -> Self {
Session {
no_ack_mode: false,
stream_tx,
tx_buf: BytesMut::with_capacity(0x8000),
pkt_rx: Some(pkt_rx),
hostio_pid: None,
bufsiz: 0x8000,
inferiors: Arc::new(Mutex::new(BTreeMap::new())),
current: None,
gdb_stop_rx: Some(gdb_stop_rx),
}
}
/// Get current inferior. GDB can select current inferior by `Hg<thread-id>`.
async fn with_inferior<'a, F, Fut>(&'a self, threadid: ThreadId, f: F) -> Fut::Output
where
F: FnOnce(MappedMutexGuard<'a, Inferior>) -> Fut + 'a,
Fut: Future + 'a,
{
let tid = threadid
.gettid()
.unwrap_or_else(|| threadid.getpid().unwrap());
let inferiors = self.inferiors.lock().await;
let inferior = MutexGuard::map(inferiors, |inferiors| inferiors.get_mut(&tid).unwrap());
f(inferior).await
}
/// Get current inferior. GDB can select current inferior by `Hg<thread-id>`.
async fn with_current_inferior<'a, F, Fut>(&'a self, f: F) -> Fut::Output
where
F: FnOnce(MappedMutexGuard<'a, Inferior>) -> Fut + 'a,
Fut: Future + 'a,
{
let threadid: ThreadId = self.current.unwrap().into();
self.with_inferior(threadid, f).await
}
/// create a new response writer
fn response(&self, mut tx: BytesMut) -> ResponseWriter {
ResponseWriter::new(tx.split(), self.no_ack_mode)
}
/// Detach or Kill all threads matching `threadid`.
async fn detach_or_kill(&self, threadid: ThreadId, kill: bool) -> Result<(), Error> {
let mut inferiors = self.inferiors.lock().await;
let resume = ResumeInferior {
action: if kill {
ResumeAction::Continue(Some(Signal::SIGKILL))
} else {
ResumeAction::Continue(None)
},
detach: true,
};
for (_, inferior) in inferiors.iter_mut() {
if inferior.matches(&threadid) {
inferior.notify_resume(resume).await?;
}
}
inferiors.retain(|_, inferior| !inferior.matches(&threadid));
Ok(())
}
/// handle vCont resume
async fn vcont_resume(
&self,
threadid: ThreadId,
resume: ResumeInferior,
) -> Result<HandleVcontResume, Error> {
let mut inferiors_to_resume: Vec<&mut Inferior> = Vec::new();
let mut inferiors = self.inferiors.lock().await;
match threadid.tid {
// vCont a specific ptid, such as $vCont;c:p2.2#..
IdKind::Id(tid) => {
let inferior = inferiors.get_mut(&tid).ok_or(Error::UnknownThread(tid))?;
inferiors_to_resume.push(inferior);
}
// Invalid vCont
IdKind::Any => {
return Err(Error::ThreadIdNotSpecified);
}
// vCont all threads, such as $vCont;c:p2.-1#10
IdKind::All => match threadid.pid {
IdKind::Id(pid) => {
for (_, inferior) in inferiors.iter_mut() {
if inferior.getpid() == pid {
inferiors_to_resume.push(inferior);
}
}
}
_ => return Err(Error::ThreadIdNotSpecified),
},
}
if inferiors_to_resume.is_empty() {
return Ok(HandleVcontResume::NotHandled);
}
let mut new_inferior: Option<Inferior> = None;
let mut ptid_to_remove: Vec<ThreadId> = Vec::new();
let mut switch_to: Option<InferiorThreadId> = None;
let mut inferiors_to_wait = FuturesUnordered::new();
for inferior in inferiors_to_resume {
inferior.notify_resume(resume).await?;
inferiors_to_wait.push(inferior.wait_for_stop());
}
let mut reason: Option<StopReason> = None;
while let Some(stop_reason) = inferiors_to_wait.next().await {
let mut stop_reason = stop_reason?;
match &mut stop_reason {
StopReason::ThreadExited(pid, tgid, _exit_status) => {
ptid_to_remove.push(ThreadId::pid_tid(tgid.as_raw(), pid.as_raw()));
// The thread exit event `w XX; ptid` is not reported
continue;
}
StopReason::Exited(pid, _exit_status) => {
ptid_to_remove.push(ThreadId::pid(pid.as_raw()));
}
StopReason::NewTask(new_task) => {
new_inferior = Some(match new_task.op {
ChildOp::Fork => Inferior {
id: InferiorThreadId::new(new_task.child, new_task.child),
resume_tx: new_task.resume_tx.take(),
request_tx: new_task.request_tx.take(),
stop_rx: new_task.stop_rx.take(),
resume_pending: false,
},
ChildOp::Vfork => Inferior {
id: InferiorThreadId::new(new_task.child, new_task.child),
resume_tx: new_task.resume_tx.take(),
request_tx: new_task.request_tx.take(),
stop_rx: new_task.stop_rx.take(),
resume_pending: false,
},
ChildOp::Clone => Inferior {
id: InferiorThreadId::new(new_task.child, new_task.tgid),
resume_tx: new_task.resume_tx.take(),
request_tx: new_task.request_tx.take(),
stop_rx: new_task.stop_rx.take(),
resume_pending: false,
},
});
}
StopReason::Stopped(stopped) => {
switch_to = Some(InferiorThreadId::new(stopped.pid, stopped.tgid));
}
}
reason = Some(stop_reason);
break;
}
Ok(HandleVcontResume::Handled(VcontResumeResult {
reason: reason.unwrap(),
new_inferior,
ptid_to_remove,
switch_to,
}))
}
/// handle gdb remote base command
async fn handle_base(&mut self, cmd: Base, writer: &mut ResponseWriter) -> Result<(), Error> {
match cmd {
Base::QuestionMark(_) => {
writer.put_str("S05");
}
Base::QStartNoAckMode(_) => {
self.no_ack_mode = true;
writer.put_str("OK");
}
Base::qSupported(_) => {
writer.put_str("PacketSize=8000;vContSupported+;multiprocess+;exec-events+;fork-events+;vfork-events+;QThreadEvents+;QStartNoAckMode+;swbreak+;qXfer:features:read+;qXfer:auxv:read+;");
}
Base::qXfer(request) => match request {
qXfer::FeaturesRead { offset: _, len: _ } => {
// gdb/64bit-sse.xml
writer.put_str("l<target version=\"1.0\"><architecture>i386:x86-64</architecture><feature name=\"org.gnu.gdb.i386.sse\"></feature></target>");
}
qXfer::AuxvRead { offset, len } => {
if let Some(id) = self.current {
let buffer_size = std::cmp::min(self.bufsiz, len);
let mut auxv: Vec<u8> = vec![0; buffer_size];
if let Ok(nb) = fcntl::open(
format!("/proc/{}/auxv", id.pid).as_str(),
OFlag::O_RDONLY,
Mode::from_bits_truncate(0o644),
)
.and_then(|fd| {
let nb = uio::pread(fd, &mut auxv, offset as libc::off_t)?;
let _ = unistd::close(fd);
Ok(nb)
}) {
writer.put_str("l");
writer.put_binary_encoded(&auxv[..nb]);
}
}
}
},
Base::qfThreadInfo(_) => {
writer.put_str("m");
for task in self.inferiors.lock().await.values() {
let threadid: ThreadId = task.id.into();
threadid.write_response(writer);
writer.put_str(",");
}
}
Base::qsThreadInfo(_) => {
writer.put_str("l");
}
Base::qAttached(_pid) => {
writer.put_str("0");
}
Base::QThreadEvents(_thread_events) => {
// NB: This should toggle reporting thread events, such as
// `T05Create`, but I couldn't find any examples even with
// vanilla gdbserver debugging threaded programs. The gdb client
// never sends this command, even after I tried to run
// `set remote thread-events on`, as described in gdb remote
// protocol doc.
writer.put_str("OK");
}
Base::qC(_) => {
if let Some(id) = self.current {
let thread_id: ThreadId = id.into();
writer.put_str("QC");
thread_id.write_response(writer);
}
}
Base::H(h) => {
match h.op {
ThreadOp::g => {
// query or set the current threadid.
if h.id.pid == IdKind::Any && h.id.tid == IdKind::Any {
ResponseOk.write_response(writer);
} else {
h.id.try_into()
.map(|id| {
self.current = Some(id);
ResponseOk
})
.write_response(writer)
}
}
_ => {
// Hc is deprecated, others not supported.
writer.put_str("E01");
}
}
}
Base::g(_) => self
.read_registers()
.await
.map(ResponseAsHex)
.write_response(writer),
Base::G(regs) => self
.write_registers(regs.vals)
.await
.map(|_| ResponseOk)
.write_response(writer),
Base::m(m) => self
.read_inferior_memory(m.addr, m.length)
.await
.map(ResponseAsHex)
.write_response(writer),
Base::M(mem) => self
.write_inferior_memory(mem.addr, mem.length, mem.vals)
.await
.map(|_| ResponseOk)
.write_response(writer),
Base::X(mem) => self
.write_inferior_memory(mem.addr, mem.length, mem.vals)
.await
.map(|_| ResponseOk)
.write_response(writer),
// NB: detach is a resume, but we don't care about receiving
// further (gdb) stop events.
Base::D(pid) => {
let pid = pid.pid;
let threadid = pid.map_or_else(ThreadId::all, |pid| ThreadId::pid(pid.as_raw()));
self.detach_or_kill(threadid, false)
.await
.map(|_| ResponseOk)
.write_response(writer);
}
Base::z(bkpt) => {
if bkpt.ty == BreakpointType::Software {
let bkpt = Breakpoint {
ty: BreakpointType::Software,
addr: bkpt.addr,
bytecode: None,
};
self.remove_breakpoint(bkpt)
.await
.map(|_| ResponseOk)
.write_response(writer);
}
}
Base::Z(bkpt) => {
if bkpt.ty == BreakpointType::Software {
let bkpt = Breakpoint {
ty: BreakpointType::Software,
addr: bkpt.addr,
bytecode: None,
};
self.set_breakpoint(bkpt)
.await
.map(|_| ResponseOk)
.write_response(writer);
}
}
// NB: kill is a resume(SIGKILL), but we don't care about
// receiving further (gdb) stop events.
Base::vKill(pid) => {
let threadid = ThreadId::pid(pid.pid.as_raw());
self.detach_or_kill(threadid, true)
.await
.map(|_| ResponseOk)
.write_response(writer);
}
Base::vCont(vcont) => match vcont {
vCont::Query => {
writer.put_str("vCont;c;C;s;S");
}
vCont::Actions(actions) => {
// `vCont` can encode multiple actions, but we should
// resume only one matching ptid (the left-most).
while let Some((action, threadid)) = actions.first() {
let resume = match action {
ResumeAction::Step(step) => ResumeInferior {
action: ResumeAction::Step(*step),
detach: false,
},
ResumeAction::Continue(cont) => ResumeInferior {
action: ResumeAction::Continue(*cont),
detach: false,
},
not_supported => {
// Shouldn't reach here because only `c;C;s;S` are advertised.
panic!("Unsupported vCont command: {:?}", not_supported);
}
};
match self.vcont_resume(*threadid, resume).await? {
HandleVcontResume::NotHandled => {}
HandleVcontResume::Handled(VcontResumeResult {
reason,
new_inferior,
ptid_to_remove,
switch_to,
}) => {
let mut inferiors = self.inferiors.lock().await;
for ptid in ptid_to_remove {
if let Some(tid) = ptid.gettid() {
let _ = inferiors.remove(&tid);
} else {
inferiors.retain(|_, inferior| !inferior.matches(&ptid));
}
}
if let Some(new_inferior) = new_inferior {
inferiors.insert(new_inferior.gettid(), new_inferior);
}
if let Some(switch_to) = switch_to {
self.current = Some(switch_to);
}
reason.write_response(writer);
break;
}
}
}
}
},
// TODO T92309086: implement ACL for hostio.
Base::vFile(hostio) => match hostio {
vFile::Setfs(pid) => {
match pid {
Some(pid) => {
self.hostio_pid = Some(Pid::from_raw(pid));
}
None => {
self.hostio_pid = self.current.as_ref().map(|x| x.pid);
}
}
writer.put_str("F0");
}
vFile::Open(fname, flags, mode) => {
let oflag = OFlag::from_bits_truncate(flags);
let mode = Mode::from_bits_truncate(mode);
writer.put_str("F");
match fcntl::open(&fname, oflag, mode) {
Ok(fd) => writer.put_num(fd),
Err(_) => writer.put_str("-1"),
}
}
vFile::Close(fd) => {
writer.put_str(unistd::close(fd).map_or("F-1", |_| "F0"));
}
vFile::Pread(fd, count, offset) => {
let count = std::cmp::min(count as usize, self.bufsiz);
let mut buf: Vec<u8> = vec![0; count];
match uio::pread(fd, &mut buf, offset as i64) {
Ok(nb) => {
writer.put_str("F");
writer.put_num(nb);
writer.put_str(";");
writer.put_binary_encoded(&buf[..nb]);
}
Err(_) => {
writer.put_str("F-1");
}
}
}
vFile::Pwrite(fd, offset, data) => match uio::pwrite(fd, &data, offset as i64) {
Ok(nb) => {
writer.put_str("F");
writer.put_num(nb);
}
Err(_) => {
writer.put_str("F-1");
}
},
vFile::Unlink(fname) => { | }
vFile::Readlink(fname) => {
match fcntl::readlink(&fname)
.ok()
.and_then(|s| s.to_str().map(|s| s.as_bytes().to_vec()))
{
Some(bytes) => {
writer.put_str("F");
writer.put_num(bytes.len());
writer.put_str(";");
writer.put_binary_encoded(&bytes)
}
None => {
writer.put_str("F-1");
}
}
}
vFile::Fstat(fd) => {
// NB: HostioStat is not the same as FileStat.
const STAT_SIZE: usize = std::mem::size_of::<HostioStat>();
match stat::fstat(fd).ok().map(|st| {
let st: HostioStat = st.into();
let bytes: [u8; STAT_SIZE] = unsafe { std::mem::transmute(st) };
bytes
}) {
Some(bytes) => {
writer.put_str("F");
writer.put_num(STAT_SIZE);
writer.put_str(";");
writer.put_binary_encoded(&bytes);
}
None => {
writer.put_str("F-1");
}
}
}
},
}
Ok(())
}
/// handle gdb remote extended mode command
async fn handle_extended_mode(
&mut self,
cmd: ExtendedMode,
writer: &mut ResponseWriter,
) -> Result<(), Error> {
match cmd {
ExtendedMode::ExclamationMark(_) => {
writer.put_str("OK");
}
ExtendedMode::QDisableRandomization(disable_aslr) => {
// ASLR is always disabled by reverie.
if disable_aslr.val {
writer.put_str("OK");
} else {
writer.put_str("E22");
}
}
}
Ok(())
}
/// handle gdb remote monitor command
async fn handle_monitor_cmd(
&mut self,
cmd: MonitorCmd,
_writer: &mut ResponseWriter,
) -> Result<(), Error> {
match cmd {
MonitorCmd::qRcmd(_) => {
unimplemented!()
}
}
}
/// handle gdb remote section offset command
async fn handle_section_offsets(
&mut self,
cmd: SectionOffsets,
writer: &mut ResponseWriter,
) -> Result<(), Error> {
match cmd {
// should use libraries-svr4:read instead
SectionOffsets::qOffsets(_) => {
writer.put_str("");
}
}
Ok(())
}
/// handle gdb remote command
async fn handle_command(
&mut self,
cmd: commands::Command,
resp: BytesMut,
) -> Result<Bytes, Error> {
let mut writer = self.response(resp);
match cmd {
Command::Unknown(cmd) => {
tracing::info!("Unknown command: {:?}", cmd);
}
Command::Base(cmd) => self.handle_base(cmd, &mut writer).await?,
Command::ExtendedMode(cmd) => self.handle_extended_mode(cmd, &mut writer).await?,
Command::MonitorCmd(cmd) => self.handle_monitor_cmd(cmd, &mut writer).await?,
Command::SectionOffsets(cmd) => self.handle_section_offsets(cmd, &mut writer).await?,
};
Ok(writer.finish())
}
/// Handle incoming request sent over tcp stream
pub async fn run(&mut self) -> Result<(), Error> {
let cmd_rx = self.pkt_rx.take().unwrap();
let mut gdb_stop_rx = self.gdb_stop_rx.take().ok_or(Error::Detached)?;
let stop_reason = gdb_stop_rx.recv().await.ok_or(Error::Detached)?;
// set initial task as current attached task.
match stop_reason.reason {
StopReason::Stopped(stopped) => {
let id = InferiorThreadId::new(stopped.pid, stopped.tgid);
self.current = Some(id);
let mut inferior = Inferior::new(id);
inferior.request_tx = Some(stop_reason.request_tx);
inferior.resume_tx = Some(stop_reason.resume_tx);
inferior.stop_rx = Some(gdb_stop_rx);
self.inferiors.lock().await.insert(id.tid, inferior);
}
_ => unreachable!(),
}
self.handle_gdb_commands(cmd_rx).await
}
async fn handle_gdb_commands(
&mut self,
mut cmd_rx: mpsc::Receiver<Packet>,
) -> Result<(), Error> {
let mut tx_buf = BytesMut::with_capacity(0x8000);
while let Some(pkt) = cmd_rx.recv().await {
match pkt {
Packet::Ack => {}
Packet::Nack => {
panic!("client send Nack")
}
// handle interrupt
Packet::Interrupt => {}
Packet::Command(cmd) => {
tx_buf.clear();
let resp = self.handle_command(cmd, tx_buf.clone()).await?;
self.stream_tx.write_all(&resp).await.unwrap();
}
}
}
Ok(())
}
/// Set a breakpoint. must have an active inferior.
async fn set_breakpoint(&self, bkpt: Breakpoint) -> Result<(), Error> {
self.with_current_inferior(async move |inferior| {
let request_tx = inferior
.request_tx
.as_ref()
.ok_or(Error::SessionNotStarted)?;
let (reply_tx, reply_rx) = oneshot::channel();
let request = GdbRequest::SetBreakpoint(
Breakpoint {
ty: BreakpointType::Software,
addr: bkpt.addr,
bytecode: None,
},
reply_tx,
);
let _ = request_tx
.send(request)
.await
.map_err(|_| Error::GdbRequestSendError)?;
let reply = reply_rx.await.map_err(|_| Error::GdbRequestRecvError)??;
Ok(reply)
})
.await
}
async fn remove_breakpoint(&self, bkpt: Breakpoint) -> Result<(), Error> {
self.with_current_inferior(async move |inferior| {
let request_tx = inferior
.request_tx
.as_ref()
.ok_or(Error::SessionNotStarted)?;
let (reply_tx, reply_rx) = oneshot::channel();
let request = GdbRequest::RemoveBreakpoint(
Breakpoint {
ty: BreakpointType::Software,
addr: bkpt.addr,
bytecode: None,
},
reply_tx,
);
request_tx
.send(request)
.await
.map_err(|_| Error::GdbRequestSendError)?;
let reply = reply_rx.await.map_err(|_| Error::GdbRequestRecvError)??;
Ok(reply)
})
.await
}
async fn read_inferior_memory(&self, addr: u64, size: usize) -> Result<Vec<u8>, Error> {
self.with_current_inferior(async move |inferior| {
let request_tx = inferior
.request_tx
.as_ref()
.ok_or(Error::SessionNotStarted)?;
let (reply_tx, reply_rx) = oneshot::channel();
let request = GdbRequest::ReadInferiorMemory(addr, size, reply_tx);
let _ = request_tx
.send(request)
.await
.map_err(|_| Error::GdbRequestSendError)?;
let reply = reply_rx.await.map_err(|_| Error::GdbRequestRecvError)??;
Ok(reply)
})
.await
}
async fn write_inferior_memory(
&self,
addr: u64,
size: usize,
data: Vec<u8>,
) -> Result<(), Error> {
let data = data.clone();
self.with_current_inferior(async move |inferior| {
let request_tx = inferior
.request_tx
.as_ref()
.ok_or(Error::SessionNotStarted)?;
let (reply_tx, reply_rx) = oneshot::channel();
let request = GdbRequest::WriteInferiorMemory(addr, size, data, reply_tx);
let _ = request_tx
.send(request)
.await
.map_err(|_| Error::GdbRequestSendError)?;
let reply = reply_rx.await.map_err(|_| Error::GdbRequestRecvError)??;
Ok(reply)
})
.await
}
async fn read_registers(&self) -> Result<Amd64CoreRegs, Error> {
self.with_current_inferior(async move |inferior| {
let request_tx = inferior
.request_tx
.as_ref()
.ok_or(Error::SessionNotStarted)?;
let (reply_tx, reply_rx) = oneshot::channel();
let request = GdbRequest::ReadRegisters(reply_tx);
let _ = request_tx
.send(request)
.await
.map_err(|_| Error::GdbRequestSendError)?;
let reply = reply_rx.await.map_err(|_| Error::GdbRequestRecvError)??;
Ok(reply)
})
.await
}
async fn write_registers(&self, regs: Vec<u8>) -> Result<(), Error> {
self.with_current_inferior(async move |inferior| {
let regs = regs.as_slice();
let request_tx = inferior
.request_tx
.as_ref()
.ok_or(Error::SessionNotStarted)?;
let (reply_tx, reply_rx) = oneshot::channel();
let core_regs: Amd64CoreRegs =
bincode::deserialize(regs).map_err(|_| CommandParseError::MalformedRegisters)?;
let request = GdbRequest::WriteRegisters(core_regs, reply_tx);
let _ = request_tx
.send(request)
.await
.map_err(|_| Error::GdbRequestSendError)?;
let reply = reply_rx.await.map_err(|_| Error::GdbRequestRecvError)??;
Ok(reply)
})
.await
}
} | writer.put_str(unistd::unlink(&fname).map_or("F-1", |_| "F0")); |
lmschannel.go | package limedrv
import (
"fmt"
"github.com/myriadrf/limedrv/limewrap"
)
// LMSChannel is the struct that represents a Channel from an LMSDevice.
// It can be either an RX or a TX Channel, defined by the field IsRX.
// It also contains the list of available antenna ports.
type LMSChannel struct {
Antennas []LMSAntenna
IsRX bool
parent *LMSDevice
parentIndex int
stream limewrap.Lms_stream_t
currentDigitalBandwidth float64
digitalFilterEnabled bool
advancedFiltering bool
}
// Enable enables this channel from the read / write callback
func (c *LMSChannel) Enable() *LMSChannel {
c.parent.EnableChannel(c.parentIndex, c.IsRX)
return c
}
// Disable disables this channel from the read / write callback
func (c *LMSChannel) Disable() *LMSChannel {
c.parent.DisableChannel(c.parentIndex, c.IsRX)
return c
}
// SetGainDB sets this channel gain in decibels
func (c *LMSChannel) SetGainDB(gain uint) *LMSChannel {
c.parent.SetGainDB(c.parentIndex, c.IsRX, gain)
return c
}
// SetGainNormalized sets the channel normalized gain. [0-1]
func (c *LMSChannel) SetGainNormalized(gain float64) *LMSChannel {
c.parent.SetGainNormalized(c.parentIndex, c.IsRX, gain)
return c
}
// GetGainDB returns the channel current gain in decibels
func (c *LMSChannel) GetGainDB() uint {
return c.parent.GetGainDB(c.parentIndex, c.IsRX)
}
// GetGainNormalized returns the channel current normalized gain. [0-1]
func (c *LMSChannel) GetGainNormalized() float64 {
return c.parent.GetGainNormalized(c.parentIndex, c.IsRX)
}
// SetLPF sets the Analog Low Pass filter bandwidth for the current channel.
func (c *LMSChannel) SetLPF(bandwidth float64) *LMSChannel {
c.parent.SetLPF(c.parentIndex, c.IsRX, bandwidth)
return c
}
// GetLPF gets the current Analog Low Pass filter bandwidth for the current channel.
func (c *LMSChannel) GetLPF() float64 {
return c.parent.GetLPF(c.parentIndex, c.IsRX)
}
// EnableLPF enables the Analog Low Pass filter for the current channel.
func (c *LMSChannel) EnableLPF() *LMSChannel {
c.parent.EnableLPF(c.parentIndex, c.IsRX)
return c
}
// DisableLPF disables the Analog Low Pass filter for the current channel.
func (c *LMSChannel) DisableLPF() *LMSChannel {
c.parent.DisableLPF(c.parentIndex, c.IsRX)
return c
}
// SetDigitalLPF sets the current channel digital filter (GFIR) to low pass with specified bandwidth.
func (c *LMSChannel) SetDigitalLPF(bandwidth float64) *LMSChannel {
c.parent.SetDigitalFilter(c.parentIndex, c.IsRX, bandwidth)
return c
}
// EnableDigitalLPF enables current channel digital filter (GFIR)
func (c *LMSChannel) EnableDigitalLPF() *LMSChannel {
c.parent.EnableDigitalFilter(c.parentIndex, c.IsRX)
return c
}
// DisableDigitalLPF disables current channel digital filter (GFIR)
func (c *LMSChannel) DisableDigitalLPF() *LMSChannel {
c.parent.DisableDigitalFilter(c.parentIndex, c.IsRX)
return c
}
// SetAntenna sets the current channel antenna port
func (c *LMSChannel) SetAntenna(idx int) *LMSChannel {
c.parent.SetAntenna(idx, c.parentIndex, c.IsRX)
return c
}
// SetAntennaByName sets the current channel antenna port by name.
// Example: LNAW
func (c *LMSChannel) SetAntennaByName(name string) *LMSChannel {
c.parent.SetAntennaByName(name, c.parentIndex, c.IsRX)
return c
}
// SetCenterFrequency sets the current channel center frequency in hertz.
func (c *LMSChannel) SetCenterFrequency(centerFrequency float64) *LMSChannel {
c.parent.SetCenterFrequency(c.parentIndex, c.IsRX, centerFrequency)
return c
}
// GetCenterFrequency returns the current channel center frequency in hertz. | func (c *LMSChannel) GetCenterFrequency() float64 {
return c.parent.GetCenterFrequency(c.parentIndex, c.IsRX)
}
// String returns a representation of the channel
func (c *LMSChannel) String() string {
var str = fmt.Sprintf("\nIs RX: %t\nAntennas: %d", c.IsRX, len(c.Antennas))
for i := 0; i < len(c.Antennas); i++ {
str = fmt.Sprintf("%s\n\t%s", str, c.Antennas[i].String())
}
return str
}
func (c *LMSChannel) start() {
if c.stream != nil {
limewrap.LMS_StartStream(c.stream)
}
}
//func (c *LMSChannel) stop() {
// if c.stream != nil {
// limewrap.LMS_StopStream(c.stream)
// }
//} | |
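The setters above all return the receiver, so channel configuration is usually chained. A minimal sketch of that pattern; the way the channel is obtained from the device (the RXChannels field name) is an assumption for illustration and is not shown in this file:
package main

import "github.com/myriadrf/limedrv" // parent package of the limewrap import above

// configureRX is a sketch only: RXChannels is an assumed field name on LMSDevice.
func configureRX(dev *limedrv.LMSDevice) {
	ch := &dev.RXChannels[0]
	ch.SetAntennaByName("LNAW").
		SetCenterFrequency(106.3e6).
		SetGainNormalized(0.5).
		SetLPF(5e6).
		EnableLPF().
		Enable()
}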
sheet.go | package frongo
//Sheet type, used to store CSS styles.
type Sheet struct {
Styles []Style
}
//Make a sheet out of a list of styles.
//Takes the style list as parameter.
func (s *Sheet) Make(styles ...Style) {
s.Styles = styles
}
//Put a style into the sheet.
//Takes the style as parameter.
func (s *Sheet) Put(style Style) {
s.Styles = append(s.Styles, style)
}
| s.Styles = append(s.Styles, *NewStyle(params...))
return s
} | //Put a new style into the sheet.
//Takes the selector, modifier names,
//modifier values and delimiter as parameters.
func (s *Sheet) Style(params ...string) *Sheet { |
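A minimal usage sketch of the Sheet API above (Make replaces the contents, Put appends one style); the import path and the zero-value Style literals are assumptions, since Style construction is not shown here:
package main

import "example.com/frongo" // assumed import path for the frongo package above

func main() {
	var a, b frongo.Style // zero-value styles, purely for illustration

	var s frongo.Sheet
	s.Make(a) // replace the sheet contents with a single style
	s.Put(b)  // append one more style
}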
levenshtein.go | // Package levenshtein is a Go implementation to calculate Levenshtein Distance.
//
// Implementation taken from
// https://gist.github.com/andrei-m/982927#gistcomment-1931258
package levenshtein
import "unicode/utf8"
// minLengthThreshold is the length of the string beyond which
// an allocation will be made. Strings smaller than this will be
// zero alloc.
const minLengthThreshold = 32
// ComputeDistance computes the Levenshtein distance between the two
// strings passed as arguments. The return value is the Levenshtein distance.
//
// Works on runes (Unicode code points) but does not normalize
// the input strings. See https://blog.golang.org/normalization
// and the golang.org/x/text/unicode/norm package.
func ComputeDistance(a, b string) int {
if len(a) == 0 {
return utf8.RuneCountInString(b)
}
if len(b) == 0 {
return utf8.RuneCountInString(a)
}
if a == b {
return 0
}
// We need to convert to []rune if the strings are non-ASCII.
// This could be avoided by using utf8.RuneCountInString
// and then doing some juggling with rune indices,
// but leads to far more bounds checks. It is a reasonable trade-off.
s1 := []rune(a)
s2 := []rune(b)
// swap to save some memory O(min(a,b)) instead of O(a)
if len(s1) > len(s2) {
s1, s2 = s2, s1
}
lenS1 := len(s1)
lenS2 := len(s2)
// Init the row.
var x []uint16
if lenS1+1 > minLengthThreshold {
x = make([]uint16, lenS1+1)
} else {
// We make a small optimization here for small strings.
// Because a slice of constant length is effectively an array,
// it does not allocate. So we can re-slice it to the right length
// as long as it is below a desired threshold.
x = make([]uint16, minLengthThreshold)
x = x[:lenS1+1]
}
// we start from 1 because index 0 is already 0.
for i := 1; i < len(x); i++ {
x[i] = uint16(i)
}
// make a dummy bounds check to prevent the 2 bounds check down below.
// The one inside the loop is particularly costly.
_ = x[lenS1]
// fill in the rest
for i := 1; i <= lenS2; i++ {
prev := uint16(i)
for j := 1; j <= lenS1; j++ {
current := x[j-1] // match
if s2[i-1] != s1[j-1] {
current = min(min(x[j-1]+1, prev+1), x[j]+1)
}
x[j-1] = prev
prev = current
}
x[lenS1] = prev
}
return int(x[lenS1])
}
func min(a, b uint16) uint16 {
if a < b {
return a
} | } | return b |
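A minimal usage sketch of ComputeDistance; only the import path is assumed here, everything else comes from the function above:
package main

import (
	"fmt"

	"example.com/levenshtein" // assumed import path for the package above
)

func main() {
	// "kitten" -> "sitting" needs three single-rune edits (k->s, e->i, insert g).
	fmt.Println(levenshtein.ComputeDistance("kitten", "sitting")) // prints 3
}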
clientset_generated.go | /*
Copyright 2020 AppsCode Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
clientset "go.bytebuilders.dev/crd-learner-template/client/clientset/versioned"
clusterv1alpha1 "go.bytebuilders.dev/crd-learner-template/client/clientset/versioned/typed/cluster/v1alpha1"
fakeclusterv1alpha1 "go.bytebuilders.dev/crd-learner-template/client/clientset/versioned/typed/cluster/v1alpha1/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/testing"
)
// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewSimpleClientset(objects ...runtime.Object) *Clientset |
// Clientset implements clientset.Interface. Meant to be embedded into a
// struct to get a default implementation. This makes faking out just the method
// you want to test easier.
type Clientset struct {
testing.Fake
discovery *fakediscovery.FakeDiscovery
tracker testing.ObjectTracker
}
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return c.discovery
}
func (c *Clientset) Tracker() testing.ObjectTracker {
return c.tracker
}
var _ clientset.Interface = &Clientset{}
// ClusterV1alpha1 retrieves the ClusterV1alpha1Client
func (c *Clientset) ClusterV1alpha1() clusterv1alpha1.ClusterV1alpha1Interface {
return &fakeclusterv1alpha1.FakeClusterV1alpha1{Fake: &c.Fake}
}
| {
o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &Clientset{tracker: o}
cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
cs.AddReactor("*", "*", testing.ObjectReaction(o))
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
gvr := action.GetResource()
ns := action.GetNamespace()
watch, err := o.Watch(gvr, ns)
if err != nil {
return false, nil, err
}
return true, watch, nil
})
return cs
} |
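A minimal sketch of how a generated fake clientset like this is normally used in a unit test; the import path is inferred from the imports above and should be treated as an assumption:
package fake_test

import (
	"testing"

	"go.bytebuilders.dev/crd-learner-template/client/clientset/versioned/fake" // assumed path
)

func TestFakeClientset(t *testing.T) {
	// No preloaded objects: the in-memory tracker starts empty.
	cs := fake.NewSimpleClientset()

	// Typed group client backed by the tracker and the shared Fake reactor chain.
	if cs.ClusterV1alpha1() == nil {
		t.Fatal("expected a non-nil ClusterV1alpha1 client")
	}
}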
keys.go | // Copyright 2016, 2017 Thales e-Security, Inc
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package crypto11
import (
"crypto"
pkcs11 "github.com/miekg/pkcs11"
)
// Identify returns the ID and label for a PKCS#11 object.
//
// Either of these values may be used to retrieve the key for later use.
func (object *PKCS11Object) Identify() (id []byte, label []byte, err error) {
a := []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_ID, nil),
pkcs11.NewAttribute(pkcs11.CKA_LABEL, nil),
}
if err = withSession(object.Slot, func(session *PKCS11Session) error {
a, err = instance.ctx.GetAttributeValue(session.Handle, object.Handle, a)
return err
}); err != nil {
return nil, nil, err
}
return a[0].Value, a[1].Value, nil
}
// Find a key object. For asymmetric keys this only finds one half so
// callers will call it twice.
func findKey(session *PKCS11Session, id []byte, label []byte, keyclass uint, keytype uint) (obj pkcs11.ObjectHandle, err error) {
var handles []pkcs11.ObjectHandle
var template []*pkcs11.Attribute
if keyclass != ^uint(0) {
template = append(template, pkcs11.NewAttribute(pkcs11.CKA_CLASS, keyclass))
}
if keytype != ^uint(0) {
template = append(template, pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, keytype))
}
if id != nil {
template = append(template, pkcs11.NewAttribute(pkcs11.CKA_ID, id))
}
if label != nil {
template = append(template, pkcs11.NewAttribute(pkcs11.CKA_LABEL, label))
}
if err = session.Ctx.FindObjectsInit(session.Handle, template); err != nil {
return 0, err
}
defer func() {
finalErr := session.Ctx.FindObjectsFinal(session.Handle)
if err == nil {
err = finalErr
}
}()
if handles, _, err = session.Ctx.FindObjects(session.Handle, 1); err != nil {
return 0, err
}
if len(handles) == 0 {
return 0, ErrKeyNotFound
}
return handles[0], nil
}
// FindKeyPair retrieves a previously created asymmetric key.
//
// Either (but not both) of id and label may be nil, in which case they are ignored.
func FindKeyPair(id []byte, label []byte) (crypto.PrivateKey, error) {
return FindKeyPairOnSlot(instance.slot, id, label)
}
// FindKeyPairOnSlot retrieves a previously created asymmetric key, using a specified slot.
//
// Either (but not both) of id and label may be nil, in which case they are ignored.
func FindKeyPairOnSlot(slot uint, id []byte, label []byte) (crypto.PrivateKey, error) {
var err error
var k crypto.PrivateKey
if err = ensureSessions(instance, slot); err != nil {
return nil, err
}
err = withSession(slot, func(session *PKCS11Session) error {
k, err = FindKeyPairOnSession(session, slot, id, label)
return err
})
return k, err
}
// FindKeyPairOnSession retrieves a previously created asymmetric key, using a specified session.
//
// Either (but not both) of id and label may be nil, in which case they are ignored.
func FindKeyPairOnSession(session *PKCS11Session, slot uint, id []byte, label []byte) (crypto.PrivateKey, error) {
var err error
var privHandle, pubHandle pkcs11.ObjectHandle
var pub crypto.PublicKey
if privHandle, err = findKey(session, id, label, pkcs11.CKO_PRIVATE_KEY, ^uint(0)); err != nil {
return nil, err
}
attributes := []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, 0),
}
if attributes, err = session.Ctx.GetAttributeValue(session.Handle, privHandle, attributes); err != nil {
return nil, err
}
keyType := bytesToUlong(attributes[0].Value)
if pubHandle, err = findKey(session, id, label, pkcs11.CKO_PUBLIC_KEY, keyType); err != nil {
return nil, err
}
switch keyType {
case pkcs11.CKK_DSA:
if pub, err = exportDSAPublicKey(session, pubHandle); err != nil {
return nil, err
}
return &PKCS11PrivateKeyDSA{PKCS11PrivateKey{PKCS11Object{privHandle, slot}, pub}}, nil
case pkcs11.CKK_RSA:
if pub, err = exportRSAPublicKey(session, pubHandle); err != nil {
return nil, err
}
return &PKCS11PrivateKeyRSA{PKCS11PrivateKey{PKCS11Object{privHandle, slot}, pub}}, nil
case pkcs11.CKK_ECDSA:
if pub, err = exportECDSAPublicKey(session, pubHandle); err != nil {
return nil, err
}
return &PKCS11PrivateKeyECDSA{PKCS11PrivateKey{PKCS11Object{privHandle, slot}, pub}}, nil
default:
return nil, ErrUnsupportedKeyType
}
}
// Public returns the public half of a private key.
//
// This partially implements the go.crypto.Signer and go.crypto.Decrypter interfaces for
// PKCS11PrivateKey. (The remainder of the implementation is in the
// key-specific types.)
func (signer PKCS11PrivateKey) Public() crypto.PublicKey {
return signer.PubKey
}
// FindKey retrieves a previously created symmetric key.
//
// Either (but not both) of id and label may be nil, in which case they are ignored.
func FindKey(id []byte, label []byte) (*PKCS11SecretKey, error) {
return FindKeyOnSlot(instance.slot, id, label)
}
// FindKeyOnSlot retrieves a previously created symmetric key, using a specified slot.
//
// Either (but not both) of id and label may be nil, in which case they are ignored.
func FindKeyOnSlot(slot uint, id []byte, label []byte) (*PKCS11SecretKey, error) {
var err error
var k *PKCS11SecretKey
if err = ensureSessions(instance, slot); err != nil {
return nil, err
}
err = withSession(slot, func(session *PKCS11Session) error {
k, err = FindKeyOnSession(session, slot, id, label)
return err
})
return k, err
}
// FindKeyOnSession retrieves a previously created symmetric key, using a specified session.
//
// Either (but not both) of id and label may be nil, in which case they are ignored.
func | (session *PKCS11Session, slot uint, id []byte, label []byte) (key *PKCS11SecretKey, err error) {
var privHandle pkcs11.ObjectHandle
if privHandle, err = findKey(session, id, label, pkcs11.CKO_SECRET_KEY, ^uint(0)); err != nil {
return
}
attributes := []*pkcs11.Attribute{
pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, 0),
}
if attributes, err = session.Ctx.GetAttributeValue(session.Handle, privHandle, attributes); err != nil {
return
}
if cipher, ok := Ciphers[int(bytesToUlong(attributes[0].Value))]; ok {
key = &PKCS11SecretKey{PKCS11Object{privHandle, slot}, cipher}
} else {
err = ErrUnsupportedKeyType
return
}
return
}
| FindKeyOnSession |
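A minimal sketch of looking up a key pair by label and reaching its public half, assuming the library has already been configured/initialized elsewhere (the setup call is not part of this file) and that the import path below is correct:
package main

import (
	"crypto"
	"fmt"
	"log"

	"github.com/ThalesIgnite/crypto11" // assumed import path
)

func main() {
	// Look up an asymmetric key pair by label only; a nil id is ignored.
	key, err := crypto11.FindKeyPair(nil, []byte("my-signing-key"))
	if err != nil {
		log.Fatal(err)
	}

	// The key-specific types are documented above as partially implementing
	// crypto.Signer, so the public half is reachable through Public().
	if signer, ok := key.(crypto.Signer); ok {
		fmt.Printf("public key type: %T\n", signer.Public())
	}
}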
icon.app_management-js.min.js | (window.webpackJsonp=window.webpackJsonp||[]).push([[30],{4002:function(e,t,a){"use strict";a.r(t),a.d(t,"icon",(function(){return o}));a(12),a(4),a(2),a(6),a(3),a(10);var n=a(0),r=a.n(n);function | (){return(l=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var a=arguments[t];for(var n in a)Object.prototype.hasOwnProperty.call(a,n)&&(e[n]=a[n])}return e}).apply(this,arguments)}function i(e,t){if(null==e)return{};var a,n,r=function(e,t){if(null==e)return{};var a,n,r={},l=Object.keys(e);for(n=0;n<l.length;n++)a=l[n],t.indexOf(a)>=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(n=0;n<l.length;n++)a=l[n],t.indexOf(a)>=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var o=function(e){var t=e.title,a=e.titleId,n=i(e,["title","titleId"]);return r.a.createElement("svg",l({width:32,height:32,viewBox:"0 0 32 32",xmlns:"http://www.w3.org/2000/svg","aria-labelledby":a},n),t?r.a.createElement("title",{id:a},t):null,r.a.createElement("path",{className:"euiIcon__fillSecondary",d:"M16 21a5 5 0 110-10 5 5 0 010 10zm0-8a3 3 0 100 6 3 3 0 000-6z"}),r.a.createElement("path",{d:"M20 32h-8v-4.06a1 1 0 00-1.61-.67l-2.88 2.87-5.65-5.65 2.87-2.87a.92.92 0 00.2-1 .93.93 0 00-.86-.6H0V12h4.06a.92.92 0 00.85-.58.94.94 0 00-.19-1L1.86 7.51l5.65-5.65 2.87 2.87A1 1 0 0012 4.06V0h8v4.06a1 1 0 001.61.67l2.87-2.87 5.66 5.66-2.87 2.87a.92.92 0 00-.2 1 .93.93 0 00.86.6H32v8h-4.06a.92.92 0 00-.85.58.94.94 0 00.19 1l2.87 2.87-5.66 5.66-2.87-2.87a1 1 0 00-1.61.67L20 32zm-6-2h4v-2.06a3 3 0 015-2.08l1.46 1.46 2.83-2.83L25.86 23a3 3 0 012.08-5H30v-4h-2.06a3 3 0 01-2.08-5l1.46-1.46-2.83-2.85L23 6.14a3 3 0 01-5-2.08V2h-4v2.06a3 3 0 01-5 2.08L7.51 4.69 4.69 7.51 6.14 9a3 3 0 01-2.08 5H2v4h2.06a3 3 0 012.08 5l-1.45 1.49 2.83 2.83L9 25.86a3 3 0 015 2.08V30z"}))}}}]);
//# sourceMappingURL=icon.app_management-js.min.js.map | l |
index.ts | function | (number: string): number {
return number
.split('')
.reduce((accumulator, currentValue) => accumulator + parseInt(currentValue, 10), 0);
}
function orderWeight(strng: string): string {
return strng
.split(' ')
.sort((a, b) => {
const sumA = sumDigits(a);
const sumB = sumDigits(b);
return sumA === sumB ? a.localeCompare(b) : sumA - sumB;
})
.join(' ');
}
export default orderWeight;
| sumDigits |
db.go | package db
import (
"fmt"
"github.com/gusarow4321/HelloGo/pkg/logger"
"github.com/gusarow4321/HelloGo/tinytodo/app/config"
"github.com/gusarow4321/HelloGo/tinytodo/app/db/models"
"github.com/gusarow4321/HelloGo/tinytodo/app/db/repositories"
"gorm.io/driver/postgres"
"gorm.io/gorm"
gormLogger "gorm.io/gorm/logger"
)
func InitDB(cfg config.DBConfig, logger logger.Logger) (*repositories.Repositories, error) {
dsn := fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%s sslmode=%s",
cfg.Host, cfg.User, cfg.Password, cfg.Dbname, cfg.Port, cfg.Sslmode)
db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{
QueryFields: true,
Logger: gormLogger.Default.LogMode(gormLogger.Silent),
})
if err != nil |
logger.Info(nil, "DB connected")
if err = models.MigrateModels(db); err != nil {
return nil, err
}
logger.Info(nil, "Models migrated")
return repositories.NewRepositories(db), nil
}
| {
return nil, err
} |
HMSNetworkTool.ts | /*
Copyright 2020. Huawei Technologies Co., Ltd. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import { asyncExec } from './utils'
import { Cookie, DomainInfo } from './HMSCommonTypes';
export function buildNetworkURL(domainHttps: DomainInfo): Promise<String> {
return asyncExec('HMSNetworkTool', 'buildNetworkURL', [domainHttps]);
}
export function | (cookie: Cookie): Promise<String> {
return asyncExec('HMSNetworkTool', 'buildNetworkCookie', [cookie]);
}
| buildNetworkCookie |
soap.py | from suds.client import Client
from suds import WebFault
from model.project import Project
class SoapHelper:
def __init__(self, app):
|
def can_login(self, username, password):
client = Client("http://localhost:8080/mantisbt-1.2.20/api/soap/mantisconnect.php?wsdl")
try:
client.service.mc_login(username, password)
return True
except WebFault:
return False
def get_project_list(self, username, password):
project_list = []
client = Client("http://localhost:8080/mantisbt-1.2.20/api/soap/mantisconnect.php?wsdl")
projects = client.service.mc_projects_get_user_accessible(username, password)
for i in range(len(projects)):
name = projects[i].name
description = projects[i].description
project_list.append(Project(name=name, description=description))
return project_list
| self.app = app |
model_def.py | # -*- coding: utf-8 -*-
""" Model definition functions and weight loading.
"""
from __future__ import print_function, division, unicode_literals
from os.path import exists
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence
from torchMoji.torchmoji.lstm import LSTMHardSigmoid
from torchMoji.torchmoji.attlayer import Attention
from torchMoji.torchmoji.global_variables import NB_TOKENS, NB_EMOJI_CLASSES
def torchmoji_feature_encoding(weight_path, return_attention=False):
""" Loads the pretrained torchMoji model for extracting features
from the penultimate feature layer. In this way, it transforms
the text into its emotional encoding.
# Arguments:
weight_path: Path to model weights to be loaded.
return_attention: If true, output will include weight of each input token
used for the prediction
# Returns:
Pretrained model for encoding text into feature vectors.
"""
model = TorchMoji(nb_classes=None,
nb_tokens=NB_TOKENS,
feature_output=True,
return_attention=return_attention)
load_specific_weights(model, weight_path, exclude_names=['output_layer'])
return model
def torchmoji_emojis(weight_path, return_attention=False):
""" Loads the pretrained torchMoji model for extracting features
from the penultimate feature layer. In this way, it transforms
the text into its emotional encoding.
# Arguments:
weight_path: Path to model weights to be loaded.
return_attention: If true, output will include weight of each input token
used for the prediction
# Returns:
Pretrained model for encoding text into feature vectors.
"""
model = TorchMoji(nb_classes=NB_EMOJI_CLASSES,
nb_tokens=NB_TOKENS,
return_attention=return_attention)
model.load_state_dict(torch.load(weight_path))
return model
def torchmoji_transfer(nb_classes, weight_path=None, extend_embedding=0,
embed_dropout_rate=0.1, final_dropout_rate=0.5):
""" Loads the pretrained torchMoji model for finetuning/transfer learning.
Does not load weights for the softmax layer.
Note that if you are planning to use class average F1 for evaluation,
nb_classes should be set to 2 instead of the actual number of classes
in the dataset, since binary classification will be performed on each
class individually.
Note that for the 'new' method, weight_path should be left as None.
# Arguments:
nb_classes: Number of classes in the dataset.
weight_path: Path to model weights to be loaded.
extend_embedding: Number of tokens that have been added to the
vocabulary on top of NB_TOKENS. If this number is larger than 0,
the embedding layer's dimensions are adjusted accordingly, with the
additional weights being set to random values.
embed_dropout_rate: Dropout rate for the embedding layer.
final_dropout_rate: Dropout rate for the final Softmax layer.
# Returns:
Model with the given parameters.
"""
model = TorchMoji(nb_classes=nb_classes,
nb_tokens=NB_TOKENS + extend_embedding,
embed_dropout_rate=embed_dropout_rate,
final_dropout_rate=final_dropout_rate,
output_logits=True)
if weight_path is not None:
load_specific_weights(model, weight_path,
exclude_names=['output_layer'],
extend_embedding=extend_embedding)
return model
class TorchMoji(nn.Module):
def __init__(self, nb_classes, nb_tokens, feature_output=False, output_logits=False,
embed_dropout_rate=0, final_dropout_rate=0, return_attention=False):
"""
torchMoji model.
IMPORTANT: The model is loaded in evaluation mode by default (self.eval())
# Arguments:
nb_classes: Number of classes in the dataset.
nb_tokens: Number of tokens in the dataset (i.e. vocabulary size).
feature_output: If True the model returns the penultimate
feature vector rather than Softmax probabilities
(defaults to False).
output_logits: If True the model returns logits rather than probabilities
(defaults to False).
embed_dropout_rate: Dropout rate for the embedding layer.
final_dropout_rate: Dropout rate for the final Softmax layer.
return_attention: If True the model also returns attention weights over the sentence
(defaults to False).
"""
super(TorchMoji, self).__init__()
embedding_dim = 256
hidden_size = 512
attention_size = 4 * hidden_size + embedding_dim
self.feature_output = feature_output
self.embed_dropout_rate = embed_dropout_rate
self.final_dropout_rate = final_dropout_rate
self.return_attention = return_attention
self.hidden_size = hidden_size
self.output_logits = output_logits
self.nb_classes = nb_classes
self.add_module('embed', nn.Embedding(nb_tokens, embedding_dim))
# dropout2D: embedding channels are dropped out instead of words
# many examples in the datasets contain so few words that losing one or more of them can alter the emotion completely
self.add_module('embed_dropout', nn.Dropout2d(embed_dropout_rate))
self.add_module('lstm_0', LSTMHardSigmoid(embedding_dim, hidden_size, batch_first=True, bidirectional=True))
self.add_module('lstm_1', LSTMHardSigmoid(hidden_size*2, hidden_size, batch_first=True, bidirectional=True))
self.add_module('attention_layer', Attention(attention_size=attention_size, return_attention=return_attention))
if not feature_output:
self.add_module('final_dropout', nn.Dropout(final_dropout_rate))
if output_logits:
self.add_module('output_layer', nn.Sequential(nn.Linear(attention_size, nb_classes if self.nb_classes > 2 else 1)))
else:
self.add_module('output_layer', nn.Sequential(nn.Linear(attention_size, nb_classes if self.nb_classes > 2 else 1),
nn.Softmax() if self.nb_classes > 2 else nn.Sigmoid()))
self.init_weights()
# Put model in evaluation mode by default
self.eval()
def init_weights(self):
"""
Here we reproduce Keras default initialization weights for consistency with Keras version
"""
ih = (param.data for name, param in self.named_parameters() if 'weight_ih' in name)
hh = (param.data for name, param in self.named_parameters() if 'weight_hh' in name)
b = (param.data for name, param in self.named_parameters() if 'bias' in name)
nn.init.uniform(self.embed.weight.data, a=-0.5, b=0.5)
for t in ih:
nn.init.xavier_uniform(t)
for t in hh:
nn.init.orthogonal(t)
for t in b:
nn.init.constant(t, 0)
if not self.feature_output:
nn.init.xavier_uniform(self.output_layer[0].weight.data)
def forward(self, input_seqs):
""" Forward pass.
# Arguments:
input_seqs: Can be one of Numpy array, Torch.LongTensor, Torch.Variable, Torch.PackedSequence.
# Return:
Same format as input format (except for PackedSequence returned as Variable).
"""
# Check whether the input is a Torch.LongTensor or not a Torch.Variable (assume a Numpy array in that case), and note which so we can return the same format
return_numpy = False
return_tensor = False
if isinstance(input_seqs, (torch.LongTensor, torch.cuda.LongTensor)):
input_seqs = Variable(input_seqs)
return_tensor = True
elif not isinstance(input_seqs, Variable):
input_seqs = Variable(torch.from_numpy(input_seqs.astype('int64')).long())
return_numpy = True
# If we don't have a packed input, pack it
reorder_output = False
if not isinstance(input_seqs, PackedSequence):
ho = self.lstm_0.weight_hh_l0.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
co = self.lstm_0.weight_hh_l0.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
# Reorder batch by sequence length
input_lengths = torch.LongTensor([torch.max(input_seqs[i, :].data.nonzero()) + 1 for i in range(input_seqs.size()[0])])
input_lengths, perm_idx = input_lengths.sort(0, descending=True)
input_seqs = input_seqs[perm_idx][:, :input_lengths.max()]
# Pack sequence and work on data tensor to reduce embeddings/dropout computations
packed_input = pack_padded_sequence(input_seqs, input_lengths.cpu().numpy(), batch_first=True)
reorder_output = True
else:
ho = self.lstm_0.weight_hh_l0.data.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
co = self.lstm_0.weight_hh_l0.data.data.new(2, input_seqs.size()[0], self.hidden_size).zero_()
input_lengths = input_seqs.batch_sizes
packed_input = input_seqs
hidden = (Variable(ho, requires_grad=False), Variable(co, requires_grad=False))
# Embed with an activation function to bound the values of the embeddings
x = self.embed(packed_input.data)
x = nn.Tanh()(x)
# PyTorch's dropout2d operates on axis 1, which is fine for us
x = self.embed_dropout(x)
# Update packed sequence data for RNN
packed_input = PackedSequence(x, packed_input.batch_sizes)
# skip-connection from embedding to output eases gradient-flow and allows access to lower-level features
# ordering of the way the merge is done is important for consistency with the pretrained model
lstm_0_output, _ = self.lstm_0(packed_input, hidden)
lstm_1_output, _ = self.lstm_1(lstm_0_output, hidden)
# Update packed sequence data for attention layer
packed_input = PackedSequence(torch.cat((lstm_1_output.data,
lstm_0_output.data,
packed_input.data), dim=1),
packed_input.batch_sizes)
input_seqs, _ = pad_packed_sequence(packed_input, batch_first=True)
x, att_weights = self.attention_layer(input_seqs, input_lengths)
# output class probabilities or penultimate feature vector
if not self.feature_output:
x = self.final_dropout(x)
outputs = self.output_layer(x)
else:
outputs = x
# Reorder output if needed
if reorder_output:
reordered = Variable(outputs.data.new(outputs.size()))
reordered[perm_idx] = outputs
outputs = reordered
# Adapt return format if needed
if return_tensor:
outputs = outputs.data
if return_numpy:
outputs = outputs.data.numpy()
if self.return_attention:
return outputs, att_weights
else:
return outputs
def | (model, weight_path, exclude_names=[], extend_embedding=0, verbose=True):
""" Loads model weights from the given file path, excluding any
given layers.
# Arguments:
model: Model whose weights should be loaded.
weight_path: Path to file containing model weights.
exclude_names: List of layer names whose weights should not be loaded.
extend_embedding: Number of new words being added to vocabulary.
verbose: Verbosity flag.
# Raises:
ValueError if the file at weight_path does not exist.
"""
if not exists(weight_path):
raise ValueError('ERROR (load_weights): The weights file at {} does '
'not exist. Refer to the README for instructions.'
.format(weight_path))
if extend_embedding and 'embed' in exclude_names:
raise ValueError('ERROR (load_weights): Cannot extend a vocabulary '
'without loading the embedding weights.')
# Copy only weights from the temporary model that are wanted
# for the specific task (e.g. the Softmax is often ignored)
weights = torch.load(weight_path)
for key, weight in weights.items():
if any(excluded in key for excluded in exclude_names):
if verbose:
print('Ignoring weights for {}'.format(key))
continue
try:
model_w = model.state_dict()[key]
except KeyError:
raise KeyError("Weights had parameters {},".format(key)
+ " but could not find this parameters in model.")
if verbose:
print('Loading weights for {}'.format(key))
# extend embedding layer to allow new randomly initialized words
# if requested. Otherwise, just load the weights for the layer.
if 'embed' in key and extend_embedding > 0:
weight = torch.cat((weight, model_w[NB_TOKENS:, :]), dim=0)
if verbose:
print('Extended vocabulary for embedding layer ' +
'from {} to {} tokens.'.format(
NB_TOKENS, NB_TOKENS + extend_embedding))
try:
model_w.copy_(weight)
except:
print('While copying the weights named {}, whose dimensions in the model are'
' {} and whose dimensions in the saved file are {}, ...'.format(
key, model_w.size(), weight.size()))
raise
| load_specific_weights |
persistentvolumestatus.go | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
import (
v1 "k8s.io/api/core/v1"
)
// PersistentVolumeStatusApplyConfiguration represents a declarative configuration of the PersistentVolumeStatus type for use
// with apply.
type PersistentVolumeStatusApplyConfiguration struct {
Phase *v1.PersistentVolumePhase `json:"phase,omitempty"`
Message *string `json:"message,omitempty"`
Reason *string `json:"reason,omitempty"`
}
// PersistentVolumeStatusApplyConfiguration constructs a declarative configuration of the PersistentVolumeStatus type for use with
// apply.
func | () *PersistentVolumeStatusApplyConfiguration {
return &PersistentVolumeStatusApplyConfiguration{}
}
// WithPhase sets the Phase field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Phase field is set to the value of the last call.
func (b *PersistentVolumeStatusApplyConfiguration) WithPhase(value v1.PersistentVolumePhase) *PersistentVolumeStatusApplyConfiguration {
b.Phase = &value
return b
}
// WithMessage sets the Message field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Message field is set to the value of the last call.
func (b *PersistentVolumeStatusApplyConfiguration) WithMessage(value string) *PersistentVolumeStatusApplyConfiguration {
b.Message = &value
return b
}
// WithReason sets the Reason field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Reason field is set to the value of the last call.
func (b *PersistentVolumeStatusApplyConfiguration) WithReason(value string) *PersistentVolumeStatusApplyConfiguration {
b.Reason = &value
return b
}
| PersistentVolumeStatus |
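A minimal sketch of the chained builder defined above; the import path/alias is assumed, and the field values are illustrative only:
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	corev1apply "k8s.io/client-go/applyconfigurations/core/v1" // assumed location of the generated code
)

func main() {
	status := corev1apply.PersistentVolumeStatus().
		WithPhase(v1.VolumeBound).
		WithMessage("volume is bound").
		WithReason("Bound")

	fmt.Println(*status.Phase, *status.Message, *status.Reason)
}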
test_2018_d10.py | """TEST MODULE TEMPLATE"""
from advent_of_code.utils.parse import parse_star_vectors
from advent_of_code.y2018.d10 import solution_1
def test_solution_1():
| example_input = """position=< 9, 1> velocity=< 0, 2>
position=< 7, 0> velocity=<-1, 0>
position=< 3, -2> velocity=<-1, 1>
position=< 6, 10> velocity=<-2, -1>
position=< 2, -4> velocity=< 2, 2>
position=<-6, 10> velocity=< 2, -2>
position=< 1, 8> velocity=< 1, -1>
position=< 1, 7> velocity=< 1, 0>
position=<-3, 11> velocity=< 1, -2>
position=< 7, 6> velocity=<-1, -1>
position=<-2, 3> velocity=< 1, 0>
position=<-4, 3> velocity=< 2, 0>
position=<10, -3> velocity=<-1, 1>
position=< 5, 11> velocity=< 1, -2>
position=< 4, 7> velocity=< 0, -1>
position=< 8, -2> velocity=< 0, 1>
position=<15, 0> velocity=<-2, 0>
position=< 1, 6> velocity=< 1, 0>
position=< 8, 9> velocity=< 0, -1>
position=< 3, 3> velocity=<-1, 1>
position=< 0, 5> velocity=< 0, -1>
position=<-2, 2> velocity=< 2, 0>
position=< 5, -2> velocity=< 1, 2>
position=< 1, 4> velocity=< 2, 1>
position=<-2, 7> velocity=< 2, -2>
position=< 3, 6> velocity=<-1, -1>
position=< 5, 0> velocity=< 1, 0>
position=<-6, 0> velocity=< 2, 0>
position=< 5, 9> velocity=< 1, -2>
position=<14, 7> velocity=<-2, 0>
position=<-3, 6> velocity=< 2, -1>"""
solution_1(parse_star_vectors(example_input))
assert 1 == 0 |
|
usecase.go | package wiremock
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"github.com/prongbang/wiremock/pkg/api/core"
"github.com/prongbang/wiremock/pkg/config"
"github.com/prongbang/wiremock/pkg/status"
)
type UseCase interface {
CasesMatching(r *http.Request, path string, cases map[string]Cases, params Parameters) CaseMatching
ParameterMatching(params Parameters) Matching
GetMockResponse(resp Response) []byte
ReadSourceRouteYml(routeName string) []byte
}
type useCase struct {
}
func (u *useCase) CasesMatching(r *http.Request, path string, cases map[string]Cases, params Parameters) CaseMatching {
// Get request
body := map[string]interface{}{}
_ = json.NewDecoder(r.Body).Decode(&body)
// Process header matching
require := map[string]interface{}{}
errors := map[string]interface{}{}
matchingHeader := 0
for k, v := range params.ReqBody.Mock {
vs := fmt.Sprintf("%v", v)
ks := fmt.Sprintf("%v", params.ReqHeader.Http[k])
if vs == ks {
matchingHeader = matchingHeader + 1
continue
}
if params.ReqHeader.Http[k] == nil {
errors[k] = "Require header " + k
} else {
errors[k] = "The header " + k + " not match"
}
}
if len(errors) > 0 {
require["errors"] = errors
}
require["message"] = "validation error"
require["status"] = "error"
result, err := json.Marshal(require)
if err != nil {
result = []byte("{}")
}
matchingHeaderRequest := len(params.ReqBody.Mock) == matchingHeader
// Process body matching
matchingBodyRequest := false
var foundCase Cases
for _, vMock := range cases {
matchingBody := 0
vMock.Response.FileName = path
if len(body) == 0 {
body = core.BindCaseBody(vMock.Body, r)
}
for ck, cv := range vMock.Body {
vs := fmt.Sprintf("%v", cv)
ks := fmt.Sprintf("%v", body[ck])
// Check that the required field value is not empty
if vs == "*" {
if body[ck] != nil {
matchingBody = matchingBody + 1
}
}
// Value matching
if vs == ks {
matchingBody = matchingBody + 1
}
}
// All mock body fields matched for this case
matchingBodyRequest = len(vMock.Body) == matchingBody
if matchingBodyRequest {
foundCase = vMock
break
}
}
return CaseMatching{
IsMatch: matchingBodyRequest && matchingHeaderRequest,
Result: result,
Case: foundCase,
}
}
func (u *useCase) ParameterMatching(params Parameters) Matching {
require := map[string]interface{}{}
errors := map[string]interface{}{}
matchingHeader := 0
matchingBody := 0
for k, v := range params.ReqBody.Mock {
vs := fmt.Sprintf("%v", v)
ks := fmt.Sprintf("%v", params.ReqBody.Http[k])
if vs == ks {
matchingBody = matchingBody + 1
continue
}
if params.ReqBody.Http[k] == nil {
errors[k] = "Require " + k
} else {
errors[k] = "The " + k + " not match"
}
}
for k, v := range params.ReqHeader.Mock {
vs := fmt.Sprintf("%v", v)
ks := fmt.Sprintf("%v", params.ReqHeader.Http[k])
if vs == ks {
matchingHeader = matchingHeader + 1
continue
}
if params.ReqHeader.Http[k] == nil {
errors[k] = "Require header " + k
} else {
errors[k] = "The header " + k + " not match"
}
}
if len(errors) > 0 {
require["errors"] = errors
require["message"] = "validation error"
require["status"] = "error"
}
result, err := json.Marshal(require)
if err != nil {
result = []byte("{}")
}
isMatchHeader := len(params.ReqHeader.Mock) == matchingHeader
isMatchBody := len(params.ReqBody.Mock) == matchingBody
return Matching{
Result: result,
IsMatch: isMatchBody && isMatchHeader,
}
}
func (u *useCase) GetMockResponse(resp Response) []byte {
if resp.BodyFile != "" {
bodyFile := fmt.Sprintf(config.MockResponsePath, resp.FileName, resp.BodyFile)
source, err := ioutil.ReadFile(bodyFile)
if err != nil {
return []byte("{}")
}
return source
}
return []byte(resp.Body)
}
func (u *useCase) ReadSourceRouteYml(routeName string) []byte {
pattern := status.Pattern() | }
return source
}
func NewUseCase() UseCase {
return &useCase{}
} | filename := fmt.Sprintf(config.MockRouteYmlPath, routeName)
source, err := ioutil.ReadFile(filename)
if err != nil {
panic(pattern) |
dataframe.py | # Copyright 2019 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
import datetime
import decimal
import functools
import voluptuous
from werkzeug import datastructures
from cloudkitty.utils import json
from cloudkitty.utils import tz as tzutils
from cloudkitty.utils import validation as vutils
# NOTE(peschk_l): qty and price are converted to strings to avoid
# floating-point conversion issues:
# Decimal(0.121) == Decimal('0.12099999999999999644728632119')
# Decimal(str(0.121)) == Decimal('0.121')
DATAPOINT_SCHEMA = voluptuous.Schema({
voluptuous.Required('vol'): {
voluptuous.Required('unit'): vutils.get_string_type(),
voluptuous.Required('qty'): voluptuous.Coerce(str),
},
voluptuous.Required('rating', default={}): {
voluptuous.Required('price', default=0):
voluptuous.Coerce(str),
},
voluptuous.Required('groupby'): vutils.DictTypeValidator(str, str),
voluptuous.Required('metadata'): vutils.DictTypeValidator(str, str),
})
_DataPointBase = collections.namedtuple(
"DataPoint",
field_names=("unit", "qty", "price", "groupby", "metadata"))
class DataPoint(_DataPointBase):
def __new__(cls, unit, qty, price, groupby, metadata):
return _DataPointBase.__new__(
cls,
unit or "undefined",
# NOTE(peschk_l): avoids floating-point issues.
decimal.Decimal(str(qty) if isinstance(qty, float) else qty),
decimal.Decimal(str(price) if isinstance(price, float) else price),
datastructures.ImmutableDict(groupby),
datastructures.ImmutableDict(metadata),
)
def set_price(self, price):
"""Sets the price of the DataPoint and returns a new object."""
return self._replace(price=price)
def as_dict(self, legacy=False, mutable=False):
"""Returns a dict representation of the object.
The returned dict is immutable by default and has the
following format::
{
"vol": {
"unit": "GiB",
"qty": 1.2,
},
"rating": {
"price": 0.04,
},
"groupby": {
"group_one": "one",
"group_two": "two",
},
"metadata": {
"attr_one": "one",
"attr_two": "two",
},
}
The dict can also be returned in the legacy (v1 storage) format. In
that case, `groupby` and `metadata` will be removed and merged together
into the `desc` key.
:param legacy: Defaults to False. If True, returned dict is in legacy
format.
:type legacy: bool
:param mutable: Defaults to False. If True, returns a normal dict
instead of an ImmutableDict.
:type mutable: bool
"""
output = {
"vol": {
"unit": self.unit,
"qty": self.qty,
},
"rating": {
"price": self.price,
},
"groupby": dict(self.groupby) if mutable else self.groupby,
"metadata": dict(self.metadata) if mutable else self.metadata,
}
if legacy:
desc = output.pop("metadata")
desc.update(output.pop("groupby"))
output['desc'] = desc
return output if mutable else datastructures.ImmutableDict(output)
def json(self, legacy=False):
"""Returns a json representation of the dict returned by `as_dict`.
:param legacy: Defaults to False. If True, returned dict is in legacy
format.
:type legacy: bool
:rtype: str
"""
return json.dumps(self.as_dict(legacy=legacy, mutable=True))
@classmethod
def from_dict(cls, dict_, legacy=False):
"""Returns a new DataPoint instance build from a dict.
:param dict_: Dict to build the DataPoint from
:type dict_: dict
:param legacy: Set to true to convert the dict to a the new format
before validating it.
:rtype: DataPoint
"""
try:
if legacy:
dict_['groupby'] = dict_.pop('desc')
dict_['metadata'] = {}
valid = DATAPOINT_SCHEMA(dict_)
return cls(
unit=valid["vol"]["unit"],
qty=valid["vol"]["qty"],
price=valid["rating"]["price"],
groupby=valid["groupby"],
metadata=valid["metadata"],
)
except (voluptuous.Invalid, KeyError) as e:
raise ValueError("{} isn't a valid DataPoint: {}".format(dict_, e))
@property
def desc(self):
output = dict(self.metadata)
output.update(self.groupby)
return datastructures.ImmutableDict(output)
DATAFRAME_SCHEMA = voluptuous.Schema({
voluptuous.Required('period'): {
voluptuous.Required('begin'): voluptuous.Any(
datetime.datetime, voluptuous.Coerce(tzutils.dt_from_iso)),
voluptuous.Required('end'): voluptuous.Any(
datetime.datetime, voluptuous.Coerce(tzutils.dt_from_iso)),
},
voluptuous.Required('usage'): vutils.IterableValuesDict(
str, DataPoint.from_dict),
})
class DataFrame(object):
__slots__ = ("start", "end", "_usage")
def __init__(self, start, end, usage=None):
if not isinstance(start, datetime.datetime):
raise TypeError(
'"start" must be of type datetime.datetime, not {}'.format(
type(start)))
if not isinstance(end, datetime.datetime):
raise TypeError(
'"end" must be of type datetime.datetime, not {}'.format(
type(end)))
if usage is not None and not isinstance(usage, dict):
raise TypeError(
'"usage" must be a dict, not {}'.format(type(usage)))
self.start = start
self.end = end
self._usage = collections.OrderedDict()
if usage:
for key in sorted(usage.keys()):
self.add_points(usage[key], key)
def as_dict(self, legacy=False, mutable=False):
output = {
"period": {"begin": self.start, "end": self.end},
"usage": {
key: [v.as_dict(legacy=legacy, mutable=mutable) for v in val]
for key, val in self._usage.items()
},
}
return output if mutable else datastructures.ImmutableDict(output)
def json(self, legacy=False):
return json.dumps(self.as_dict(legacy=legacy, mutable=True))
@classmethod
def from_dict(cls, dict_, legacy=False):
try:
schema = DATAFRAME_SCHEMA
if legacy:
validator = functools.partial(DataPoint.from_dict, legacy=True)
# NOTE(peschk_l): __name__ is required for voluptuous exception
# message formatting
validator.__name__ = 'DataPoint.from_dict'
# NOTE(peschk_l): In case the legacy format is required, we
# create a new schema where DataPoint.from_dict is called with
# legacy=True. The "extend" method does create a new objects,
# and replaces existing keys with new ones.
schema = DATAFRAME_SCHEMA.extend({
voluptuous.Required('usage'): vutils.IterableValuesDict(
str, validator
),
})
valid = schema(dict_)
return cls(
valid["period"]["begin"],
valid["period"]["end"],
usage=valid["usage"])
except (voluptuous.error.Invalid, KeyError) as e:
raise ValueError("{} isn't a valid DataFrame: {}".format(dict_, e))
def | (self, points, type_):
"""Adds multiple points to the DataFrame
:param points: DataPoints to add.
:type point: list of DataPoints
"""
if type_ in self._usage:
self._usage[type_] += points
else:
self._usage[type_] = points
def add_point(self, point, type_):
"""Adds a single point to the DataFrame
:param point: DataPoint to add.
:type point: DataPoint
"""
if type_ in self._usage:
self._usage[type_].append(point)
else:
self._usage[type_] = [point]
def iterpoints(self):
"""Iterates over all datapoints of the dataframe.
Yields (type, point) tuples.
:rtype: (str, DataPoint)
"""
for type_, points in self._usage.items():
for point in points:
yield type_, point
def itertypes(self):
"""Iterates over all types of the dataframe.
Yields (type, (point, )) tuples.
:rtype: (str, (DataPoint, ))
"""
for type_, points in self._usage.items():
yield type_, points
def __repr__(self):
return 'DataFrame(metrics=[{}])'.format(','.join(self._usage.keys()))
| add_points |
rotate.go | package cmd
import (
"errors"
"fmt"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/sts"
cloudAWS "github.com/buzzsurfr/cloudkey/cloud/aws"
"github.com/spf13/cobra"
)
// rotateCmd represents the rotate command
var rotateCmd = &cobra.Command{
Use: "rotate",
Short: "Rotate the cloud access key",
Long: `Rotate uses the "active" access key (or the access key found with the --profile
option) to request a new access key, applies the access key locally, then uses
the new access key to remove the old access key.
Rotate will replace the access key in the same destination as the source, so
environment variables are replaced or the config file (credentials file) is
modified.`,
Run: rotateFunc,
}
func rotateFunc(cmd *cobra.Command, args []string) {
// fmt.Println("rotate called")
var p cloudAWS.Profile
var err error
if profileName != "" {
p, err = cloudAWS.GetByName(profileName)
} else {
p, err = cloudAWS.Current()
}
if err != nil {
panic(err)
}
// fmt.Printf("Profile: %s\n", p.String())
err = p.NewSession()
if err != nil {
panic(err)
}
// List Access Keys
userName, err := SessionUserName(p.Session)
if err != nil {
panic(err)
}
// Get Access Keys
oldIamSvc := iam.New(p.Session)
result, err := oldIamSvc.ListAccessKeys(&iam.ListAccessKeysInput{
UserName: aws.String(userName),
})
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case iam.ErrCodeNoSuchEntityException:
fmt.Println(iam.ErrCodeNoSuchEntityException, aerr.Error())
case iam.ErrCodeServiceFailureException:
fmt.Println(iam.ErrCodeServiceFailureException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
// fmt.Printf("ListAccessKeys: %+v\n", result)
if len(result.AccessKeyMetadata) != 1 {
fmt.Println("Too many access keys")
return
}
// Create new access key
newAccessKey, err := oldIamSvc.CreateAccessKey(&iam.CreateAccessKeyInput{
UserName: aws.String(userName),
})
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case iam.ErrCodeNoSuchEntityException:
fmt.Println(iam.ErrCodeNoSuchEntityException, aerr.Error())
case iam.ErrCodeLimitExceededException:
fmt.Println(iam.ErrCodeLimitExceededException, aerr.Error())
case iam.ErrCodeServiceFailureException:
fmt.Println(iam.ErrCodeServiceFailureException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
// fmt.Printf("CreateAccessKey: %+v\n", newAccessKey)
// Create new credential from access key
cred, err := cloudAWS.FromAccessKey(*newAccessKey.AccessKey)
if err != nil {
panic(err)
}
// Save old access key
// oldSess := p.Session
oldCred := p.Cred
// Save cred to profile
p.UpdateCredential(cred)
// Create new AWS session
err = p.NewSession()
if err != nil {
panic(err)
}
// Sleep for 15 seconds to allow access key to activate
time.Sleep(15 * time.Second)
// Deactivate old access key using new access key
newIamSvc := iam.New(p.Session)
_, err = newIamSvc.UpdateAccessKey(&iam.UpdateAccessKeyInput{
AccessKeyId: aws.String(oldCred.AccessKeyID),
Status: aws.String("Inactive"),
UserName: aws.String(userName),
})
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case iam.ErrCodeNoSuchEntityException:
fmt.Println(iam.ErrCodeNoSuchEntityException, aerr.Error())
case iam.ErrCodeLimitExceededException:
fmt.Println(iam.ErrCodeLimitExceededException, aerr.Error())
case iam.ErrCodeServiceFailureException:
fmt.Println(iam.ErrCodeServiceFailureException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
// Delete old access key using new access key
_, err = newIamSvc.DeleteAccessKey(&iam.DeleteAccessKeyInput{
AccessKeyId: aws.String(oldCred.AccessKeyID),
UserName: aws.String(userName),
})
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case iam.ErrCodeNoSuchEntityException:
fmt.Println(iam.ErrCodeNoSuchEntityException, aerr.Error())
case iam.ErrCodeLimitExceededException:
fmt.Println(iam.ErrCodeLimitExceededException, aerr.Error())
case iam.ErrCodeServiceFailureException:
fmt.Println(iam.ErrCodeServiceFailureException, aerr.Error())
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
}
// SessionUserName gets the user name of the current session
func | (sess *session.Session) (string, error) {
var userName string
// AWS sts:GetCallerIdentity API
svc := sts.New(sess)
result, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return "", err
}
// Parse ARN
resultArn, err := arn.Parse(*result.Arn)
if err != nil {
return "", err
}
// Verify is a user
s := strings.Split(resultArn.Resource, "/")
if s[0] != "user" {
return "", errors.New("Not a user")
}
userName = s[1]
return userName, nil
}
func init() {
rootCmd.AddCommand(rotateCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// rotateCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// rotateCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
rotateCmd.Flags().StringVarP(&profileName, "profile", "p", "", "Profile to rotate")
}
| SessionUserName |
test_protobuf.py | import pytest
from mitmproxy import certs
from mitmproxy import http
from mitmproxy import exceptions
from mitmproxy.test import tflow, tutils
from mitmproxy.io import protobuf
class TestProtobuf:
def test_roundtrip_client(self):
c = tflow.tclient_conn()
del c.reply
c.rfile = None
c.wfile = None
pc = protobuf._dump_http_client_conn(c)
lc = protobuf._load_http_client_conn(pc)
assert c.__dict__ == lc.__dict__
def test_roundtrip_client_cert(self, tdata):
c = tflow.tclient_conn()
c.rfile = None
c.wfile = None
del c.reply
with open(tdata.path("mitmproxy/net/data/clientcert/client.pem"), "rb") as f:
d = f.read()
c.clientcert = certs.Cert.from_pem(d)
pc = protobuf._dump_http_client_conn(c)
lc = protobuf._load_http_client_conn(pc)
assert c.__dict__ == lc.__dict__
def test_roundtrip_server(self):
s = tflow.tserver_conn()
del s.reply
s.wfile = None
s.rfile = None
ps = protobuf._dump_http_server_conn(s)
ls = protobuf._load_http_server_conn(ps)
assert s.__dict__ == ls.__dict__
def test_roundtrip_server_cert(self, tdata):
s = tflow.tserver_conn()
del s.reply
s.wfile = None
s.rfile = None
with open(tdata.path("mitmproxy/net/data/text_cert"), "rb") as f:
d = f.read()
s.cert = certs.Cert.from_pem(d)
ps = protobuf._dump_http_server_conn(s)
ls = protobuf._load_http_server_conn(ps)
assert s.__dict__ == ls.__dict__
def test_roundtrip_server_via(self):
s = tflow.tserver_conn()
s.via = tflow.tserver_conn()
del s.reply
s.wfile = None
s.rfile = None
ps = protobuf._dump_http_server_conn(s)
ls = protobuf._load_http_server_conn(ps)
assert s.__dict__ == ls.__dict__
del s.via.reply
s.via.wfile = None
s.via.rfile = None
assert s.via.__dict__ == ls.via.__dict__
def test_roundtrip_http_request(self):
req = http.HTTPRequest.wrap(tutils.treq())
preq = protobuf._dump_http_request(req)
lreq = protobuf._load_http_request(preq)
assert req.__dict__ == lreq.__dict__
def | (self):
req = http.HTTPRequest.wrap(tutils.treq(content=b""))
preq = protobuf._dump_http_request(req)
lreq = protobuf._load_http_request(preq)
assert req.__dict__ == lreq.__dict__
def test_roundtrip_http_response(self):
res = http.HTTPResponse.wrap(tutils.tresp())
pres = protobuf._dump_http_response(res)
lres = protobuf._load_http_response(pres)
assert res.__dict__ == lres.__dict__
def test_roundtrip_http_response_empty_content(self):
res = http.HTTPResponse.wrap(tutils.tresp(content=b""))
pres = protobuf._dump_http_response(res)
lres = protobuf._load_http_response(pres)
assert res.__dict__ == lres.__dict__
def test_roundtrip_http_error(self):
err = tflow.terr()
perr = protobuf._dump_http_error(err)
lerr = protobuf._load_http_error(perr)
assert err.__dict__ == lerr.__dict__
def test_roundtrip_http_flow_only_req(self):
f = tflow.tflow()
f.reply = None
pf = protobuf.dumps(f)
lf = protobuf.loads(pf, "http")
assert f.__dict__ == lf.__dict__
def test_roundtrip_http_flow_res(self):
f = tflow.tflow(resp=True)
f.reply = None
pf = protobuf.dumps(f)
lf = protobuf.loads(pf, "http")
assert f.__dict__ == lf.__dict__
def test_unsupported_dumps(self):
w = tflow.twebsocketflow()
with pytest.raises(exceptions.TypeError):
protobuf.dumps(w)
def test_unsupported_loads(self):
b = b"blobs"
with pytest.raises(exceptions.TypeError):
protobuf.loads(b, 'not-http')
| test_roundtrip_http_request_empty_content |
sidebar-items.js | initSidebarItems({"struct":[["R","Register `tx_iq_gain_hw1` reader"],["TX_IQ_GAIN_COMP_GC1_R","Field `tx_iq_gain_comp_gc1` reader - "],["TX_IQ_GAIN_COMP_GC1_W","Field `tx_iq_gain_comp_gc1` writer - "],["TX_IQ_GAIN_HW1_SPEC","tx_iq_gain_hw1."],["TX_IQ_PHASE_COMP_GC1_R","Field `tx_iq_phase_comp_gc1` reader - "],["TX_IQ_PHASE_COMP_GC1_W","Field `tx_iq_phase_comp_gc1` writer - "],["W","Register `tx_iq_gain_hw1` writer"]]}); |
||
defaults.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
RegisterDefaults(scheme)
return scheme.AddDefaultingFuncs(
SetDefaults_ClusterSpec,
)
}
func SetDefaults_ClusterSpec(obj *ClusterSpec) {
if obj.Topology == nil {
obj.Topology = &TopologySpec{}
}
if obj.Topology.Masters == "" {
obj.Topology.Masters = TopologyPublic
}
if obj.Topology.Nodes == "" {
obj.Topology.Nodes = TopologyPublic
}
if obj.Topology.DNS == nil {
obj.Topology.DNS = &DNSSpec{}
}
| }
if obj.API == nil {
obj.API = &AccessSpec{}
}
if obj.API.IsEmpty() {
switch obj.Topology.Masters {
case TopologyPublic:
obj.API.DNS = &DNSAccessSpec{}
case TopologyPrivate:
obj.API.LoadBalancer = &LoadBalancerAccessSpec{}
default:
glog.Infof("unknown master topology type: %q", obj.Topology.Masters)
}
}
if obj.API.LoadBalancer != nil && obj.API.LoadBalancer.Type == "" {
obj.API.LoadBalancer.Type = LoadBalancerTypePublic
}
} | if obj.Topology.DNS.Type == "" {
obj.Topology.DNS.Type = DNSTypePublic |
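A short sketch of how the defaulting above behaves when applied to a zero-value spec; it is assumed to live in the same v1alpha2 package (with fmt imported) and only uses the types and constants defined there.

// Illustrative godoc-style example: a zero-value ClusterSpec picks up the
// public topology, public DNS type and DNS-based API access defaults.
func ExampleSetDefaults_ClusterSpec() {
	spec := &ClusterSpec{}
	SetDefaults_ClusterSpec(spec)
	fmt.Println(spec.Topology.Masters == TopologyPublic)
	fmt.Println(spec.Topology.DNS.Type == DNSTypePublic)
	fmt.Println(spec.API.DNS != nil)
	// Output:
	// true
	// true
	// true
}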
simple.rs | use prometheus_exporter_base::{render_prometheus, MetricType, PrometheusMetric};
use std::fs::read_dir;
#[derive(Debug, Clone, Default)]
struct MyOptions {}
fn calculate_file_size(path: &str) -> Result<u64, std::io::Error> {
let mut total_size: u64 = 0;
for entry in read_dir(path)? { | }
Ok(total_size)
}
#[tokio::main]
async fn main() {
let addr = ([0, 0, 0, 0], 32221).into();
println!("starting exporter on {}", addr);
render_prometheus(addr, MyOptions::default(), |request, options| {
async move {
println!(
"in our render_prometheus(request == {:?}, options == {:?})",
request, options
);
let total_size_log = calculate_file_size("/var/log").unwrap();
let pc =
PrometheusMetric::new("folder_size", MetricType::Counter, "Size of the folder");
let mut s = pc.render_header();
let mut attributes = Vec::new();
attributes.push(("folder", "/var/log/"));
s.push_str(&pc.render_sample(Some(&attributes), total_size_log));
Ok(s)
}
})
.await;
} | let p = entry?.path();
if p.is_file() {
total_size += p.metadata()?.len();
} |
resource_monitoring_alert_policy_test.go | package google
import (
"fmt"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
// Stackdriver tests cannot be run in parallel otherwise they will error out with:
// Error 503: Too many concurrent edits to the project configuration. Please try again.
func TestAccMonitoringAlertPolicy(t *testing.T) |
func testAccMonitoringAlertPolicy_basic(t *testing.T) {
alertName := fmt.Sprintf("tf-test-%s", randString(t, 10))
conditionName := fmt.Sprintf("tf-test-%s", randString(t, 10))
filter := `metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"`
vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAlertPolicyDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccMonitoringAlertPolicy_basicCfg(alertName, conditionName, "ALIGN_RATE", filter),
},
{
ResourceName: "google_monitoring_alert_policy.basic",
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func testAccMonitoringAlertPolicy_update(t *testing.T) {
alertName := fmt.Sprintf("tf-test-%s", randString(t, 10))
conditionName := fmt.Sprintf("tf-test-%s", randString(t, 10))
filter1 := `metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\"`
aligner1 := "ALIGN_RATE"
filter2 := `metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\"`
aligner2 := "ALIGN_MAX"
vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAlertPolicyDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccMonitoringAlertPolicy_basicCfg(alertName, conditionName, aligner1, filter1),
},
{
ResourceName: "google_monitoring_alert_policy.basic",
ImportState: true,
ImportStateVerify: true,
},
{
Config: testAccMonitoringAlertPolicy_basicCfg(alertName, conditionName, aligner2, filter2),
},
{
ResourceName: "google_monitoring_alert_policy.basic",
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func testAccMonitoringAlertPolicy_full(t *testing.T) {
alertName := fmt.Sprintf("tf-test-%s", randString(t, 10))
conditionName1 := fmt.Sprintf("tf-test-%s", randString(t, 10))
conditionName2 := fmt.Sprintf("tf-test-%s", randString(t, 10))
vcrTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAlertPolicyDestroyProducer(t),
Steps: []resource.TestStep{
{
Config: testAccMonitoringAlertPolicy_fullCfg(alertName, conditionName1, conditionName2),
},
{
ResourceName: "google_monitoring_alert_policy.full",
ImportState: true,
ImportStateVerify: true,
},
},
})
}
func testAccCheckAlertPolicyDestroyProducer(t *testing.T) func(s *terraform.State) error {
return func(s *terraform.State) error {
config := googleProviderConfig(t)
for _, rs := range s.RootModule().Resources {
if rs.Type != "google_monitoring_alert_policy" {
continue
}
name := rs.Primary.Attributes["name"]
url := fmt.Sprintf("https://monitoring.googleapis.com/v3/%s", name)
_, err := sendRequest(config, "GET", "", url, nil)
if err == nil {
return fmt.Errorf("Error, alert policy %s still exists", name)
}
}
return nil
}
}
func testAccMonitoringAlertPolicy_basicCfg(alertName, conditionName, aligner, filter string) string {
return fmt.Sprintf(`
resource "google_monitoring_alert_policy" "basic" {
display_name = "%s"
enabled = true
combiner = "OR"
conditions {
display_name = "%s"
condition_threshold {
aggregations {
alignment_period = "60s"
per_series_aligner = "%s"
}
duration = "60s"
comparison = "COMPARISON_GT"
filter = "%s"
threshold_value = "0.5"
}
}
}
`, alertName, conditionName, aligner, filter)
}
func testAccMonitoringAlertPolicy_fullCfg(alertName, conditionName1, conditionName2 string) string {
return fmt.Sprintf(`
resource "google_monitoring_alert_policy" "full" {
display_name = "%s"
combiner = "OR"
enabled = true
conditions {
display_name = "%s"
condition_threshold {
threshold_value = 50
filter = "metric.type=\"compute.googleapis.com/instance/disk/write_bytes_count\" AND resource.type=\"gce_instance\""
duration = "60s"
comparison = "COMPARISON_GT"
aggregations {
alignment_period = "60s"
per_series_aligner = "ALIGN_RATE"
cross_series_reducer = "REDUCE_MEAN"
group_by_fields = [
"metric.label.device_name",
"project",
"resource.label.instance_id",
"resource.label.zone",
]
}
trigger {
percent = 10
}
}
}
conditions {
display_name = "%s"
condition_absent {
duration = "3600s"
filter = "metric.type=\"compute.googleapis.com/instance/cpu/utilization\" AND resource.type=\"gce_instance\""
aggregations {
alignment_period = "60s"
cross_series_reducer = "REDUCE_MEAN"
per_series_aligner = "ALIGN_MEAN"
group_by_fields = [
"project",
"resource.label.instance_id",
"resource.label.zone",
]
}
trigger {
count = 1
}
}
}
documentation {
content = "test content"
mime_type = "text/markdown"
}
}
`, alertName, conditionName1, conditionName2)
}
| {
testCases := map[string]func(t *testing.T){
"basic": testAccMonitoringAlertPolicy_basic,
"full": testAccMonitoringAlertPolicy_full,
"update": testAccMonitoringAlertPolicy_update,
}
for name, tc := range testCases {
// shadow the tc variable into scope so that when
// the loop continues, if t.Run hasn't executed tc(t)
// yet, we don't have a race condition
// see https://github.com/golang/go/wiki/CommonMistakes#using-goroutines-on-loop-iterator-variables
tc := tc
t.Run(name, func(t *testing.T) {
tc(t)
})
}
} |
timeframe.rs | use crate::timeline::Timeline;
use std::time::{Duration, SystemTime};
/// Identify a slot in a *specific* timeframe
///
/// The slots are not comparable to other slots made on a
/// different time frame
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
#[cfg_attr(
any(test, feature = "property-test-api"),
derive(test_strategy::Arbitrary)
)]
pub struct Slot(pub(crate) u64);
impl From<u64> for Slot {
fn from(slot_number: u64) -> Slot {
Slot(slot_number)
}
}
impl From<Slot> for u64 {
fn from(s: Slot) -> u64 {
s.0
}
}
/// Identify a slot in a specific timeframe and a leftover duration
#[derive(Debug)]
pub struct SlotAndDuration {
pub slot: Slot,
/// The leftover duration offset into that slot
pub offset: Duration,
}
/// Time frame which is a timeline that is configured to be split in discrete slots
#[derive(Debug, Clone)]
pub struct TimeFrame {
timeline: Timeline,
pub(crate) slot_offset: Slot,
slot_duration: SlotDuration,
}
/// Duration of a slot
///
/// For now we only support durations down to the second
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct SlotDuration(u64);
impl SlotDuration {
pub fn from_secs(seconds: u32) -> Self {
assert!(seconds < 600);
SlotDuration(seconds as u64)
}
pub fn to_duration(self) -> Duration {
Duration::from_secs(self.0)
}
}
impl TimeFrame {
/// Create a new time frame with a specific slot size
///
/// ```text
///
/// 0 1 2 3 4 5
/// x--------x--------x--------x--------x--------x frame ticking at per_slot
///
/// ^
/// |
/// timeline
/// ```
///
pub fn new(timeline: Timeline, per_slot: SlotDuration) -> Self {
TimeFrame {
timeline,
slot_offset: Slot(0),
slot_duration: per_slot,
}
}
/// Change time frame at a specific slot
///
/// Note that this also changes the beginning of this time frame, so it starts at the given slot
///
/// ```text
/// 0 1 2 3 4 5
/// x--------x--------┳--------x--------x--------x frame ticking at SlotDuration::from_secs(9)
/// |
/// ┕---x---x---x---x---x returned frame
/// 2 3 4 5 6 7
/// ↑
/// |
/// frame.change_frame(Slot(2), SlotDuration::from_secs(4))
/// ```
///
pub fn change_frame(&self, slot: Slot, duration_per_slot: SlotDuration) -> Self {
let d = Duration::from_secs(slot.0 * self.slot_duration.0);
let new_timeline = self.timeline.advance(d);
TimeFrame {
timeline: new_timeline,
slot_offset: Slot(self.slot_offset.0 + slot.0),
slot_duration: duration_per_slot,
}
}
pub fn slot0(&self) -> Slot {
Slot(self.slot_offset.0)
}
/// Given a system time get the slot and associated duration leftover
pub fn slot_at_precise(&self, at: &SystemTime) -> Option<SlotAndDuration> {
match self.timeline.differential(at) {
None => None,
Some(t) => {
let slot_nb = t.0.as_secs() / self.slot_duration.0;
let e = slot_nb * self.slot_duration.0;
let d = t.0 - Duration::from_secs(e); // cannot wrap
Some(SlotAndDuration {
slot: Slot(self.slot_offset.0 + slot_nb),
offset: d,
})
}
}
}
/// Get the slot associated with the given system time.
///
/// It returns None if the system time doesn't represent a valid slot in this time frame, for
/// example if the system time is before the time frame starting point.
pub fn slot_at(&self, at: &SystemTime) -> Option<Slot> {
match self.timeline.differential(at) {
None => None,
Some(t) => {
let slot_nb = t.0.as_secs() / self.slot_duration.0;
Some(Slot(self.slot_offset.0 + slot_nb))
}
}
}
/// Get the system time associated with a slot on a specific timeframe
///
/// Note if the slot is not supposed to be in this reference frame, then
/// None is returned
pub fn slot_to_systemtime(&self, slot: Slot) -> Option<SystemTime> {
slot.0
.checked_sub(self.slot_offset.0)
.map(|sd| self.timeline.0 + Duration::from_secs(sd * self.slot_duration.0))
}
/// Returns slot duration value.
pub fn slot_duration(&self) -> u64 {
self.slot_duration.0
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::timeline::Timeline;
#[test]
pub fn it_works() {
let now = SystemTime::now();
let t0 = Timeline::new(now);
let f0 = SlotDuration::from_secs(5);
| {
let expected_slot = Slot(16);
let x = now + Duration::from_secs(expected_slot.0 * f0.0);
assert_eq!(tf0.slot_at(&x), Some(expected_slot));
}
let f1 = SlotDuration::from_secs(2);
let tf1_start = now + Duration::from_secs(10);
let s0 = tf0.slot_at(&tf1_start);
assert_eq!(s0, Some(Slot(2)));
let s0 = s0.unwrap();
let tf1 = tf0.change_frame(s0, f1);
assert_eq!(tf1.slot_at(&tf1_start), Some(Slot(2)));
assert_eq!(tf1.slot_at(&now), None);
let t2 = tf1_start + Duration::from_secs(10);
assert_eq!(tf1.slot_at(&t2), Some(Slot(7)));
assert_eq!(tf0.slot_at(&t2), Some(Slot(4)));
}
} | let tf0 = TimeFrame::new(t0, f0);
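The slot arithmetic documented above boils down to integer division on elapsed seconds. A small Go sketch of the same computation (an illustration only, mirroring TimeFrame::slot_at, not part of the Rust crate):

package main

import (
	"fmt"
	"time"
)

// slotAt mirrors the arithmetic of TimeFrame::slot_at above: integer-divide
// the seconds elapsed since the frame start by the slot duration and add the
// frame's slot offset.
func slotAt(start, at time.Time, slotOffset, slotSecs uint64) (uint64, bool) {
	if at.Before(start) {
		return 0, false // before the frame's starting point: no valid slot
	}
	elapsed := uint64(at.Sub(start) / time.Second)
	return slotOffset + elapsed/slotSecs, true
}

func main() {
	now := time.Now()
	// 82 seconds into a frame of 5-second slots starting at slot 0 lands in
	// slot 16, consistent with the first assertion in the Rust test above.
	slot, ok := slotAt(now, now.Add(82*time.Second), 0, 5)
	fmt.Println(slot, ok) // 16 true
}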
|
sketch.js | var canvas;
var backgroundImage, car1_img, car2_img, track;
var database, gameState;
var form, player, playerCount;
var allPlayers, car1, car2;
var cars = [];
function preload() {
backgroundImage = loadImage("./assets/background.png");
car1_img = loadImage("assets/car1.png");
car2_img = loadImage("assets/car2.png");
track = loadImage("assets/track.jpg");
}
function setup() {
canvas = createCanvas(windowWidth, windowHeight);
database = firebase.database();
game = new Game();
game.getState();
console.log(gameState)
game.start();
}
function draw() {
background(backgroundImage);
if (playerCount === 2) {
game.update(1);
}
if (gameState === 1) {
game.play();
}
} |
function windowResized() {
resizeCanvas(windowWidth, windowHeight);
} | |
csp.py | #!/usr/bin/env python3
import sys
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
from html import escape
except ImportError:
sys.exit('ERROR: It seems like you are not running Python 3. '
'This script only works with Python 3!')
main_doc = '''
<!doctype html>
<html>
<head>
<script src="https://code.jquery.com/jquery-3.3.1.min.js"></script>
<link rel="stylesheet" type="text/css" href="/style.css">
</head>
<body>
<h1>SEC Playground</h1>
<iframe src="https://sec.uni-stuttgart.de"></iframe>
<br>
<button id='mybutton'>Resize iframe</button>
<script>
$('#mybutton').click(function() {{
$('iframe').toggleClass('fullwidth');
}})
</script>
{result}
</body></html>
'''
style_doc = '''
iframe {
border: solid red 5px;
width: 400px;
height: 200px;
}
iframe.fullwidth {
width: 100%;
}
'''
class MyHandler(BaseHTTPRequestHandler):
def | (self):
url_dict = urlparse(self.path) # parse URL string into dictionary
get_dict = parse_qs(url_dict.query) # select query string from URL dictionary
if url_dict.path == '/style.css':
self.send_response(200)
self.send_header('Content-Type', 'text/css;charset=utf-8')
self.end_headers()
self.wfile.write(bytes(style_doc, 'UTF-8'))
return
self.send_response(200)
self.send_header('Content-Type', 'text/html;charset=utf-8')
self.send_header('X-XSS-Protection', '0') # disables XSS protection in the browser
self.send_header('Content-Security-policy', 'script-src \'self\' \'unsafe-inline\' code.jquery.com; frame-ancestors https://sec.uni-stuttgart.de; style-src \'self\';')
self.end_headers()
result = ''
if 'lookup' in get_dict:
lookup = get_dict['lookup'][0]
result = f'<h3>Search Results for {lookup}</h3>'
output = main_doc.format(result=result)
self.wfile.write(bytes(output, 'UTF-8'))
if __name__ == '__main__':
server = HTTPServer(('', 8081), MyHandler)
print ("Starting web server on http://localhost:8081/")
server.serve_forever()
| do_GET |
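The interesting part of this exercise handler is the header combination: a Content-Security-Policy with script-src, frame-ancestors and style-src directives, plus the legacy X-XSS-Protection filter disabled. For comparison only, the same headers in a minimal Go net/http server (a hypothetical translation, not part of the exercise):

package main

import "net/http"

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Same policy as the Python handler above: allow self plus inline
		// scripts and code.jquery.com, restrict framing to sec.uni-stuttgart.de.
		w.Header().Set("Content-Security-Policy",
			"script-src 'self' 'unsafe-inline' code.jquery.com; "+
				"frame-ancestors https://sec.uni-stuttgart.de; style-src 'self';")
		w.Header().Set("X-XSS-Protection", "0") // disable the legacy browser XSS filter
		w.Write([]byte("<!doctype html><h1>SEC Playground</h1>"))
	})
	http.ListenAndServe(":8081", nil)
}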
function.py | from inspect import signature
from typing import Callable, Any, List
import re
import copy
from .type import Type
class Function(Type):
def __init__(self, fn: Callable[..., Any], name: str = "anonymouse") -> None:
self.name = name
self.vars = list(signature(fn).parameters)
self.expr = "[built-in]"
self.fn = fn
self.varnum = len(signature(fn).parameters)
def __call__(self, *args, **kwds):
|
def __str__(self) -> str:
return f"{self.name}({','.join(self.vars)})={self.expr}"
class ListFunction(Function):
pattern = r"[a-zA-Z]+\(.+\)"
def __init__(self, expr: str, vars: List[str], name: str = "anonymouse") -> None:
self.name = name
self.expr = expr
self.vars = vars
self.varnum = len(vars)
from ..expression import infix_to_rpnlist
rpn_list = infix_to_rpnlist(expr)
for i in range(len(rpn_list)):
if (rpn_list[i] in vars):
rpn_list[i] = str(vars.index(rpn_list[i]))
self.rpn_list = rpn_list
def __call__(self, *args, **kwds):
res = copy.deepcopy(self.rpn_list)
for i in range(len(self.rpn_list)):
if isinstance(res[i], str) and res[i].isdigit():
res[i] = args[int(res[i])]
from ..expression import eval_rpn
return eval_rpn(res)
def subvars(self):
# a function to replace variables with their values
def f(m: re.Match):
from ..ft_global import user_vars
word = m.group().lower()
if word in user_vars and not isinstance(user_vars[word], Function):
return(str(user_vars[word]))
else:
return(m.group())
result = re.sub(r"[a-zA-Z]+", f, self.expr)
return result.strip()
def __str__(self) -> str:
result = self.subvars()
return f"{self.name}({','.join(self.vars)}) = {result}"
| return self.fn(*args, **kwds) |
eventlogger_types.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"github.com/bakito/k8s-event-logger-operator/version"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EventLoggerSpec defines the desired state of EventLogger
type EventLoggerSpec struct {
// Kinds the kinds to log the events for
// +kubebuilder:validation:MinItems=1
Kinds []Kind `json:"kinds,omitempty"`
// EventTypes the event types to log. If empty all events are logged.
// +kubebuilder:validation:MinItems=0
EventTypes []string `json:"eventTypes,omitempty"`
// Labels additional labels for the logger pod
Labels map[string]string `json:"labels,omitempty" validate:"k8s-label-keys,k8s-label-values"`
// Annotations additional annotations for the logger pod
Annotations map[string]string `json:"annotations,omitempty" validate:"k8s-annotation-keys"`
// ScrapeMetrics if true, prometheus scrape annotations are added to the pod
ScrapeMetrics *bool `json:"scrapeMetrics,omitempty"`
// namespace the namespace to watch on, may be an empty string
// +nullable
// +optional
Namespace *string `json:"namespace,omitempty"`
// ServiceAccount the service account to use for the logger pod
ServiceAccount string `json:"serviceAccount,omitempty"`
// NodeSelector is a selector which must be true for the pod to fit on a node.
// Selector which must match a node's labels for the pod to be scheduled on that node.
// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty" validate:"k8s-label-keys,k8s-label-values"`
// LogFields fields of the event to be logged.
LogFields []LogField `json:"logFields,omitempty"`
}
// Kind defines a kind to log events for
type Kind struct {
// +kubebuilder:validation:MinLength=3
Name string `json:"name"`
// +optional
// +nullable
ApiGroup *string `json:"apiGroup,omitempty"`
// EventTypes the event types to log. If empty events are logged as defined in spec.
// +kubebuilder:validation:MinItems=0
EventTypes []string `json:"eventTypes,omitempty"`
// Reasons the event reasons to log. If empty events with any reasons are logged.
// +kubebuilder:validation:MinItems=0
Reasons []string `json:"reasons,omitempty"`
// MatchingPatterns optional regex pattern that must be contained in the message to be logged
// +kubebuilder:validation:MinItems=0
MatchingPatterns []string `json:"matchingPatterns,omitempty"`
// SkipOnMatch skip the entry if matched
SkipOnMatch *bool `json:"skipOnMatch,omitempty"`
}
// LogField defines a log field
type LogField struct {
// name of the log field
Name string `json:"name"`
// Path within the corev1.Event struct https://github.com/kubernetes/api/blob/master/core/v1/types.go
// +kubebuilder:validation:MinItems=1
Path []string `json:"path,omitempty"`
// Value a static value of the log field. Can be used to add static log fields
// +optional
// +nullable
Value *string `json:"value,omitempty"`
}
// EventLoggerStatus defines the observed state of EventLogger
type EventLoggerStatus struct {
// OperatorVersion the version of the operator that processed the cr
OperatorVersion string `json:"operatorVersion"`
// LastProcessed the timestamp the cr was last processed
LastProcessed metav1.Time `json:"lastProcessed"`
// Hash
Hash string `json:"hash,omitempty"`
// Error
Error string `json:"error,omitempty"`
}
// +kubebuilder:object:root=true
// EventLogger is the Schema for the eventloggers API
type EventLogger struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec EventLoggerSpec `json:"spec,omitempty"`
Status EventLoggerStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// EventLoggerList contains a list of EventLogger
type EventLoggerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []EventLogger `json:"items"`
}
func | () {
SchemeBuilder.Register(&EventLogger{}, &EventLoggerList{})
}
// Apply update the status of the current event logger
func (in *EventLogger) Apply(err error) {
if err != nil {
in.Status.Error = err.Error()
} else {
in.Status.Error = ""
}
in.Status.LastProcessed = metav1.Now()
in.Status.OperatorVersion = version.Version
}
| init |
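A small sketch of how these types compose into a spec. It is assumed to sit in the same v1 package; the concrete kind, event type and field values are made up for illustration.

// Sketch: an EventLoggerSpec that logs Warning events for Pods and adds one
// path-based and one static log field.
func exampleSpec() EventLoggerSpec {
	component := "demo" // illustrative static value
	return EventLoggerSpec{
		Kinds: []Kind{
			{Name: "Pod", EventTypes: []string{"Warning"}},
		},
		LogFields: []LogField{
			{Name: "reason", Path: []string{"reason"}},
			{Name: "component", Value: &component},
		},
	}
}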
vocab_element.py | # -*- coding: utf-8 -*-
import abc
__author__ = "Patrick Hohenecker"
__copyright__ = (
"Copyright (c) 2017, Patrick Hohenecker\n"
"All rights reserved.\n"
"\n"
"Redistribution and use in source and binary forms, with or without\n"
"modification, are permitted provided that the following conditions are met:\n"
"\n" | " this list of conditions and the following disclaimer in the documentation\n"
" and/or other materials provided with the distribution.\n"
"\n"
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n"
"ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n"
"WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n"
"DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n"
"ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n"
"(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n"
"LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n"
"ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n"
"SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
)
__license__ = "BSD-2-Clause"
__version__ = "2017.1"
__date__ = "Nov 12, 2017"
__maintainer__ = "Patrick Hohenecker"
__email__ = "[email protected]"
__status__ = "Development"
class VocabElement(metaclass=abc.ABCMeta):
"""This is an interface that defines a basic scaffolding for classes that are used to specify some part of the
vocabulary of a knowledge graph.
"""
@property
@abc.abstractmethod
def index(self) -> int:
"""int: A unique index that identifies a ``VocabElement``."""
pass
@property
@abc.abstractmethod
def name(self) -> str:
"""str: A unique name that, just like :attr:`index`, identifies a ``VocabElement``."""
pass | "1. Redistributions of source code must retain the above copyright notice, this\n"
" list of conditions and the following disclaimer.\n"
"2. Redistributions in binary form must reproduce the above copyright notice,\n" |
renew_web_site_instance.go | package green
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// RenewWebSiteInstance invokes the green.RenewWebSiteInstance API synchronously
// api document: https://help.aliyun.com/api/green/renewwebsiteinstance.html
func (client *Client) RenewWebSiteInstance(request *RenewWebSiteInstanceRequest) (response *RenewWebSiteInstanceResponse, err error) {
response = CreateRenewWebSiteInstanceResponse()
err = client.DoAction(request, response)
return
}
// RenewWebSiteInstanceWithChan invokes the green.RenewWebSiteInstance API asynchronously
// api document: https://help.aliyun.com/api/green/renewwebsiteinstance.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) RenewWebSiteInstanceWithChan(request *RenewWebSiteInstanceRequest) (<-chan *RenewWebSiteInstanceResponse, <-chan error) {
responseChan := make(chan *RenewWebSiteInstanceResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.RenewWebSiteInstance(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// RenewWebSiteInstanceWithCallback invokes the green.RenewWebSiteInstance API asynchronously
// api document: https://help.aliyun.com/api/green/renewwebsiteinstance.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) RenewWebSiteInstanceWithCallback(request *RenewWebSiteInstanceRequest, callback func(response *RenewWebSiteInstanceResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *RenewWebSiteInstanceResponse
var err error
defer close(result)
response, err = client.RenewWebSiteInstance(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// RenewWebSiteInstanceRequest is the request struct for api RenewWebSiteInstance
type RenewWebSiteInstanceRequest struct {
*requests.RpcRequest
ClientToken string `position:"Query" name:"ClientToken"`
OrderNum requests.Integer `position:"Query" name:"OrderNum"`
CommodityCode string `position:"Query" name:"CommodityCode"`
OwnerId requests.Integer `position:"Query" name:"OwnerId"`
Duration requests.Integer `position:"Query" name:"Duration"`
InstanceId string `position:"Query" name:"InstanceId"`
PricingCycle string `position:"Query" name:"PricingCycle"`
OrderType string `position:"Query" name:"OrderType"`
}
// RenewWebSiteInstanceResponse is the response struct for api RenewWebSiteInstance
type RenewWebSiteInstanceResponse struct {
*responses.BaseResponse
Code string `json:"Code" xml:"Code"`
Message string `json:"Message" xml:"Message"`
OrderId string `json:"OrderId" xml:"OrderId"`
InstanceId string `json:"InstanceId" xml:"InstanceId"`
RequestId string `json:"RequestId" xml:"RequestId"`
InstanceIds InstanceIdsInRenewWebSiteInstance `json:"InstanceIds" xml:"InstanceIds"`
}
// CreateRenewWebSiteInstanceRequest creates a request to invoke RenewWebSiteInstance API
func CreateRenewWebSiteInstanceRequest() (request *RenewWebSiteInstanceRequest) {
request = &RenewWebSiteInstanceRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("Green", "2017-08-23", "RenewWebSiteInstance", "green", "openAPI")
return
}
// CreateRenewWebSiteInstanceResponse creates a response to parse from RenewWebSiteInstance response
func CreateRenewWebSiteInstanceResponse() (response *RenewWebSiteInstanceResponse) | {
response = &RenewWebSiteInstanceResponse{
BaseResponse: &responses.BaseResponse{},
}
return
} |
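Typical call pattern for this generated API, sketched under the assumption that the package exposes the SDK's usual NewClientWithAccessKey constructor and that fmt is imported; region, credentials and instance id are placeholders.

// Sketch: build the request with the provided constructor, set the query
// parameters, and invoke the synchronous variant.
func renewExample() {
	client, err := NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	request := CreateRenewWebSiteInstanceRequest()
	request.InstanceId = "<instanceId>"
	request.Duration = requests.NewInteger(12)
	request.PricingCycle = "Month"
	response, err := client.RenewWebSiteInstance(request)
	if err != nil {
		panic(err)
	}
	fmt.Println(response.OrderId)
}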
|
catlr.py | # -*- coding: utf-8 -*-
from .misc import *
def | (e_esp, Mp_esp, Mp_folga, ts_esp, polos_MA, zeros_MA, Kp_MA, gaindc, pos_polo_c=-0.01):
"""
e_esp : expected steady-state error
Mp_esp : maximum expected overshoot
Mp_folga : margin added to the maximum expected overshoot -> for plants that are only approximately second order
ts_esp : desired peak time / rise time
polos_MA : open-loop plant poles -> obtained with pole(G_MA)
zeros_MA : open-loop plant zeros -> obtained with zero(G_MA)
Kp_MA : open-loop gain of the plant
gaindc : open-loop plant gain as s -> 0
pos_polo_c : desired position of the compensator pole
"""
# Determine the compensator specifications
Kp, Kc = get_kc_lr(e_esp, gaindc)
psi = get_psi(Mp_esp, Mp_folga)
Wn = get_wn(ts_esp, psi)
sigma, Wd = get_paramOrd2(psi,Wn)
print(f"Kc = {Kc}")
print(f"Kp = {Kp}")
print(f"ξ = {psi}")
print(f"Wn = {Wn}\t rad/s")
print(f"σ = {sigma}")
print(f"Wd = {Wd}")
print("*********************************************\n")
# Dominant poles of the system
polesDominant = get_poleDominant(sigma, Wd)
print(f"Polo dominante 1 -> {polesDominant[0]}")
print(f"Polo dominante 2 -> {polesDominant[1]}")
print("*********************************************\n")
# Determine the compensator pole
polo_c = complex(-abs(pos_polo_c), 0)
print(f"Polo controlador -> {polo_c}")
print("*********************************************\n")
# Determine the compensator zero
# Kp = lim {Kc * G(s) * (s+Z_c)/(s+P_c)} as s tends to 0
# (P_c * Kp)/(Kc * dcgain(G))
zero_c = complex(-abs(float(polo_c*(Kp /(gaindc)))), 0)
print(f"Zero controlador -> {zero_c}")
print("*********************************************\n")
# Determine the compensator gain, Kc, using the magnitude condition
Kc = get_KcByCM(polesDominant, polos_MA, zeros_MA, zero_c, polo_c, Kp_MA)
print(f"Kc = {Kc}")
print("*********************************************\n")
# Build the controller transfer function
numC = np.array([1, abs(zero_c)], dtype=float)
denC = np.array([1, abs(polo_c)], dtype=float)
C = tf(float(Kc)*numC, denC) # Controlador
print(f"Controle de atraso = Kc * (s+z)/(s+p) = ")
print(f"\t= {Kc} * (s+z)/(s+p) = \t{C}")
# Plot the controller pole and zero locations
plot_c(polesDominant, zero_c, polo_c)
# Return the controller
return C
#if __name__ == "__main__":
# Catlr = catlr(e_esp=0.01, Mp_esp=10, Mp_folga=5, ts_esp=0.01, polos_MA=[-47.93], Kp_MA=63.08, gaindc=63.08/47.93, pos_polo_c=-0.01) | catlr |
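The zero placement in catlr encodes the compensator's static-gain constraint stated in the inline comment above; written out (with Kp the desired position-error constant, G the plant, and p_c, z_c the compensator pole and zero):

K_p = \lim_{s \to 0} K_c \, G(s) \, \frac{s + z_c}{s + p_c} = K_c \, G(0) \, \frac{z_c}{p_c}
\quad\Longrightarrow\quad
z_c = \frac{p_c \, K_p}{K_c \, G(0)}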
operation-call.py | #!/usr/bin/env python3
import pyperf
import pyvips
def operation_call(loops):
range_it = range(loops)
t0 = pyperf.perf_counter()
|
return pyperf.perf_counter() - t0
runner = pyperf.Runner()
runner.bench_time_func('Operation.call', operation_call) | for loops in range_it:
_ = pyvips.Operation.call('black', 10, 10) |