file_name
stringlengths 3
137
| prefix
stringlengths 0
918k
| suffix
stringlengths 0
962k
| middle
stringlengths 0
812k
|
---|---|---|---|
create.go
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rest
import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/validation/field"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/kubernetes/pkg/api/validation/genericvalidation"
"k8s.io/kubernetes/pkg/api/validation/path"
)
// RESTCreateStrategy defines the minimum validation, accepted input, and
// name generation behavior to create an object that follows Kubernetes
// API conventions.
type RESTCreateStrategy interface {
runtime.ObjectTyper
// The name generator is used when the standard GenerateName field is set.
// The NameGenerator will be invoked prior to validation.
names.NameGenerator
// NamespaceScoped returns true if the object must be within a namespace.
NamespaceScoped() bool
// PrepareForCreate is invoked on create before validation to normalize
// the object. For example: remove fields that are not to be persisted,
// sort order-insensitive list fields, etc. This should not remove fields
// whose presence would be considered a validation error.
PrepareForCreate(ctx genericapirequest.Context, obj runtime.Object)
// Validate returns an ErrorList with validation errors or nil. Validate
// is invoked after default fields in the object have been filled in
// before the object is persisted. This method should not mutate the
// object.
Validate(ctx genericapirequest.Context, obj runtime.Object) field.ErrorList
// Canonicalize allows an object to be mutated into a canonical form. This
// ensures that code that operates on these objects can rely on the common
// form for things like comparison. Canonicalize is invoked after
// validation has succeeded but before the object has been persisted.
// This method may mutate the object.
Canonicalize(obj runtime.Object)
}
// BeforeCreate ensures that common operations for all resources are performed on creation. It only returns
// errors that can be converted to api.Status. It invokes PrepareForCreate, then GenerateName, then Validate.
// It returns nil if the object should be created.
func BeforeCreate(strategy RESTCreateStrategy, ctx genericapirequest.Context, obj runtime.Object) error {
objectMeta, kind, kerr := objectMetaAndKind(strategy, obj)
if kerr != nil {
return kerr
}
if strategy.NamespaceScoped() {
if !ValidNamespace(ctx, objectMeta) {
return errors.NewBadRequest("the namespace of the provided object does not match the namespace sent on the request")
}
} else {
objectMeta.Namespace = metav1.NamespaceNone
}
objectMeta.DeletionTimestamp = nil
objectMeta.DeletionGracePeriodSeconds = nil
strategy.PrepareForCreate(ctx, obj)
FillObjectMetaSystemFields(ctx, objectMeta)
if len(objectMeta.GenerateName) > 0 && len(objectMeta.Name) == 0 {
objectMeta.Name = strategy.GenerateName(objectMeta.GenerateName)
}
// ClusterName is ignored and should not be saved
objectMeta.ClusterName = ""
if errs := strategy.Validate(ctx, obj); len(errs) > 0 {
|
// Now run common validation on object meta
// Do this *after* custom validation so that specific error messages are shown whenever possible
if errs := genericvalidation.ValidateObjectMeta(objectMeta, strategy.NamespaceScoped(), path.ValidatePathSegmentName, field.NewPath("metadata")); len(errs) > 0 {
return errors.NewInvalid(kind.GroupKind(), objectMeta.Name, errs)
}
strategy.Canonicalize(obj)
return nil
}
// CheckGeneratedNameError checks whether an error that occurred creating a resource is due
// to generation being unable to pick a valid name.
func CheckGeneratedNameError(strategy RESTCreateStrategy, err error, obj runtime.Object) error {
if !errors.IsAlreadyExists(err) {
return err
}
objectMeta, kind, kerr := objectMetaAndKind(strategy, obj)
if kerr != nil {
return kerr
}
if len(objectMeta.GenerateName) == 0 {
return err
}
return errors.NewServerTimeoutForKind(kind.GroupKind(), "POST", 0)
}
// objectMetaAndKind retrieves kind and ObjectMeta from a runtime object, or returns an error.
func objectMetaAndKind(typer runtime.ObjectTyper, obj runtime.Object) (*metav1.ObjectMeta, schema.GroupVersionKind, error) {
objectMeta, err := metav1.ObjectMetaFor(obj)
if err != nil {
return nil, schema.GroupVersionKind{}, errors.NewInternalError(err)
}
kinds, _, err := typer.ObjectKinds(obj)
if err != nil {
return nil, schema.GroupVersionKind{}, errors.NewInternalError(err)
}
return objectMeta, kinds[0], nil
}
// NamespaceScopedStrategy has a method to tell if the object must be in a namespace.
type NamespaceScopedStrategy interface {
// NamespaceScoped returns if the object must be in a namespace.
NamespaceScoped() bool
}
|
return errors.NewInvalid(kind.GroupKind(), objectMeta.Name, errs)
}
// Custom validation (including name validation) passed
|
response.go
|
package client
import "fmt"
func SetHeader(key string, value interface{}) error {
context, _ := handlerSessions.Get(Gid())
var ack bool
return context.Call("RequestSession.Write",
&EchoPacket{context.(*Context).SessionId,
[]byte(fmt.Sprintf("%s=%v", key, value)), 40,
}, &ack)
}
|
context.WriteHeader(statusCode)
}
|
func SetHttpCode(statusCode int) {
context, _ := handlerSessions.Get(Gid())
|
_version.py
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440-pre"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "keras_ocr/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
|
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands, ))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
["describe", "--tags", "--dirty", "--always", "--long", "--match",
"%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date")
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None
}
|
"""Exception raised if a method is not valid for the current scenario."""
|
mockup_test.go
|
/*
* Copyright 2019 Nalej
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
|
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package inventory
import "github.com/onsi/ginkgo"
var _ = ginkgo.Describe("Inventory provider", func() {
sp := NewMockupInventoryProvider()
RunTest(sp)
})
|
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
|
data_translator.py
|
import dataclasses
from enum import unique
import click
import datasets
from datasets import features
from datasets.arrow_dataset import Dataset
from datasets.dataset_dict import DatasetDict
from src.ner_model.chunker.abstract_model import Chunker
from src.utils.utils import remove_BIE
import dataclasses
from seqeval.metrics.sequence_labeling import get_entities
from collections import defaultdict
from logging import getLogger
from src.utils.params import span_length
from hydra.utils import get_original_cwd
from hashlib import md5
import prettytable
from src.ner_model.chunker import ChunkerConfig
from omegaconf import MISSING
logger = getLogger(__name__)
@dataclasses.dataclass
class MSCConfig:
ner_dataset: str = MISSING
output_dir: str = MISSING
with_o: bool = False
chunker: ChunkerConfig = ChunkerConfig()
o_sampling_ratio: float = 1.0
# hard_o_sampling: bool = False
# o_outside_entity: bool = False
# weight_of_hard_o_for_easy_o: float = 0.5 #
from tqdm import tqdm
from collections import Counter
import random
def remove_misguided_fns(starts, ends, labels):
new_starts, new_ends, new_labels = [], [], []
misguided_tokens = set()
for s, e, l in zip(starts, ends, labels):
if l == "MISGUIDANCE":
for i in range(s, e):
misguided_tokens.add(i)
for s, e, l in zip(starts, ends, labels):
if l != "MISGUIDANCE":
if l.startswith("nc"):
span = set(range(s, e))
if span & misguided_tokens:
continue
new_starts.append(s)
new_ends.append(e)
new_labels.append(l)
return new_starts, new_ends, new_labels
def undersample_thesaurus_negatives(pre_span_classification_dataset):
label_counter = Counter(
[label for snt in pre_span_classification_dataset["labels"] for label in snt]
)
pass
positive_labels = [
label for label in label_counter.keys() if not label.startswith("nc-")
]
max_positive_count = max(label_counter[label] for label in positive_labels)
thesaurus_negative_class_sampling_ratio = {
label: max_positive_count / count
for label, count in label_counter.items()
if label != "nc-O" and label.startswith("nc-")
}
new_pre_span_classification_dataset = defaultdict(list)
pscd = pre_span_classification_dataset
for tokens, starts, ends, labels in zip(
pscd["tokens"], pscd["starts"], pscd["ends"], pscd["labels"]
):
new_starts = []
new_ends = []
new_labels = []
for s, e, l in zip(starts, ends, labels):
if (
l != "nc-O"
and l.startswith("nc-")
and random.random() > thesaurus_negative_class_sampling_ratio[l]
):
continue
new_starts.append(s)
new_ends.append(e)
new_labels.append(l)
new_pre_span_classification_dataset["tokens"].append(tokens)
new_pre_span_classification_dataset["starts"].append(new_starts)
new_pre_span_classification_dataset["ends"].append(new_ends)
new_pre_span_classification_dataset["labels"].append(new_labels)
return new_pre_span_classification_dataset
def ner_datasets_to_span_classification_datasets(
ner_datasets: datasets.DatasetDict,
data_args: MSCConfig,
enumerator: Chunker,
) -> datasets.DatasetDict:
pre_span_classification_datasets = dict()
label_names = sorted(
set(
[
remove_BIE(tag)
for tag in ner_datasets["test"].features["ner_tags"].feature.names
if tag != "O"
]
)
)
if data_args.with_o:
if "nc-O" not in label_names:
label_names = ["nc-O"] + label_names
info = datasets.DatasetInfo(
features=datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"starts": datasets.Sequence(datasets.Value("int32")),
"ends": datasets.Sequence(datasets.Value("int32")),
"labels": datasets.Sequence(datasets.ClassLabel(names=label_names)),
}
)
)
for key in ner_datasets:
pre_span_classification_dataset = defaultdict(list)
ner_tag_labels = ner_datasets[key].features["ner_tags"].feature.names
for snt in tqdm(ner_datasets[key]):
registered_chunks = set()
ner_tags = [ner_tag_labels[tag] for tag in snt["ner_tags"]]
starts = []
ends = []
labels = []
for label, s, e in get_entities(ner_tags):
starts.append(s)
ends.append(e + 1)
labels.append(label)
registered_chunks.add((s, e))
if data_args.with_o and key in {"train", "validation"}:
for s, e in enumerator.predict(snt["tokens"]):
if (
(s, e) not in registered_chunks
and data_args.o_sampling_ratio > random.random()
):
starts.append(s)
ends.append(e)
labels.append("nc-O")
starts, ends, labels = remove_misguided_fns(starts, ends, labels)
if labels:
pre_span_classification_dataset["tokens"].append(snt["tokens"])
pre_span_classification_dataset["starts"].append(starts)
pre_span_classification_dataset["ends"].append(ends)
pre_span_classification_dataset["labels"].append(labels)
# if key == "train":
# pre_span_classification_dataset = undersample_thesaurus_negatives(
# pre_span_classification_dataset
# )
pre_span_classification_datasets[key] = datasets.Dataset.from_dict(
pre_span_classification_dataset, info=info
)
return datasets.DatasetDict(pre_span_classification_datasets)
import numpy as np
def label_balancing_span_classification_datasets(
span_classification_datasets: datasets.DatasetDict, o_and_min_label_count_ratio=1
):
ret_datasets = dict()
if "test" in span_classification_datasets:
info = datasets.DatasetInfo(
features=span_classification_datasets["test"].features
)
else:
info = datasets.DatasetInfo(
features=span_classification_datasets["train"].features
)
for split_key, dataset_split in span_classification_datasets.items():
if split_key != "test":
if "labels" in dataset_split.features:
# for multi span classification datasets
span_classification_dataset = {
"tokens": [],
"starts": [],
"ends": [],
"labels": [],
}
label_count = Counter(
[l for snt in dataset_split["labels"] for l in snt]
)
min_label_count = min(label_count.values())
logger.info("min label count: %d" % min_label_count)
undersampling_ratio = {
label: min_label_count / count
for label, count in label_count.items()
}
for snt in tqdm(dataset_split):
starts = []
ends = []
labels = []
for s, e, l in zip(snt["starts"], snt["ends"], snt["labels"]):
if random.random() < undersampling_ratio[l]:
starts.append(s)
ends.append(e)
labels.append(l)
if labels:
span_classification_dataset["tokens"].append(snt["tokens"])
span_classification_dataset["starts"].append(starts)
span_classification_dataset["ends"].append(ends)
span_classification_dataset["labels"].append(labels)
ret_datasets[split_key] = datasets.Dataset.from_dict(
span_classification_dataset, info=info
)
elif "label" in dataset_split.features:
# for one span classification datasets
span_classification_dataset = {
"tokens": [],
"start": [],
"end": [],
"label": [],
}
label_names = dataset_split.features["label"].names
label_count = Counter(dataset_split["label"])
min_label_count = min(label_count.values())
logger.info("min label count: %d" % min_label_count)
undersampling_ratio = dict()
for label, count in label_count.items():
if label_names[label] == "O":
undersampling_ratio[label] = (
min_label_count / count * o_and_min_label_count_ratio
)
else:
undersampling_ratio[label] = min_label_count / count
for snt in tqdm(dataset_split):
if random.random() < undersampling_ratio[snt["label"]]:
for key, value in snt.items():
span_classification_dataset[key].append(value)
ret_datasets[split_key] = datasets.Dataset.from_dict(
span_classification_dataset, info=info
)
else:
raise NotImplementedError
else:
ret_datasets[split_key] = dataset_split
return datasets.DatasetDict(ret_datasets)
import os
from pathlib import Path
def print_label_statistics(span_classification_datasets: datasets.DatasetDict):
for split_key, dataset_split in span_classification_datasets.items():
if "label" in dataset_split.features:
label_names = dataset_split.features["label"].names
label_count = Counter([label_names[l] for l in dataset_split["label"]])
else:
pass
label_names = dataset_split.features["labels"].feature.names
label_count = Counter(
[label_names[l] for snt in dataset_split["labels"] for l in snt]
)
logger.info("label count of %s split: %s" % (split_key, label_count))
from copy import deepcopy
from typing import Dict, List
import random
def load_o_label_spans(unlabelled_corpus: Dataset, span_num: int) -> List:
# 各文から取得するスパン数を指定
# 各文に対してspan_length長のスパンをかき集めてくる
# 各文に定められた個数になるまでサンプリング
# 全体の断片から決められたスパン数になるまでサンプリング
pass
snt_num = len(unlabelled_corpus)
span_num_per_snt = int(span_num / snt_num) + 100
o_label_spans = []
for snt in unlabelled_corpus["tokens"]:
spans = [
(s, e)
for s in range(len(snt))
for e in range(s + 1, len(snt) + 1)
if e - s <= MSCConfig.span_length
]
for s, e in random.sample(spans, min(span_num_per_snt, len(spans))):
o_label_spans.append(snt[s:e])
return random.sample(o_label_spans, min(span_num, len(o_label_spans)))
import spacy
from itertools import islice
from dataclasses import MISSING, dataclass
@dataclass
class Term2CatBasedDatasetArgs:
label_balance: bool = False
pass
def load_term2cat_based_span_classification_dataset(
term2cat: Dict, unlabelled_corpus: Dataset, args: Term2CatBasedDatasetArgs
):
tokenizer = spacy.load("en_core_sci_sm")
tokenizer.remove_pipe("ner")
dataset = {"tokens": [], "start": [], "end": [], "label": []}
label_names = ["O"] + sorted(set(term2cat.values()))
dict_label_count = Counter(term2cat.values())
if args.label_balance:
over_sampling_ratio = {
l: dict_label_count.most_common()[0][1] / dict_label_count[l]
for l in dict_label_count
}
else:
over_sampling_ratio = {l: 1 for l in dict_label_count}
for term, cat in tqdm(term2cat.items()):
osr = over_sampling_ratio[cat]
tokenized_terms = tokenizer(term)
while True:
if 0 < osr < 1:
if osr > random.random():
break
elif osr <= 0:
break
dataset["tokens"].append([w.text for w in tokenized_terms])
dataset["start"].append(0)
dataset["end"].append(len(tokenized_terms))
dataset["label"].append(label_names.index(cat))
osr -= 1
if args.label_balance:
span_num = dict_label_count.most_common()[0][1]
else:
span_num = sum(dict_label_count.values())
o_labeled_spans = load_o_label_spans(unlabelled_corpus, span_num)
for span in o_labeled_spans:
dataset["tokens"].append(span)
dataset["start"].append(0)
dataset["end"].append(len(span))
dataset["label"].append(label_names.index("O"))
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"start": datasets.Value("int32"),
"end": datasets.Value("int32"),
"label": datasets.ClassLabel(names=label_names),
}
)
# new_dataset_dictに追加
return Dataset.from_dict(dataset, features=features)
def split_span_classification_dataset(datasets: Dataset):
features = datasets.features
split_num = int(len(datasets) * 0.9)
splitted_datasets = dict()
from random import shuffle
indexes = list(range(len(datasets)))
shuffle(indexes)
splitted_datasets["train"] = Dataset.from_dict(
datasets.__getitem__(indexes[:split_num]), features=features
)
splitted_datasets["validation"] = Dataset.from_dict(
datasets.__getitem__(indexes[split_num:]), features=features
)
return DatasetDict(splitted_datasets)
def join_span_classification_datasets(
main_datasets: DatasetDict, sub_datasets: DatasetDict
):
pass
new_dataset_dict = dict()
for key, split in main_datasets.items():
if key in sub_datasets:
sub_split = sub_datasets[key]
new_dataset = {feature: split[feature] for feature in split.features}
main_label_names = split.features["label"].names
sub_label_names = sub_split.features["label"].names
assert len(main_label_names) == len(sub_label_names)
assert len(split.features) == len(sub_split.features)
label_map = {
|
if feature == "label":
new_dataset[feature] += [label_map[l] for l in sub_split[feature]]
else:
new_dataset[feature] += sub_split[feature]
new_dataset_dict[key] = Dataset.from_dict(new_dataset, split.features)
else:
new_dataset_dict[key] = split
return DatasetDict(new_dataset_dict)
def log_label_ratio(msc_datasets: DatasetDict):
table = prettytable.PrettyTable(["Label", "Count", "Ratio (%)"])
pass
train_dataset = msc_datasets["train"]
label_names = train_dataset.features["labels"].feature.names
c = Counter([label for snt in train_dataset["labels"] for label in snt])
label_sum = sum(c.values())
for lid, count in c.most_common():
table.add_row([label_names[lid], count, "%.2f" % (100 * count / label_sum)])
logger.info(table.get_string())
def translate_into_msc_datasets(
ner_datasets: DatasetDict,
msc_args: MSCConfig,
enumerator: Chunker,
):
input_hash = {k: v._fingerprint for k, v in ner_datasets.items()}
input_hash["msc_args"] = str(msc_args)
input_hash["enumerator"] = str(enumerator.config)
output_dir = Path(get_original_cwd()).joinpath(
"data", "buffer", md5(str(input_hash).encode()).hexdigest()
)
logger.info("output_dir of msc_datasets: " + str(output_dir))
if not output_dir.exists():
msc_datasets = ner_datasets_to_span_classification_datasets(
ner_datasets, msc_args, enumerator
)
msc_datasets.save_to_disk(output_dir)
else:
msc_datasets = DatasetDict.load_from_disk(output_dir)
log_label_ratio(msc_datasets)
return msc_datasets
|
i: sub_label_names.index(l) for i, l in enumerate(main_label_names)
}
for feature in sub_split.features:
|
my-observation.module.ts
|
import { NgModule } from '@angular/core';
import { IonicPageModule } from 'ionic-angular';
|
import { MyObservationPage } from './my-observation';
import { ComponentsModule } from '../../../components/components.module';
import { TranslateModule } from '@ngx-translate/core';
import { AddObservationFormPageModule } from '../add-observation-form/add-observation-form.module';
@NgModule({
declarations: [
MyObservationPage,
],
imports: [
ComponentsModule,
AddObservationFormPageModule,
TranslateModule,
IonicPageModule.forChild(MyObservationPage),
],
})
export class MyObservationPageModule {}
| |
generate.go
|
// Tencent is pleased to support the open source community by making TKEStack
// available.
//
// Copyright (C) 2012-2020 Tencent. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at
//
// https://opensource.org/licenses/Apache-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
package notebook
import (
"fmt"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/tkestack/elastic-jupyter-operator/api/v1alpha1"
)
const (
defaultImage = "jupyter/base-notebook:python-3.9.7"
defaultContainerName = "notebook"
defaultPortName = "notebook"
defaultPort = 8888
LabelNotebook = "notebook"
LabelNS = "namespace"
argumentGatewayURL = "--gateway-url"
argumentNotebookToken = "--NotebookApp.token"
argumentNotebookPassword = "--NotebookApp.password"
)
type generator struct {
nb *v1alpha1.JupyterNotebook
}
// newGenerator creates a new Generator.
func newGenerator(nb *v1alpha1.JupyterNotebook) (
*generator, error) {
if nb == nil {
return nil, fmt.Errorf("the notebook is null")
}
g := &generator{
nb: nb,
}
return g, nil
}
func (g generator) DesiredDeploymentWithoutOwner() (*appsv1.Deployment, error) {
if g.nb.Spec.Template == nil && g.nb.Spec.Gateway == nil {
return nil, fmt.Errorf("no gateway and template applied")
}
podSpec := v1.PodSpec{}
podLabels := g.labels()
podAnnotations := g.annotations()
labels := g.labels()
annotations := g.annotations()
selector := &metav1.LabelSelector{
MatchLabels: labels,
}
terminationGracePeriodSeconds := int64(30)
if g.nb.Spec.Template != nil {
if g.nb.Spec.Template.Labels != nil {
for k, v := range g.nb.Spec.Template.Labels {
podLabels[k] = v
|
podAnnotations[k] = v
}
}
podSpec = completePodSpec(&g.nb.Spec.Template.Spec)
} else {
podSpec = v1.PodSpec{
Containers: []v1.Container{
{
Name: defaultContainerName,
Image: defaultImage,
ImagePullPolicy: v1.PullIfNotPresent,
TerminationMessagePath: v1.TerminationMessagePathDefault,
TerminationMessagePolicy: v1.TerminationMessageReadFile,
Args: []string{
"start-notebook.sh",
},
Ports: []v1.ContainerPort{
{
Name: defaultPortName,
ContainerPort: defaultPort,
Protocol: v1.ProtocolTCP,
},
},
},
},
RestartPolicy: v1.RestartPolicyAlways,
TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
DNSPolicy: v1.DNSClusterFirst,
SecurityContext: &v1.PodSecurityContext{},
SchedulerName: v1.DefaultSchedulerName,
}
}
replicas := int32(1)
revisionHistoryLimit := int32(10)
progressDeadlineSeconds := int32(600)
maxUnavailable := intstr.FromInt(25)
d := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Namespace: g.nb.Namespace,
Name: g.nb.Name,
Labels: labels,
Annotations: annotations,
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: selector,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
Annotations: podAnnotations,
},
Spec: podSpec,
},
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.DeploymentStrategyType(appsv1.RollingUpdateDaemonSetStrategyType),
RollingUpdate: &appsv1.RollingUpdateDeployment{
MaxUnavailable: &maxUnavailable,
MaxSurge: &maxUnavailable,
},
},
RevisionHistoryLimit: &revisionHistoryLimit,
ProgressDeadlineSeconds: &progressDeadlineSeconds,
},
}
if g.nb.Spec.Gateway != nil {
gatewayURL := fmt.Sprintf("http://%s.%s:%d",
g.nb.Spec.Gateway.Name, g.nb.Spec.Gateway.Namespace, defaultPort)
d.Spec.Template.Spec.Containers[0].Args = append(
d.Spec.Template.Spec.Containers[0].Args, argumentGatewayURL, gatewayURL)
}
// Set the auth configuration to notebook instance.
if g.nb.Spec.Auth != nil {
auth := g.nb.Spec.Auth
// Set the token and password to empty.
if auth.Mode == v1alpha1.ModeJupyterAuthDisable {
d.Spec.Template.Spec.Containers[0].Args = append(
d.Spec.Template.Spec.Containers[0].Args,
argumentNotebookToken, "",
argumentNotebookPassword, "",
)
} else {
if auth.Token != nil {
d.Spec.Template.Spec.Containers[0].Args = append(
d.Spec.Template.Spec.Containers[0].Args,
argumentNotebookToken, *auth.Token,
)
}
if auth.Password != nil {
d.Spec.Template.Spec.Containers[0].Args = append(
d.Spec.Template.Spec.Containers[0].Args,
argumentNotebookPassword, *auth.Password,
)
}
}
}
return d, nil
}
func (g generator) labels() map[string]string {
return map[string]string{
LabelNS: g.nb.Namespace,
LabelNotebook: g.nb.Name,
}
}
func (g generator) annotations() map[string]string {
return map[string]string{}
}
func completePodSpec(old *v1.PodSpec) v1.PodSpec {
new := old.DeepCopy()
for i := range new.Containers {
if new.Containers[i].TerminationMessagePath == "" {
new.Containers[i].TerminationMessagePath = v1.TerminationMessagePathDefault
}
if new.Containers[i].TerminationMessagePolicy == v1.TerminationMessagePolicy("") {
new.Containers[i].TerminationMessagePolicy = v1.TerminationMessageReadFile
}
if new.Containers[i].ImagePullPolicy == v1.PullPolicy("") {
new.Containers[i].ImagePullPolicy = v1.PullIfNotPresent
}
}
if new.RestartPolicy == v1.RestartPolicy("") {
new.RestartPolicy = v1.RestartPolicyAlways
}
if new.TerminationGracePeriodSeconds == nil {
d := int64(v1.DefaultTerminationGracePeriodSeconds)
new.TerminationGracePeriodSeconds = &d
}
if new.DNSPolicy == v1.DNSPolicy("") {
new.DNSPolicy = v1.DNSClusterFirst
}
if new.SecurityContext == nil {
new.SecurityContext = &v1.PodSecurityContext{}
}
if new.SchedulerName == "" {
new.SchedulerName = v1.DefaultSchedulerName
}
return *new
}
|
}
}
if g.nb.Spec.Template.Annotations != nil {
for k, v := range g.nb.Spec.Template.Annotations {
|
txn_config.py
|
from lusidtools.lpt import lpt
from lusidtools.lpt import lse
from lusidtools.lpt import stdargs
from lusidtools.lpt import txn_config_yaml as tcy
TOOLNAME = "txn_cfg"
TOOLTIP = "Get/Set the transaction configuration"
def parse(extend=None, args=None):
return (
stdargs.Parser(
"Get/Set transaction configuration", ["filename", "limit", "NODFQ"]
)
.add(
"action",
choices=("get", "set", "try"),
help="get or set the config. 'Try' can be used to validate a custom encoding",
)
.add("--raw", action="store_true", help="use raw (non custom) encoding")
.add("--json", action="store_true", help="display the json to be sent")
.add("--group", action="store_true", help="set a single group")
.add(
"--force",
action="store_true",
help="set a single group, remove existing aliases for the group",
)
.extend(extend)
.parse(args)
)
def validate_group(txn_types, group):
for txn in txn_types:
for alias in txn.aliases:
assert alias.transaction_group == group, "More than one group in the list"
def rem_groups(txn_types_old, group, arg):
def still_valid(tt):
for cand in tt.aliases:
if cand.transaction_group != group:
return True
if arg is not True:
raise AssertionError(
"Existing group detected, use '--force' to remove them"
)
return False
def clear_out_group(tt):
check = len(tt.aliases)
tt.aliases = [cand for cand in tt.aliases if cand.transaction_group != group]
if len(tt.aliases) != check and arg != True:
raise AssertionError(
"Existing group detected, use '--force' to remove them"
)
return tt
return [clear_out_group(t) for t in txn_types_old if still_valid(t)]
def
|
(txn_types_old, txn_types, arg):
group = txn_types[0].aliases[0].transaction_group
validate_group(txn_types, group)
txn_types_clean = rem_groups(txn_types_old, group, arg)
txn_types += txn_types_clean
return txn_types
def process_args(api, args):
y = tcy.TxnConfigYaml(api.models)
if args.action == "get":
def get_success(result):
y.dump(
y.TransactionSetConfigurationDataNoLinks(
result.content.transaction_configs, result.content.side_definitions
),
args.filename,
args.raw,
)
return None
return api.call.list_configuration_transaction_types().bind(get_success)
if args.action == "try":
ffs = y.load(args.filename)
y.dump(ffs, "{}-try".format(args.filename))
if args.action == "set":
def set_success(result):
print(y.get_yaml(result.content))
return None
if args.group:
txn_types = y.load(args.filename)
result = api.call.list_configuration_transaction_types()
if result.right is not None:
txn_types_old = result.right.content
else:
raise ValueError("Api call did not return correct result")
txn_types = y.load_update_str(
y.get_yaml(merge_sets(txn_types_old, txn_types, args.force))
)
else:
txn_types = y.load_update(args.filename)
# y.dump(ffs,"{}-set".format(args.filename),True)
if args.json:
print(txn_types)
return None
else:
return api.call.set_configuration_transaction_types(types=txn_types).bind(
set_success
)
# Standalone tool
def main(parse=parse):
lpt.standard_flow(parse, lse.connect, process_args)
|
merge_sets
|
jest.setupAfterEnv.ts
|
import isFunction from 'isFunction';
import './globals.d';
// eslint-disable-next-line complexity
const isDeepCopyOf = (
source: any,
clone: any
): { pass: boolean; message: () => string } => {
const queue = [[source, clone]];
outer: while (queue.length) {
// @ts-expect-error - ts thinks it may be undefined, but it's unlikely, and will fail the tests anyway if it fails.
const [source, clone] = queue.shift();
if (!source || typeof source !== 'object') {
if (!isFunction(clone)) {
if (clone !== source) {
return {
pass: false,
message: () => 'Source and clone are not identical',
};
}
}
continue;
}
if (clone === source) {
return {
pass: false,
message: () =>
`Source and clone are the same object. Expected a deep copy. ${JSON.stringify(
source
)}===${JSON.stringify(clone)}`,
};
}
|
if (
(clone && !source) ||
(source && !clone) ||
typeof source !== typeof clone
) {
return {
pass: false,
message: () =>
`Source and clone are not of the same type: ${JSON.stringify(
source
)} does not equal ${JSON.stringify(clone)}`,
};
}
if (Array.isArray(source)) {
// Short circuit
if (!Array.isArray(clone) || source.length !== clone.length) {
return {
pass: false,
message: () =>
`source and clone arrays are not identical. ${JSON.stringify(
source
)} does not equal ${JSON.stringify(clone)}`,
};
}
source.forEach((_, i) => {
queue.push([source[i], clone[i]]);
});
} else if (typeof source === 'object') {
Object.keys(source).forEach(key => queue.push([source[key], clone[key]]));
}
continue outer;
}
return { pass: true, message: () => 'success' };
};
expect.extend({
isDeepCopyOf,
});
|
// Short circuit
|
facturacion-routing.module.ts
|
import { Routes, RouterModule } from '@angular/router';
const routes: Routes = [];
@NgModule({
imports: [RouterModule.forChild(routes)],
exports: [RouterModule]
})
export class FacturacionRoutingModule { }
|
import { NgModule } from '@angular/core';
|
|
main.rs
|
use configuration_bdd::lire_configuration;
use lecture_notifications::demarrer;
use native_tls::{Certificate, Identity, TlsConnector};
use postgres_native_tls::MakeTlsConnector;
use simple_signal::{self, Signal};
use std::fs;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio_postgres::Error;
mod configuration_bdd;
mod lecture_notifications;
#[tokio::main]
async fn main() -> Result<(), Error>
|
{
let operationnel = Arc::new(AtomicBool::new(true));
let r = operationnel.clone();
simple_signal::set_handler(&[Signal::Int, Signal::Term], move |signal_recu| {
println!("Signal reçu : '{:?}'", signal_recu);
r.store(false, Ordering::SeqCst);
});
lire_configuration();
let configuration_bdd = lire_configuration();
let mut connector_builder = TlsConnector::builder();
//fichier crt du serveur de base de données
let cert = fs::read(configuration_bdd.certificat_serveur.to_owned()).unwrap();
let cert = Certificate::from_pem(&cert).unwrap();
connector_builder.add_root_certificate(cert);
//fichier pfx et mot de passe du fichier pfx
if let Some(ref certificat_client) = configuration_bdd.certificat_client {
if let Some(ref mot_de_passe_certificat_client) =
configuration_bdd.mot_de_passe_certificat_client
{
let certificat_client = fs::read(certificat_client.to_owned()).unwrap();
let certificat_client =
Identity::from_pkcs12(&certificat_client, &mot_de_passe_certificat_client).unwrap();
connector_builder.identity(certificat_client);
}
}
let connecteur_tls = connector_builder.build().unwrap();
let connector = MakeTlsConnector::new(connecteur_tls);
demarrer(&operationnel, configuration_bdd, connector).await?;
Ok(())
}
|
|
messages.js
|
import { defineMessages } from 'react-intl';
export default defineMessages({
title: 'There was an error in your request:',
|
});
|
|
ConvexHull2.js
|
// Copyright 2013-2020, University of Colorado Boulder
/**
* Construction of 2D convex hulls from a list of points.
*
* For example:
* #begin canvasExample grahamScan 256x128
* #on
* var points = _.range( 50 ).map( function() {
* return new dot.Vector2( 5 + ( 256 - 10 ) * Math.random(), 5 + ( 128 - 10 ) * Math.random() );
* } );
* var hullPoints = dot.ConvexHull2.grahamScan( points, false );
* #off
* context.beginPath();
* hullPoints.forEach( function( point ) {
* context.lineTo( point.x, point.y );
* } );
* context.closePath();
* context.fillStyle = '#eee';
* context.fill();
* context.strokeStyle = '#f00';
* context.stroke();
*
* context.beginPath();
* points.forEach( function( point ) {
* context.arc( point.x, point.y, 2, 0, Math.PI * 2, false );
* context.closePath();
* } );
* context.fillStyle = '#00f';
* context.fill();
* #end canvasExample
*
* @author Jonathan Olson <[email protected]>
*/
import dot from './dot.js';
/**
* counter-clockwise turn if > 0, clockwise turn if < 0, collinear if === 0.
* @param {Vector2} p1
* @param {Vector2} p2
* @param {Vector2} p3
* @returns {number}
*/
function ccw( p1, p2, p3 ) {
return p2.minus( p1 ).crossScalar( p3.minus( p1 ) );
}
const ConvexHull2 = {
// TODO testing: all collinear, multiple ways of having same angle, etc.
/**
* Given multiple points, this performs a Graham Scan (http://en.wikipedia.org/wiki/Graham_scan) to identify an
* ordered list of points which define the minimal polygon that contains all of the points.
* @public
*
* @param {Array.<Vector2>} points
* @param {boolean} includeCollinear - If a point is along an edge of the convex hull (not at one of its vertices),
* should it be included?
* @returns {Array.<Vector2>}
*/
grahamScan: ( points, includeCollinear ) => {
if ( points.length <= 2 ) {
return points;
}
// find the point 'p' with the lowest y value
let minY = Number.POSITIVE_INFINITY;
let p = null;
_.each( points, point => {
if ( point.y <= minY ) {
// if two points have the same y value, take the one with the lowest x
if ( point.y === minY && p ) {
if ( point.x < p.x ) {
p = point;
}
}
else {
minY = point.y;
p = point;
}
}
} );
// sorts the points by their angle. Between 0 and PI
points = _.sortBy( points, point => {
return point.minus( p ).angle;
} );
// remove p from points (relies on the above statement making a defensive copy)
points.splice( _.indexOf( points, p ), 1 );
// our result array
const result = [ p ];
_.each( points, point => {
// ignore points equal to our starting point
if ( p.x === point.x && p.y === point.y ) { return; }
function isRightTurn() {
|
return false;
}
const cross = ccw( result[ result.length - 2 ], result[ result.length - 1 ], point );
return includeCollinear ? ( cross < 0 ) : ( cross <= 0 );
}
while ( isRightTurn() ) {
result.pop();
}
result.push( point );
} );
return result;
}
};
dot.register( 'ConvexHull2', ConvexHull2 );
export default ConvexHull2;
|
if ( result.length < 2 ) {
|
lexer_test.go
|
package lexer
import (
"reflect"
"testing"
)
func
|
(t *testing.T) {
type args struct {
input string
}
tests := []struct {
name string
args args
want *Lexer
}{
{
name: "Setup Correctly",
args: args{
input: "some_identifier",
},
want: &Lexer{
buffer: []rune("some_identifier"),
ch: 's',
position: 0,
nextPosition: 1,
currentChar: 1,
currentLine: 0,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := New(tt.args.input); !reflect.DeepEqual(got, tt.want) {
t.Errorf("New() = %v, want %v", got, tt.want)
}
})
}
}
|
TestNew
|
Monit.js
|
// pm2-htop
// Library who interacts with PM2 to display processes resources in htop way
// by Strzelewicz Alexandre
var multimeter = require('pm2-multimeter');
var os = require('os');
var p = require('path');
var chalk = require('chalk');
var CliUx = require('./CliUx');
var debug = require('debug')('pm2:monit');
// Cst for light programs
const RATIO_T1 = Math.floor(os.totalmem() / 500);
// Cst for medium programs
const RATIO_T2 = Math.floor(os.totalmem() / 50);
// Cst for heavy programs
const RATIO_T3 = Math.floor(os.totalmem() / 5);
// Cst for heavy programs
const RATIO_T4 = Math.floor(os.totalmem());
var Monit = {};
//helper to get bars.length (num bars printed)
Object.size = function(obj) {
var size = 0, key;
for (key in obj) {
if (obj.hasOwnProperty(key)) size++;
}
return size;
};
/**
* Reset the monitor through charm, basically \033c
* @param String msg optional message to show
* @return Monit
*/
Monit.reset = function(msg) {
this.multi.charm.reset();
this.multi.write('\x1B[32m⌬ PM2 \x1B[39mmonitoring\x1B[96m (To go further check out https://app.keymetrics.io) \x1B[39m\n\n');
if(msg) {
this.multi.write(msg);
}
this.bars = {};
return this;
}
/**
* Synchronous Monitor init method
* @method init
* @return Monit
*/
Monit.init = function() {
this.multi = multimeter(process);
this.multi.on('^C', this.stop);
this.reset();
return this;
}
/**
* Stops monitor
* @method stop
*/
Monit.stop = function() {
this.multi.charm.destroy();
process.exit(0);
}
/**
* Refresh monitor
* @method refresh
* @param {} processes
* @return this
|
Monit.refresh = function(processes) {
debug('Monit refresh');
if(!processes) {
processes = [];
}
var num = processes.length;
this.num_bars = Object.size(this.bars);
if(num !== this.num_bars) {
debug('Monit addProcesses - actual: %s, new: %s', this.num_bars, num);
return this.addProcesses(processes);
} else {
if(num === 0) {
return;
}
debug('Monit refresh');
var proc;
for(var i = 0; i < num; i++) {
proc = processes[i];
//this is to avoid a print issue when the process is restarted for example
//we might also check for the pid but restarted|restarting will be rendered bad
if(this.bars[proc.pm_id] && proc.pm2_env.status !== this.bars[proc.pm_id].status) {
debug('bars for %s does not exists', proc.pm_id);
this.addProcesses(processes);
break;
}
this.updateBars(proc);
}
}
return this;
}
Monit.addProcess = function(proc, i) {
if(proc.pm_id in this.bars) {
return ;
}
if (proc.monit.error)
throw new Error(JSON.stringify(proc.monit.error));
var process_name = proc.pm2_env.name || p.basename(proc.pm2_env.pm_exec_path);
var status = proc.pm2_env.status == 'online' ? chalk.green.bold('●') : chalk.red.bold('●');
this.multi.write(' ' + status + ' ' + chalk.green.bold(process_name));
this.multi.write('\n');
this.multi.write('[' + proc.pm2_env.pm_id + '] [' + proc.pm2_env.exec_mode + ']\n');
var bar_cpu = this.multi(40, (i * 2) + 3 + i, {
width: 30,
solid: {
text: '|',
foreground: 'white',
background: 'blue'
},
empty: {
text: ' '
}
});
var bar_memory = this.multi(40, (i * 2) + 4 + i, {
width: 30,
solid: {
text: '|',
foreground: 'white',
background: 'red'
},
empty: {
text: ' '
}
});
this.bars[proc.pm_id] = {
memory: bar_memory,
cpu: bar_cpu,
status: proc.pm2_env.status
};
this.updateBars(proc);
this.multi.write('\n');
return this;
}
Monit.addProcesses = function(processes) {
if(!processes) {
processes = [];
}
this.reset();
var num = processes.length;
if(num > 0) {
for(var i = 0; i < num; i++) {
this.addProcess(processes[i], i);
}
} else {
this.reset('No processes to monit');
}
}
// Draw memory bars
/**
* Description
* @method drawRatio
* @param {} bar_memory
* @param {} memory
* @return
*/
Monit.drawRatio = function(bar_memory, memory) {
var scale = 0;
if (memory < RATIO_T1) scale = RATIO_T1;
else if (memory < RATIO_T2) scale = RATIO_T2;
else if (memory < RATIO_T3) scale = RATIO_T3;
else scale = RATIO_T4;
bar_memory.ratio(memory,
scale,
CliUx.bytesToSize(memory, 3));
};
/**
* Updates bars informations
* @param {} proc proc object
* @return this
*/
Monit.updateBars = function(proc) {
if (this.bars[proc.pm_id]) {
if (proc.pm2_env.status !== 'online' || proc.pm2_env.status !== this.bars[proc.pm_id].status) {
this.bars[proc.pm_id].cpu.percent(0, chalk.red(proc.pm2_env.status));
this.drawRatio(this.bars[proc.pm_id].memory, 0, chalk.red(proc.pm2_env.status));
} else if (!proc.monit) {
this.bars[proc.pm_id].cpu.percent(0, chalk.red('No data'));
this.drawRatio(this.bars[proc.pm_id].memory, 0, chalk.red('No data'));
} else {
this.bars[proc.pm_id].cpu.percent(proc.monit.cpu);
this.drawRatio(this.bars[proc.pm_id].memory, proc.monit.memory);
}
}
return this;
}
module.exports = Monit;
|
*/
|
joi-resolver.ts
|
import type { FormErrors } from '../../types';
interface JoiError {
path: (string | number)[];
message: string;
}
interface JoiResults {
success: boolean;
error: {
details: JoiError[];
};
}
|
export function joiResolver(schema: any) {
const _schema: JoiSchema = schema;
return (values: Record<string, any>): FormErrors => {
const parsed = _schema.validate(values, { abortEarly: false });
if (!parsed.error) {
return {};
}
const results = {};
parsed.error.details.forEach((error) => {
results[error.path.join('.')] = error.message;
});
return results;
};
}
|
interface JoiSchema {
validate(values: Record<string, any>, options: { abortEarly: boolean }): JoiResults;
}
|
out_command.go
|
package resource
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/google/go-github/v39/github"
)
type OutCommand struct {
github GitHub
writer io.Writer
}
func NewOutCommand(github GitHub, writer io.Writer) *OutCommand
|
func (c *OutCommand) Run(sourceDir string, request OutRequest) (OutResponse, error) {
params := request.Params
name, err := c.fileContents(filepath.Join(sourceDir, request.Params.NamePath))
if err != nil {
return OutResponse{}, err
}
tag, err := c.fileContents(filepath.Join(sourceDir, request.Params.TagPath))
if err != nil {
return OutResponse{}, err
}
tag = request.Params.TagPrefix + tag
var body string
bodySpecified := false
if request.Params.BodyPath != "" {
bodySpecified = true
body, err = c.fileContents(filepath.Join(sourceDir, request.Params.BodyPath))
if err != nil {
return OutResponse{}, err
}
}
targetCommitish := ""
if request.Params.CommitishPath != "" {
targetCommitish, err = c.fileContents(filepath.Join(sourceDir, request.Params.CommitishPath))
if err != nil {
return OutResponse{}, err
}
}
draft := request.Source.Drafts
prerelease := false
if request.Source.PreRelease == true && request.Source.Release == false {
prerelease = request.Source.PreRelease
}
release := &github.RepositoryRelease{
Name: github.String(name),
TagName: github.String(tag),
Body: github.String(body),
Draft: github.Bool(draft),
Prerelease: github.Bool(prerelease),
TargetCommitish: github.String(targetCommitish),
}
existingReleases, err := c.github.ListReleases()
if err != nil {
return OutResponse{}, err
}
var existingRelease *github.RepositoryRelease
for _, e := range existingReleases {
if e.TagName != nil && *e.TagName == tag {
existingRelease = e
break
}
}
if existingRelease != nil {
releaseAssets, err := c.github.ListReleaseAssets(*existingRelease)
if err != nil {
return OutResponse{}, err
}
existingRelease.Name = github.String(name)
existingRelease.TargetCommitish = github.String(targetCommitish)
existingRelease.Draft = github.Bool(draft)
existingRelease.Prerelease = github.Bool(prerelease)
if bodySpecified {
existingRelease.Body = github.String(body)
} else {
existingRelease.Body = nil
}
for _, asset := range releaseAssets {
fmt.Fprintf(c.writer, "clearing existing asset: %s\n", *asset.Name)
err := c.github.DeleteReleaseAsset(*asset)
if err != nil {
return OutResponse{}, err
}
}
fmt.Fprintf(c.writer, "updating release %s\n", name)
release, err = c.github.UpdateRelease(*existingRelease)
if err != nil {
return OutResponse{}, err
}
} else {
fmt.Fprintf(c.writer, "creating release %s\n", name)
release, err = c.github.CreateRelease(*release)
if err != nil {
return OutResponse{}, err
}
}
for _, fileGlob := range params.Globs {
matches, err := filepath.Glob(filepath.Join(sourceDir, fileGlob))
if err != nil {
return OutResponse{}, err
}
if len(matches) == 0 {
return OutResponse{}, fmt.Errorf("could not find file that matches glob '%s'", fileGlob)
}
for _, filePath := range matches {
err := c.upload(release, filePath)
if err != nil {
return OutResponse{}, err
}
}
}
return OutResponse{
Version: versionFromRelease(release),
Metadata: metadataFromRelease(release, ""),
}, nil
}
func (c *OutCommand) fileContents(path string) (string, error) {
contents, err := ioutil.ReadFile(path)
if err != nil {
return "", err
}
return strings.TrimSpace(string(contents)), nil
}
func (c *OutCommand) upload(release *github.RepositoryRelease, filePath string) error {
fmt.Fprintf(c.writer, "uploading %s\n", filePath)
name := filepath.Base(filePath)
var retryErr error
for i := 0; i < 10; i++ {
file, err := os.Open(filePath)
if err != nil {
return err
}
defer file.Close()
retryErr = c.github.UploadReleaseAsset(*release, name, file)
if retryErr == nil {
break
}
assets, err := c.github.ListReleaseAssets(*release)
if err != nil {
return err
}
for _, asset := range assets {
if asset.Name != nil && *asset.Name == name {
err = c.github.DeleteReleaseAsset(*asset)
if err != nil {
return err
}
break
}
}
}
if retryErr != nil {
return retryErr
}
return nil
}
|
{
return &OutCommand{
github: github,
writer: writer,
}
}
|
ku.js
|
/*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/
|
CKEDITOR.plugins.setLang( 'preview', 'ku', {
preview: 'پێشبینین'
} );
|
|
mod.rs
|
pub mod cmd52;
pub mod cmd53;
| ||
PrevFrameEnumChildTestV3.py
|
# coding: utf-8
def g():
pass
def f():
d1 = {42: 100}
d2 = {'abc': 'fob'}
d3 = {1e1000: d1}
s = set([frozenset([2,3,4])])
class C(object):
abc = 42
def f(self): pass
cinst = C()
class C2(object):
abc = 42
def __init__(self):
self.oar = 100
self.self = self
def __repr__(self):
|
def __hex__(self):
return 'myhex'
def f(self): pass
c2inst = C2()
l = [1, 2, ]
i = 3
g()
f()
|
return 'myrepr'
|
handlers.rs
|
// SPDX-License-Identifier: MIT
// Copyright (C) 2018-present iced project and contributors
use crate::decoder::*;
use crate::instruction_internal;
use crate::*;
use alloc::boxed::Box;
use alloc::vec::Vec;
// SAFETY:
// code: let this = unsafe { &*(self_ptr as *const Self) };
// The first arg (`self_ptr`) to decode() is always the handler itself, cast to a `*const OpCodeHandler`.
// All handlers are `#[repr(C)]` structs so the OpCodeHandler fields are always at the same offsets.
pub(super) type OpCodeHandlerDecodeFn = fn(*const OpCodeHandler, &mut Decoder<'_>, &mut Instruction);
#[allow(trivial_casts)]
#[must_use]
#[inline]
pub(super) fn is_null_instance_handler(handler: *const OpCodeHandler) -> bool {
handler as *const u8 == &NULL_HANDLER as *const _ as *const u8
}
#[rustfmt::skip]
pub(super) static NULL_HANDLER: OpCodeHandler_Invalid = OpCodeHandler_Invalid {
decode: OpCodeHandler_Invalid::decode,
has_modrm: true,
};
#[rustfmt::skip]
pub(super) static INVALID_HANDLER: OpCodeHandler_Invalid = OpCodeHandler_Invalid {
decode: OpCodeHandler_Invalid::decode,
has_modrm: true,
};
#[rustfmt::skip]
pub(super) static INVALID_NO_MODRM_HANDLER: OpCodeHandler_Invalid = OpCodeHandler_Invalid {
decode: OpCodeHandler_Invalid::decode,
has_modrm: false,
};
#[repr(C)]
pub(super) struct OpCodeHandler {
pub(super) decode: OpCodeHandlerDecodeFn,
pub(super) has_modrm: bool,
}
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_Invalid {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
}
impl OpCodeHandler_Invalid {
fn decode(_self_ptr: *const OpCodeHandler, decoder: &mut Decoder<'_>, _instruction: &mut Instruction) {
decoder.set_invalid_instruction();
}
}
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_Simple {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
code: u32,
}
impl OpCodeHandler_Simple {
pub(super) fn new(code: u32) -> Self {
Self { decode: OpCodeHandler_Simple::decode, has_modrm: false, code }
}
pub(super) fn new_modrm(code: u32) -> Self {
Self { decode: OpCodeHandler_Simple::decode, has_modrm: true, code }
}
fn decode(self_ptr: *const OpCodeHandler, _decoder: &mut Decoder<'_>, instruction: &mut Instruction) {
let this = unsafe { &*(self_ptr as *const Self) };
instruction_internal::internal_set_code_u32(instruction, this.code);
}
}
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_Group8x8 {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
table_low: Vec<&'static OpCodeHandler>,
table_high: Vec<&'static OpCodeHandler>,
}
impl OpCodeHandler_Group8x8 {
pub(super) fn new(table_low: Vec<&'static OpCodeHandler>, table_high: Vec<&'static OpCodeHandler>) -> Self {
debug_assert_eq!(table_low.len(), 8);
debug_assert_eq!(table_high.len(), 8);
Self { decode: OpCodeHandler_Group8x8::decode, has_modrm: true, table_low, table_high }
}
fn decode(self_ptr: *const OpCodeHandler, decoder: &mut Decoder<'_>, instruction: &mut Instruction) {
let this = unsafe { &*(self_ptr as *const Self) };
let handler = if decoder.state.mod_ == 3 {
unsafe { *this.table_high.get_unchecked(decoder.state.reg as usize) }
} else {
unsafe { *this.table_low.get_unchecked(decoder.state.reg as usize) }
};
(handler.decode)(handler, decoder, instruction);
}
}
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_Group8x64 {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
table_low: Vec<&'static OpCodeHandler>,
table_high: Vec<&'static OpCodeHandler>,
}
impl OpCodeHandler_Group8x64 {
pub(super) fn new(table_low: Vec<&'static OpCodeHandler>, table_high: Vec<&'static OpCodeHandler>) -> Self {
debug_assert_eq!(table_low.len(), 8);
debug_assert_eq!(table_high.len(), 0x40);
Self { decode: OpCodeHandler_Group8x64::decode, has_modrm: true, table_low, table_high }
}
#[allow(trivial_casts)]
fn decode(self_ptr: *const OpCodeHandler, decoder: &mut Decoder<'_>, instruction: &mut Instruction) {
let this = unsafe { &*(self_ptr as *const Self) };
let mut handler;
if decoder.state.mod_ == 3 {
handler = unsafe { *this.table_high.get_unchecked((decoder.state.modrm & 0x3F) as usize) };
if handler as *const _ as *const u8 == &NULL_HANDLER as *const _ as *const u8 {
handler = unsafe { *this.table_low.get_unchecked(decoder.state.reg as usize) };
}
} else {
handler = unsafe { *this.table_low.get_unchecked(decoder.state.reg as usize) };
}
(handler.decode)(handler, decoder, instruction);
}
}
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_Group {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
group_handlers: Vec<&'static OpCodeHandler>,
}
impl OpCodeHandler_Group {
pub(super) fn new(group_handlers: Vec<&'static OpCodeHandler>) -> Self {
debug_assert_eq!(group_handlers.len(), 8);
Self { decode: OpCodeHandler_Group::decode, has_modrm: true, group_handlers }
}
fn decode(self_ptr: *const OpCodeHandler, decoder: &mut Decoder<'_>, instruction: &mut Instruction) {
let this = unsafe { &*(self_ptr as *const Self) };
let handler = unsafe { *this.group_handlers.get_unchecked(decoder.state.reg as usize) };
(handler.decode)(handler, decoder, instruction);
}
}
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_AnotherTable {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
handlers: Box<[&'static OpCodeHandler; 0x100]>,
}
impl OpCodeHandler_AnotherTable {
#[allow(clippy::unwrap_used)]
pub(super) fn new(handlers: Vec<&'static OpCodeHandler>) -> Self {
let handlers = handlers.into_boxed_slice();
assert!(handlers.len() == 0x100);
// SAFETY: handlers size is verified to be 0x100
let handlers = unsafe { Box::from_raw(Box::into_raw(handlers) as *mut [_; 0x100]) };
Self { decode: OpCodeHandler_AnotherTable::decode, has_modrm: false, handlers }
}
fn decode(self_ptr: *const OpCodeHandler, decoder: &mut Decoder<'_>, instruction: &mut Instruction) {
let this = unsafe { &*(self_ptr as *const Self) };
decoder.decode_table(&this.handlers, instruction);
}
}
#[cfg(any(not(feature = "no_vex"), not(feature = "no_xop"), not(feature = "no_evex")))]
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_MandatoryPrefix2 {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
handlers: [&'static OpCodeHandler; 4],
}
#[cfg(any(not(feature = "no_vex"), not(feature = "no_xop"), not(feature = "no_evex")))]
impl OpCodeHandler_MandatoryPrefix2 {
pub(super) fn new(
has_modrm: bool, handler: *const OpCodeHandler, handler_66: *const OpCodeHandler, handler_f3: *const OpCodeHandler,
handler_f2: *const OpCodeHandler,
) -> Self {
const_assert_eq!(MandatoryPrefixByte::None as u32, 0);
const_assert_eq!(MandatoryPrefixByte::P66 as u32, 1);
const_assert_eq!(MandatoryPrefixByte::PF3 as u32, 2);
const_assert_eq!(MandatoryPrefixByte::PF2 as u32, 3);
debug_assert!(!is_null_instance_handler(handler));
debug_assert!(!is_null_instance_handler(handler_66));
debug_assert!(!is_null_instance_handler(handler_f3));
debug_assert!(!is_null_instance_handler(handler_f2));
let handlers = unsafe { [&*handler, &*handler_66, &*handler_f3, &*handler_f2] };
debug_assert_eq!(handlers[0].has_modrm, has_modrm);
debug_assert_eq!(handlers[1].has_modrm, has_modrm);
debug_assert_eq!(handlers[2].has_modrm, has_modrm);
debug_assert_eq!(handlers[3].has_modrm, has_modrm);
Self { decode: OpCodeHandler_MandatoryPrefix2::decode, has_modrm, handlers: unsafe { [&*handler, &*handler_66, &*handler_f3, &*handler_f2] } }
}
fn decode(self_ptr: *const OpCodeHandler, decoder: &mut Decoder<'_>, instruction: &mut Instruction) {
let this = unsafe { &*(self_ptr as *const Self) };
debug_assert!(
decoder.state.encoding() == EncodingKind::VEX
|| decoder.state.encoding() == EncodingKind::EVEX
|| decoder.state.encoding() == EncodingKind::XOP
);
let handler = unsafe { *this.handlers.get_unchecked(decoder.state.mandatory_prefix as usize) };
(handler.decode)(handler, decoder, instruction);
}
}
#[cfg(any(not(feature = "no_vex"), not(feature = "no_xop"), not(feature = "no_evex")))]
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_W {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
handlers: [&'static OpCodeHandler; 2],
}
#[cfg(any(not(feature = "no_vex"), not(feature = "no_xop"), not(feature = "no_evex")))]
impl OpCodeHandler_W {
pub(super) fn new(handler_w0: *const OpCodeHandler, handler_w1: *const OpCodeHandler) -> Self {
debug_assert!(!is_null_instance_handler(handler_w0));
debug_assert!(!is_null_instance_handler(handler_w1));
Self { decode: OpCodeHandler_W::decode, has_modrm: true, handlers: unsafe { [&*handler_w0, &*handler_w1] } }
}
fn decode(self_ptr: *const OpCodeHandler, decoder: &mut Decoder<'_>, instruction: &mut Instruction) {
let this = unsafe { &*(self_ptr as *const Self) };
debug_assert!(
decoder.state.encoding() == EncodingKind::VEX
|| decoder.state.encoding() == EncodingKind::EVEX
|| decoder.state.encoding() == EncodingKind::XOP
);
const_assert_eq!(StateFlags::W, 0x80);
let index = (decoder.state.flags >> 7) & 1;
let handler = unsafe { *this.handlers.get_unchecked(index as usize) };
(handler.decode)(handler, decoder, instruction);
}
}
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_Bitness {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
handler1632: &'static OpCodeHandler,
handler64: &'static OpCodeHandler,
}
impl OpCodeHandler_Bitness {
pub(super) fn new(handler1632: *const OpCodeHandler, handler64: *const OpCodeHandler) -> Self {
debug_assert!(!is_null_instance_handler(handler1632));
debug_assert!(!is_null_instance_handler(handler64));
Self { decode: OpCodeHandler_Bitness::decode, has_modrm: false, handler1632: unsafe { &*handler1632 }, handler64: unsafe { &*handler64 } }
}
fn decode(self_ptr: *const OpCodeHandler, decoder: &mut Decoder<'_>, instruction: &mut Instruction) {
let this = unsafe { &*(self_ptr as *const Self) };
let handler = if decoder.is64_mode { this.handler64 } else { this.handler1632 };
if handler.has_modrm {
decoder.read_modrm();
}
(handler.decode)(handler, decoder, instruction);
}
}
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_Bitness_DontReadModRM {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
handler1632: &'static OpCodeHandler,
handler64: &'static OpCodeHandler,
}
impl OpCodeHandler_Bitness_DontReadModRM {
pub(super) fn new(handler1632: *const OpCodeHandler, handler64: *const OpCodeHandler) -> Self {
debug_assert!(!is_null_instance_handler(handler1632));
debug_assert!(!is_null_instance_handler(handler64));
Self {
decode: OpCodeHandler_Bitness_DontReadModRM::decode,
has_modrm: true,
handler1632: unsafe { &*handler1632 },
handler64: unsafe { &*handler64 },
}
}
fn decode(self_ptr: *const OpCodeHandler, decoder: &mut Decoder<'_>, instruction: &mut Instruction) {
let this = unsafe { &*(self_ptr as *const Self) };
let handler = if decoder.is64_mode { this.handler64 } else { this.handler1632 };
(handler.decode)(handler, decoder, instruction);
}
}
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_RM {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
reg: &'static OpCodeHandler,
mem: &'static OpCodeHandler,
}
impl OpCodeHandler_RM {
pub(super) fn new(reg: *const OpCodeHandler, mem: *const OpCodeHandler) -> Self {
debug_assert!(!is_null_instance_handler(reg));
debug_assert!(!is_null_instance_handler(mem));
Self { decode: OpCodeHandler_RM::decode, has_modrm: true, reg: unsafe { &*reg }, mem: unsafe { &*mem } }
}
fn decode(self_ptr: *const OpCodeHandler, decoder: &mut Decoder<'_>, instruction: &mut Instruction) {
let this = unsafe { &*(self_ptr as *const Self) };
let handler = if decoder.state.mod_ == 3 { this.reg } else { this.mem };
(handler.decode)(handler, decoder, instruction);
}
}
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_Options1632 {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
default_handler: &'static OpCodeHandler,
infos: [(&'static OpCodeHandler, u32); 2],
info_options: u32,
}
impl OpCodeHandler_Options1632 {
#[allow(trivial_casts)]
pub(super) fn new(default_handler: *const OpCodeHandler, handler1: *const OpCodeHandler, options1: u32) -> Self {
debug_assert!(!is_null_instance_handler(default_handler));
debug_assert!(!is_null_instance_handler(handler1));
Self {
decode: OpCodeHandler_Options1632::decode,
has_modrm: false,
default_handler: unsafe { &*default_handler },
infos: [(unsafe { &*handler1 }, options1), (unsafe { &*(&INVALID_NO_MODRM_HANDLER as *const _ as *const OpCodeHandler) }, 0)],
info_options: options1,
}
}
pub(super) fn new2(
default_handler: *const OpCodeHandler, handler1: *const OpCodeHandler, options1: u32, handler2: *const OpCodeHandler, options2: u32,
) -> Self {
debug_assert!(!is_null_instance_handler(default_handler));
debug_assert!(!is_null_instance_handler(handler1));
debug_assert!(!is_null_instance_handler(handler2));
Self {
decode: OpCodeHandler_Options1632::decode,
has_modrm: false,
default_handler: unsafe { &*default_handler },
infos: [(unsafe { &*handler1 }, options1), (unsafe { &*handler2 }, options2)],
info_options: options1 | options2,
}
}
fn
|
(self_ptr: *const OpCodeHandler, decoder: &mut Decoder<'_>, instruction: &mut Instruction) {
let this = unsafe { &*(self_ptr as *const Self) };
let mut handler = this.default_handler;
let options = decoder.options;
if !decoder.is64_mode && (decoder.options & this.info_options) != 0 {
for info in &this.infos {
if (options & info.1) != 0 {
handler = info.0;
break;
}
}
}
if handler.has_modrm {
decoder.read_modrm();
}
(handler.decode)(handler, decoder, instruction);
}
}
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_Options {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
default_handler: &'static OpCodeHandler,
infos: [(&'static OpCodeHandler, u32); 2],
info_options: u32,
}
impl OpCodeHandler_Options {
#[allow(trivial_casts)]
pub(super) fn new(default_handler: *const OpCodeHandler, handler1: *const OpCodeHandler, options1: u32) -> Self {
debug_assert!(!is_null_instance_handler(default_handler));
debug_assert!(!is_null_instance_handler(handler1));
Self {
decode: OpCodeHandler_Options::decode,
has_modrm: false,
default_handler: unsafe { &*default_handler },
infos: [(unsafe { &*handler1 }, options1), (unsafe { &*(&INVALID_NO_MODRM_HANDLER as *const _ as *const OpCodeHandler) }, 0)],
info_options: options1,
}
}
pub(super) fn new2(
default_handler: *const OpCodeHandler, handler1: *const OpCodeHandler, options1: u32, handler2: *const OpCodeHandler, options2: u32,
) -> Self {
debug_assert!(!is_null_instance_handler(default_handler));
debug_assert!(!is_null_instance_handler(handler1));
debug_assert!(!is_null_instance_handler(handler2));
Self {
decode: OpCodeHandler_Options::decode,
has_modrm: false,
default_handler: unsafe { &*default_handler },
infos: [(unsafe { &*handler1 }, options1), (unsafe { &*handler2 }, options2)],
info_options: options1 | options2,
}
}
fn decode(self_ptr: *const OpCodeHandler, decoder: &mut Decoder<'_>, instruction: &mut Instruction) {
let this = unsafe { &*(self_ptr as *const Self) };
let mut handler = this.default_handler;
let options = decoder.options;
if (decoder.options & this.info_options) != 0 {
for info in &this.infos {
if (options & info.1) != 0 {
handler = info.0;
break;
}
}
}
if handler.has_modrm {
decoder.read_modrm();
}
(handler.decode)(handler, decoder, instruction);
}
}
#[allow(non_camel_case_types)]
#[repr(C)]
pub(super) struct OpCodeHandler_Options_DontReadModRM {
decode: OpCodeHandlerDecodeFn,
has_modrm: bool,
default_handler: &'static OpCodeHandler,
opt_handler: &'static OpCodeHandler,
flags: u32,
}
impl OpCodeHandler_Options_DontReadModRM {
pub(super) fn new(default_handler: *const OpCodeHandler, opt_handler: *const OpCodeHandler, flags: u32) -> Self {
debug_assert!(!is_null_instance_handler(default_handler));
debug_assert!(!is_null_instance_handler(opt_handler));
Self {
decode: OpCodeHandler_Options_DontReadModRM::decode,
has_modrm: true,
default_handler: unsafe { &*default_handler },
opt_handler: unsafe { &*opt_handler },
flags,
}
}
fn decode(self_ptr: *const OpCodeHandler, decoder: &mut Decoder<'_>, instruction: &mut Instruction) {
let this = unsafe { &*(self_ptr as *const Self) };
let mut handler = this.default_handler;
let options = decoder.options;
if (options & this.flags) != 0 {
handler = this.opt_handler;
}
(handler.decode)(handler, decoder, instruction);
}
}
|
decode
|
starsupTable.reducer.js
|
import isEmpty from 'lodash/isEmpty'
import * as ActionTypes from '../constants/starsup.constants'
const DEFAULT_QUEUE_LINE = 0
const initialState = {
levels: [],
structure: {},
clones: 0,
level: 1,
users: {
list: [],
install: null,
query: {
limit: 8,
offset: 0,
},
meta: {
total: 0,
page: 0,
},
},
queue: {
list: [],
query: {
limit: 9,
offset: 0,
line: DEFAULT_QUEUE_LINE,
name: '',
},
meta: {
total: 0,
page: 0,
},
},
loadings: {
installMatrix: false,
installClones: false,
structure: false,
arrange: false,
levels: false,
users: false,
queue: false,
},
errors: {
installMatrix: null,
installClones: null,
structure: null,
arrange: null,
levels: null,
users: null,
queue: null,
},
modals: {
arrange: false,
users: false,
},
}
const EMPTY_MATRIX_OBJ = {
name: null,
attributes: { avatar: null },
}
function upChildren(children) {
return children.map((object, index) => {
if (isEmpty(object)) {
return { ...EMPTY_MATRIX_OBJ, place: index + 1 }
} else {
return {
...object,
children: object.attributes.last ? [] : upChildren(object.children),
}
}
})
}
const starsupTableReducer = (state = initialState, action) => {
switch (action.type) {
case ActionTypes.STARS_UP_LEVELS_REQUEST: {
return {
...state,
loadings: { ...state.loadings, levels: true },
errors: { ...state.errors, levels: null },
}
}
case ActionTypes.STARS_UP_LEVELS_SUCCESS: {
return {
...state,
levels: action.payload.items,
loadings: { ...state.loadings, levels: false },
errors: { ...state.errors, levels: null },
}
}
case ActionTypes.STARS_UP_LEVELS_ERROR: {
return {
...state,
loadings: { ...state.loadings, levels: false },
errors: { ...state.errors, levels: action.payload },
}
}
|
case ActionTypes.STARS_UP_STRUCTURE_REQUEST: {
return {
...state,
loadings: { ...state.loadings, structure: true },
errors: { ...state.errors, structure: null },
}
}
case ActionTypes.STARS_UP_STRUCTURE_SUCCESS: {
const { matrix, count: clones, level } = action.payload
const structure = { ...matrix, children: upChildren(matrix.children) }
return {
...state,
level,
clones,
structure,
loadings: { ...state.loadings, structure: false },
errors: { ...state.errors, structure: null },
}
}
case ActionTypes.STARS_UP_STRUCTURE_ERROR: {
return {
...state,
loadings: { ...state.loadings, structure: false },
errors: { ...state.errors, structure: action.payload },
}
}
case ActionTypes.STARS_UP_USERS_FOR_INSTALL_REQUEST: {
return {
...state,
users: {
...state.users,
meta: { ...state.users.meta, page: 0 },
query: { ...state.users.query, offset: 0 },
},
loadings: { ...state.loadings, users: true },
errors: { ...state.errors, users: null },
}
}
case ActionTypes.STARS_UP_USERS_FOR_INSTALL_SUCCESS: {
const { count, items } = action.payload
return {
...state,
users: {
...state.users,
list: items,
meta: { ...state.users.meta, total: Number(count) },
},
loadings: { ...state.loadings, users: false },
errors: { ...state.errors, users: null },
}
}
case ActionTypes.STARS_UP_USERS_FOR_INSTALL_ERROR: {
return {
...state,
loadings: { ...state.loadings, users: false },
errors: { ...state.errors, users: action.payload },
}
}
case ActionTypes.SET_STARSUP_USERS_FOR_INSTALL_PAGE: {
const page = action.payload.page
return {
...state,
users: {
...state.users,
meta: { ...state.users.meta, page },
query: {
...state.users.query,
offset: state.users.query.limit * page,
},
},
loadings: { ...state.loadings, users: true },
errors: { ...state.errors, users: null },
}
}
case ActionTypes.TOGGLE_STARS_UP_USERS_FOR_INSTALL_MODAL: {
const newValue = action.payload.visible ? action.payload.visible : !state.modals.users
return {
...state,
users: {
...state.users,
install: newValue ? action.payload.installUser : null,
},
modals: { ...state.modals, users: newValue },
}
}
case ActionTypes.STARS_UP_INSTALL_MATRIX_REQUEST: {
return {
...state,
loadings: { ...state.loadings, installMatrix: true },
errors: { ...state.errors, installMatrix: null },
}
}
case ActionTypes.STARS_UP_INSTALL_MATRIX_SUCCESS: {
return {
...state,
loadings: { ...state.loadings, installMatrix: false },
errors: { ...state.errors, installMatrix: null },
}
}
case ActionTypes.STARS_UP_INSTALL_MATRIX_ERROR: {
return {
...state,
loadings: { ...state.loadings, installMatrix: false },
errors: { ...state.errors, installMatrix: action.payload },
}
}
case ActionTypes.STARS_UP_INSTALL_CLONE_REQUEST: {
return {
...state,
loadings: { ...state.loadings, installClones: true },
errors: { ...state.errors, installClones: null },
}
}
case ActionTypes.STARS_UP_INSTALL_CLONE_SUCCESS: {
return {
...state,
loadings: { ...state.loadings, installClones: false },
errors: { ...state.errors, installClones: null },
}
}
case ActionTypes.STARS_UP_INSTALL_CLONE_ERROR: {
return {
...state,
loadings: { ...state.loadings, installClones: false },
errors: { ...state.errors, installClones: action.payload },
}
}
case ActionTypes.STARS_UP_QUEUE_REQUEST: {
const { filter } = action.payload
return {
...state,
queue: {
...state.queue,
meta: { ...state.queue.meta, page: 0, ...filter.meta },
query: {
...state.queue.query,
line: DEFAULT_QUEUE_LINE,
offset: 0,
name: '',
...filter.query,
},
},
loadings: { ...state.loadings, queue: true },
errors: { ...state.errors, queue: null },
}
}
case ActionTypes.STARS_UP_QUEUE_SUCCESS: {
const { count, items } = action.payload
return {
...state,
queue: {
...state.queue,
list: items,
meta: { ...state.queue.meta, total: Number(count) },
},
loadings: { ...state.loadings, queue: false },
errors: { ...state.errors, queue: null },
}
}
case ActionTypes.STARS_UP_QUEUE_ERROR: {
return {
...state,
loadings: { ...state.loadings, queue: false },
errors: { ...state.errors, queue: action.payload },
}
}
case ActionTypes.SET_STARS_UP_QUEUE_LINE: {
const line = action.payload.line
return {
...state,
queue: {
...state.queue,
meta: { ...state.queue.meta, page: 0 },
query: { ...state.queue.query, offset: 0, name: '', line },
},
loadings: { ...state.loadings, queue: true },
errors: { ...state.errors, queue: null },
}
}
case ActionTypes.SET_STARS_UP__QUEUE_SEARCH: {
const name = action.payload.name
return {
...state,
queue: {
...state.queue,
query: { ...state.queue.query, offset: 0, name },
meta: { ...state.queue.meta, page: 0 },
},
isLoading: true,
isError: null,
}
}
case ActionTypes.SET_STARS_UP_QUEUE_PAGE: {
const page = action.payload.page
return {
...state,
queue: {
...state.queue,
meta: { ...state.queue.meta, page },
query: {
...state.queue.query,
offset: state.queue.query.limit * page,
},
},
loadings: { ...state.loadings, queue: true },
errors: { ...state.errors, queue: null },
}
}
case ActionTypes.STARS_UP_ARRANGE_CLONES_REQUEST: {
return {
...state,
loadings: { ...state.loadings, arrange: true },
errors: { ...state.errors, arrange: null },
}
}
case ActionTypes.STARS_UP_ARRANGE_CLONES_SUCCESS: {
return {
...state,
loadings: { ...state.loadings, arrange: false },
errors: { ...state.errors, arrange: null },
}
}
case ActionTypes.STARS_UP_ARRANGE_CLONES_ERROR: {
return {
...state,
loadings: { ...state.loadings, arrange: false },
errors: { ...state.errors, arrange: action.payload },
}
}
case ActionTypes.TOGGLE_STARS_UP_ARRANGE_CLONES_MODAL: {
const newValue = action.payload ? action.payload : !state.modals.arrange
return { ...state, modals: { ...state.modals, arrange: newValue } }
}
default:
return state
}
}
export default starsupTableReducer
| |
validate.go
|
package gonhentai
import (
"errors"
"fmt"
"regexp"
)
// validateDoujinUrl is a function that checks if the url of doujinshi is valid.
func validateDoujinUrl(doujinUrl string) bool {
// Check if it's a valid url
match, _ := regexp.MatchString(
`^https:\/\/(www.)?nhentai\.net\/g\/[0-9]{1,6}[\/]?$`,
doujinUrl,
)
return match
}
// validateNhentaiId is a function that checks if the id of doujinshi is valid.
func validateNhentaiId(doujinId int) bool {
doujinIdString := fmt.Sprintf("%d", doujinId)
// Check if it's a valid nhentai id
ok, _ := regexp.MatchString(`^[0-9]{1,6}$`, doujinIdString)
return ok
}
// validateNhentaiImageUrl is a function that checks if the url of image is valid.
func validateNhentaiImageUrl(url string) bool {
ok, _ := regexp.MatchString(
`^https:\/\/(t|i)\.nhentai\.net\/(galleries|avatars)\/[0-9]+\/?.+\.(png|jpg|gif)?.+$`,
url,
)
return ok
}
// validateNhentaiImageUrl is a function that checks if the url of comment is valid.
func validateCommentUrl(commentUrl string) bool {
ok, _ := regexp.MatchString(
`^https:\/\/(www.)?nhentai\.net\/g\/[0-9]{0,6}\/#comment-[0-9]+\/?$`,
commentUrl,
)
return ok
}
// validateNhentaiImageUrl is a function that checks if the url of user is valid.
func validateUserUrl(userUrl string) bool {
ok, _ := regexp.MatchString(
`^https:\/\/(www.)?nhentai\.net\/users\/[0-9]+\/.+$`,
userUrl,
)
return ok
}
// validateImageType is a function that checks if the image type is valid
func validateImageType(ext string) bool {
if (ext != "jpg") && (ext != "png") && (ext != "gif") {
return false
}
return true
}
// validateQuerySort is a function that checks if the query sort is valid
func validateQuerySort(sort Sort) bool {
if sort != Recent && sort != PopularAllTime && sort != PopularToday && sort != PopularThisWeek {
return false
}
return true
}
// validateFilters is a function that checks if the filters are valid
func validateFilters(filters []Filter) error {
for _, filter := range filters {
if filter.Type == ""
|
}
return nil
}
|
{
return errors.New("Type attribute of filter must be specified")
}
|
db.go
|
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ffldb
import (
"bytes"
"encoding/binary"
"fmt"
"os"
"path/filepath"
"runtime"
"sort"
"sync"
"github.com/pkt-cash/pktd/btcutil/er"
"github.com/pkt-cash/pktd/wire/protocol"
"github.com/pkt-cash/pktd/btcutil"
"github.com/pkt-cash/pktd/chaincfg/chainhash"
"github.com/pkt-cash/pktd/database"
"github.com/pkt-cash/pktd/database/internal/treap"
"github.com/pkt-cash/pktd/goleveldb/leveldb"
"github.com/pkt-cash/pktd/goleveldb/leveldb/comparer"
ldberrors "github.com/pkt-cash/pktd/goleveldb/leveldb/errors"
"github.com/pkt-cash/pktd/goleveldb/leveldb/filter"
"github.com/pkt-cash/pktd/goleveldb/leveldb/iterator"
"github.com/pkt-cash/pktd/goleveldb/leveldb/opt"
"github.com/pkt-cash/pktd/goleveldb/leveldb/util"
"github.com/pkt-cash/pktd/wire"
)
const (
// metadataDbName is the name used for the metadata database.
metadataDbName = "metadata"
// blockHdrSize is the size of a block header. This is simply the
// constant from wire and is only provided here for convenience since
// wire.MaxBlockHeaderPayload is quite long.
blockHdrSize = wire.MaxBlockHeaderPayload
)
var (
// bytesMiB is the number of bytes in a mebibyte.
bytesMiB = 1024 * 1024
// minAvailableSpaceUpdate is the minimum space available (in bytes) to
// allow a write transaction. // TODO(jhj) This should be configurable.
minAvailableSpaceUpdate = 192 * bytesMiB
// byteOrder is the preferred byte order used through the database and
// block files. Sometimes big endian will be used to allow ordered byte
// sortable integer values.
byteOrder = binary.LittleEndian
// bucketIndexPrefix is the prefix used for all entries in the bucket
// index.
bucketIndexPrefix = []byte("bidx")
// curBucketIDKeyName is the name of the key used to keep track of the
// current bucket ID counter.
curBucketIDKeyName = []byte("bidx-cbid")
// metadataBucketID is the ID of the top-level metadata bucket.
// It is the value 0 encoded as an unsigned big-endian uint32.
metadataBucketID = [4]byte{}
// blockIdxBucketID is the ID of the internal block metadata bucket.
// It is the value 1 encoded as an unsigned big-endian uint32.
blockIdxBucketID = [4]byte{0x00, 0x00, 0x00, 0x01}
// blockIdxBucketName is the bucket used internally to track block
// metadata.
blockIdxBucketName = []byte("ffldb-blockidx")
// writeLocKeyName is the key used to store the current write file
// location.
writeLocKeyName = []byte("ffldb-writeloc")
)
// Common error strings.
const (
// errDbNotOpenStr is the text to use for the database.ErrDbNotOpen
// error code.
errDbNotOpenStr = "database is not open"
// errTxClosedStr is the text to use for the database.ErrTxClosed error
// code.
errTxClosedStr = "database tx is closed"
)
// bulkFetchData is allows a block location to be specified along with the
// index it was requested from. This in turn allows the bulk data loading
// functions to sort the data accesses based on the location to improve
// performance while keeping track of which result the data is for.
type bulkFetchData struct {
*blockLocation
replyIndex int
}
// bulkFetchDataSorter implements sort.Interface to allow a slice of
// bulkFetchData to be sorted. In particular it sorts by file and then
// offset so that reads from files are grouped and linear.
type bulkFetchDataSorter []bulkFetchData
// Len returns the number of items in the slice. It is part of the
// sort.Interface implementation.
func (s bulkFetchDataSorter) Len() int {
return len(s)
}
// Swap swaps the items at the passed indices. It is part of the
// sort.Interface implementation.
func (s bulkFetchDataSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Less returns whether the item with index i should sort before the item with
// index j. It is part of the sort.Interface implementation.
func (s bulkFetchDataSorter) Less(i, j int) bool {
if s[i].blockFileNum < s[j].blockFileNum {
return true
}
if s[i].blockFileNum > s[j].blockFileNum {
return false
}
return s[i].fileOffset < s[j].fileOffset
}
// makeDbErr creates a database.Error given a set of arguments.
func makeDbErr(c *er.ErrorCode, desc string, err er.R) er.R {
return c.New(desc, err)
}
// convertErr converts the passed leveldb error into a database error with an
// equivalent error code and the passed description. It also sets the passed
// error as the underlying error.
func convertErr(desc string, ldbErr error) er.R
|
// copySlice returns a copy of the passed slice. This is mostly used to copy
// leveldb iterator keys and values since they are only valid until the iterator
// is moved instead of during the entirety of the transaction.
func copySlice(slice []byte) []byte {
ret := make([]byte, len(slice))
copy(ret, slice)
return ret
}
// cursor is an internal type used to represent a cursor over key/value pairs
// and nested buckets of a bucket and implements the database.Cursor interface.
type cursor struct {
bucket *bucket
dbIter iterator.Iterator
pendingIter iterator.Iterator
currentIter iterator.Iterator
}
// Enforce cursor implements the database.Cursor interface.
var _ database.Cursor = (*cursor)(nil)
// Bucket returns the bucket the cursor was created for.
//
// This function is part of the database.Cursor interface implementation.
func (c *cursor) Bucket() database.Bucket {
// Ensure transaction state is valid.
if err := c.bucket.tx.checkClosed(); err != nil {
return nil
}
return c.bucket
}
// Delete removes the current key/value pair the cursor is at without
// invalidating the cursor.
//
// Returns the following errors as required by the interface contract:
// - ErrIncompatibleValue if attempted when the cursor points to a nested
// bucket
// - ErrTxNotWritable if attempted against a read-only transaction
// - ErrTxClosed if the transaction has already been closed
//
// This function is part of the database.Cursor interface implementation.
func (c *cursor) Delete() er.R {
// Ensure transaction state is valid.
if err := c.bucket.tx.checkClosed(); err != nil {
return err
}
// Error if the cursor is exhausted.
if c.currentIter == nil {
str := "cursor is exhausted"
return makeDbErr(database.ErrIncompatibleValue, str, nil)
}
// Do not allow buckets to be deleted via the cursor.
key := c.currentIter.Key()
if bytes.HasPrefix(key, bucketIndexPrefix) {
str := "buckets may not be deleted from a cursor"
return makeDbErr(database.ErrIncompatibleValue, str, nil)
}
c.bucket.tx.deleteKey(copySlice(key), true)
return nil
}
// skipPendingUpdates skips any keys at the current database iterator position
// that are being updated by the transaction. The forwards flag indicates the
// direction the cursor is moving.
func (c *cursor) skipPendingUpdates(forwards bool) {
for c.dbIter.Valid() {
var skip bool
key := c.dbIter.Key()
if c.bucket.tx.pendingRemove.Has(key) {
skip = true
} else if c.bucket.tx.pendingKeys.Has(key) {
skip = true
}
if !skip {
break
}
if forwards {
c.dbIter.Next()
} else {
c.dbIter.Prev()
}
}
}
// chooseIterator first skips any entries in the database iterator that are
// being updated by the transaction and sets the current iterator to the
// appropriate iterator depending on their validity and the order they compare
// in while taking into account the direction flag. When the cursor is being
// moved forwards and both iterators are valid, the iterator with the smaller
// key is chosen and vice versa when the cursor is being moved backwards.
func (c *cursor) chooseIterator(forwards bool) bool {
// Skip any keys at the current database iterator position that are
// being updated by the transaction.
c.skipPendingUpdates(forwards)
// When both iterators are exhausted, the cursor is exhausted too.
if !c.dbIter.Valid() && !c.pendingIter.Valid() {
c.currentIter = nil
return false
}
// Choose the database iterator when the pending keys iterator is
// exhausted.
if !c.pendingIter.Valid() {
c.currentIter = c.dbIter
return true
}
// Choose the pending keys iterator when the database iterator is
// exhausted.
if !c.dbIter.Valid() {
c.currentIter = c.pendingIter
return true
}
// Both iterators are valid, so choose the iterator with either the
// smaller or larger key depending on the forwards flag.
compare := bytes.Compare(c.dbIter.Key(), c.pendingIter.Key())
if (forwards && compare > 0) || (!forwards && compare < 0) {
c.currentIter = c.pendingIter
} else {
c.currentIter = c.dbIter
}
return true
}
// First positions the cursor at the first key/value pair and returns whether or
// not the pair exists.
//
// This function is part of the database.Cursor interface implementation.
func (c *cursor) First() bool {
// Ensure transaction state is valid.
if err := c.bucket.tx.checkClosed(); err != nil {
return false
}
// Seek to the first key in both the database and pending iterators and
// choose the iterator that is both valid and has the smaller key.
c.dbIter.First()
c.pendingIter.First()
return c.chooseIterator(true)
}
// Last positions the cursor at the last key/value pair and returns whether or
// not the pair exists.
//
// This function is part of the database.Cursor interface implementation.
func (c *cursor) Last() bool {
// Ensure transaction state is valid.
if err := c.bucket.tx.checkClosed(); err != nil {
return false
}
// Seek to the last key in both the database and pending iterators and
// choose the iterator that is both valid and has the larger key.
c.dbIter.Last()
c.pendingIter.Last()
return c.chooseIterator(false)
}
// Next moves the cursor one key/value pair forward and returns whether or not
// the pair exists.
//
// This function is part of the database.Cursor interface implementation.
func (c *cursor) Next() bool {
// Ensure transaction state is valid.
if err := c.bucket.tx.checkClosed(); err != nil {
return false
}
// Nothing to return if cursor is exhausted.
if c.currentIter == nil {
return false
}
// Move the current iterator to the next entry and choose the iterator
// that is both valid and has the smaller key.
c.currentIter.Next()
return c.chooseIterator(true)
}
// Prev moves the cursor one key/value pair backward and returns whether or not
// the pair exists.
//
// This function is part of the database.Cursor interface implementation.
func (c *cursor) Prev() bool {
// Ensure transaction state is valid.
if err := c.bucket.tx.checkClosed(); err != nil {
return false
}
// Nothing to return if cursor is exhausted.
if c.currentIter == nil {
return false
}
// Move the current iterator to the previous entry and choose the
// iterator that is both valid and has the larger key.
c.currentIter.Prev()
return c.chooseIterator(false)
}
// Seek positions the cursor at the first key/value pair that is greater than or
// equal to the passed seek key. Returns false if no suitable key was found.
//
// This function is part of the database.Cursor interface implementation.
func (c *cursor) Seek(seek []byte) bool {
// Ensure transaction state is valid.
if err := c.bucket.tx.checkClosed(); err != nil {
return false
}
// Seek to the provided key in both the database and pending iterators
// then choose the iterator that is both valid and has the larger key.
seekKey := bucketizedKey(c.bucket.id, seek)
c.dbIter.Seek(seekKey)
c.pendingIter.Seek(seekKey)
return c.chooseIterator(true)
}
// rawKey returns the current key the cursor is pointing to without stripping
// the current bucket prefix or bucket index prefix.
func (c *cursor) rawKey() []byte {
// Nothing to return if cursor is exhausted.
if c.currentIter == nil {
return nil
}
return copySlice(c.currentIter.Key())
}
// Key returns the current key the cursor is pointing to.
//
// This function is part of the database.Cursor interface implementation.
func (c *cursor) Key() []byte {
// Ensure transaction state is valid.
if err := c.bucket.tx.checkClosed(); err != nil {
return nil
}
// Nothing to return if cursor is exhausted.
if c.currentIter == nil {
return nil
}
// Slice out the actual key name and make a copy since it is no longer
// valid after iterating to the next item.
//
// The key is after the bucket index prefix and parent ID when the
// cursor is pointing to a nested bucket.
key := c.currentIter.Key()
if bytes.HasPrefix(key, bucketIndexPrefix) {
key = key[len(bucketIndexPrefix)+4:]
return copySlice(key)
}
// The key is after the bucket ID when the cursor is pointing to a
// normal entry.
key = key[len(c.bucket.id):]
return copySlice(key)
}
// rawValue returns the current value the cursor is pointing to without
// stripping without filtering bucket index values.
func (c *cursor) rawValue() []byte {
// Nothing to return if cursor is exhausted.
if c.currentIter == nil {
return nil
}
return copySlice(c.currentIter.Value())
}
// Value returns the current value the cursor is pointing to. This will be nil
// for nested buckets.
//
// This function is part of the database.Cursor interface implementation.
func (c *cursor) Value() []byte {
// Ensure transaction state is valid.
if err := c.bucket.tx.checkClosed(); err != nil {
return nil
}
// Nothing to return if cursor is exhausted.
if c.currentIter == nil {
return nil
}
// Return nil for the value when the cursor is pointing to a nested
// bucket.
if bytes.HasPrefix(c.currentIter.Key(), bucketIndexPrefix) {
return nil
}
return copySlice(c.currentIter.Value())
}
// cursorType defines the type of cursor to create.
type cursorType int
// The following constants define the allowed cursor types.
const (
// ctKeys iterates through all of the keys in a given bucket.
ctKeys cursorType = iota
// ctBuckets iterates through all directly nested buckets in a given
// bucket.
ctBuckets
// ctFull iterates through both the keys and the directly nested buckets
// in a given bucket.
ctFull
)
// cursorFinalizer is either invoked when a cursor is being garbage collected or
// called manually to ensure the underlying cursor iterators are released.
func cursorFinalizer(c *cursor) {
c.dbIter.Release()
c.pendingIter.Release()
}
// newCursor returns a new cursor for the given bucket, bucket ID, and cursor
// type.
//
// NOTE: The caller is responsible for calling the cursorFinalizer function on
// the returned cursor.
func newCursor(b *bucket, bucketID []byte, cursorTyp cursorType) *cursor {
var dbIter, pendingIter iterator.Iterator
switch cursorTyp {
case ctKeys:
keyRange := util.BytesPrefix(bucketID)
dbIter = b.tx.snapshot.NewIterator(keyRange)
pendingKeyIter := newLdbTreapIter(b.tx, keyRange)
pendingIter = pendingKeyIter
case ctBuckets:
// The serialized bucket index key format is:
// <bucketindexprefix><parentbucketid><bucketname>
// Create an iterator for the both the database and the pending
// keys which are prefixed by the bucket index identifier and
// the provided bucket ID.
prefix := make([]byte, len(bucketIndexPrefix)+4)
copy(prefix, bucketIndexPrefix)
copy(prefix[len(bucketIndexPrefix):], bucketID)
bucketRange := util.BytesPrefix(prefix)
dbIter = b.tx.snapshot.NewIterator(bucketRange)
pendingBucketIter := newLdbTreapIter(b.tx, bucketRange)
pendingIter = pendingBucketIter
case ctFull:
fallthrough
default:
// The serialized bucket index key format is:
// <bucketindexprefix><parentbucketid><bucketname>
prefix := make([]byte, len(bucketIndexPrefix)+4)
copy(prefix, bucketIndexPrefix)
copy(prefix[len(bucketIndexPrefix):], bucketID)
bucketRange := util.BytesPrefix(prefix)
keyRange := util.BytesPrefix(bucketID)
// Since both keys and buckets are needed from the database,
// create an individual iterator for each prefix and then create
// a merged iterator from them.
dbKeyIter := b.tx.snapshot.NewIterator(keyRange)
dbBucketIter := b.tx.snapshot.NewIterator(bucketRange)
iters := []iterator.Iterator{dbKeyIter, dbBucketIter}
dbIter = iterator.NewMergedIterator(iters,
comparer.DefaultComparer, true)
// Since both keys and buckets are needed from the pending keys,
// create an individual iterator for each prefix and then create
// a merged iterator from them.
pendingKeyIter := newLdbTreapIter(b.tx, keyRange)
pendingBucketIter := newLdbTreapIter(b.tx, bucketRange)
iters = []iterator.Iterator{pendingKeyIter, pendingBucketIter}
pendingIter = iterator.NewMergedIterator(iters,
comparer.DefaultComparer, true)
}
// Create the cursor using the iterators.
return &cursor{bucket: b, dbIter: dbIter, pendingIter: pendingIter}
}
// bucket is an internal type used to represent a collection of key/value pairs
// and implements the database.Bucket interface.
type bucket struct {
tx *transaction
id [4]byte
}
// Enforce bucket implements the database.Bucket interface.
var _ database.Bucket = (*bucket)(nil)
// bucketIndexKey returns the actual key to use for storing and retrieving a
// child bucket in the bucket index. This is required because additional
// information is needed to distinguish nested buckets with the same name.
func bucketIndexKey(parentID [4]byte, key []byte) []byte {
// The serialized bucket index key format is:
// <bucketindexprefix><parentbucketid><bucketname>
indexKey := make([]byte, len(bucketIndexPrefix)+4+len(key))
copy(indexKey, bucketIndexPrefix)
copy(indexKey[len(bucketIndexPrefix):], parentID[:])
copy(indexKey[len(bucketIndexPrefix)+4:], key)
return indexKey
}
// bucketizedKey returns the actual key to use for storing and retrieving a key
// for the provided bucket ID. This is required because bucketizing is handled
// through the use of a unique prefix per bucket.
func bucketizedKey(bucketID [4]byte, key []byte) []byte {
// The serialized block index key format is:
// <bucketid><key>
bKey := make([]byte, 4+len(key))
copy(bKey, bucketID[:])
copy(bKey[4:], key)
return bKey
}
// Bucket retrieves a nested bucket with the given key. Returns nil if
// the bucket does not exist.
//
// This function is part of the database.Bucket interface implementation.
func (b *bucket) Bucket(key []byte) database.Bucket {
// Ensure transaction state is valid.
if err := b.tx.checkClosed(); err != nil {
return nil
}
// Attempt to fetch the ID for the child bucket. The bucket does not
// exist if the bucket index entry does not exist.
childID := b.tx.fetchKey(bucketIndexKey(b.id, key))
if childID == nil {
return nil
}
childBucket := &bucket{tx: b.tx}
copy(childBucket.id[:], childID)
return childBucket
}
// CreateBucket creates and returns a new nested bucket with the given key.
//
// Returns the following errors as required by the interface contract:
// - ErrBucketExists if the bucket already exists
// - ErrBucketNameRequired if the key is empty
// - ErrIncompatibleValue if the key is otherwise invalid for the particular
// implementation
// - ErrTxNotWritable if attempted against a read-only transaction
// - ErrTxClosed if the transaction has already been closed
//
// This function is part of the database.Bucket interface implementation.
func (b *bucket) CreateBucket(key []byte) (database.Bucket, er.R) {
// Ensure transaction state is valid.
if err := b.tx.checkClosed(); err != nil {
return nil, err
}
// Ensure the transaction is writable.
if !b.tx.writable {
str := "create bucket requires a writable database transaction"
return nil, makeDbErr(database.ErrTxNotWritable, str, nil)
}
// Ensure a key was provided.
if len(key) == 0 {
str := "create bucket requires a key"
return nil, makeDbErr(database.ErrBucketNameRequired, str, nil)
}
// Ensure bucket does not already exist.
bidxKey := bucketIndexKey(b.id, key)
if b.tx.hasKey(bidxKey) {
str := "bucket already exists"
return nil, makeDbErr(database.ErrBucketExists, str, nil)
}
// Find the appropriate next bucket ID to use for the new bucket. In
// the case of the special internal block index, keep the fixed ID.
var childID [4]byte
if b.id == metadataBucketID && bytes.Equal(key, blockIdxBucketName) {
childID = blockIdxBucketID
} else {
var err er.R
childID, err = b.tx.nextBucketID()
if err != nil {
return nil, err
}
}
// Add the new bucket to the bucket index.
if err := b.tx.putKey(bidxKey, childID[:]); err != nil {
return nil, err
}
return &bucket{tx: b.tx, id: childID}, nil
}
// CreateBucketIfNotExists creates and returns a new nested bucket with the
// given key if it does not already exist.
//
// Returns the following errors as required by the interface contract:
// - ErrBucketNameRequired if the key is empty
// - ErrIncompatibleValue if the key is otherwise invalid for the particular
// implementation
// - ErrTxNotWritable if attempted against a read-only transaction
// - ErrTxClosed if the transaction has already been closed
//
// This function is part of the database.Bucket interface implementation.
func (b *bucket) CreateBucketIfNotExists(key []byte) (database.Bucket, er.R) {
// Ensure transaction state is valid.
if err := b.tx.checkClosed(); err != nil {
return nil, err
}
// Ensure the transaction is writable.
if !b.tx.writable {
str := "create bucket requires a writable database transaction"
return nil, makeDbErr(database.ErrTxNotWritable, str, nil)
}
// Return existing bucket if it already exists, otherwise create it.
if bucket := b.Bucket(key); bucket != nil {
return bucket, nil
}
return b.CreateBucket(key)
}
// DeleteBucket removes a nested bucket with the given key.
//
// Returns the following errors as required by the interface contract:
// - ErrBucketNotFound if the specified bucket does not exist
// - ErrTxNotWritable if attempted against a read-only transaction
// - ErrTxClosed if the transaction has already been closed
//
// This function is part of the database.Bucket interface implementation.
func (b *bucket) DeleteBucket(key []byte) er.R {
// Ensure transaction state is valid.
if err := b.tx.checkClosed(); err != nil {
return err
}
// Ensure the transaction is writable.
if !b.tx.writable {
str := "delete bucket requires a writable database transaction"
return makeDbErr(database.ErrTxNotWritable, str, nil)
}
// Attempt to fetch the ID for the child bucket. The bucket does not
// exist if the bucket index entry does not exist. In the case of the
// special internal block index, keep the fixed ID.
bidxKey := bucketIndexKey(b.id, key)
childID := b.tx.fetchKey(bidxKey)
if childID == nil {
str := fmt.Sprintf("bucket %q does not exist", key)
return makeDbErr(database.ErrBucketNotFound, str, nil)
}
// Remove all nested buckets and their keys.
childIDs := [][]byte{childID}
for len(childIDs) > 0 {
childID = childIDs[len(childIDs)-1]
childIDs = childIDs[:len(childIDs)-1]
// Delete all keys in the nested bucket.
keyCursor := newCursor(b, childID, ctKeys)
for ok := keyCursor.First(); ok; ok = keyCursor.Next() {
b.tx.deleteKey(keyCursor.rawKey(), false)
}
cursorFinalizer(keyCursor)
// Iterate through all nested buckets.
bucketCursor := newCursor(b, childID, ctBuckets)
for ok := bucketCursor.First(); ok; ok = bucketCursor.Next() {
// Push the id of the nested bucket onto the stack for
// the next iteration.
childID := bucketCursor.rawValue()
childIDs = append(childIDs, childID)
// Remove the nested bucket from the bucket index.
b.tx.deleteKey(bucketCursor.rawKey(), false)
}
cursorFinalizer(bucketCursor)
}
// Remove the nested bucket from the bucket index. Any buckets nested
// under it were already removed above.
b.tx.deleteKey(bidxKey, true)
return nil
}
// Cursor returns a new cursor, allowing for iteration over the bucket's
// key/value pairs and nested buckets in forward or backward order.
//
// You must seek to a position using the First, Last, or Seek functions before
// calling the Next, Prev, Key, or Value functions. Failure to do so will
// result in the same return values as an exhausted cursor, which is false for
// the Prev and Next functions and nil for Key and Value functions.
//
// This function is part of the database.Bucket interface implementation.
func (b *bucket) Cursor() database.Cursor {
// Ensure transaction state is valid.
if err := b.tx.checkClosed(); err != nil {
return &cursor{bucket: b}
}
// Create the cursor and setup a runtime finalizer to ensure the
// iterators are released when the cursor is garbage collected.
c := newCursor(b, b.id[:], ctFull)
runtime.SetFinalizer(c, cursorFinalizer)
return c
}
// ForEachBeginningWith invokes the passed function with every key/value pair
// in the bucket whose key is greater than or equal to the passed argument beginKey.
// This does not include nested buckets or the key/value pairs within those
// nested buckets.
//
// WARNING: It is not safe to mutate data while iterating with this method.
// Doing so may cause the underlying cursor to be invalidated and return
// unexpected keys and/or values.
//
// Returns the following errors as required by the interface contract:
// - ErrTxClosed if the transaction has already been closed
//
// NOTE: The values returned by this function are only valid during a
// transaction. Attempting to access them after a transaction has ended will
// likely result in an access violation.
//
// This function is part of the database.Bucket interface implementation.
func (b *bucket) ForEachBeginningWith(beginKey []byte, fn func(k, v []byte) er.R) er.R {
// Ensure transaction state is valid.
if err := b.tx.checkClosed(); err != nil {
return err
}
// Invoke the callback for each cursor item. Return the error returned
// from the callback when it is non-nil.
c := newCursor(b, b.id[:], ctKeys)
defer cursorFinalizer(c)
var ok bool
if len(beginKey) > 0 {
// We don't really care if the key exists or not, if it doesn't
// then the caller will find out soon enough when
ok = c.Seek(beginKey)
} else {
ok = c.First()
}
for ; ok; ok = c.Next() {
if err := fn(c.Key(), c.Value()); err != nil {
return err
}
}
return nil
}
// ForEach invokes the passed function with every key/value pair in the bucket.
// This does not include nested buckets or the key/value pairs within those
// nested buckets.
//
// WARNING: It is not safe to mutate data while iterating with this method.
// Doing so may cause the underlying cursor to be invalidated and return
// unexpected keys and/or values.
//
// Returns the following errors as required by the interface contract:
// - ErrTxClosed if the transaction has already been closed
//
// NOTE: The values returned by this function are only valid during a
// transaction. Attempting to access them after a transaction has ended will
// likely result in an access violation.
//
// This function is part of the database.Bucket interface implementation.
func (b *bucket) ForEach(fn func(k, v []byte) er.R) er.R {
return b.ForEachBeginningWith(nil, fn)
}
// ForEachBucket invokes the passed function with the key of every nested bucket
// in the current bucket. This does not include any nested buckets within those
// nested buckets.
//
// WARNING: It is not safe to mutate data while iterating with this method.
// Doing so may cause the underlying cursor to be invalidated and return
// unexpected keys.
//
// Returns the following errors as required by the interface contract:
// - ErrTxClosed if the transaction has already been closed
//
// NOTE: The values returned by this function are only valid during a
// transaction. Attempting to access them after a transaction has ended will
// likely result in an access violation.
//
// This function is part of the database.Bucket interface implementation.
func (b *bucket) ForEachBucket(fn func(k []byte) er.R) er.R {
// Ensure transaction state is valid.
if err := b.tx.checkClosed(); err != nil {
return err
}
// Invoke the callback for each cursor item. Return the error returned
// from the callback when it is non-nil.
c := newCursor(b, b.id[:], ctBuckets)
defer cursorFinalizer(c)
for ok := c.First(); ok; ok = c.Next() {
err := fn(c.Key())
if err != nil {
return err
}
}
return nil
}
// Writable returns whether or not the bucket is writable.
//
// This function is part of the database.Bucket interface implementation.
func (b *bucket) Writable() bool {
return b.tx.writable
}
// Put saves the specified key/value pair to the bucket. Keys that do not
// already exist are added and keys that already exist are overwritten.
//
// Returns the following errors as required by the interface contract:
// - ErrKeyRequired if the key is empty
// - ErrIncompatibleValue if the key is the same as an existing bucket
// - ErrTxNotWritable if attempted against a read-only transaction
// - ErrTxClosed if the transaction has already been closed
//
// This function is part of the database.Bucket interface implementation.
func (b *bucket) Put(key, value []byte) er.R {
// Ensure transaction state is valid.
if err := b.tx.checkClosed(); err != nil {
return err
}
// Ensure the transaction is writable.
if !b.tx.writable {
str := "setting a key requires a writable database transaction"
return makeDbErr(database.ErrTxNotWritable, str, nil)
}
// Ensure a key was provided.
if len(key) == 0 {
str := "put requires a key"
return makeDbErr(database.ErrKeyRequired, str, nil)
}
return b.tx.putKey(bucketizedKey(b.id, key), value)
}
// Get returns the value for the given key. Returns nil if the key does not
// exist in this bucket. An empty slice is returned for keys that exist but
// have no value assigned.
//
// NOTE: The value returned by this function is only valid during a transaction.
// Attempting to access it after a transaction has ended results in undefined
// behavior. Additionally, the value must NOT be modified by the caller.
//
// This function is part of the database.Bucket interface implementation.
func (b *bucket) Get(key []byte) []byte {
// Ensure transaction state is valid.
if err := b.tx.checkClosed(); err != nil {
return nil
}
// Nothing to return if there is no key.
if len(key) == 0 {
return nil
}
return b.tx.fetchKey(bucketizedKey(b.id, key))
}
// Delete removes the specified key from the bucket. Deleting a key that does
// not exist does not return an error.
//
// Returns the following errors as required by the interface contract:
// - ErrKeyRequired if the key is empty
// - ErrIncompatibleValue if the key is the same as an existing bucket
// - ErrTxNotWritable if attempted against a read-only transaction
// - ErrTxClosed if the transaction has already been closed
//
// This function is part of the database.Bucket interface implementation.
func (b *bucket) Delete(key []byte) er.R {
// Ensure transaction state is valid.
if err := b.tx.checkClosed(); err != nil {
return err
}
// Ensure the transaction is writable.
if !b.tx.writable {
str := "deleting a value requires a writable database transaction"
return makeDbErr(database.ErrTxNotWritable, str, nil)
}
// Nothing to do if there is no key.
if len(key) == 0 {
return nil
}
b.tx.deleteKey(bucketizedKey(b.id, key), true)
return nil
}
// pendingBlock houses a block that will be written to disk when the database
// transaction is committed.
type pendingBlock struct {
hash *chainhash.Hash
bytes []byte
}
// transaction represents a database transaction. It can either be read-only or
// read-write and implements the database.Tx interface. The transaction
// provides a root bucket against which all read and writes occur.
type transaction struct {
managed bool // Is the transaction managed?
closed bool // Is the transaction closed?
writable bool // Is the transaction writable?
db *db // DB instance the tx was created from.
snapshot *dbCacheSnapshot // Underlying snapshot for txns.
metaBucket *bucket // The root metadata bucket.
blockIdxBucket *bucket // The block index bucket.
// Blocks that need to be stored on commit. The pendingBlocks map is
// kept to allow quick lookups of pending data by block hash.
pendingBlocks map[chainhash.Hash]int
pendingBlockData []pendingBlock
// Keys that need to be stored or deleted on commit.
pendingKeys *treap.Mutable
pendingRemove *treap.Mutable
// Active iterators that need to be notified when the pending keys have
// been updated so the cursors can properly handle updates to the
// transaction state.
activeIterLock sync.RWMutex
activeIters []*treap.Iterator
}
// Enforce transaction implements the database.Tx interface.
var _ database.Tx = (*transaction)(nil)
// removeActiveIter removes the passed iterator from the list of active
// iterators against the pending keys treap.
func (tx *transaction) removeActiveIter(iter *treap.Iterator) {
// An indexing for loop is intentionally used over a range here as range
// does not reevaluate the slice on each iteration nor does it adjust
// the index for the modified slice.
tx.activeIterLock.Lock()
for i := 0; i < len(tx.activeIters); i++ {
if tx.activeIters[i] == iter {
copy(tx.activeIters[i:], tx.activeIters[i+1:])
tx.activeIters[len(tx.activeIters)-1] = nil
tx.activeIters = tx.activeIters[:len(tx.activeIters)-1]
}
}
tx.activeIterLock.Unlock()
}
// addActiveIter adds the passed iterator to the list of active iterators for
// the pending keys treap.
func (tx *transaction) addActiveIter(iter *treap.Iterator) {
tx.activeIterLock.Lock()
tx.activeIters = append(tx.activeIters, iter)
tx.activeIterLock.Unlock()
}
// notifyActiveIters notifies all of the active iterators for the pending keys
// treap that it has been updated.
func (tx *transaction) notifyActiveIters() {
tx.activeIterLock.RLock()
for _, iter := range tx.activeIters {
iter.ForceReseek()
}
tx.activeIterLock.RUnlock()
}
// checkClosed returns an error if the the database or transaction is closed.
func (tx *transaction) checkClosed() er.R {
// The transaction is no longer valid if it has been closed.
if tx.closed {
return makeDbErr(database.ErrTxClosed, errTxClosedStr, nil)
}
return nil
}
// hasKey returns whether or not the provided key exists in the database while
// taking into account the current transaction state.
func (tx *transaction) hasKey(key []byte) bool {
// When the transaction is writable, check the pending transaction
// state first.
if tx.writable {
if tx.pendingRemove.Has(key) {
return false
}
if tx.pendingKeys.Has(key) {
return true
}
}
// Consult the database cache and underlying database.
return tx.snapshot.Has(key)
}
// putKey adds the provided key to the list of keys to be updated in the
// database when the transaction is committed.
//
// NOTE: This function must only be called on a writable transaction. Since it
// is an internal helper function, it does not check.
func (tx *transaction) putKey(key, value []byte) er.R {
// Prevent the key from being deleted if it was previously scheduled
// to be deleted on transaction commit.
tx.pendingRemove.Delete(key)
// Add the key/value pair to the list to be written on transaction
// commit.
tx.pendingKeys.Put(key, value)
tx.notifyActiveIters()
return nil
}
// fetchKey attempts to fetch the provided key from the database cache (and
// hence underlying database) while taking into account the current transaction
// state. Returns nil if the key does not exist.
func (tx *transaction) fetchKey(key []byte) []byte {
// When the transaction is writable, check the pending transaction
// state first.
if tx.writable {
if tx.pendingRemove.Has(key) {
return nil
}
if value := tx.pendingKeys.Get(key); value != nil {
return value
}
}
// Consult the database cache and underlying database.
return tx.snapshot.Get(key)
}
// deleteKey adds the provided key to the list of keys to be deleted from the
// database when the transaction is committed. The notify iterators flag is
// useful to delay notifying iterators about the changes during bulk deletes.
//
// NOTE: This function must only be called on a writable transaction. Since it
// is an internal helper function, it does not check.
func (tx *transaction) deleteKey(key []byte, notifyIterators bool) {
// Remove the key from the list of pendings keys to be written on
// transaction commit if needed.
tx.pendingKeys.Delete(key)
// Add the key to the list to be deleted on transaction commit.
tx.pendingRemove.Put(key, nil)
// Notify the active iterators about the change if the flag is set.
if notifyIterators {
tx.notifyActiveIters()
}
}
// nextBucketID returns the next bucket ID to use for creating a new bucket.
//
// NOTE: This function must only be called on a writable transaction. Since it
// is an internal helper function, it does not check.
func (tx *transaction) nextBucketID() ([4]byte, er.R) {
// Load the currently highest used bucket ID.
curIDBytes := tx.fetchKey(curBucketIDKeyName)
curBucketNum := binary.BigEndian.Uint32(curIDBytes)
// Increment and update the current bucket ID and return it.
var nextBucketID [4]byte
binary.BigEndian.PutUint32(nextBucketID[:], curBucketNum+1)
if err := tx.putKey(curBucketIDKeyName, nextBucketID[:]); err != nil {
return [4]byte{}, err
}
return nextBucketID, nil
}
// Metadata returns the top-most bucket for all metadata storage.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) Metadata() database.Bucket {
return tx.metaBucket
}
// hasBlock returns whether or not a block with the given hash exists.
func (tx *transaction) hasBlock(hash *chainhash.Hash) bool {
// Return true if the block is pending to be written on commit since
// it exists from the viewpoint of this transaction.
if _, exists := tx.pendingBlocks[*hash]; exists {
return true
}
return tx.hasKey(bucketizedKey(blockIdxBucketID, hash[:]))
}
// StoreBlock stores the provided block into the database. There are no checks
// to ensure the block connects to a previous block, contains double spends, or
// any additional functionality such as transaction indexing. It simply stores
// the block in the database.
//
// Returns the following errors as required by the interface contract:
// - ErrBlockExists when the block hash already exists
// - ErrTxNotWritable if attempted against a read-only transaction
// - ErrTxClosed if the transaction has already been closed
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) StoreBlock(block *btcutil.Block) er.R {
// Ensure transaction state is valid.
if err := tx.checkClosed(); err != nil {
return err
}
// Ensure the transaction is writable.
if !tx.writable {
str := "store block requires a writable database transaction"
return makeDbErr(database.ErrTxNotWritable, str, nil)
}
// Reject the block if it already exists.
blockHash := block.Hash()
if tx.hasBlock(blockHash) {
str := fmt.Sprintf("block %s already exists", blockHash)
return makeDbErr(database.ErrBlockExists, str, nil)
}
blockBytes, err := block.Bytes()
if err != nil {
database.ErrDriverSpecific.New(
fmt.Sprintf("failed to get serialized bytes for block %s", blockHash),
err)
}
// Add the block to be stored to the list of pending blocks to store
// when the transaction is committed. Also, add it to pending blocks
// map so it is easy to determine the block is pending based on the
// block hash.
if tx.pendingBlocks == nil {
tx.pendingBlocks = make(map[chainhash.Hash]int)
}
tx.pendingBlocks[*blockHash] = len(tx.pendingBlockData)
tx.pendingBlockData = append(tx.pendingBlockData, pendingBlock{
hash: blockHash,
bytes: blockBytes,
})
log.Tracef("Added block %s to pending blocks", blockHash)
return nil
}
// HasBlock returns whether or not a block with the given hash exists in the
// database.
//
// Returns the following errors as required by the interface contract:
// - ErrTxClosed if the transaction has already been closed
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) HasBlock(hash *chainhash.Hash) (bool, er.R) {
// Ensure transaction state is valid.
if err := tx.checkClosed(); err != nil {
return false, err
}
return tx.hasBlock(hash), nil
}
// HasBlocks returns whether or not the blocks with the provided hashes
// exist in the database.
//
// Returns the following errors as required by the interface contract:
// - ErrTxClosed if the transaction has already been closed
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) HasBlocks(hashes []chainhash.Hash) ([]bool, er.R) {
// Ensure transaction state is valid.
if err := tx.checkClosed(); err != nil {
return nil, err
}
results := make([]bool, len(hashes))
for i := range hashes {
results[i] = tx.hasBlock(&hashes[i])
}
return results, nil
}
// fetchBlockRow fetches the metadata stored in the block index for the provided
// hash. It will return ErrBlockNotFound if there is no entry.
func (tx *transaction) fetchBlockRow(hash *chainhash.Hash) ([]byte, er.R) {
blockRow := tx.blockIdxBucket.Get(hash[:])
if blockRow == nil {
str := fmt.Sprintf("block %s does not exist", hash)
return nil, makeDbErr(database.ErrBlockNotFound, str, nil)
}
return blockRow, nil
}
// FetchBlockHeader returns the raw serialized bytes for the block header
// identified by the given hash. The raw bytes are in the format returned by
// Serialize on a wire.BlockHeader.
//
// Returns the following errors as required by the interface contract:
// - ErrBlockNotFound if the requested block hash does not exist
// - ErrTxClosed if the transaction has already been closed
// - ErrCorruption if the database has somehow become corrupted
//
// NOTE: The data returned by this function is only valid during a
// database transaction. Attempting to access it after a transaction
// has ended results in undefined behavior. This constraint prevents
// additional data copies and allows support for memory-mapped database
// implementations.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) FetchBlockHeader(hash *chainhash.Hash) ([]byte, er.R) {
return tx.FetchBlockRegion(&database.BlockRegion{
Hash: hash,
Offset: 0,
Len: blockHdrSize,
})
}
// FetchBlockHeaders returns the raw serialized bytes for the block headers
// identified by the given hashes. The raw bytes are in the format returned by
// Serialize on a wire.BlockHeader.
//
// Returns the following errors as required by the interface contract:
// - ErrBlockNotFound if the any of the requested block hashes do not exist
// - ErrTxClosed if the transaction has already been closed
// - ErrCorruption if the database has somehow become corrupted
//
// NOTE: The data returned by this function is only valid during a database
// transaction. Attempting to access it after a transaction has ended results
// in undefined behavior. This constraint prevents additional data copies and
// allows support for memory-mapped database implementations.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) FetchBlockHeaders(hashes []chainhash.Hash) ([][]byte, er.R) {
regions := make([]database.BlockRegion, len(hashes))
for i := range hashes {
regions[i].Hash = &hashes[i]
regions[i].Offset = 0
regions[i].Len = blockHdrSize
}
return tx.FetchBlockRegions(regions)
}
// FetchBlock returns the raw serialized bytes for the block identified by the
// given hash. The raw bytes are in the format returned by Serialize on a
// wire.MsgBlock.
//
// Returns the following errors as required by the interface contract:
// - ErrBlockNotFound if the requested block hash does not exist
// - ErrTxClosed if the transaction has already been closed
// - ErrCorruption if the database has somehow become corrupted
//
// In addition, returns ErrDriverSpecific if any failures occur when reading the
// block files.
//
// NOTE: The data returned by this function is only valid during a database
// transaction. Attempting to access it after a transaction has ended results
// in undefined behavior. This constraint prevents additional data copies and
// allows support for memory-mapped database implementations.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) FetchBlock(hash *chainhash.Hash) ([]byte, er.R) {
// Ensure transaction state is valid.
if err := tx.checkClosed(); err != nil {
return nil, err
}
// When the block is pending to be written on commit return the bytes
// from there.
if idx, exists := tx.pendingBlocks[*hash]; exists {
return tx.pendingBlockData[idx].bytes, nil
}
// Lookup the location of the block in the files from the block index.
blockRow, err := tx.fetchBlockRow(hash)
if err != nil {
return nil, err
}
location := deserializeBlockLoc(blockRow)
// Read the block from the appropriate location. The function also
// performs a checksum over the data to detect data corruption.
blockBytes, err := tx.db.store.readBlock(hash, location)
if err != nil {
return nil, err
}
return blockBytes, nil
}
// FetchBlocks returns the raw serialized bytes for the blocks identified by the
// given hashes. The raw bytes are in the format returned by Serialize on a
// wire.MsgBlock.
//
// Returns the following errors as required by the interface contract:
// - ErrBlockNotFound if any of the requested block hashed do not exist
// - ErrTxClosed if the transaction has already been closed
// - ErrCorruption if the database has somehow become corrupted
//
// In addition, returns ErrDriverSpecific if any failures occur when reading the
// block files.
//
// NOTE: The data returned by this function is only valid during a database
// transaction. Attempting to access it after a transaction has ended results
// in undefined behavior. This constraint prevents additional data copies and
// allows support for memory-mapped database implementations.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) FetchBlocks(hashes []chainhash.Hash) ([][]byte, er.R) {
// Ensure transaction state is valid.
if err := tx.checkClosed(); err != nil {
return nil, err
}
// NOTE: This could check for the existence of all blocks before loading
// any of them which would be faster in the failure case, however
// callers will not typically be calling this function with invalid
// values, so optimize for the common case.
// Load the blocks.
blocks := make([][]byte, len(hashes))
for i := range hashes {
var err er.R
blocks[i], err = tx.FetchBlock(&hashes[i])
if err != nil {
return nil, err
}
}
return blocks, nil
}
// fetchPendingRegion attempts to fetch the provided region from any block which
// are pending to be written on commit. It will return nil for the byte slice
// when the region references a block which is not pending. When the region
// does reference a pending block, it is bounds checked and returns
// ErrBlockRegionInvalid if invalid.
func (tx *transaction) fetchPendingRegion(region *database.BlockRegion) ([]byte, er.R) {
// Nothing to do if the block is not pending to be written on commit.
idx, exists := tx.pendingBlocks[*region.Hash]
if !exists {
return nil, nil
}
// Ensure the region is within the bounds of the block.
blockBytes := tx.pendingBlockData[idx].bytes
blockLen := uint32(len(blockBytes))
endOffset := region.Offset + region.Len
if endOffset < region.Offset || endOffset > blockLen {
str := fmt.Sprintf("block %s region offset %d, length %d "+
"exceeds block length of %d", region.Hash,
region.Offset, region.Len, blockLen)
return nil, makeDbErr(database.ErrBlockRegionInvalid, str, nil)
}
// Return the bytes from the pending block.
return blockBytes[region.Offset:endOffset:endOffset], nil
}
// FetchBlockRegion returns the raw serialized bytes for the given block region.
//
// For example, it is possible to directly extract Bitcoin transactions and/or
// scripts from a block with this function. Depending on the backend
// implementation, this can provide significant savings by avoiding the need to
// load entire blocks.
//
// The raw bytes are in the format returned by Serialize on a wire.MsgBlock and
// the Offset field in the provided BlockRegion is zero-based and relative to
// the start of the block (byte 0).
//
// Returns the following errors as required by the interface contract:
// - ErrBlockNotFound if the requested block hash does not exist
// - ErrBlockRegionInvalid if the region exceeds the bounds of the associated
// block
// - ErrTxClosed if the transaction has already been closed
// - ErrCorruption if the database has somehow become corrupted
//
// In addition, returns ErrDriverSpecific if any failures occur when reading the
// block files.
//
// NOTE: The data returned by this function is only valid during a database
// transaction. Attempting to access it after a transaction has ended results
// in undefined behavior. This constraint prevents additional data copies and
// allows support for memory-mapped database implementations.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) FetchBlockRegion(region *database.BlockRegion) ([]byte, er.R) {
// Ensure transaction state is valid.
if err := tx.checkClosed(); err != nil {
return nil, err
}
// When the block is pending to be written on commit return the bytes
// from there.
if tx.pendingBlocks != nil {
regionBytes, err := tx.fetchPendingRegion(region)
if err != nil {
return nil, err
}
if regionBytes != nil {
return regionBytes, nil
}
}
// Lookup the location of the block in the files from the block index.
blockRow, err := tx.fetchBlockRow(region.Hash)
if err != nil {
return nil, err
}
location := deserializeBlockLoc(blockRow)
// Calculate the actual block size by removing the metadata.
blockLen := location.blockLen - blockMetadataSize
// Ensure the region is within the bounds of the block.
endOffset := region.Offset + region.Len
if endOffset < region.Offset || endOffset > blockLen {
str := fmt.Sprintf("block %s region offset %d, length %d "+
"exceeds block length of %d", region.Hash,
region.Offset, region.Len, blockLen)
return nil, makeDbErr(database.ErrBlockRegionInvalid, str, nil)
}
// Read the region from the appropriate disk block file.
regionBytes, err := tx.db.store.readBlockRegion(location, region.Offset,
region.Len)
if err != nil {
return nil, err
}
return regionBytes, nil
}
// FetchBlockRegions returns the raw serialized bytes for the given block
// regions.
//
// For example, it is possible to directly extract Bitcoin transactions and/or
// scripts from various blocks with this function. Depending on the backend
// implementation, this can provide significant savings by avoiding the need to
// load entire blocks.
//
// The raw bytes are in the format returned by Serialize on a wire.MsgBlock and
// the Offset fields in the provided BlockRegions are zero-based and relative to
// the start of the block (byte 0).
//
// Returns the following errors as required by the interface contract:
// - ErrBlockNotFound if any of the request block hashes do not exist
// - ErrBlockRegionInvalid if one or more region exceed the bounds of the
// associated block
// - ErrTxClosed if the transaction has already been closed
// - ErrCorruption if the database has somehow become corrupted
//
// In addition, returns ErrDriverSpecific if any failures occur when reading the
// block files.
//
// NOTE: The data returned by this function is only valid during a database
// transaction. Attempting to access it after a transaction has ended results
// in undefined behavior. This constraint prevents additional data copies and
// allows support for memory-mapped database implementations.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) FetchBlockRegions(regions []database.BlockRegion) ([][]byte, er.R) {
// Ensure transaction state is valid.
if err := tx.checkClosed(); err != nil {
return nil, err
}
// NOTE: This could check for the existence of all blocks before
// deserializing the locations and building up the fetch list which
// would be faster in the failure case, however callers will not
// typically be calling this function with invalid values, so optimize
// for the common case.
// NOTE: A potential optimization here would be to combine adjacent
// regions to reduce the number of reads.
// In order to improve efficiency of loading the bulk data, first grab
// the block location for all of the requested block hashes and sort
// the reads by filenum:offset so that all reads are grouped by file
// and linear within each file. This can result in quite a significant
// performance increase depending on how spread out the requested hashes
// are by reducing the number of file open/closes and random accesses
// needed. The fetchList is intentionally allocated with a cap because
// some of the regions might be fetched from the pending blocks and
// hence there is no need to fetch those from disk.
blockRegions := make([][]byte, len(regions))
fetchList := make([]bulkFetchData, 0, len(regions))
for i := range regions {
region := ®ions[i]
// When the block is pending to be written on commit grab the
// bytes from there.
if tx.pendingBlocks != nil {
regionBytes, err := tx.fetchPendingRegion(region)
if err != nil {
return nil, err
}
if regionBytes != nil {
blockRegions[i] = regionBytes
continue
}
}
// Lookup the location of the block in the files from the block
// index.
blockRow, err := tx.fetchBlockRow(region.Hash)
if err != nil {
return nil, err
}
location := deserializeBlockLoc(blockRow)
// Calculate the actual block size by removing the metadata.
blockLen := location.blockLen - blockMetadataSize
// Ensure the region is within the bounds of the block.
endOffset := region.Offset + region.Len
if endOffset < region.Offset || endOffset > blockLen {
str := fmt.Sprintf("block %s region offset %d, length "+
"%d exceeds block length of %d", region.Hash,
region.Offset, region.Len, blockLen)
return nil, makeDbErr(database.ErrBlockRegionInvalid, str, nil)
}
fetchList = append(fetchList, bulkFetchData{&location, i})
}
sort.Sort(bulkFetchDataSorter(fetchList))
// Read all of the regions in the fetch list and set the results.
for i := range fetchList {
fetchData := &fetchList[i]
ri := fetchData.replyIndex
region := ®ions[ri]
location := fetchData.blockLocation
regionBytes, err := tx.db.store.readBlockRegion(*location,
region.Offset, region.Len)
if err != nil {
return nil, err
}
blockRegions[ri] = regionBytes
}
return blockRegions, nil
}
// close marks the transaction closed then releases any pending data, the
// underlying snapshot, the transaction read lock, and the write lock when the
// transaction is writable.
func (tx *transaction) close() {
tx.closed = true
// Clear pending blocks that would have been written on commit.
tx.pendingBlocks = nil
tx.pendingBlockData = nil
// Clear pending keys that would have been written or deleted on commit.
tx.pendingKeys = nil
tx.pendingRemove = nil
// Release the snapshot.
if tx.snapshot != nil {
tx.snapshot.Release()
tx.snapshot = nil
}
tx.db.closeLock.RUnlock()
// Release the writer lock for writable transactions to unblock any
// other write transaction which are possibly waiting.
if tx.writable {
tx.db.writeLock.Unlock()
}
}
// writePendingAndCommit writes pending block data to the flat block files,
// updates the metadata with their locations as well as the new current write
// location, and commits the metadata to the memory database cache. It also
// properly handles rollback in the case of failures.
//
// This function MUST only be called when there is pending data to be written.
func (tx *transaction) writePendingAndCommit() er.R {
// Save the current block store write position for potential rollback.
// These variables are only updated here in this function and there can
// only be one write transaction active at a time, so it's safe to store
// them for potential rollback.
wc := tx.db.store.writeCursor
wc.RLock()
oldBlkFileNum := wc.curFileNum
oldBlkOffset := wc.curOffset
wc.RUnlock()
// rollback is a closure that is used to rollback all writes to the
// block files.
rollback := func() {
// Rollback any modifications made to the block files if needed.
tx.db.store.handleRollback(oldBlkFileNum, oldBlkOffset)
}
// Loop through all of the pending blocks to store and write them.
for _, blockData := range tx.pendingBlockData {
log.Tracef("Storing block %s", blockData.hash)
location, err := tx.db.store.writeBlock(blockData.bytes)
if err != nil {
rollback()
return err
}
// Add a record in the block index for the block. The record
// includes the location information needed to locate the block
// on the filesystem as well as the block header since they are
// so commonly needed.
blockRow := serializeBlockLoc(location)
err = tx.blockIdxBucket.Put(blockData.hash[:], blockRow)
if err != nil {
rollback()
return err
}
}
// Update the metadata for the current write file and offset.
writeRow := serializeWriteRow(wc.curFileNum, wc.curOffset)
if err := tx.metaBucket.Put(writeLocKeyName, writeRow); err != nil {
rollback()
err.AddMessage("failed to store write cursor")
return err
}
// Atomically update the database cache. The cache automatically
// handles flushing to the underlying persistent storage database.
return tx.db.cache.commitTx(tx)
}
// Commit commits all changes that have been made to the root metadata bucket
// and all of its sub-buckets to the database cache which is periodically synced
// to persistent storage. In addition, it commits all new blocks directly to
// persistent storage bypassing the db cache. Blocks can be rather large, so
// this help increase the amount of cache available for the metadata updates and
// is safe since blocks are immutable.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) Commit() er.R {
// Prevent commits on managed transactions.
if tx.managed {
tx.close()
panic("managed transaction commit not allowed")
}
// Ensure transaction state is valid.
if err := tx.checkClosed(); err != nil {
return err
}
// Regardless of whether the commit succeeds, the transaction is closed
// on return.
defer tx.close()
// Ensure the transaction is writable.
if !tx.writable {
str := "Commit requires a writable database transaction"
return makeDbErr(database.ErrTxNotWritable, str, nil)
}
// Write pending data. The function will rollback if any errors occur.
return tx.writePendingAndCommit()
}
// Rollback undoes all changes that have been made to the root bucket and all of
// its sub-buckets.
//
// This function is part of the database.Tx interface implementation.
func (tx *transaction) Rollback() er.R {
// Prevent rollbacks on managed transactions.
if tx.managed {
tx.close()
panic("managed transaction rollback not allowed")
}
// Ensure transaction state is valid.
if err := tx.checkClosed(); err != nil {
return err
}
tx.close()
return nil
}
// db represents a collection of namespaces which are persisted and implements
// the database.DB interface. All database access is performed through
// transactions which are obtained through the specific Namespace.
type db struct {
writeLock sync.Mutex // Limit to one write transaction at a time.
closeLock sync.RWMutex // Make database close block while txns active.
closed bool // Is the database closed?
store *blockStore // Handles read/writing blocks to flat files.
cache *dbCache // Cache layer which wraps underlying leveldb DB.
}
// Enforce db implements the database.DB interface.
var _ database.DB = (*db)(nil)
// Type returns the database driver type the current database instance was
// created with.
//
// This function is part of the database.DB interface implementation.
func (db *db) Type() string {
return dbType
}
// begin is the implementation function for the Begin database method. See its
// documentation for more details.
//
// This function is only separate because it returns the internal transaction
// which is used by the managed transaction code while the database method
// returns the interface.
func (db *db) begin(writable bool) (*transaction, er.R) {
// Make sure there is enough available disk space so we can inform the
// user of the problem instead of causing a db failure.
if writable {
freeSpace, err := getAvailableDiskSpace(db.store.basePath)
if err != nil {
str := "failed to determine available disk space"
return nil, makeDbErr(database.ErrDriverSpecific, str, nil)
}
if freeSpace < uint64(minAvailableSpaceUpdate) {
errMsg := fmt.Sprintf("available disk space too low: "+
"%.1f MiB", float64(freeSpace)/float64(bytesMiB))
return nil, makeDbErr(database.ErrAvailableDiskSpace,
errMsg, nil)
}
}
// Whenever a new writable transaction is started, grab the write lock
// to ensure only a single write transaction can be active at the same
// time. This lock will not be released until the transaction is
// closed (via Rollback or Commit).
if writable {
db.writeLock.Lock()
}
// Whenever a new transaction is started, grab a read lock against the
// database to ensure Close will wait for the transaction to finish.
// This lock will not be released until the transaction is closed (via
// Rollback or Commit).
db.closeLock.RLock()
if db.closed {
db.closeLock.RUnlock()
if writable {
db.writeLock.Unlock()
}
return nil, makeDbErr(database.ErrDbNotOpen, errDbNotOpenStr,
nil)
}
// Grab a snapshot of the database cache (which in turn also handles the
// underlying database).
snapshot, err := db.cache.Snapshot()
if err != nil {
db.closeLock.RUnlock()
if writable {
db.writeLock.Unlock()
}
return nil, err
}
// The metadata and block index buckets are internal-only buckets, so
// they have defined IDs.
tx := &transaction{
writable: writable,
db: db,
snapshot: snapshot,
pendingKeys: treap.NewMutable(),
pendingRemove: treap.NewMutable(),
}
tx.metaBucket = &bucket{tx: tx, id: metadataBucketID}
tx.blockIdxBucket = &bucket{tx: tx, id: blockIdxBucketID}
return tx, nil
}
// Begin starts a transaction which is either read-only or read-write depending
// on the specified flag. Multiple read-only transactions can be started
// simultaneously while only a single read-write transaction can be started at a
// time. The call will block when starting a read-write transaction when one is
// already open.
//
// NOTE: The transaction must be closed by calling Rollback or Commit on it when
// it is no longer needed. Failure to do so will result in unclaimed memory.
//
// This function is part of the database.DB interface implementation.
func (db *db) Begin(writable bool) (database.Tx, er.R) {
return db.begin(writable)
}
// rollbackOnPanic rolls the passed transaction back if the code in the calling
// function panics. This is needed since the mutex on a transaction must be
// released and a panic in called code would prevent that from happening.
//
// NOTE: This can only be handled manually for managed transactions since they
// control the life-cycle of the transaction. As the documentation on Begin
// calls out, callers opting to use manual transactions will have to ensure the
// transaction is rolled back on panic if it desires that functionality as well
// or the database will fail to close since the read-lock will never be
// released.
func rollbackOnPanic(tx *transaction) {
if err := recover(); err != nil {
tx.managed = false
_ = tx.Rollback()
panic(err)
}
}
// View invokes the passed function in the context of a managed read-only
// transaction with the root bucket for the namespace. Any errors returned from
// the user-supplied function are returned from this function.
//
// This function is part of the database.DB interface implementation.
func (db *db) View(fn func(database.Tx) er.R) er.R {
// Start a read-only transaction.
tx, err := db.begin(false)
if err != nil {
return err
}
// Since the user-provided function might panic, ensure the transaction
// releases all mutexes and resources. There is no guarantee the caller
// won't use recover and keep going. Thus, the database must still be
// in a usable state on panics due to caller issues.
defer rollbackOnPanic(tx)
tx.managed = true
err = fn(tx)
tx.managed = false
if err != nil {
// The error is ignored here because nothing was written yet
// and regardless of a rollback failure, the tx is closed now
// anyways.
_ = tx.Rollback()
return err
}
return tx.Rollback()
}
// Update invokes the passed function in the context of a managed read-write
// transaction with the root bucket for the namespace. Any errors returned from
// the user-supplied function will cause the transaction to be rolled back and
// are returned from this function. Otherwise, the transaction is committed
// when the user-supplied function returns a nil error.
//
// This function is part of the database.DB interface implementation.
func (db *db) Update(fn func(database.Tx) er.R) er.R {
// Start a read-write transaction.
tx, err := db.begin(true)
if err != nil {
return err
}
// Since the user-provided function might panic, ensure the transaction
// releases all mutexes and resources. There is no guarantee the caller
// won't use recover and keep going. Thus, the database must still be
// in a usable state on panics due to caller issues.
defer rollbackOnPanic(tx)
tx.managed = true
err = fn(tx)
tx.managed = false
if err != nil {
// The error is ignored here because nothing was written yet
// and regardless of a rollback failure, the tx is closed now
// anyways.
_ = tx.Rollback()
return err
}
return tx.Commit()
}
// Close cleanly shuts down the database and syncs all data. It will block
// until all database transactions have been finalized (rolled back or
// committed).
//
// This function is part of the database.DB interface implementation.
func (db *db) Close() er.R {
// Since all transactions have a read lock on this mutex, this will
// cause Close to wait for all readers to complete.
db.closeLock.Lock()
defer db.closeLock.Unlock()
if db.closed {
return makeDbErr(database.ErrDbNotOpen, errDbNotOpenStr, nil)
}
db.closed = true
// NOTE: Since the above lock waits for all transactions to finish and
// prevents any new ones from being started, it is safe to flush the
// cache and clear all state without the individual locks.
// Close the database cache which will flush any existing entries to
// disk and close the underlying leveldb database. Any error is saved
// and returned at the end after the remaining cleanup since the
// database will be marked closed even if this fails given there is no
// good way for the caller to recover from a failure here anyways.
closeErr := db.cache.Close()
// Close any open flat files that house the blocks.
wc := db.store.writeCursor
if wc.curFile.file != nil {
_ = wc.curFile.file.Close()
wc.curFile.file = nil
}
for _, blockFile := range db.store.openBlockFiles {
_ = blockFile.file.Close()
}
db.store.openBlockFiles = nil
db.store.openBlocksLRU.Init()
db.store.fileNumToLRUElem = nil
return closeErr
}
// filesExists reports whether the named file or directory exists.
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// initDB creates the initial buckets and values used by the package. This is
// mainly in a separate function for testing purposes.
func initDB(ldb *leveldb.DB) er.R {
// The starting block file write cursor location is file num 0, offset
// 0.
batch := new(leveldb.Batch)
batch.Put(bucketizedKey(metadataBucketID, writeLocKeyName),
serializeWriteRow(0, 0))
// Create block index bucket and set the current bucket id.
//
// NOTE: Since buckets are virtualized through the use of prefixes,
// there is no need to store the bucket index data for the metadata
// bucket in the database. However, the first bucket ID to use does
// need to account for it to ensure there are no key collisions.
batch.Put(bucketIndexKey(metadataBucketID, blockIdxBucketName),
blockIdxBucketID[:])
batch.Put(curBucketIDKeyName, blockIdxBucketID[:])
// Write everything as a single batch.
if err := ldb.Write(batch, nil); err != nil {
str := fmt.Sprintf("failed to initialize metadata database: %v",
err)
return convertErr(str, err)
}
return nil
}
// OpenDB opens the database at the provided path. database.ErrDbDoesNotExist
// is returned if the database doesn't exist and the create flag is not set.
func OpenDB(dbPath string, network protocol.BitcoinNet, create bool) (database.DB, er.R) {
// Error if the database doesn't exist and the create flag is not set.
metadataDbPath := filepath.Join(dbPath, metadataDbName)
dbExists := fileExists(metadataDbPath)
if !create && !dbExists {
str := fmt.Sprintf("database %q does not exist", metadataDbPath)
return nil, makeDbErr(database.ErrDbDoesNotExist, str, nil)
}
// Ensure the full path to the database exists.
if !dbExists {
// The error can be ignored here since the call to
// leveldb.OpenFile will fail if the directory couldn't be
// created.
_ = os.MkdirAll(dbPath, 0o700)
}
// Open the metadata database (will create it if needed).
opts := opt.Options{
ErrorIfExist: create,
Strict: opt.StrictAll,
Compression: opt.NoCompression,
DisableCompactionBackoff: true,
DisableSeeksCompaction: true,
WriteL0PauseTrigger: 8,
DisableBufferPool: false,
DisableBlockCache: false,
Filter: filter.NewBloomFilter(10),
}
ldb, err := leveldb.OpenFile(metadataDbPath, &opts)
if _, corrupted := err.(*ldberrors.ErrCorrupted); corrupted {
ldb, err = leveldb.RecoverFile(metadataDbPath, nil)
}
if err != nil {
return nil, convertErr(err.Error(), err)
}
// Create the block store which includes scanning the existing flat
// block files to find what the current write cursor position is
// according to the data that is actually on disk. Also create the
// database cache which wraps the underlying leveldb database to provide
// write caching.
store := newBlockStore(dbPath, network)
cache := newDbCache(ldb, store, defaultCacheSize, defaultFlushSecs)
pdb := &db{store: store, cache: cache}
// Perform any reconciliation needed between the block and metadata as
// well as database initialization, if needed.
return reconcileDB(pdb, create)
}
|
{
// Use the driver-specific error code by default. The code below will
// update this with the converted error if it's recognized.
code := database.ErrDriverSpecific
switch {
// Database corruption errors.
case ldberrors.IsCorrupted(ldbErr):
code = database.ErrCorruption
// Database open/create errors.
case ldbErr == leveldb.ErrClosed:
code = database.ErrDbNotOpen
// Transaction errors.
case ldbErr == leveldb.ErrSnapshotReleased:
code = database.ErrTxClosed
case ldbErr == leveldb.ErrIterReleased:
code = database.ErrTxClosed
}
return code.New(desc, er.E(ldbErr))
}
|
models.rs
|
#![doc = "generated by AutoRust"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AciServiceCreateRequest {
#[serde(flatten)]
pub create_service_request: CreateServiceRequest,
#[doc = "The resource requirements for the container (cpu and memory)."]
#[serde(rename = "containerResourceRequirements", default, skip_serializing_if = "Option::is_none")]
pub container_resource_requirements: Option<ContainerResourceRequirements>,
#[doc = "Whether or not authentication is enabled on the service."]
#[serde(rename = "authEnabled", default, skip_serializing_if = "Option::is_none")]
pub auth_enabled: Option<bool>,
#[doc = "Whether or not SSL is enabled."]
#[serde(rename = "sslEnabled", default, skip_serializing_if = "Option::is_none")]
pub ssl_enabled: Option<bool>,
#[doc = "Whether or not Application Insights is enabled."]
#[serde(rename = "appInsightsEnabled", default, skip_serializing_if = "Option::is_none")]
pub app_insights_enabled: Option<bool>,
#[doc = "Details of the data collection options specified."]
#[serde(rename = "dataCollection", default, skip_serializing_if = "Option::is_none")]
pub data_collection: Option<serde_json::Value>,
#[doc = "The public SSL certificate in PEM format to use if SSL is enabled."]
#[serde(rename = "sslCertificate", default, skip_serializing_if = "Option::is_none")]
pub ssl_certificate: Option<String>,
#[doc = "The public SSL key in PEM format for the certificate."]
#[serde(rename = "sslKey", default, skip_serializing_if = "Option::is_none")]
pub ssl_key: Option<String>,
#[doc = "The CName for the service."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub cname: Option<String>,
#[doc = "The Dns label for the service."]
#[serde(rename = "dnsNameLabel", default, skip_serializing_if = "Option::is_none")]
pub dns_name_label: Option<String>,
#[doc = "The virtual network configuration."]
#[serde(rename = "vnetConfiguration", default, skip_serializing_if = "Option::is_none")]
pub vnet_configuration: Option<serde_json::Value>,
#[doc = "The encryption properties."]
#[serde(rename = "encryptionProperties", default, skip_serializing_if = "Option::is_none")]
pub encryption_properties: Option<serde_json::Value>,
}
impl AciServiceCreateRequest {
pub fn new(create_service_request: CreateServiceRequest) -> Self {
Self {
create_service_request,
container_resource_requirements: None,
auth_enabled: None,
ssl_enabled: None,
app_insights_enabled: None,
data_collection: None,
ssl_certificate: None,
ssl_key: None,
cname: None,
dns_name_label: None,
vnet_configuration: None,
encryption_properties: None,
}
}
}
#[doc = "The response for an ACI service."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AciServiceResponse {
#[serde(flatten)]
pub service_response_base: ServiceResponseBase,
#[doc = "The resource requirements for the container (cpu and memory)."]
#[serde(rename = "containerResourceRequirements", default, skip_serializing_if = "Option::is_none")]
pub container_resource_requirements: Option<ContainerResourceRequirements>,
#[doc = "The Uri for sending scoring requests."]
#[serde(rename = "scoringUri", default, skip_serializing_if = "Option::is_none")]
pub scoring_uri: Option<String>,
#[doc = "The name of the Azure location/region."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[doc = "Whether or not authentication is enabled on the service."]
#[serde(rename = "authEnabled", default, skip_serializing_if = "Option::is_none")]
pub auth_enabled: Option<bool>,
#[doc = "Whether or not SSL is enabled."]
#[serde(rename = "sslEnabled", default, skip_serializing_if = "Option::is_none")]
pub ssl_enabled: Option<bool>,
#[doc = "Whether or not Application Insights is enabled."]
#[serde(rename = "appInsightsEnabled", default, skip_serializing_if = "Option::is_none")]
pub app_insights_enabled: Option<bool>,
#[doc = "Details of the data collection options specified."]
#[serde(rename = "dataCollection", default, skip_serializing_if = "Option::is_none")]
pub data_collection: Option<serde_json::Value>,
#[doc = "The public SSL certificate in PEM format to use if SSL is enabled."]
#[serde(rename = "sslCertificate", default, skip_serializing_if = "Option::is_none")]
pub ssl_certificate: Option<String>,
#[doc = "The public SSL key in PEM format for the certificate."]
#[serde(rename = "sslKey", default, skip_serializing_if = "Option::is_none")]
pub ssl_key: Option<String>,
#[doc = "The CName for the service."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub cname: Option<String>,
#[doc = "The public IP address for the service."]
#[serde(rename = "publicIp", default, skip_serializing_if = "Option::is_none")]
pub public_ip: Option<String>,
#[doc = "The public Fqdn for the service."]
#[serde(rename = "publicFqdn", default, skip_serializing_if = "Option::is_none")]
pub public_fqdn: Option<String>,
#[doc = "The Uri for sending swagger requests."]
#[serde(rename = "swaggerUri", default, skip_serializing_if = "Option::is_none")]
pub swagger_uri: Option<String>,
#[doc = "Details on the models and configurations."]
#[serde(rename = "modelConfigMap", default, skip_serializing_if = "Option::is_none")]
pub model_config_map: Option<serde_json::Value>,
#[doc = "The list of models."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub models: Vec<Model>,
#[doc = "The Environment, models and assets used for inferencing."]
#[serde(rename = "environmentImageRequest", default, skip_serializing_if = "Option::is_none")]
pub environment_image_request: Option<serde_json::Value>,
#[doc = "The virtual network configuration."]
#[serde(rename = "vnetConfiguration", default, skip_serializing_if = "Option::is_none")]
pub vnet_configuration: Option<serde_json::Value>,
#[doc = "The encryption properties."]
#[serde(rename = "encryptionProperties", default, skip_serializing_if = "Option::is_none")]
pub encryption_properties: Option<serde_json::Value>,
}
impl AciServiceResponse {
pub fn new(service_response_base: ServiceResponseBase) -> Self {
Self {
service_response_base,
container_resource_requirements: None,
scoring_uri: None,
location: None,
auth_enabled: None,
ssl_enabled: None,
app_insights_enabled: None,
data_collection: None,
ssl_certificate: None,
ssl_key: None,
cname: None,
public_ip: None,
public_fqdn: None,
swagger_uri: None,
model_config_map: None,
models: Vec::new(),
environment_image_request: None,
vnet_configuration: None,
encryption_properties: None,
}
}
}
#[doc = "A Machine Learning compute based on AKS."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Aks {
#[serde(flatten)]
pub compute: Compute,
#[doc = "AKS properties"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<aks::Properties>,
}
impl Aks {
pub fn new(compute: Compute) -> Self {
Self { compute, properties: None }
}
}
pub mod aks {
use super::*;
#[doc = "AKS properties"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Properties {
#[doc = "Cluster full qualified domain name"]
#[serde(rename = "clusterFqdn", default, skip_serializing_if = "Option::is_none")]
pub cluster_fqdn: Option<String>,
#[doc = "System services"]
#[serde(rename = "systemServices", default, skip_serializing_if = "Vec::is_empty")]
pub system_services: Vec<SystemService>,
#[doc = "Number of agents"]
#[serde(rename = "agentCount", default, skip_serializing_if = "Option::is_none")]
pub agent_count: Option<i32>,
#[doc = "Agent virtual machine size"]
#[serde(rename = "agentVmSize", default, skip_serializing_if = "Option::is_none")]
pub agent_vm_size: Option<String>,
#[doc = "Intended usage of the cluster"]
#[serde(rename = "clusterPurpose", default, skip_serializing_if = "Option::is_none")]
pub cluster_purpose: Option<properties::ClusterPurpose>,
#[doc = "The ssl configuration for scoring"]
#[serde(rename = "sslConfiguration", default, skip_serializing_if = "Option::is_none")]
pub ssl_configuration: Option<SslConfiguration>,
#[doc = "Advance configuration for AKS networking"]
#[serde(rename = "aksNetworkingConfiguration", default, skip_serializing_if = "Option::is_none")]
pub aks_networking_configuration: Option<AksNetworkingConfiguration>,
#[doc = "Load Balancer Type"]
#[serde(rename = "loadBalancerType", default, skip_serializing_if = "Option::is_none")]
pub load_balancer_type: Option<properties::LoadBalancerType>,
#[doc = "Load Balancer Subnet"]
#[serde(rename = "loadBalancerSubnet", default, skip_serializing_if = "Option::is_none")]
pub load_balancer_subnet: Option<String>,
}
impl Properties {
pub fn new() -> Self {
Self::default()
}
}
pub mod properties {
use super::*;
#[doc = "Intended usage of the cluster"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ClusterPurpose {
FastProd,
DenseProd,
DevTest,
}
impl Default for ClusterPurpose {
fn default() -> Self {
Self::FastProd
}
}
#[doc = "Load Balancer Type"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LoadBalancerType {
PublicIp,
InternalLoadBalancer,
}
impl Default for LoadBalancerType {
fn default() -> Self {
Self::PublicIp
}
}
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AksReplicaStatus {
#[doc = "The desired number of replicas."]
#[serde(rename = "desiredReplicas", default, skip_serializing_if = "Option::is_none")]
pub desired_replicas: Option<i32>,
#[doc = "The number of updated replicas."]
#[serde(rename = "updatedReplicas", default, skip_serializing_if = "Option::is_none")]
pub updated_replicas: Option<i32>,
#[doc = "The number of available replicas."]
#[serde(rename = "availableReplicas", default, skip_serializing_if = "Option::is_none")]
pub available_replicas: Option<i32>,
#[doc = "The error details."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<serde_json::Value>,
}
impl AksReplicaStatus {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The request to create an AKS service."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AksServiceCreateRequest {
#[serde(flatten)]
pub create_endpoint_variant_request: CreateEndpointVariantRequest,
#[doc = "The number of replicas on the cluster."]
#[serde(rename = "numReplicas", default, skip_serializing_if = "Option::is_none")]
pub num_replicas: Option<i32>,
#[doc = "Details of the data collection options specified."]
#[serde(rename = "dataCollection", default, skip_serializing_if = "Option::is_none")]
pub data_collection: Option<serde_json::Value>,
#[doc = "The name of the compute resource."]
#[serde(rename = "computeName", default, skip_serializing_if = "Option::is_none")]
pub compute_name: Option<String>,
#[doc = "Whether or not Application Insights is enabled."]
#[serde(rename = "appInsightsEnabled", default, skip_serializing_if = "Option::is_none")]
pub app_insights_enabled: Option<bool>,
#[doc = "The auto scaler properties."]
#[serde(rename = "autoScaler", default, skip_serializing_if = "Option::is_none")]
pub auto_scaler: Option<serde_json::Value>,
#[doc = "The resource requirements for the container (cpu and memory)."]
#[serde(rename = "containerResourceRequirements", default, skip_serializing_if = "Option::is_none")]
pub container_resource_requirements: Option<ContainerResourceRequirements>,
#[doc = "The maximum number of concurrent requests per container."]
#[serde(rename = "maxConcurrentRequestsPerContainer", default, skip_serializing_if = "Option::is_none")]
pub max_concurrent_requests_per_container: Option<i32>,
#[doc = "Maximum time a request will wait in the queue (in milliseconds). After this time, the service will return 503 (Service Unavailable)"]
#[serde(rename = "maxQueueWaitMs", default, skip_serializing_if = "Option::is_none")]
pub max_queue_wait_ms: Option<i32>,
#[doc = "Kubernetes namespace for the service."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub namespace: Option<String>,
#[doc = "The scoring timeout in milliseconds."]
#[serde(rename = "scoringTimeoutMs", default, skip_serializing_if = "Option::is_none")]
pub scoring_timeout_ms: Option<i32>,
#[doc = "Whether or not authentication is enabled."]
#[serde(rename = "authEnabled", default, skip_serializing_if = "Option::is_none")]
pub auth_enabled: Option<bool>,
#[doc = "The liveness probe requirements."]
#[serde(rename = "livenessProbeRequirements", default, skip_serializing_if = "Option::is_none")]
pub liveness_probe_requirements: Option<serde_json::Value>,
#[doc = "Whether or not AAD authentication is enabled."]
#[serde(rename = "aadAuthEnabled", default, skip_serializing_if = "Option::is_none")]
pub aad_auth_enabled: Option<bool>,
}
impl AksServiceCreateRequest {
pub fn new(create_endpoint_variant_request: CreateEndpointVariantRequest) -> Self {
Self {
create_endpoint_variant_request,
num_replicas: None,
data_collection: None,
compute_name: None,
app_insights_enabled: None,
auto_scaler: None,
container_resource_requirements: None,
max_concurrent_requests_per_container: None,
max_queue_wait_ms: None,
namespace: None,
scoring_timeout_ms: None,
auth_enabled: None,
liveness_probe_requirements: None,
aad_auth_enabled: None,
}
}
}
#[doc = "The response for an AKS service."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AksServiceResponse {
#[serde(flatten)]
pub aks_variant_response: AksVariantResponse,
#[doc = "The list of models."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub models: Vec<Model>,
#[doc = "The resource requirements for the container (cpu and memory)."]
#[serde(rename = "containerResourceRequirements", default, skip_serializing_if = "Option::is_none")]
pub container_resource_requirements: Option<ContainerResourceRequirements>,
#[doc = "The maximum number of concurrent requests per container."]
#[serde(rename = "maxConcurrentRequestsPerContainer", default, skip_serializing_if = "Option::is_none")]
pub max_concurrent_requests_per_container: Option<i32>,
#[doc = "Maximum time a request will wait in the queue (in milliseconds). After this time, the service will return 503 (Service Unavailable)"]
#[serde(rename = "maxQueueWaitMs", default, skip_serializing_if = "Option::is_none")]
pub max_queue_wait_ms: Option<i32>,
#[doc = "The name of the compute resource."]
#[serde(rename = "computeName", default, skip_serializing_if = "Option::is_none")]
pub compute_name: Option<String>,
#[doc = "The Kubernetes namespace of the deployment."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub namespace: Option<String>,
#[doc = "The number of replicas on the cluster."]
#[serde(rename = "numReplicas", default, skip_serializing_if = "Option::is_none")]
pub num_replicas: Option<i32>,
#[doc = "Details of the data collection options specified."]
#[serde(rename = "dataCollection", default, skip_serializing_if = "Option::is_none")]
pub data_collection: Option<serde_json::Value>,
#[doc = "Whether or not Application Insights is enabled."]
#[serde(rename = "appInsightsEnabled", default, skip_serializing_if = "Option::is_none")]
pub app_insights_enabled: Option<bool>,
#[doc = "The auto scaler properties."]
#[serde(rename = "autoScaler", default, skip_serializing_if = "Option::is_none")]
pub auto_scaler: Option<serde_json::Value>,
#[doc = "The Uri for sending scoring requests."]
#[serde(rename = "scoringUri", default, skip_serializing_if = "Option::is_none")]
pub scoring_uri: Option<String>,
#[doc = "The deployment status."]
#[serde(rename = "deploymentStatus", default, skip_serializing_if = "Option::is_none")]
pub deployment_status: Option<serde_json::Value>,
#[doc = "The scoring timeout in milliseconds."]
#[serde(rename = "scoringTimeoutMs", default, skip_serializing_if = "Option::is_none")]
pub scoring_timeout_ms: Option<i32>,
#[doc = "The liveness probe requirements."]
#[serde(rename = "livenessProbeRequirements", default, skip_serializing_if = "Option::is_none")]
pub liveness_probe_requirements: Option<serde_json::Value>,
#[doc = "Whether or not authentication is enabled."]
#[serde(rename = "authEnabled", default, skip_serializing_if = "Option::is_none")]
pub auth_enabled: Option<bool>,
#[doc = "Whether or not AAD authentication is enabled."]
#[serde(rename = "aadAuthEnabled", default, skip_serializing_if = "Option::is_none")]
pub aad_auth_enabled: Option<bool>,
#[doc = "The Uri for sending swagger requests."]
#[serde(rename = "swaggerUri", default, skip_serializing_if = "Option::is_none")]
pub swagger_uri: Option<String>,
#[doc = "Details on the models and configurations."]
#[serde(rename = "modelConfigMap", default, skip_serializing_if = "Option::is_none")]
pub model_config_map: Option<serde_json::Value>,
#[doc = "The Environment, models and assets used for inferencing."]
#[serde(rename = "environmentImageRequest", default, skip_serializing_if = "Option::is_none")]
pub environment_image_request: Option<serde_json::Value>,
}
impl AksServiceResponse {
pub fn new(aks_variant_response: AksVariantResponse) -> Self {
Self {
aks_variant_response,
models: Vec::new(),
container_resource_requirements: None,
max_concurrent_requests_per_container: None,
max_queue_wait_ms: None,
compute_name: None,
namespace: None,
num_replicas: None,
data_collection: None,
app_insights_enabled: None,
auto_scaler: None,
scoring_uri: None,
deployment_status: None,
scoring_timeout_ms: None,
liveness_probe_requirements: None,
auth_enabled: None,
aad_auth_enabled: None,
swagger_uri: None,
model_config_map: None,
environment_image_request: None,
}
}
}
#[doc = "The response for an AKS variant."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AksVariantResponse {
#[serde(flatten)]
pub service_response_base: ServiceResponseBase,
#[doc = "Is this the default variant."]
#[serde(rename = "isDefault", default, skip_serializing_if = "Option::is_none")]
pub is_default: Option<bool>,
#[doc = "The amount of traffic variant receives."]
#[serde(rename = "trafficPercentile", default, skip_serializing_if = "Option::is_none")]
pub traffic_percentile: Option<f32>,
#[doc = "The type of the variant."]
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<aks_variant_response::Type>,
}
impl AksVariantResponse {
pub fn new(service_response_base: ServiceResponseBase) -> Self {
Self {
service_response_base,
is_default: None,
traffic_percentile: None,
type_: None,
}
}
}
pub mod aks_variant_response {
use super::*;
#[doc = "The type of the variant."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Control,
Treatment,
}
}
#[doc = "Secrets related to a Machine Learning compute based on AKS."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AksComputeSecrets {
#[serde(flatten)]
pub compute_secrets: ComputeSecrets,
#[doc = "Content of kubeconfig file that can be used to connect to the Kubernetes cluster."]
#[serde(rename = "userKubeConfig", default, skip_serializing_if = "Option::is_none")]
pub user_kube_config: Option<String>,
#[doc = "Content of kubeconfig file that can be used to connect to the Kubernetes cluster."]
#[serde(rename = "adminKubeConfig", default, skip_serializing_if = "Option::is_none")]
pub admin_kube_config: Option<String>,
#[doc = "Image registry pull secret."]
#[serde(rename = "imagePullSecretName", default, skip_serializing_if = "Option::is_none")]
pub image_pull_secret_name: Option<String>,
}
impl AksComputeSecrets {
pub fn new(compute_secrets: ComputeSecrets) -> Self {
Self {
compute_secrets,
user_kube_config: None,
admin_kube_config: None,
image_pull_secret_name: None,
}
}
}
#[doc = "Advance configuration for AKS networking"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AksNetworkingConfiguration {
#[doc = "Virtual network subnet resource ID the compute nodes belong to"]
#[serde(rename = "subnetId", default, skip_serializing_if = "Option::is_none")]
pub subnet_id: Option<String>,
#[doc = "A CIDR notation IP range from which to assign service cluster IPs. It must not overlap with any Subnet IP ranges."]
#[serde(rename = "serviceCidr", default, skip_serializing_if = "Option::is_none")]
pub service_cidr: Option<String>,
#[doc = "An IP address assigned to the Kubernetes DNS service. It must be within the Kubernetes service address range specified in serviceCidr."]
#[serde(rename = "dnsServiceIP", default, skip_serializing_if = "Option::is_none")]
pub dns_service_ip: Option<String>,
#[doc = "A CIDR notation IP range assigned to the Docker bridge network. It must not overlap with any Subnet IP ranges or the Kubernetes service address range."]
#[serde(rename = "dockerBridgeCidr", default, skip_serializing_if = "Option::is_none")]
pub docker_bridge_cidr: Option<String>,
}
impl AksNetworkingConfiguration {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "An Azure Machine Learning compute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmlCompute {
#[serde(flatten)]
pub compute: Compute,
#[doc = "AML Compute properties"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<aml_compute::Properties>,
}
impl AmlCompute {
pub fn new(compute: Compute) -> Self {
Self { compute, properties: None }
}
}
pub mod aml_compute {
use super::*;
#[doc = "AML Compute properties"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Properties {
#[doc = "Compute OS Type"]
#[serde(rename = "osType", default, skip_serializing_if = "Option::is_none")]
pub os_type: Option<properties::OsType>,
#[doc = "Virtual Machine Size"]
#[serde(rename = "vmSize", default, skip_serializing_if = "Option::is_none")]
pub vm_size: Option<String>,
#[doc = "Virtual Machine priority"]
#[serde(rename = "vmPriority", default, skip_serializing_if = "Option::is_none")]
pub vm_priority: Option<properties::VmPriority>,
#[doc = "Virtual Machine image for Windows AML Compute"]
#[serde(rename = "virtualMachineImage", default, skip_serializing_if = "Option::is_none")]
pub virtual_machine_image: Option<VirtualMachineImage>,
#[doc = "Network is isolated or not"]
#[serde(rename = "isolatedNetwork", default, skip_serializing_if = "Option::is_none")]
pub isolated_network: Option<bool>,
#[doc = "scale settings for AML Compute"]
#[serde(rename = "scaleSettings", default, skip_serializing_if = "Option::is_none")]
pub scale_settings: Option<ScaleSettings>,
#[doc = "Settings for user account that gets created on each on the nodes of a compute."]
#[serde(rename = "userAccountCredentials", default, skip_serializing_if = "Option::is_none")]
pub user_account_credentials: Option<UserAccountCredentials>,
#[doc = "Represents a resource ID. For example, for a subnet, it is the resource URL for the subnet."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub subnet: Option<ResourceId>,
#[doc = "State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on all nodes of the cluster. Enabled - Indicates that the public ssh port is open on all nodes of the cluster. NotSpecified - Indicates that the public ssh port is closed on all nodes of the cluster if VNet is defined, else is open all public nodes. It can be default only during cluster creation time, after creation it will be either enabled or disabled."]
#[serde(rename = "remoteLoginPortPublicAccess", default, skip_serializing_if = "Option::is_none")]
pub remote_login_port_public_access: Option<properties::RemoteLoginPortPublicAccess>,
#[doc = "Allocation state of the compute. Possible values are: steady - Indicates that the compute is not resizing. There are no changes to the number of compute nodes in the compute in progress. A compute enters this state when it is created and when no operations are being performed on the compute to change the number of compute nodes. resizing - Indicates that the compute is resizing; that is, compute nodes are being added to or removed from the compute."]
#[serde(rename = "allocationState", default, skip_serializing_if = "Option::is_none")]
pub allocation_state: Option<properties::AllocationState>,
#[doc = "The time at which the compute entered its current allocation state."]
#[serde(rename = "allocationStateTransitionTime", default, skip_serializing_if = "Option::is_none")]
pub allocation_state_transition_time: Option<String>,
#[doc = "Collection of errors encountered by various compute nodes during node setup."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub errors: Vec<MachineLearningServiceError>,
#[doc = "The number of compute nodes currently assigned to the compute."]
#[serde(rename = "currentNodeCount", default, skip_serializing_if = "Option::is_none")]
pub current_node_count: Option<i32>,
#[doc = "The target number of compute nodes for the compute. If the allocationState is resizing, this property denotes the target node count for the ongoing resize operation. If the allocationState is steady, this property denotes the target node count for the previous resize operation."]
#[serde(rename = "targetNodeCount", default, skip_serializing_if = "Option::is_none")]
pub target_node_count: Option<i32>,
#[doc = "Counts of various compute node states on the amlCompute."]
#[serde(rename = "nodeStateCounts", default, skip_serializing_if = "Option::is_none")]
pub node_state_counts: Option<NodeStateCounts>,
#[doc = "Enable or disable node public IP address provisioning. Possible values are: Possible values are: true - Indicates that the compute nodes will have public IPs provisioned. false - Indicates that the compute nodes will have a private endpoint and no public IPs."]
#[serde(rename = "enableNodePublicIp", default, skip_serializing_if = "Option::is_none")]
pub enable_node_public_ip: Option<bool>,
}
impl Properties {
pub fn new() -> Self {
Self::default()
}
}
pub mod properties {
use super::*;
#[doc = "Compute OS Type"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Linux,
Windows,
}
impl Default for OsType {
fn default() -> Self {
Self::Linux
}
}
#[doc = "Virtual Machine priority"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum VmPriority {
Dedicated,
LowPriority,
}
#[doc = "State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on all nodes of the cluster. Enabled - Indicates that the public ssh port is open on all nodes of the cluster. NotSpecified - Indicates that the public ssh port is closed on all nodes of the cluster if VNet is defined, else is open all public nodes. It can be default only during cluster creation time, after creation it will be either enabled or disabled."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum RemoteLoginPortPublicAccess {
Enabled,
Disabled,
NotSpecified,
}
impl Default for RemoteLoginPortPublicAccess {
fn default() -> Self {
Self::NotSpecified
}
}
#[doc = "Allocation state of the compute. Possible values are: steady - Indicates that the compute is not resizing. There are no changes to the number of compute nodes in the compute in progress. A compute enters this state when it is created and when no operations are being performed on the compute to change the number of compute nodes. resizing - Indicates that the compute is resizing; that is, compute nodes are being added to or removed from the compute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AllocationState {
Steady,
Resizing,
}
}
}
#[doc = "Compute node information related to a AmlCompute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AmlComputeNodeInformation {
#[doc = "ID of the compute node."]
#[serde(rename = "nodeId", default, skip_serializing_if = "Option::is_none")]
pub node_id: Option<String>,
#[doc = "Private IP address of the compute node."]
#[serde(rename = "privateIpAddress", default, skip_serializing_if = "Option::is_none")]
pub private_ip_address: Option<String>,
#[doc = "Public IP address of the compute node."]
#[serde(rename = "publicIpAddress", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address: Option<String>,
#[doc = "SSH port number of the node."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub port: Option<f64>,
#[doc = "State of the compute node. Values are idle, running, preparing, unusable, leaving and preempted."]
#[serde(rename = "nodeState", default, skip_serializing_if = "Option::is_none")]
pub node_state: Option<aml_compute_node_information::NodeState>,
#[doc = "ID of the Experiment running on the node, if any else null."]
#[serde(rename = "runId", default, skip_serializing_if = "Option::is_none")]
pub run_id: Option<String>,
}
impl AmlComputeNodeInformation {
pub fn new() -> Self {
Self::default()
}
}
pub mod aml_compute_node_information {
use super::*;
#[doc = "State of the compute node. Values are idle, running, preparing, unusable, leaving and preempted."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum NodeState {
#[serde(rename = "idle")]
Idle,
#[serde(rename = "running")]
Running,
#[serde(rename = "preparing")]
Preparing,
#[serde(rename = "unusable")]
Unusable,
#[serde(rename = "leaving")]
Leaving,
#[serde(rename = "preempted")]
Preempted,
}
}
#[doc = "Compute node information related to a AmlCompute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AmlComputeNodesInformation {
#[serde(flatten)]
pub compute_nodes_information: ComputeNodesInformation,
#[doc = "The collection of returned AmlCompute nodes details."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub nodes: Vec<AmlComputeNodeInformation>,
#[doc = "The continuation token."]
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl AmlComputeNodesInformation {
pub fn new(compute_nodes_information: ComputeNodesInformation) -> Self {
Self {
compute_nodes_information,
nodes: Vec::new(),
next_link: None,
}
}
}
#[doc = "Features enabled for a workspace"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AmlUserFeature {
#[doc = "Specifies the feature ID"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[doc = "Specifies the feature name "]
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[doc = "Describes the feature for user experience"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
impl AmlUserFeature {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "A user that can be assigned to a compute instance."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AssignedUser {
#[doc = "User’s AAD Object Id."]
#[serde(rename = "objectId")]
pub object_id: String,
#[doc = "User’s AAD Tenant Id."]
#[serde(rename = "tenantId")]
pub tenant_id: String,
}
impl AssignedUser {
pub fn new(object_id: String, tenant_id: String) -> Self {
Self { object_id, tenant_id }
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AuthKeys {
#[doc = "The primary key."]
#[serde(rename = "primaryKey", default, skip_serializing_if = "Option::is_none")]
pub primary_key: Option<String>,
#[doc = "The secondary key."]
#[serde(rename = "secondaryKey", default, skip_serializing_if = "Option::is_none")]
pub secondary_key: Option<String>,
}
impl AuthKeys {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Auto pause properties"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AutoPauseProperties {
#[serde(rename = "delayInMinutes", default, skip_serializing_if = "Option::is_none")]
pub delay_in_minutes: Option<i32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
}
impl AutoPauseProperties {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Auto scale properties"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AutoScaleProperties {
#[serde(rename = "minNodeCount", default, skip_serializing_if = "Option::is_none")]
pub min_node_count: Option<i32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(rename = "maxNodeCount", default, skip_serializing_if = "Option::is_none")]
pub max_node_count: Option<i32>,
}
impl AutoScaleProperties {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The Auto Scaler properties."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct AutoScaler {
#[doc = "Option to enable/disable auto scaling."]
#[serde(rename = "autoscaleEnabled", default, skip_serializing_if = "Option::is_none")]
pub autoscale_enabled: Option<bool>,
#[doc = "The minimum number of replicas to scale down to."]
#[serde(rename = "minReplicas", default, skip_serializing_if = "Option::is_none")]
pub min_replicas: Option<i32>,
#[doc = "The maximum number of replicas in the cluster."]
#[serde(rename = "maxReplicas", default, skip_serializing_if = "Option::is_none")]
pub max_replicas: Option<i32>,
#[doc = "The target utilization percentage to use for determining whether to scale the cluster."]
#[serde(rename = "targetUtilization", default, skip_serializing_if = "Option::is_none")]
pub target_utilization: Option<i32>,
#[doc = "The amount of seconds to wait between auto scale updates."]
#[serde(rename = "refreshPeriodInSeconds", default, skip_serializing_if = "Option::is_none")]
pub refresh_period_in_seconds: Option<i32>,
}
impl AutoScaler {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "AmlCompute update parameters."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ClusterUpdateParameters {
#[doc = "The properties of a amlCompute that need to be updated."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ClusterUpdateProperties>,
}
impl ClusterUpdateParameters {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The properties of a amlCompute that need to be updated."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ClusterUpdateProperties {
#[doc = "scale settings for AML Compute"]
#[serde(rename = "scaleSettings", default, skip_serializing_if = "Option::is_none")]
pub scale_settings: Option<ScaleSettings>,
}
impl ClusterUpdateProperties {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Machine Learning compute object."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Compute {
#[doc = "The type of compute"]
#[serde(rename = "computeType")]
pub compute_type: ComputeType,
#[doc = "Location for the underlying compute"]
#[serde(rename = "computeLocation", default, skip_serializing_if = "Option::is_none")]
pub compute_location: Option<String>,
#[doc = "The provision state of the cluster. Valid values are Unknown, Updating, Provisioning, Succeeded, and Failed."]
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<compute::ProvisioningState>,
#[doc = "The description of the Machine Learning compute."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[doc = "The time at which the compute was created."]
#[serde(rename = "createdOn", default, skip_serializing_if = "Option::is_none")]
pub created_on: Option<String>,
#[doc = "The time at which the compute was last modified."]
#[serde(rename = "modifiedOn", default, skip_serializing_if = "Option::is_none")]
pub modified_on: Option<String>,
#[doc = "ARM resource id of the underlying compute"]
#[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
pub resource_id: Option<String>,
#[doc = "Errors during provisioning"]
#[serde(rename = "provisioningErrors", default, skip_serializing_if = "Vec::is_empty")]
pub provisioning_errors: Vec<MachineLearningServiceError>,
#[doc = "Indicating whether the compute was provisioned by user and brought from outside if true, or machine learning service provisioned it if false."]
#[serde(rename = "isAttachedCompute", default, skip_serializing_if = "Option::is_none")]
pub is_attached_compute: Option<bool>,
#[doc = "Opt-out of local authentication and ensure customers can use only MSI and AAD exclusively for authentication."]
#[serde(rename = "disableLocalAuth", default, skip_serializing_if = "Option::is_none")]
pub disable_local_auth: Option<bool>,
}
impl Compute {
pub fn new(compute_type: ComputeType) -> Self {
Self {
compute_type,
compute_location: None,
provisioning_state: None,
description: None,
created_on: None,
modified_on: None,
resource_id: None,
provisioning_errors: Vec::new(),
is_attached_compute: None,
disable_local_auth: None,
}
}
}
pub mod compute {
use super::*;
#[doc = "The provision state of the cluster. Valid values are Unknown, Updating, Provisioning, Succeeded, and Failed."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProvisioningState {
Unknown,
Updating,
Creating,
Deleting,
Succeeded,
Failed,
Canceled,
}
}
#[doc = "An Azure Machine Learning compute instance."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeInstance {
#[serde(flatten)]
pub compute: Compute,
#[doc = "Compute Instance properties"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<compute_instance::Properties>,
}
impl ComputeInstance {
pub fn new(compute: Compute) -> Self {
Self { compute, properties: None }
}
}
pub mod compute_instance {
use super::*;
#[doc = "Compute Instance properties"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Properties {
#[doc = "Virtual Machine Size"]
#[serde(rename = "vmSize", default, skip_serializing_if = "Option::is_none")]
pub vm_size: Option<String>,
#[doc = "Represents a resource ID. For example, for a subnet, it is the resource URL for the subnet."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub subnet: Option<ResourceId>,
#[doc = "Policy for sharing applications on this compute instance among users of parent workspace. If Personal, only the creator can access applications on this compute instance. When Shared, any workspace user can access applications on this instance depending on his/her assigned role."]
#[serde(rename = "applicationSharingPolicy", default, skip_serializing_if = "Option::is_none")]
pub application_sharing_policy: Option<properties::ApplicationSharingPolicy>,
#[doc = "Specifies policy and settings for SSH access."]
#[serde(rename = "sshSettings", default, skip_serializing_if = "Option::is_none")]
pub ssh_settings: Option<ComputeInstanceSshSettings>,
#[doc = "Defines all connectivity endpoints and properties for an ComputeInstance."]
#[serde(rename = "connectivityEndpoints", default, skip_serializing_if = "Option::is_none")]
pub connectivity_endpoints: Option<ComputeInstanceConnectivityEndpoints>,
#[doc = "Describes available applications and their endpoints on this ComputeInstance."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub applications: Vec<ComputeInstanceApplication>,
#[doc = "Describes information on user who created this ComputeInstance."]
#[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
pub created_by: Option<ComputeInstanceCreatedBy>,
#[doc = "Collection of errors encountered on this ComputeInstance."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub errors: Vec<MachineLearningServiceError>,
#[doc = "Current state of an ComputeInstance."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub state: Option<ComputeInstanceState>,
#[doc = "The Compute Instance Authorization type. Available values are personal (default)."]
#[serde(rename = "computeInstanceAuthorizationType", default, skip_serializing_if = "Option::is_none")]
pub compute_instance_authorization_type: Option<properties::ComputeInstanceAuthorizationType>,
#[doc = "Settings for a personal compute instance."]
#[serde(rename = "personalComputeInstanceSettings", default, skip_serializing_if = "Option::is_none")]
pub personal_compute_instance_settings: Option<PersonalComputeInstanceSettings>,
#[doc = "Details of customized scripts to execute for setting up the cluster."]
#[serde(rename = "setupScripts", default, skip_serializing_if = "Option::is_none")]
pub setup_scripts: Option<SetupScripts>,
#[doc = "The last operation on ComputeInstance."]
#[serde(rename = "lastOperation", default, skip_serializing_if = "Option::is_none")]
pub last_operation: Option<ComputeInstanceLastOperation>,
}
impl Properties {
pub fn new() -> Self {
Self::default()
}
}
pub mod properties {
use super::*;
#[doc = "Policy for sharing applications on this compute instance among users of parent workspace. If Personal, only the creator can access applications on this compute instance. When Shared, any workspace user can access applications on this instance depending on his/her assigned role."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ApplicationSharingPolicy {
Personal,
Shared,
}
impl Default for ApplicationSharingPolicy {
fn default() -> Self {
Self::Shared
}
}
#[doc = "The Compute Instance Authorization type. Available values are personal (default)."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ComputeInstanceAuthorizationType {
#[serde(rename = "personal")]
Personal,
}
impl Default for ComputeInstanceAuthorizationType {
fn default() -> Self {
Self::Personal
}
}
}
}
#[doc = "Defines an Aml Instance application and its connectivity endpoint URI."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ComputeInstanceApplication {
#[doc = "Name of the ComputeInstance application."]
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[doc = "Application' endpoint URI."]
#[serde(rename = "endpointUri", default, skip_serializing_if = "Option::is_none")]
pub endpoint_uri: Option<String>,
}
impl ComputeInstanceApplication {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Defines all connectivity endpoints and properties for an ComputeInstance."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ComputeInstanceConnectivityEndpoints {
#[doc = "Public IP Address of this ComputeInstance."]
#[serde(rename = "publicIpAddress", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address: Option<String>,
#[doc = "Private IP Address of this ComputeInstance (local to the VNET in which the compute instance is deployed)."]
#[serde(rename = "privateIpAddress", default, skip_serializing_if = "Option::is_none")]
pub private_ip_address: Option<String>,
}
impl ComputeInstanceConnectivityEndpoints {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Describes information on user who created this ComputeInstance."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ComputeInstanceCreatedBy {
#[doc = "Name of the user."]
#[serde(rename = "userName", default, skip_serializing_if = "Option::is_none")]
pub user_name: Option<String>,
#[doc = "Uniquely identifies user' Azure Active Directory organization."]
#[serde(rename = "userOrgId", default, skip_serializing_if = "Option::is_none")]
pub user_org_id: Option<String>,
#[doc = "Uniquely identifies the user within his/her organization."]
#[serde(rename = "userId", default, skip_serializing_if = "Option::is_none")]
pub user_id: Option<String>,
}
impl ComputeInstanceCreatedBy {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The last operation on ComputeInstance."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ComputeInstanceLastOperation {
#[doc = "Name of the last operation."]
#[serde(rename = "operationName", default, skip_serializing_if = "Option::is_none")]
pub operation_name: Option<compute_instance_last_operation::OperationName>,
#[doc = "Time of the last operation."]
#[serde(rename = "operationTime", default, skip_serializing_if = "Option::is_none")]
pub operation_time: Option<String>,
#[doc = "Operation status."]
#[serde(rename = "operationStatus", default, skip_serializing_if = "Option::is_none")]
pub operation_status: Option<compute_instance_last_operation::OperationStatus>,
}
impl ComputeInstanceLastOperation {
pub fn new() -> Self {
Self::default()
}
}
pub mod compute_instance_last_operation {
use super::*;
#[doc = "Name of the last operation."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OperationName {
Create,
Start,
Stop,
Restart,
Reimage,
Delete,
}
#[doc = "Operation status."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OperationStatus {
InProgress,
Succeeded,
CreateFailed,
StartFailed,
StopFailed,
RestartFailed,
ReimageFailed,
DeleteFailed,
}
}
#[doc = "Specifies policy and settings for SSH access."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ComputeInstanceSshSettings {
#[doc = "State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the public ssh port is open and accessible according to the VNet/subnet policy if applicable."]
#[serde(rename = "sshPublicAccess", default, skip_serializing_if = "Option::is_none")]
pub ssh_public_access: Option<compute_instance_ssh_settings::SshPublicAccess>,
#[doc = "Describes the admin user name."]
#[serde(rename = "adminUserName", default, skip_serializing_if = "Option::is_none")]
pub admin_user_name: Option<String>,
#[doc = "Describes the port for connecting through SSH."]
#[serde(rename = "sshPort", default, skip_serializing_if = "Option::is_none")]
pub ssh_port: Option<i32>,
#[doc = "Specifies the SSH rsa public key file as a string. Use \"ssh-keygen -t rsa -b 2048\" to generate your SSH key pairs."]
#[serde(rename = "adminPublicKey", default, skip_serializing_if = "Option::is_none")]
pub admin_public_key: Option<String>,
}
impl ComputeInstanceSshSettings {
pub fn new() -> Self {
Self::default()
}
}
pub mod compute_instance_ssh_settings {
use super::*;
#[doc = "State of the public SSH port. Possible values are: Disabled - Indicates that the public ssh port is closed on this instance. Enabled - Indicates that the public ssh port is open and accessible according to the VNet/subnet policy if applicable."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SshPublicAccess {
Enabled,
Disabled,
}
impl Default for SshPublicAccess {
fn default() -> Self {
Self::Disabled
}
}
}
#[doc = "Current state of an ComputeInstance."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ComputeInstanceState {
Creating,
CreateFailed,
Deleting,
Running,
Restarting,
JobRunning,
SettingUp,
SetupFailed,
Starting,
Stopped,
Stopping,
UserSettingUp,
UserSetupFailed,
Unknown,
Unusable,
}
#[doc = "Compute nodes information related to a Machine Learning compute. Might differ for every type of compute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeNodesInformation {
#[doc = "The type of compute"]
#[serde(rename = "computeType")]
pub compute_type: ComputeType,
#[doc = "The continuation token."]
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl ComputeNodesInformation {
pub fn new(compute_type: ComputeType) -> Self {
Self {
compute_type,
next_link: None,
}
}
}
#[doc = "Machine Learning compute object wrapped into ARM resource envelope."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ComputeResource {
#[serde(flatten)]
pub resource: Resource,
#[doc = "Machine Learning compute object."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<Compute>,
}
impl ComputeResource {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Secrets related to a Machine Learning compute. Might differ for every type of compute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeSecrets {
#[doc = "The type of compute"]
#[serde(rename = "computeType")]
pub compute_type: ComputeType,
}
impl ComputeSecrets {
pub fn new(compute_type: ComputeType) -> Self {
Self { compute_type }
}
}
#[doc = "The type of compute"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ComputeType {
#[serde(rename = "AKS")]
Aks,
AmlCompute,
ComputeInstance,
DataFactory,
VirtualMachine,
#[serde(rename = "HDInsight")]
HdInsight,
Databricks,
DataLakeAnalytics,
SynapseSpark,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ContainerRegistry {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub address: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<String>,
}
impl ContainerRegistry {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ContainerRegistryResponse {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub address: Option<String>,
}
impl ContainerRegistryResponse {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The resource requirements for the container (cpu and memory)."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ContainerResourceRequirements {
#[doc = "The minimum amount of CPU cores to be used by the container. More info:\nhttps://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub cpu: Option<f64>,
#[doc = "The maximum amount of CPU cores allowed to be used by the container. More info:\nhttps://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/"]
#[serde(rename = "cpuLimit", default, skip_serializing_if = "Option::is_none")]
pub cpu_limit: Option<f64>,
#[doc = "The minimum amount of memory (in GB) to be used by the container. More info:\nhttps://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/"]
#[serde(rename = "memoryInGB", default, skip_serializing_if = "Option::is_none")]
pub memory_in_gb: Option<f64>,
#[doc = "The maximum amount of memory (in GB) allowed to be used by the container. More info:\nhttps://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/"]
#[serde(rename = "memoryInGBLimit", default, skip_serializing_if = "Option::is_none")]
pub memory_in_gb_limit: Option<f64>,
#[doc = "The number of GPU cores in the container."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub gpu: Option<i32>,
#[doc = "The number of FPGA PCIE devices exposed to the container. Must be multiple of 2."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub fpga: Option<i32>,
}
impl ContainerResourceRequirements {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct CosmosDbSettings {
#[doc = "The throughput of the collections in cosmosdb database"]
#[serde(rename = "collectionsThroughput", default, skip_serializing_if = "Option::is_none")]
pub collections_throughput: Option<i32>,
}
impl CosmosDbSettings {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The Variant properties."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CreateEndpointVariantRequest {
#[serde(flatten)]
pub create_service_request: CreateServiceRequest,
#[doc = "Is this the default variant."]
#[serde(rename = "isDefault", default, skip_serializing_if = "Option::is_none")]
pub is_default: Option<bool>,
#[doc = "The amount of traffic variant receives."]
#[serde(rename = "trafficPercentile", default, skip_serializing_if = "Option::is_none")]
pub traffic_percentile: Option<f32>,
#[doc = "The type of the variant."]
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<create_endpoint_variant_request::Type>,
}
impl CreateEndpointVariantRequest {
pub fn new(create_service_request: CreateServiceRequest) -> Self {
Self {
create_service_request,
is_default: None,
traffic_percentile: None,
type_: None,
}
}
}
pub mod create_endpoint_variant_request {
use super::*;
#[doc = "The type of the variant."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Control,
Treatment,
}
}
#[doc = "The base class for creating a service."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CreateServiceRequest {
#[doc = "The description of the service."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[doc = "The service tag dictionary. Tags are mutable."]
#[serde(rename = "kvTags", default, skip_serializing_if = "Option::is_none")]
pub kv_tags: Option<serde_json::Value>,
#[doc = "The service properties dictionary. Properties are immutable."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<serde_json::Value>,
#[doc = "The authentication keys."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub keys: Option<serde_json::Value>,
#[doc = "The compute environment type for the service."]
#[serde(rename = "computeType")]
pub compute_type: create_service_request::ComputeType,
#[doc = "The Environment, models and assets needed for inferencing."]
#[serde(rename = "environmentImageRequest", default, skip_serializing_if = "Option::is_none")]
pub environment_image_request: Option<serde_json::Value>,
#[doc = "The name of the Azure location/region."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
}
impl CreateServiceRequest {
pub fn new(compute_type: create_service_request::ComputeType) -> Self {
Self {
description: None,
kv_tags: None,
properties: None,
keys: None,
compute_type,
environment_image_request: None,
location: None,
}
}
}
pub mod create_service_request {
use super::*;
#[doc = "The compute environment type for the service."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ComputeType {
#[serde(rename = "ACI")]
Aci,
#[serde(rename = "AKS")]
Aks,
}
}
#[doc = "A DataFactory compute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataFactory {
#[serde(flatten)]
pub compute: Compute,
}
impl DataFactory {
pub fn new(compute: Compute) -> Self {
Self { compute }
}
}
#[doc = "A DataLakeAnalytics compute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataLakeAnalytics {
#[serde(flatten)]
pub compute: Compute,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<data_lake_analytics::Properties>,
}
impl DataLakeAnalytics {
pub fn new(compute: Compute) -> Self {
Self { compute, properties: None }
}
}
pub mod data_lake_analytics {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Properties {
#[doc = "DataLake Store Account Name"]
#[serde(rename = "dataLakeStoreAccountName", default, skip_serializing_if = "Option::is_none")]
pub data_lake_store_account_name: Option<String>,
}
impl Properties {
pub fn new() -> Self {
Self::default()
}
}
}
#[doc = "A DataFactory compute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Databricks {
#[serde(flatten)]
pub compute: Compute,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<databricks::Properties>,
}
impl Databricks {
pub fn new(compute: Compute) -> Self {
Self { compute, properties: None }
}
}
pub mod databricks {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Properties {
#[doc = "Databricks access token"]
#[serde(rename = "databricksAccessToken", default, skip_serializing_if = "Option::is_none")]
pub databricks_access_token: Option<String>,
#[doc = "Workspace Url"]
#[serde(rename = "workspaceUrl", default, skip_serializing_if = "Option::is_none")]
pub workspace_url: Option<String>,
}
impl Properties {
pub fn new() -> Self {
Self::default()
}
}
}
#[doc = "Secrets related to a Machine Learning compute based on Databricks."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DatabricksComputeSecrets {
#[serde(flatten)]
pub compute_secrets: ComputeSecrets,
#[doc = "access token for databricks account."]
#[serde(rename = "databricksAccessToken", default, skip_serializing_if = "Option::is_none")]
pub databricks_access_token: Option<String>,
}
impl DatabricksComputeSecrets {
pub fn new(compute_secrets: ComputeSecrets) -> Self {
Self {
compute_secrets,
databricks_access_token: None,
}
}
}
#[doc = "The dataset reference object."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct DatasetReference {
#[doc = "The name of the dataset reference."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "The id of the dataset reference."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
}
impl DatasetReference {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionProperties {
#[doc = "vault base Url"]
#[serde(rename = "vaultBaseUrl")]
pub vault_base_url: String,
#[doc = "Encryption Key name"]
#[serde(rename = "keyName")]
pub key_name: String,
#[doc = "Encryption Key Version"]
#[serde(rename = "keyVersion")]
pub key_version: String,
}
impl EncryptionProperties {
pub fn new(vault_base_url: String, key_name: String, key_version: String) -> Self {
Self {
vault_base_url,
key_name,
key_version,
}
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionProperty {
#[doc = "Indicates whether or not the encryption is enabled for the workspace."]
pub status: encryption_property::Status,
#[doc = "Identity that will be used to access key vault for encryption at rest"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identity: Option<IdentityForCmk>,
#[serde(rename = "keyVaultProperties")]
pub key_vault_properties: KeyVaultProperties,
}
impl EncryptionProperty {
pub fn new(status: encryption_property::Status, key_vault_properties: KeyVaultProperties) -> Self {
Self {
status,
identity: None,
key_vault_properties,
}
}
}
pub mod encryption_property {
use super::*;
#[doc = "Indicates whether or not the encryption is enabled for the workspace."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Enabled,
Disabled,
}
}
#[doc = "Request to create a Docker image based on Environment."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EnvironmentImageRequest {
#[doc = "The name of the driver file."]
#[serde(rename = "driverProgram", default, skip_serializing_if = "Option::is_none")]
pub driver_program: Option<String>,
#[doc = "The list of assets."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub assets: Vec<ImageAsset>,
#[doc = "The list of model Ids."]
#[serde(rename = "modelIds", default, skip_serializing_if = "Vec::is_empty")]
pub model_ids: Vec<String>,
#[doc = "The list of models."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub models: Vec<Model>,
#[doc = "The details of the AZURE ML environment."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub environment: Option<serde_json::Value>,
#[doc = "The unique identifying details of the AZURE ML environment."]
#[serde(rename = "environmentReference", default, skip_serializing_if = "Option::is_none")]
pub environment_reference: Option<serde_json::Value>,
}
impl EnvironmentImageRequest {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Request to create a Docker image based on Environment."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EnvironmentImageResponse {
#[doc = "The name of the driver file."]
#[serde(rename = "driverProgram", default, skip_serializing_if = "Option::is_none")]
pub driver_program: Option<String>,
#[doc = "The list of assets."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub assets: Vec<ImageAsset>,
#[doc = "The list of model Ids."]
#[serde(rename = "modelIds", default, skip_serializing_if = "Vec::is_empty")]
pub model_ids: Vec<String>,
#[doc = "The list of models."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub models: Vec<Model>,
#[doc = "The details of the AZURE ML environment."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub environment: Option<serde_json::Value>,
#[doc = "The unique identifying details of the AZURE ML environment."]
#[serde(rename = "environmentReference", default, skip_serializing_if = "Option::is_none")]
pub environment_reference: Option<serde_json::Value>,
}
impl EnvironmentImageResponse {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct EnvironmentReference {
#[doc = "Name of the environment."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "Version of the environment."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
}
impl EnvironmentReference {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Error detail information."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetail {
#[doc = "Error code."]
pub code: String,
#[doc = "Error message."]
pub message: String,
}
impl ErrorDetail {
pub fn new(code: String, message: String) -> Self {
Self { code, message }
}
}
#[doc = "Error response information."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ErrorResponse {
#[doc = "Error code."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[doc = "Error message."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[doc = "The target of the particular error"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub target: Option<String>,
#[doc = "An array of error detail objects."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub details: Vec<ErrorDetail>,
}
impl ErrorResponse {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The estimated price info for using a VM of a particular OS type, tier, etc."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EstimatedVmPrice {
#[doc = "The price charged for using the VM."]
#[serde(rename = "retailPrice")]
pub retail_price: f64,
#[doc = "Operating system type used by the VM."]
#[serde(rename = "osType")]
pub os_type: estimated_vm_price::OsType,
#[doc = "The type of the VM."]
#[serde(rename = "vmTier")]
pub vm_tier: estimated_vm_price::VmTier,
}
impl EstimatedVmPrice {
pub fn new(retail_price: f64, os_type: estimated_vm_price::OsType, vm_tier: estimated_vm_price::VmTier) -> Self {
Self {
retail_price,
os_type,
vm_tier,
}
}
}
pub mod estimated_vm_price {
use super::*;
#[doc = "Operating system type used by the VM."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Linux,
Windows,
}
#[doc = "The type of the VM."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum VmTier {
Standard,
LowPriority,
Spot,
}
}
#[doc = "The estimated price info for using a VM."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EstimatedVmPrices {
#[doc = "Three lettered code specifying the currency of the VM price. Example: USD"]
#[serde(rename = "billingCurrency")]
pub billing_currency: estimated_vm_prices::BillingCurrency,
#[doc = "The unit of time measurement for the specified VM price. Example: OneHour"]
#[serde(rename = "unitOfMeasure")]
pub unit_of_measure: estimated_vm_prices::UnitOfMeasure,
#[doc = "The list of estimated prices for using a VM of a particular OS type, tier, etc."]
pub values: Vec<EstimatedVmPrice>,
}
impl EstimatedVmPrices {
pub fn new(
billing_currency: estimated_vm_prices::BillingCurrency,
unit_of_measure: estimated_vm_prices::UnitOfMeasure,
values: Vec<EstimatedVmPrice>,
) -> Self {
Self {
billing_currency,
unit_of_measure,
values,
}
}
}
pub mod estimated_vm_prices {
use super::*;
#[doc = "Three lettered code specifying the currency of the VM price. Example: USD"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum BillingCurrency {
#[serde(rename = "USD")]
Usd,
}
#[doc = "The unit of time measurement for the specified VM price. Example: OneHour"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum UnitOfMeasure {
OneHour,
}
}
#[doc = "A HDInsight compute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HdInsight {
#[serde(flatten)]
pub compute: Compute,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<hd_insight::Properties>,
}
impl HdInsight {
pub fn new(compute: Compute) -> Self {
Self { compute, properties: None }
}
}
pub mod hd_insight {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Properties {
#[doc = "Port open for ssh connections on the master node of the cluster."]
#[serde(rename = "sshPort", default, skip_serializing_if = "Option::is_none")]
pub ssh_port: Option<i32>,
#[doc = "Public IP address of the master node of the cluster."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub address: Option<String>,
#[doc = "Admin credentials for virtual machine"]
#[serde(rename = "administratorAccount", default, skip_serializing_if = "Option::is_none")]
pub administrator_account: Option<VirtualMachineSshCredentials>,
}
impl Properties {
pub fn new() -> Self {
Self::default()
}
}
}
#[doc = "Identity for the resource."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Identity {
#[doc = "The principal ID of resource identity."]
#[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
pub principal_id: Option<String>,
#[doc = "The tenant ID of resource."]
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[doc = "The identity type."]
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<identity::Type>,
#[doc = "dictionary containing all the user assigned identities, with resourceId of the UAI as key."]
#[serde(rename = "userAssignedIdentities", default, skip_serializing_if = "Option::is_none")]
pub user_assigned_identities: Option<UserAssignedIdentities>,
}
impl Identity {
pub fn new() -> Self {
Self::default()
}
}
pub mod identity {
use super::*;
#[doc = "The identity type."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
SystemAssigned,
#[serde(rename = "SystemAssigned,UserAssigned")]
SystemAssignedUserAssigned,
UserAssigned,
None,
}
}
#[doc = "Identity that will be used to access key vault for encryption at rest"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct IdentityForCmk {
#[doc = "The ArmId of the user assigned identity that will be used to access the customer managed key vault"]
#[serde(rename = "userAssignedIdentity", default, skip_serializing_if = "Option::is_none")]
pub user_assigned_identity: Option<String>,
}
impl IdentityForCmk {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The type of identity that creates/modifies resources"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IdentityType {
User,
Application,
ManagedIdentity,
Key,
}
#[doc = "An Image asset."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ImageAsset {
#[doc = "The Asset Id."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[doc = "The mime type."]
#[serde(rename = "mimeType", default, skip_serializing_if = "Option::is_none")]
pub mime_type: Option<String>,
#[doc = "The Url of the Asset."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub url: Option<String>,
#[doc = "Whether the Asset is unpacked."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub unpack: Option<bool>,
}
impl ImageAsset {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyVaultProperties {
#[doc = "The ArmId of the keyVault where the customer owned encryption key is present."]
#[serde(rename = "keyVaultArmId")]
pub key_vault_arm_id: String,
#[doc = "Key vault uri to access the encryption key."]
#[serde(rename = "keyIdentifier")]
pub key_identifier: String,
#[doc = "For future use - The client id of the identity which will be used to access key vault."]
#[serde(rename = "identityClientId", default, skip_serializing_if = "Option::is_none")]
pub identity_client_id: Option<String>,
}
impl KeyVaultProperties {
pub fn new(key_vault_arm_id: String, key_identifier: String) -> Self {
Self {
key_vault_arm_id,
key_identifier,
identity_client_id: None,
}
}
}
#[doc = "The List Aml user feature operation response."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ListAmlUserFeatureResult {
#[doc = "The list of AML user facing features."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<AmlUserFeature>,
#[doc = "The URI to fetch the next page of AML user features information. Call ListNext() with this to fetch the next page of AML user features information."]
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl ListAmlUserFeatureResult {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ListNotebookKeysResult {
#[serde(rename = "primaryAccessKey", default, skip_serializing_if = "Option::is_none")]
pub primary_access_key: Option<String>,
#[serde(rename = "secondaryAccessKey", default, skip_serializing_if = "Option::is_none")]
pub secondary_access_key: Option<String>,
}
impl ListNotebookKeysResult {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ListStorageAccountKeysResult {
#[serde(rename = "userStorageKey", default, skip_serializing_if = "Option::is_none")]
pub user_storage_key: Option<String>,
}
impl ListStorageAccountKeysResult {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The List Usages operation response."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ListUsagesResult {
#[doc = "The list of AML resource usages."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Usage>,
#[doc = "The URI to fetch the next page of AML resource usage information. Call ListNext() with this to fetch the next page of AML resource usage information."]
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl ListUsagesResult {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ListWorkspaceKeysResult {
#[serde(rename = "userStorageKey", default, skip_serializing_if = "Option::is_none")]
pub user_storage_key: Option<String>,
#[serde(rename = "userStorageResourceId", default, skip_serializing_if = "Option::is_none")]
pub user_storage_resource_id: Option<String>,
#[serde(rename = "appInsightsInstrumentationKey", default, skip_serializing_if = "Option::is_none")]
pub app_insights_instrumentation_key: Option<String>,
#[serde(rename = "containerRegistryCredentials", default, skip_serializing_if = "Option::is_none")]
pub container_registry_credentials: Option<RegistryListCredentialsResult>,
#[serde(rename = "notebookAccessKeys", default, skip_serializing_if = "Option::is_none")]
pub notebook_access_keys: Option<ListNotebookKeysResult>,
}
impl ListWorkspaceKeysResult {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The List WorkspaceQuotasByVMFamily operation response."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ListWorkspaceQuotas {
#[doc = "The list of Workspace Quotas by VM Family"]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<ResourceQuota>,
#[doc = "The URI to fetch the next page of workspace quota information by VM Family. Call ListNext() with this to fetch the next page of Workspace Quota information."]
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl ListWorkspaceQuotas {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The liveness probe requirements."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct LivenessProbeRequirements {
#[doc = "The number of failures to allow before returning an unhealthy status."]
#[serde(rename = "failureThreshold", default, skip_serializing_if = "Option::is_none")]
pub failure_threshold: Option<i32>,
#[doc = "The number of successful probes before returning a healthy status."]
#[serde(rename = "successThreshold", default, skip_serializing_if = "Option::is_none")]
pub success_threshold: Option<i32>,
#[doc = "The probe timeout in seconds."]
#[serde(rename = "timeoutSeconds", default, skip_serializing_if = "Option::is_none")]
pub timeout_seconds: Option<i32>,
#[doc = "The length of time between probes in seconds."]
#[serde(rename = "periodSeconds", default, skip_serializing_if = "Option::is_none")]
pub period_seconds: Option<i32>,
#[doc = "The delay before the first probe in seconds."]
#[serde(rename = "initialDelaySeconds", default, skip_serializing_if = "Option::is_none")]
pub initial_delay_seconds: Option<i32>,
}
impl LivenessProbeRequirements {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Wrapper for error response to follow ARM guidelines."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct MachineLearningServiceError {
#[doc = "Error response information."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ErrorResponse>,
}
impl MachineLearningServiceError {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "An Azure Machine Learning Model."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Model {
#[doc = "The Model Id."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[doc = "The Model name."]
pub name: String,
#[doc = "The Model framework."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub framework: Option<String>,
#[doc = "The Model framework version."]
#[serde(rename = "frameworkVersion", default, skip_serializing_if = "Option::is_none")]
pub framework_version: Option<String>,
#[doc = "The Model version assigned by Model Management Service."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<i64>,
#[doc = "The list of datasets associated with the model."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub datasets: Vec<DatasetReference>,
#[doc = "The URL of the Model. Usually a SAS URL."]
pub url: String,
#[doc = "The MIME type of Model content. For more details about MIME type, please open https://www.iana.org/assignments/media-types/media-types.xhtml"]
#[serde(rename = "mimeType")]
pub mime_type: String,
#[doc = "The Model description text."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[doc = "The Model creation time (UTC)."]
#[serde(rename = "createdTime", default, skip_serializing_if = "Option::is_none")]
pub created_time: Option<String>,
#[doc = "The Model last modified time (UTC)."]
#[serde(rename = "modifiedTime", default, skip_serializing_if = "Option::is_none")]
pub modified_time: Option<String>,
#[doc = "Indicates whether we need to unpack the Model during docker Image creation."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub unpack: Option<bool>,
#[doc = "The Parent Model Id."]
#[serde(rename = "parentModelId", default, skip_serializing_if = "Option::is_none")]
pub parent_model_id: Option<String>,
#[doc = "The RunId that created this model."]
#[serde(rename = "runId", default, skip_serializing_if = "Option::is_none")]
pub run_id: Option<String>,
#[doc = "The name of the experiment where this model was created."]
#[serde(rename = "experimentName", default, skip_serializing_if = "Option::is_none")]
pub experiment_name: Option<String>,
#[doc = "The Model tag dictionary. Items are mutable."]
#[serde(rename = "kvTags", default, skip_serializing_if = "Option::is_none")]
pub kv_tags: Option<serde_json::Value>,
#[doc = "The Model property dictionary. Properties are immutable."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<serde_json::Value>,
#[doc = "Models derived from this model"]
#[serde(rename = "derivedModelIds", default, skip_serializing_if = "Vec::is_empty")]
pub derived_model_ids: Vec<String>,
#[doc = "Sample Input Data for the Model. A reference to a dataset in the workspace in the format aml://dataset/{datasetId}"]
#[serde(rename = "sampleInputData", default, skip_serializing_if = "Option::is_none")]
pub sample_input_data: Option<String>,
#[doc = "Sample Output Data for the Model. A reference to a dataset in the workspace in the format aml://dataset/{datasetId}"]
#[serde(rename = "sampleOutputData", default, skip_serializing_if = "Option::is_none")]
pub sample_output_data: Option<String>,
#[doc = "The resource requirements for the container (cpu and memory)."]
#[serde(rename = "resourceRequirements", default, skip_serializing_if = "Option::is_none")]
pub resource_requirements: Option<ContainerResourceRequirements>,
}
impl Model {
pub fn new(name: String, url: String, mime_type: String) -> Self {
Self {
id: None,
name,
framework: None,
framework_version: None,
version: None,
datasets: Vec::new(),
url,
mime_type,
description: None,
created_time: None,
modified_time: None,
unpack: None,
parent_model_id: None,
run_id: None,
experiment_name: None,
kv_tags: None,
properties: None,
derived_model_ids: Vec::new(),
sample_input_data: None,
sample_output_data: None,
resource_requirements: None,
}
}
}
#[doc = "The Model data collection properties."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ModelDataCollection {
#[doc = "Option for enabling/disabling Event Hub."]
#[serde(rename = "eventHubEnabled", default, skip_serializing_if = "Option::is_none")]
pub event_hub_enabled: Option<bool>,
#[doc = "Option for enabling/disabling storage."]
#[serde(rename = "storageEnabled", default, skip_serializing_if = "Option::is_none")]
pub storage_enabled: Option<bool>,
}
impl ModelDataCollection {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ModelDockerSection {
#[doc = "Base image used for Docker-based runs. Mutually exclusive with BaseDockerfile."]
#[serde(rename = "baseImage", default, skip_serializing_if = "Option::is_none")]
pub base_image: Option<String>,
#[doc = "Base Dockerfile used for Docker-based runs. Mutually exclusive with BaseImage."]
#[serde(rename = "baseDockerfile", default, skip_serializing_if = "Option::is_none")]
pub base_dockerfile: Option<String>,
#[doc = "Image registry that contains the base image."]
#[serde(rename = "baseImageRegistry", default, skip_serializing_if = "Option::is_none")]
pub base_image_registry: Option<serde_json::Value>,
}
impl ModelDockerSection {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ModelDockerSectionResponse {
#[doc = "Base image used for Docker-based runs. Mutually exclusive with BaseDockerfile."]
#[serde(rename = "baseImage", default, skip_serializing_if = "Option::is_none")]
pub base_image: Option<String>,
#[doc = "Base Dockerfile used for Docker-based runs. Mutually exclusive with BaseImage."]
#[serde(rename = "baseDockerfile", default, skip_serializing_if = "Option::is_none")]
pub base_dockerfile: Option<String>,
#[doc = "Image registry that contains the base image."]
#[serde(rename = "baseImageRegistry", default, skip_serializing_if = "Option::is_none")]
pub base_image_registry: Option<serde_json::Value>,
}
impl ModelDockerSectionResponse {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ModelEnvironmentDefinition {
#[doc = "The name of the environment."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "The environment version."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
#[doc = "Settings for a Python environment."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub python: Option<serde_json::Value>,
#[doc = "Definition of environment variables to be defined in the environment."]
#[serde(rename = "environmentVariables", default, skip_serializing_if = "Option::is_none")]
pub environment_variables: Option<serde_json::Value>,
#[doc = "The definition of a Docker container."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub docker: Option<serde_json::Value>,
#[doc = "The configuration for a Spark environment."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub spark: Option<serde_json::Value>,
#[doc = "Settings for a R environment."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub r: Option<serde_json::Value>,
#[doc = "The inferencing stack version added to the image. To avoid adding an inferencing stack, do not set this value. Valid values: \"latest\"."]
#[serde(rename = "inferencingStackVersion", default, skip_serializing_if = "Option::is_none")]
pub inferencing_stack_version: Option<String>,
}
impl ModelEnvironmentDefinition {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ModelEnvironmentDefinitionResponse {
#[doc = "The name of the environment."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "The environment version."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
#[doc = "Settings for a Python environment."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub python: Option<serde_json::Value>,
#[doc = "Definition of environment variables to be defined in the environment."]
#[serde(rename = "environmentVariables", default, skip_serializing_if = "Option::is_none")]
pub environment_variables: Option<serde_json::Value>,
#[doc = "The definition of a Docker container."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub docker: Option<serde_json::Value>,
#[doc = "The configuration for a Spark environment."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub spark: Option<serde_json::Value>,
#[doc = "Settings for a R environment."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub r: Option<serde_json::Value>,
#[doc = "The inferencing stack version added to the image. To avoid adding an inferencing stack, do not set this value. Valid values: \"latest\"."]
#[serde(rename = "inferencingStackVersion", default, skip_serializing_if = "Option::is_none")]
pub inferencing_stack_version: Option<String>,
}
impl ModelEnvironmentDefinitionResponse {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ModelPythonSection {
#[doc = "The python interpreter path to use if an environment build is not required. The path specified gets used to call the user script."]
#[serde(rename = "interpreterPath", default, skip_serializing_if = "Option::is_none")]
pub interpreter_path: Option<String>,
#[doc = "True means that AzureML reuses an existing python environment; False means that AzureML will create a python environment based on the Conda dependencies specification."]
#[serde(rename = "userManagedDependencies", default, skip_serializing_if = "Option::is_none")]
pub user_managed_dependencies: Option<bool>,
#[doc = "A JObject containing Conda dependencies."]
#[serde(rename = "condaDependencies", default, skip_serializing_if = "Option::is_none")]
pub conda_dependencies: Option<serde_json::Value>,
#[serde(rename = "baseCondaEnvironment", default, skip_serializing_if = "Option::is_none")]
pub base_conda_environment: Option<String>,
}
impl ModelPythonSection {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ModelSparkSection {
#[doc = "The list of spark repositories."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub repositories: Vec<String>,
#[doc = "The Spark packages to use."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub packages: Vec<SparkMavenPackage>,
#[doc = "Whether to precache the packages."]
#[serde(rename = "precachePackages", default, skip_serializing_if = "Option::is_none")]
pub precache_packages: Option<bool>,
}
impl ModelSparkSection {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Counts of various compute node states on the amlCompute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct NodeStateCounts {
#[doc = "Number of compute nodes in idle state."]
#[serde(rename = "idleNodeCount", default, skip_serializing_if = "Option::is_none")]
pub idle_node_count: Option<i32>,
#[doc = "Number of compute nodes which are running jobs."]
#[serde(rename = "runningNodeCount", default, skip_serializing_if = "Option::is_none")]
pub running_node_count: Option<i32>,
#[doc = "Number of compute nodes which are being prepared."]
#[serde(rename = "preparingNodeCount", default, skip_serializing_if = "Option::is_none")]
pub preparing_node_count: Option<i32>,
#[doc = "Number of compute nodes which are in unusable state."]
#[serde(rename = "unusableNodeCount", default, skip_serializing_if = "Option::is_none")]
pub unusable_node_count: Option<i32>,
#[doc = "Number of compute nodes which are leaving the amlCompute."]
#[serde(rename = "leavingNodeCount", default, skip_serializing_if = "Option::is_none")]
pub leaving_node_count: Option<i32>,
#[doc = "Number of compute nodes which are in preempted state."]
#[serde(rename = "preemptedNodeCount", default, skip_serializing_if = "Option::is_none")]
pub preempted_node_count: Option<i32>,
}
impl NodeStateCounts {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct NotebookAccessTokenResult {
#[serde(rename = "notebookResourceId", default, skip_serializing_if = "Option::is_none")]
pub notebook_resource_id: Option<String>,
#[serde(rename = "hostName", default, skip_serializing_if = "Option::is_none")]
pub host_name: Option<String>,
#[serde(rename = "publicDns", default, skip_serializing_if = "Option::is_none")]
pub public_dns: Option<String>,
#[serde(rename = "accessToken", default, skip_serializing_if = "Option::is_none")]
pub access_token: Option<String>,
#[serde(rename = "tokenType", default, skip_serializing_if = "Option::is_none")]
pub token_type: Option<String>,
#[serde(rename = "expiresIn", default, skip_serializing_if = "Option::is_none")]
pub expires_in: Option<i32>,
#[serde(rename = "refreshToken", default, skip_serializing_if = "Option::is_none")]
pub refresh_token: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub scope: Option<String>,
}
impl NotebookAccessTokenResult {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct NotebookPreparationError {
#[serde(rename = "errorMessage", default, skip_serializing_if = "Option::is_none")]
pub error_message: Option<String>,
#[serde(rename = "statusCode", default, skip_serializing_if = "Option::is_none")]
pub status_code: Option<i32>,
}
impl NotebookPreparationError {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct NotebookResourceInfo {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub fqdn: Option<String>,
#[doc = "the data plane resourceId that used to initialize notebook component"]
#[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
pub resource_id: Option<String>,
#[serde(rename = "notebookPreparationError", default, skip_serializing_if = "Option::is_none")]
pub notebook_preparation_error: Option<NotebookPreparationError>,
}
impl NotebookResourceInfo {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Azure Machine Learning workspace REST API operation"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Operation {
#[doc = "Operation name: {provider}/{resource}/{operation}"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "Display name of operation"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub display: Option<operation::Display>,
}
impl Operation {
pub fn new() -> Self {
Self::default()
}
}
pub mod operation {
use super::*;
#[doc = "Display name of operation"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Display {
#[doc = "The resource provider name: Microsoft.MachineLearningExperimentation"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub provider: Option<String>,
#[doc = "The resource on which the operation is performed."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resource: Option<String>,
#[doc = "The operation that users can perform."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operation: Option<String>,
#[doc = "The description for the operation."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
impl Display {
pub fn new() -> Self {
Self::default()
}
}
}
#[doc = "An array of operations supported by the resource provider."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct OperationListResult {
#[doc = "List of AML workspace operations supported by the AML workspace resource provider."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Operation>,
}
impl OperationListResult {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Paginated list of Machine Learning compute objects wrapped in ARM resource envelope."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PaginatedComputeResourcesList {
#[doc = "An array of Machine Learning compute objects wrapped in ARM resource envelope."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<ComputeResource>,
#[doc = "A continuation link (absolute URI) to the next page of results in the list."]
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl PaginatedComputeResourcesList {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Paginated list of Machine Learning service objects wrapped in ARM resource envelope."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PaginatedServiceList {
#[doc = "An array of Machine Learning compute objects wrapped in ARM resource envelope."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<ServiceResource>,
#[doc = "A continuation link (absolute URI) to the next page of results in the list."]
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl PaginatedServiceList {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Paginated list of Workspace connection objects."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PaginatedWorkspaceConnectionsList {
#[doc = "An array of Workspace connection objects."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<WorkspaceConnection>,
#[doc = "A continuation link (absolute URI) to the next page of results in the list."]
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl PaginatedWorkspaceConnectionsList {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Password {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
}
impl Password {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Settings for a personal compute instance."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PersonalComputeInstanceSettings {
#[doc = "A user that can be assigned to a compute instance."]
#[serde(rename = "assignedUser", default, skip_serializing_if = "Option::is_none")]
pub assigned_user: Option<AssignedUser>,
}
impl PersonalComputeInstanceSettings {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The Private Endpoint resource."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateEndpoint {
#[doc = "The ARM identifier for Private Endpoint"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[doc = "The ARM identifier for Subnet resource that private endpoint links to"]
#[serde(rename = "subnetArmId", default, skip_serializing_if = "Option::is_none")]
pub subnet_arm_id: Option<String>,
}
impl PrivateEndpoint {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The Private Endpoint Connection resource."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateEndpointConnection {
#[serde(flatten)]
pub resource: Resource,
#[doc = "Properties of the PrivateEndpointConnectProperties."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateEndpointConnectionProperties>,
}
impl PrivateEndpointConnection {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Properties of the PrivateEndpointConnectProperties."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionProperties {
#[doc = "The Private Endpoint resource."]
#[serde(rename = "privateEndpoint", default, skip_serializing_if = "Option::is_none")]
pub private_endpoint: Option<PrivateEndpoint>,
#[doc = "A collection of information about the state of the connection between service consumer and provider."]
#[serde(rename = "privateLinkServiceConnectionState")]
pub private_link_service_connection_state: PrivateLinkServiceConnectionState,
#[doc = "The current provisioning state."]
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<PrivateEndpointConnectionProvisioningState>,
}
impl PrivateEndpointConnectionProperties {
pub fn new(private_link_service_connection_state: PrivateLinkServiceConnectionState) -> Self {
Self {
private_endpoint: None,
private_link_service_connection_state,
provisioning_state: None,
}
}
}
#[doc = "The current provisioning state."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointConnectionProvisioningState {
Succeeded,
Creating,
Deleting,
Failed,
}
#[doc = "The private endpoint connection status."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointServiceConnectionStatus {
Pending,
Approved,
Rejected,
Disconnected,
Timeout,
}
#[doc = "A private link resource"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateLinkResource {
#[serde(flatten)]
pub resource: Resource,
#[doc = "Properties of a private link resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateLinkResourceProperties>,
}
impl PrivateLinkResource {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "A list of private link resources"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateLinkResourceListResult {
#[doc = "Array of private link resources"]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateLinkResource>,
}
impl PrivateLinkResourceListResult {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Properties of a private link resource."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateLinkResourceProperties {
#[doc = "The private link resource group id."]
#[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")]
pub group_id: Option<String>,
#[doc = "The private link resource required member names."]
#[serde(rename = "requiredMembers", default, skip_serializing_if = "Vec::is_empty")]
pub required_members: Vec<String>,
#[doc = "The private link resource Private link DNS zone name."]
#[serde(rename = "requiredZoneNames", default, skip_serializing_if = "Vec::is_empty")]
pub required_zone_names: Vec<String>,
}
impl PrivateLinkResourceProperties {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "A collection of information about the state of the connection between service consumer and provider."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct PrivateLinkServiceConnectionState {
#[doc = "The private endpoint connection status."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<PrivateEndpointServiceConnectionStatus>,
#[doc = "The reason for approval/rejection of the connection."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[doc = "A message indicating if changes on the service provider require any updates on the consumer."]
#[serde(rename = "actionsRequired", default, skip_serializing_if = "Option::is_none")]
pub actions_required: Option<String>,
}
impl PrivateLinkServiceConnectionState {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The properties for Quota update or retrieval."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct QuotaBaseProperties {
#[doc = "Specifies the resource ID."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[doc = "Specifies the resource type."]
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[doc = "The maximum permitted quota of the resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub limit: Option<i64>,
#[doc = "An enum describing the unit of quota measurement."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub unit: Option<quota_base_properties::Unit>,
}
impl QuotaBaseProperties {
pub fn new() -> Self {
Self::default()
}
}
pub mod quota_base_properties {
use super::*;
#[doc = "An enum describing the unit of quota measurement."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Unit {
Count,
}
}
#[doc = "Quota update parameters."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct QuotaUpdateParameters {
#[doc = "The list for update quota."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<QuotaBaseProperties>,
#[doc = "Region of workspace quota to be updated."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
}
impl QuotaUpdateParameters {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct RCranPackage {
#[doc = "The package name."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "The repository name."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub repository: Option<String>,
}
impl RCranPackage {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct RGitHubPackage {
#[doc = "Repository address in the format username/repo[/subdir][@ref|#pull]."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub repository: Option<String>,
#[doc = "Personal access token to install from a private repo"]
#[serde(rename = "authToken", default, skip_serializing_if = "Option::is_none")]
pub auth_token: Option<String>,
}
impl RGitHubPackage {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct RGitHubPackageResponse {
#[doc = "Repository address in the format username/repo[/subdir][@ref|#pull]."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub repository: Option<String>,
}
impl RGitHubPackageResponse {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct RSection {
#[doc = "The version of R to be installed"]
#[serde(rename = "rVersion", default, skip_serializing_if = "Option::is_none")]
pub r_version: Option<String>,
#[doc = "Indicates whether the environment is managed by user or by AzureML."]
#[serde(rename = "userManaged", default, skip_serializing_if = "Option::is_none")]
pub user_managed: Option<bool>,
#[doc = "The Rscript path to use if an environment build is not required.\r\nThe path specified gets used to call the user script."]
#[serde(rename = "rscriptPath", default, skip_serializing_if = "Option::is_none")]
pub rscript_path: Option<String>,
#[doc = "Date of MRAN snapshot to use in YYYY-MM-DD format, e.g. \"2019-04-17\""]
#[serde(rename = "snapshotDate", default, skip_serializing_if = "Option::is_none")]
pub snapshot_date: Option<String>,
#[doc = "The CRAN packages to use."]
#[serde(rename = "cranPackages", default, skip_serializing_if = "Vec::is_empty")]
pub cran_packages: Vec<RCranPackage>,
#[doc = "The packages directly from GitHub."]
#[serde(rename = "gitHubPackages", default, skip_serializing_if = "Vec::is_empty")]
pub git_hub_packages: Vec<RGitHubPackage>,
#[doc = "The packages from custom urls."]
#[serde(rename = "customUrlPackages", default, skip_serializing_if = "Vec::is_empty")]
pub custom_url_packages: Vec<String>,
#[doc = "The packages from Bioconductor."]
#[serde(rename = "bioConductorPackages", default, skip_serializing_if = "Vec::is_empty")]
pub bio_conductor_packages: Vec<String>,
}
impl RSection {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct RSectionResponse {
#[doc = "The version of R to be installed"]
#[serde(rename = "rVersion", default, skip_serializing_if = "Option::is_none")]
pub r_version: Option<String>,
#[doc = "Indicates whether the environment is managed by user or by AzureML."]
#[serde(rename = "userManaged", default, skip_serializing_if = "Option::is_none")]
pub user_managed: Option<bool>,
#[doc = "The Rscript path to use if an environment build is not required.\r\nThe path specified gets used to call the user script."]
#[serde(rename = "rscriptPath", default, skip_serializing_if = "Option::is_none")]
pub rscript_path: Option<String>,
#[doc = "Date of MRAN snapshot to use in YYYY-MM-DD format, e.g. \"2019-04-17\""]
#[serde(rename = "snapshotDate", default, skip_serializing_if = "Option::is_none")]
pub snapshot_date: Option<String>,
#[doc = "The CRAN packages to use."]
#[serde(rename = "cranPackages", default, skip_serializing_if = "Vec::is_empty")]
pub cran_packages: Vec<RCranPackage>,
#[doc = "The packages directly from GitHub."]
#[serde(rename = "gitHubPackages", default, skip_serializing_if = "Vec::is_empty")]
pub git_hub_packages: Vec<RGitHubPackageResponse>,
#[doc = "The packages from custom urls."]
#[serde(rename = "customUrlPackages", default, skip_serializing_if = "Vec::is_empty")]
pub custom_url_packages: Vec<String>,
#[doc = "The packages from Bioconductor."]
#[serde(rename = "bioConductorPackages", default, skip_serializing_if = "Vec::is_empty")]
pub bio_conductor_packages: Vec<String>,
}
impl RSectionResponse {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct RegistryListCredentialsResult {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub passwords: Vec<Password>,
}
impl RegistryListCredentialsResult {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Azure Resource Manager resource envelope."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Resource {
#[doc = "Specifies the resource ID."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[doc = "Specifies the name of the resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "Identity for the resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identity: Option<Identity>,
#[doc = "Specifies the location of the resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[doc = "Specifies the type of the resource."]
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[doc = "Contains resource tags defined as key/value pairs."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[doc = "Sku of the resource"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
#[doc = "Read only system data"]
#[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
pub system_data: Option<SystemData>,
}
impl Resource {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Represents a resource ID. For example, for a subnet, it is the resource URL for the subnet."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceId {
#[doc = "The ID of the resource"]
pub id: String,
}
impl ResourceId {
pub fn new(id: String) -> Self {
Self { id }
}
}
#[doc = "The Resource Name."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ResourceName {
#[doc = "The name of the resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
#[doc = "The localized name of the resource."]
#[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")]
pub localized_value: Option<String>,
}
impl ResourceName {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The quota assigned to a resource."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ResourceQuota {
#[doc = "Specifies the resource ID."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[doc = "Region of the AML workspace in the id."]
#[serde(rename = "amlWorkspaceLocation", default, skip_serializing_if = "Option::is_none")]
pub aml_workspace_location: Option<String>,
#[doc = "Specifies the resource type."]
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[doc = "The Resource Name."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<ResourceName>,
#[doc = "The maximum permitted quota of the resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub limit: Option<i64>,
#[doc = "An enum describing the unit of quota measurement."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub unit: Option<resource_quota::Unit>,
}
impl ResourceQuota {
pub fn new() -> Self {
Self::default()
}
}
pub mod resource_quota {
use super::*;
#[doc = "An enum describing the unit of quota measurement."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Unit {
Count,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ResourceSkuLocationInfo {
#[doc = "Location of the SKU"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[doc = "List of availability zones where the SKU is supported."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
#[doc = "Details of capabilities available to a SKU in specific zones."]
#[serde(rename = "zoneDetails", default, skip_serializing_if = "Vec::is_empty")]
pub zone_details: Vec<ResourceSkuZoneDetails>,
}
impl ResourceSkuLocationInfo {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Describes The zonal capabilities of a SKU."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ResourceSkuZoneDetails {
#[doc = "The set of zones that the SKU is available in with the specified capabilities."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub name: Vec<String>,
#[doc = "A list of capabilities that are available for the SKU in the specified list of zones."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub capabilities: Vec<SkuCapability>,
}
impl ResourceSkuZoneDetails {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The restriction because of which SKU cannot be used."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Restriction {
#[doc = "The type of restrictions. As of now only possible value for this is location."]
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[doc = "The value of restrictions. If the restriction type is set to location. This would be different locations where the SKU is restricted."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub values: Vec<String>,
#[doc = "The reason for the restriction."]
#[serde(rename = "reasonCode", default, skip_serializing_if = "Option::is_none")]
pub reason_code: Option<restriction::ReasonCode>,
}
impl Restriction {
pub fn new() -> Self {
Self::default()
}
}
pub mod restriction {
use super::*;
#[doc = "The reason for the restriction."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ReasonCode {
NotSpecified,
NotAvailableForRegion,
NotAvailableForSubscription,
}
}
#[doc = "Features/user capabilities associated with the sku"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SkuCapability {
#[doc = "Capability/Feature ID"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "Details about the feature/capability"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
}
impl SkuCapability {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "scale settings for AML Compute"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScaleSettings {
#[doc = "Max number of nodes to use"]
#[serde(rename = "maxNodeCount")]
pub max_node_count: i32,
#[doc = "Min number of nodes to use"]
#[serde(rename = "minNodeCount", default, skip_serializing_if = "Option::is_none")]
pub min_node_count: Option<i32>,
#[doc = "Node Idle Time before scaling down amlCompute. This string needs to be in the RFC Format."]
#[serde(rename = "nodeIdleTimeBeforeScaleDown", default, skip_serializing_if = "Option::is_none")]
pub node_idle_time_before_scale_down: Option<String>,
}
impl ScaleSettings {
pub fn new(max_node_count: i32) -> Self {
Self {
max_node_count,
min_node_count: None,
node_idle_time_before_scale_down: None,
}
}
}
#[doc = "Script reference"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ScriptReference {
#[doc = "The storage source of the script: inline, workspace."]
#[serde(rename = "scriptSource", default, skip_serializing_if = "Option::is_none")]
pub script_source: Option<String>,
#[doc = "The location of scripts in the mounted volume."]
#[serde(rename = "scriptData", default, skip_serializing_if = "Option::is_none")]
pub script_data: Option<String>,
#[doc = "Optional command line arguments passed to the script to run."]
#[serde(rename = "scriptArguments", default, skip_serializing_if = "Option::is_none")]
pub script_arguments: Option<String>,
#[doc = "Optional time period passed to timeout command."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub timeout: Option<String>,
}
impl ScriptReference {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Customized setup scripts"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ScriptsToExecute {
#[doc = "Script reference"]
#[serde(rename = "startupScript", default, skip_serializing_if = "Option::is_none")]
pub startup_script: Option<ScriptReference>,
#[doc = "Script reference"]
#[serde(rename = "creationScript", default, skip_serializing_if = "Option::is_none")]
pub creation_script: Option<ScriptReference>,
}
impl ScriptsToExecute {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ServiceManagedResourcesSettings {
#[serde(rename = "cosmosDb", default, skip_serializing_if = "Option::is_none")]
pub cosmos_db: Option<CosmosDbSettings>,
}
impl ServiceManagedResourcesSettings {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Service principal credentials."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServicePrincipalCredentials {
#[doc = "Client Id"]
#[serde(rename = "clientId")]
pub client_id: String,
#[doc = "Client secret"]
#[serde(rename = "clientSecret")]
pub client_secret: String,
}
impl ServicePrincipalCredentials {
pub fn new(client_id: String, client_secret: String) -> Self {
Self { client_id, client_secret }
}
}
#[doc = "Machine Learning service object wrapped into ARM resource envelope."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct ServiceResource {
#[serde(flatten)]
pub resource: Resource,
#[doc = "The base service response. The correct inherited response based on computeType will be returned (ex. ACIServiceResponse)"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ServiceResponseBase>,
}
impl ServiceResource {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The base service response. The correct inherited response based on computeType will be returned (ex. ACIServiceResponse)"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceResponseBase {
#[doc = "The service description."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[doc = "The service tag dictionary. Tags are mutable."]
#[serde(rename = "kvTags", default, skip_serializing_if = "Option::is_none")]
pub kv_tags: Option<serde_json::Value>,
#[doc = "The service property dictionary. Properties are immutable."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<serde_json::Value>,
#[doc = "The current state of the service."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub state: Option<service_response_base::State>,
#[doc = "The error details."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<serde_json::Value>,
#[doc = "The compute environment type for the service."]
#[serde(rename = "computeType")]
pub compute_type: service_response_base::ComputeType,
#[doc = "The deployment type for the service."]
#[serde(rename = "deploymentType", default, skip_serializing_if = "Option::is_none")]
pub deployment_type: Option<service_response_base::DeploymentType>,
}
impl ServiceResponseBase {
pub fn new(compute_type: service_response_base::ComputeType) -> Self {
Self {
description: None,
kv_tags: None,
properties: None,
state: None,
error: None,
compute_type,
deployment_type: None,
}
}
}
pub mod service_response_base {
use super::*;
#[doc = "The current state of the service."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum State {
Transitioning,
Healthy,
Unhealthy,
Failed,
Unschedulable,
}
#[doc = "The compute environment type for the service."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ComputeType {
#[serde(rename = "ACI")]
Aci,
#[serde(rename = "AKS")]
Aks,
}
#[doc = "The deployment type for the service."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DeploymentType {
#[serde(rename = "GRPCRealtimeEndpoint")]
GrpcRealtimeEndpoint,
HttpRealtimeEndpoint,
Batch,
}
}
#[doc = "Details of customized scripts to execute for setting up the cluster."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Setu
|
#[doc = "Customized setup scripts"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub scripts: Option<ScriptsToExecute>,
}
impl SetupScripts {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SharedPrivateLinkResource {
#[doc = "Unique name of the private link."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "Properties of a shared private link resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<SharedPrivateLinkResourceProperty>,
}
impl SharedPrivateLinkResource {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Properties of a shared private link resource."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SharedPrivateLinkResourceProperty {
#[doc = "The resource id that private link links to."]
#[serde(rename = "privateLinkResourceId", default, skip_serializing_if = "Option::is_none")]
pub private_link_resource_id: Option<String>,
#[doc = "The private link resource group id."]
#[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")]
pub group_id: Option<String>,
#[doc = "Request message."]
#[serde(rename = "requestMessage", default, skip_serializing_if = "Option::is_none")]
pub request_message: Option<String>,
#[doc = "The private endpoint connection status."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<PrivateEndpointServiceConnectionStatus>,
}
impl SharedPrivateLinkResourceProperty {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Sku of the resource"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Sku {
#[doc = "Name of the sku"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "Tier of the sku like Basic or Enterprise"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<String>,
}
impl Sku {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "List of skus with features"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SkuListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<WorkspaceSku>,
#[doc = "The URI to fetch the next page of Workspace Skus. Call ListNext() with this URI to fetch the next page of Workspace Skus"]
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl SkuListResult {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SparkMavenPackage {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub group: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub artifact: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
}
impl SparkMavenPackage {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The ssl configuration for scoring"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SslConfiguration {
#[doc = "Enable or disable ssl for scoring"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<ssl_configuration::Status>,
#[doc = "Cert data"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub cert: Option<String>,
#[doc = "Key data"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub key: Option<String>,
#[doc = "CNAME of the cert"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub cname: Option<String>,
#[doc = "Leaf domain label of public endpoint"]
#[serde(rename = "leafDomainLabel", default, skip_serializing_if = "Option::is_none")]
pub leaf_domain_label: Option<String>,
#[doc = "Indicates whether to overwrite existing domain label."]
#[serde(rename = "overwriteExistingDomain", default, skip_serializing_if = "Option::is_none")]
pub overwrite_existing_domain: Option<bool>,
}
impl SslConfiguration {
pub fn new() -> Self {
Self::default()
}
}
pub mod ssl_configuration {
use super::*;
#[doc = "Enable or disable ssl for scoring"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Disabled,
Enabled,
Auto,
}
}
#[doc = "A SynapseSpark compute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SynapseSpark {
#[serde(flatten)]
pub compute: Compute,
#[serde(flatten)]
pub synapse_spark_pool_properties: SynapseSparkPoolProperties,
}
impl SynapseSpark {
pub fn new(compute: Compute) -> Self {
Self {
compute,
synapse_spark_pool_properties: SynapseSparkPoolProperties::default(),
}
}
}
#[doc = "Properties specific to Synapse Spark pools."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SynapseSparkPoolProperties {
#[doc = "AKS properties"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<synapse_spark_pool_properties::Properties>,
}
impl SynapseSparkPoolProperties {
pub fn new() -> Self {
Self::default()
}
}
pub mod synapse_spark_pool_properties {
use super::*;
#[doc = "AKS properties"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Properties {
#[doc = "Auto scale properties"]
#[serde(rename = "autoScaleProperties", default, skip_serializing_if = "Option::is_none")]
pub auto_scale_properties: Option<AutoScaleProperties>,
#[doc = "Auto pause properties"]
#[serde(rename = "autoPauseProperties", default, skip_serializing_if = "Option::is_none")]
pub auto_pause_properties: Option<AutoPauseProperties>,
#[doc = "Spark version."]
#[serde(rename = "sparkVersion", default, skip_serializing_if = "Option::is_none")]
pub spark_version: Option<String>,
#[doc = "The number of compute nodes currently assigned to the compute."]
#[serde(rename = "nodeCount", default, skip_serializing_if = "Option::is_none")]
pub node_count: Option<i32>,
#[doc = "Node size."]
#[serde(rename = "nodeSize", default, skip_serializing_if = "Option::is_none")]
pub node_size: Option<String>,
#[doc = "Node size family."]
#[serde(rename = "nodeSizeFamily", default, skip_serializing_if = "Option::is_none")]
pub node_size_family: Option<String>,
#[doc = "Azure subscription identifier."]
#[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
pub subscription_id: Option<String>,
#[doc = "Name of the resource group in which workspace is located."]
#[serde(rename = "resourceGroup", default, skip_serializing_if = "Option::is_none")]
pub resource_group: Option<String>,
#[doc = "Name of Azure Machine Learning workspace."]
#[serde(rename = "workspaceName", default, skip_serializing_if = "Option::is_none")]
pub workspace_name: Option<String>,
#[doc = "Pool name."]
#[serde(rename = "poolName", default, skip_serializing_if = "Option::is_none")]
pub pool_name: Option<String>,
}
impl Properties {
pub fn new() -> Self {
Self::default()
}
}
}
#[doc = "Read only system data"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SystemData {
#[doc = "An identifier for the identity that created the resource"]
#[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
pub created_by: Option<String>,
#[doc = "The type of identity that creates/modifies resources"]
#[serde(rename = "createdByType", default, skip_serializing_if = "Option::is_none")]
pub created_by_type: Option<IdentityType>,
#[doc = "The timestamp of resource creation (UTC)"]
#[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
pub created_at: Option<String>,
#[doc = "An identifier for the identity that last modified the resource"]
#[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")]
pub last_modified_by: Option<String>,
#[doc = "The type of identity that creates/modifies resources"]
#[serde(rename = "lastModifiedByType", default, skip_serializing_if = "Option::is_none")]
pub last_modified_by_type: Option<IdentityType>,
#[doc = "The timestamp of resource last modification (UTC)"]
#[serde(rename = "lastModifiedAt", default, skip_serializing_if = "Option::is_none")]
pub last_modified_at: Option<String>,
}
impl SystemData {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "A system service running on a compute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct SystemService {
#[doc = "The type of this system service."]
#[serde(rename = "systemServiceType", default, skip_serializing_if = "Option::is_none")]
pub system_service_type: Option<String>,
#[doc = "Public IP address"]
#[serde(rename = "publicIpAddress", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address: Option<String>,
#[doc = "The version for this type."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
}
impl SystemService {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The properties for update Quota response."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct UpdateWorkspaceQuotas {
#[doc = "Specifies the resource ID."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[doc = "Specifies the resource type."]
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[doc = "The maximum permitted quota of the resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub limit: Option<i64>,
#[doc = "An enum describing the unit of quota measurement."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub unit: Option<update_workspace_quotas::Unit>,
#[doc = "Status of update workspace quota."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<update_workspace_quotas::Status>,
}
impl UpdateWorkspaceQuotas {
pub fn new() -> Self {
Self::default()
}
}
pub mod update_workspace_quotas {
use super::*;
#[doc = "An enum describing the unit of quota measurement."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Unit {
Count,
}
#[doc = "Status of update workspace quota."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Undefined,
Success,
Failure,
InvalidQuotaBelowClusterMinimum,
InvalidQuotaExceedsSubscriptionLimit,
#[serde(rename = "InvalidVMFamilyName")]
InvalidVmFamilyName,
OperationNotSupportedForSku,
OperationNotEnabledForRegion,
}
}
#[doc = "The result of update workspace quota."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct UpdateWorkspaceQuotasResult {
#[doc = "The list of workspace quota update result."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<UpdateWorkspaceQuotas>,
#[doc = "The URI to fetch the next page of workspace quota update result. Call ListNext() with this to fetch the next page of Workspace Quota update result."]
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl UpdateWorkspaceQuotasResult {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Describes AML Resource Usage."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Usage {
#[doc = "Specifies the resource ID."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[doc = "Region of the AML workspace in the id."]
#[serde(rename = "amlWorkspaceLocation", default, skip_serializing_if = "Option::is_none")]
pub aml_workspace_location: Option<String>,
#[doc = "Specifies the resource type."]
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[doc = "An enum describing the unit of usage measurement."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub unit: Option<usage::Unit>,
#[doc = "The current usage of the resource."]
#[serde(rename = "currentValue", default, skip_serializing_if = "Option::is_none")]
pub current_value: Option<i64>,
#[doc = "The maximum permitted usage of the resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub limit: Option<i64>,
#[doc = "The Usage Names."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<UsageName>,
}
impl Usage {
pub fn new() -> Self {
Self::default()
}
}
pub mod usage {
use super::*;
#[doc = "An enum describing the unit of usage measurement."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Unit {
Count,
}
}
#[doc = "The Usage Names."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct UsageName {
#[doc = "The name of the resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
#[doc = "The localized name of the resource."]
#[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")]
pub localized_value: Option<String>,
}
impl UsageName {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Settings for user account that gets created on each on the nodes of a compute."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserAccountCredentials {
#[doc = "Name of the administrator user account which can be used to SSH to nodes."]
#[serde(rename = "adminUserName")]
pub admin_user_name: String,
#[doc = "SSH public key of the administrator user account."]
#[serde(rename = "adminUserSshPublicKey", default, skip_serializing_if = "Option::is_none")]
pub admin_user_ssh_public_key: Option<String>,
#[doc = "Password of the administrator user account."]
#[serde(rename = "adminUserPassword", default, skip_serializing_if = "Option::is_none")]
pub admin_user_password: Option<String>,
}
impl UserAccountCredentials {
pub fn new(admin_user_name: String) -> Self {
Self {
admin_user_name,
admin_user_ssh_public_key: None,
admin_user_password: None,
}
}
}
#[doc = "dictionary containing all the user assigned identities, with resourceId of the UAI as key."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct UserAssignedIdentities {}
impl UserAssignedIdentities {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "User Assigned Identity"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct UserAssignedIdentity {
#[doc = "The principal ID of the user assigned identity."]
#[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
pub principal_id: Option<String>,
#[doc = "The tenant ID of the user assigned identity."]
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[doc = "The clientId(aka appId) of the user assigned identity."]
#[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
pub client_id: Option<String>,
}
impl UserAssignedIdentity {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "A Machine Learning compute based on Azure Virtual Machines."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachine {
#[serde(flatten)]
pub compute: Compute,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<virtual_machine::Properties>,
}
impl VirtualMachine {
pub fn new(compute: Compute) -> Self {
Self { compute, properties: None }
}
}
pub mod virtual_machine {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Properties {
#[doc = "Virtual Machine size"]
#[serde(rename = "virtualMachineSize", default, skip_serializing_if = "Option::is_none")]
pub virtual_machine_size: Option<String>,
#[doc = "Port open for ssh connections."]
#[serde(rename = "sshPort", default, skip_serializing_if = "Option::is_none")]
pub ssh_port: Option<i32>,
#[doc = "Public IP address of the virtual machine."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub address: Option<String>,
#[doc = "Admin credentials for virtual machine"]
#[serde(rename = "administratorAccount", default, skip_serializing_if = "Option::is_none")]
pub administrator_account: Option<VirtualMachineSshCredentials>,
#[doc = "Indicates whether this compute will be used for running notebooks."]
#[serde(rename = "isNotebookInstanceCompute", default, skip_serializing_if = "Option::is_none")]
pub is_notebook_instance_compute: Option<bool>,
}
impl Properties {
pub fn new() -> Self {
Self::default()
}
}
}
#[doc = "Virtual Machine image for Windows AML Compute"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineImage {
#[doc = "Virtual Machine image path"]
pub id: String,
}
impl VirtualMachineImage {
pub fn new(id: String) -> Self {
Self { id }
}
}
#[doc = "Secrets related to a Machine Learning compute based on AKS."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSecrets {
#[serde(flatten)]
pub compute_secrets: ComputeSecrets,
#[doc = "Admin credentials for virtual machine"]
#[serde(rename = "administratorAccount", default, skip_serializing_if = "Option::is_none")]
pub administrator_account: Option<VirtualMachineSshCredentials>,
}
impl VirtualMachineSecrets {
pub fn new(compute_secrets: ComputeSecrets) -> Self {
Self {
compute_secrets,
administrator_account: None,
}
}
}
#[doc = "Describes the properties of a VM size."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct VirtualMachineSize {
#[doc = "The name of the virtual machine size."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "The family name of the virtual machine size."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub family: Option<String>,
#[doc = "The number of vCPUs supported by the virtual machine size."]
#[serde(rename = "vCPUs", default, skip_serializing_if = "Option::is_none")]
pub v_cp_us: Option<i32>,
#[doc = "The number of gPUs supported by the virtual machine size."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub gpus: Option<i32>,
#[doc = "The OS VHD disk size, in MB, allowed by the virtual machine size."]
#[serde(rename = "osVhdSizeMB", default, skip_serializing_if = "Option::is_none")]
pub os_vhd_size_mb: Option<i32>,
#[doc = "The resource volume size, in MB, allowed by the virtual machine size."]
#[serde(rename = "maxResourceVolumeMB", default, skip_serializing_if = "Option::is_none")]
pub max_resource_volume_mb: Option<i32>,
#[doc = "The amount of memory, in GB, supported by the virtual machine size."]
#[serde(rename = "memoryGB", default, skip_serializing_if = "Option::is_none")]
pub memory_gb: Option<f64>,
#[doc = "Specifies if the virtual machine size supports low priority VMs."]
#[serde(rename = "lowPriorityCapable", default, skip_serializing_if = "Option::is_none")]
pub low_priority_capable: Option<bool>,
#[doc = "Specifies if the virtual machine size supports premium IO."]
#[serde(rename = "premiumIO", default, skip_serializing_if = "Option::is_none")]
pub premium_io: Option<bool>,
#[doc = "The estimated price info for using a VM."]
#[serde(rename = "estimatedVMPrices", default, skip_serializing_if = "Option::is_none")]
pub estimated_vm_prices: Option<EstimatedVmPrices>,
#[doc = "Specifies the compute types supported by the virtual machine size."]
#[serde(rename = "supportedComputeTypes", default, skip_serializing_if = "Vec::is_empty")]
pub supported_compute_types: Vec<String>,
}
impl VirtualMachineSize {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The List Virtual Machine size operation response."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct VirtualMachineSizeListResult {
#[doc = "The list of virtual machine sizes supported by AmlCompute."]
#[serde(rename = "amlCompute", default, skip_serializing_if = "Vec::is_empty")]
pub aml_compute: Vec<VirtualMachineSize>,
}
impl VirtualMachineSizeListResult {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Admin credentials for virtual machine"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct VirtualMachineSshCredentials {
#[doc = "Username of admin account"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option<String>,
#[doc = "Password of admin account"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<String>,
#[doc = "Public key data"]
#[serde(rename = "publicKeyData", default, skip_serializing_if = "Option::is_none")]
pub public_key_data: Option<String>,
#[doc = "Private key data"]
#[serde(rename = "privateKeyData", default, skip_serializing_if = "Option::is_none")]
pub private_key_data: Option<String>,
}
impl VirtualMachineSshCredentials {
pub fn new() -> Self {
Self::default()
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct VnetConfiguration {
#[doc = "The name of the virtual network."]
#[serde(rename = "vnetName", default, skip_serializing_if = "Option::is_none")]
pub vnet_name: Option<String>,
#[doc = "The name of the virtual network subnet."]
#[serde(rename = "subnetName", default, skip_serializing_if = "Option::is_none")]
pub subnet_name: Option<String>,
}
impl VnetConfiguration {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "An object that represents a machine learning workspace."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct Workspace {
#[serde(flatten)]
pub resource: Resource,
#[doc = "The properties of a machine learning workspace."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<WorkspaceProperties>,
}
impl Workspace {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Workspace connection."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct WorkspaceConnection {
#[doc = "ResourceId of the workspace connection."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[doc = "Friendly name of the workspace connection."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "Resource type of workspace connection."]
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[doc = "Workspace Connection specific properties."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<WorkspaceConnectionProps>,
}
impl WorkspaceConnection {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "object used for creating workspace connection."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct WorkspaceConnectionDto {
#[doc = "Friendly name of the workspace connection"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "Workspace Connection specific properties."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<WorkspaceConnectionProps>,
}
impl WorkspaceConnectionDto {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Workspace Connection specific properties."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct WorkspaceConnectionProps {
#[doc = "Category of the workspace connection."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub category: Option<String>,
#[doc = "Target of the workspace connection."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub target: Option<String>,
#[doc = "Authorization type of the workspace connection."]
#[serde(rename = "authType", default, skip_serializing_if = "Option::is_none")]
pub auth_type: Option<String>,
#[doc = "Value details of the workspace connection."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
#[doc = "format for the workspace connection value"]
#[serde(rename = "valueFormat", default, skip_serializing_if = "Option::is_none")]
pub value_format: Option<workspace_connection_props::ValueFormat>,
}
impl WorkspaceConnectionProps {
pub fn new() -> Self {
Self::default()
}
}
pub mod workspace_connection_props {
use super::*;
#[doc = "format for the workspace connection value"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ValueFormat {
#[serde(rename = "JSON")]
Json,
}
}
#[doc = "The result of a request to list machine learning workspaces."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct WorkspaceListResult {
#[doc = "The list of machine learning workspaces. Since this list may be incomplete, the nextLink field should be used to request the next list of machine learning workspaces."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Workspace>,
#[doc = "The URI that can be used to request the next list of machine learning workspaces."]
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
impl WorkspaceListResult {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The properties of a machine learning workspace."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct WorkspaceProperties {
#[doc = "The immutable id associated with this workspace."]
#[serde(rename = "workspaceId", default, skip_serializing_if = "Option::is_none")]
pub workspace_id: Option<String>,
#[doc = "The description of this workspace."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[doc = "The friendly name for this workspace. This name in mutable"]
#[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
pub friendly_name: Option<String>,
#[doc = "ARM id of the key vault associated with this workspace. This cannot be changed once the workspace has been created"]
#[serde(rename = "keyVault", default, skip_serializing_if = "Option::is_none")]
pub key_vault: Option<String>,
#[doc = "ARM id of the application insights associated with this workspace. This cannot be changed once the workspace has been created"]
#[serde(rename = "applicationInsights", default, skip_serializing_if = "Option::is_none")]
pub application_insights: Option<String>,
#[doc = "ARM id of the container registry associated with this workspace. This cannot be changed once the workspace has been created"]
#[serde(rename = "containerRegistry", default, skip_serializing_if = "Option::is_none")]
pub container_registry: Option<String>,
#[doc = "ARM id of the storage account associated with this workspace. This cannot be changed once the workspace has been created"]
#[serde(rename = "storageAccount", default, skip_serializing_if = "Option::is_none")]
pub storage_account: Option<String>,
#[doc = "Url for the discovery service to identify regional endpoints for machine learning experimentation services"]
#[serde(rename = "discoveryUrl", default, skip_serializing_if = "Option::is_none")]
pub discovery_url: Option<String>,
#[doc = "The current deployment state of workspace resource. The provisioningState is to indicate states for resource provisioning."]
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<workspace_properties::ProvisioningState>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub encryption: Option<EncryptionProperty>,
#[doc = "The flag to signal HBI data in the workspace and reduce diagnostic data collected by the service"]
#[serde(rename = "hbiWorkspace", default, skip_serializing_if = "Option::is_none")]
pub hbi_workspace: Option<bool>,
#[doc = "The name of the managed resource group created by workspace RP in customer subscription if the workspace is CMK workspace"]
#[serde(rename = "serviceProvisionedResourceGroup", default, skip_serializing_if = "Option::is_none")]
pub service_provisioned_resource_group: Option<String>,
#[doc = "Count of private connections in the workspace"]
#[serde(rename = "privateLinkCount", default, skip_serializing_if = "Option::is_none")]
pub private_link_count: Option<i32>,
#[doc = "The compute name for image build"]
#[serde(rename = "imageBuildCompute", default, skip_serializing_if = "Option::is_none")]
pub image_build_compute: Option<String>,
#[doc = "The flag to indicate whether to allow public access when behind VNet."]
#[serde(rename = "allowPublicAccessWhenBehindVnet", default, skip_serializing_if = "Option::is_none")]
pub allow_public_access_when_behind_vnet: Option<bool>,
#[doc = "The list of private endpoint connections in the workspace."]
#[serde(rename = "privateEndpointConnections", default, skip_serializing_if = "Vec::is_empty")]
pub private_endpoint_connections: Vec<PrivateEndpointConnection>,
#[doc = "The list of shared private link resources in this workspace."]
#[serde(rename = "sharedPrivateLinkResources", default, skip_serializing_if = "Vec::is_empty")]
pub shared_private_link_resources: Vec<SharedPrivateLinkResource>,
#[serde(rename = "notebookInfo", default, skip_serializing_if = "Option::is_none")]
pub notebook_info: Option<NotebookResourceInfo>,
#[serde(rename = "serviceManagedResourcesSettings", default, skip_serializing_if = "Option::is_none")]
pub service_managed_resources_settings: Option<ServiceManagedResourcesSettings>,
#[doc = "The user assigned identity resource id that represents the workspace identity."]
#[serde(rename = "primaryUserAssignedIdentity", default, skip_serializing_if = "Option::is_none")]
pub primary_user_assigned_identity: Option<String>,
#[doc = "The tenant id associated with this workspace."]
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
}
impl WorkspaceProperties {
pub fn new() -> Self {
Self::default()
}
}
pub mod workspace_properties {
use super::*;
#[doc = "The current deployment state of workspace resource. The provisioningState is to indicate states for resource provisioning."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProvisioningState {
Unknown,
Updating,
Creating,
Deleting,
Succeeded,
Failed,
Canceled,
}
}
#[doc = "The parameters for updating the properties of a machine learning workspace."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct WorkspacePropertiesUpdateParameters {
#[doc = "The description of this workspace."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[doc = "The friendly name for this workspace."]
#[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")]
pub friendly_name: Option<String>,
#[doc = "The compute name for image build"]
#[serde(rename = "imageBuildCompute", default, skip_serializing_if = "Option::is_none")]
pub image_build_compute: Option<String>,
#[serde(rename = "serviceManagedResourcesSettings", default, skip_serializing_if = "Option::is_none")]
pub service_managed_resources_settings: Option<ServiceManagedResourcesSettings>,
#[doc = "The user assigned identity resource id that represents the workspace identity."]
#[serde(rename = "primaryUserAssignedIdentity", default, skip_serializing_if = "Option::is_none")]
pub primary_user_assigned_identity: Option<String>,
}
impl WorkspacePropertiesUpdateParameters {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "Describes Workspace Sku details and features"]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct WorkspaceSku {
#[doc = "The set of locations that the SKU is available. This will be supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.)."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub locations: Vec<String>,
#[doc = "A list of locations and availability zones in those locations where the SKU is available."]
#[serde(rename = "locationInfo", default, skip_serializing_if = "Vec::is_empty")]
pub location_info: Vec<ResourceSkuLocationInfo>,
#[doc = "Sku Tier like Basic or Enterprise"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<String>,
#[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")]
pub resource_type: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[doc = "List of features/user capabilities associated with the sku"]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub capabilities: Vec<SkuCapability>,
#[doc = "The restrictions because of which SKU cannot be used. This is empty if there are no restrictions."]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub restrictions: Vec<Restriction>,
}
impl WorkspaceSku {
pub fn new() -> Self {
Self::default()
}
}
#[doc = "The parameters for updating a machine learning workspace."]
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct WorkspaceUpdateParameters {
#[doc = "The resource tags for the machine learning workspace."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[doc = "Sku of the resource"]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
#[doc = "Identity for the resource."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identity: Option<Identity>,
#[doc = "The parameters for updating the properties of a machine learning workspace."]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<WorkspacePropertiesUpdateParameters>,
}
impl WorkspaceUpdateParameters {
pub fn new() -> Self {
Self::default()
}
}
|
pScripts {
|
tls_decorator.go
|
package tls
import (
"github.com/rotisserie/eris"
discoveryv1alpha2 "github.com/solo-io/gloo-mesh/pkg/api/discovery.mesh.gloo.solo.io/v1alpha2"
"github.com/solo-io/gloo-mesh/pkg/api/networking.mesh.gloo.solo.io/v1alpha2"
"github.com/solo-io/gloo-mesh/pkg/mesh-networking/translation/istio/decorators"
networkingv1alpha3spec "istio.io/api/networking/v1alpha3"
)
const (
decoratorName = "tls"
)
func init() {
decorators.Register(decoratorConstructor)
}
func decoratorConstructor(_ decorators.Parameters) decorators.Decorator {
return NewTlsDecorator()
}
// Handles setting TLS on a DestinationRule.
type tlsDecorator struct{}
var _ decorators.TrafficPolicyDestinationRuleDecorator = &tlsDecorator{}
func
|
() *tlsDecorator {
return &tlsDecorator{}
}
func (d *tlsDecorator) DecoratorName() string {
return decoratorName
}
func (d *tlsDecorator) ApplyTrafficPolicyToDestinationRule(
appliedPolicy *discoveryv1alpha2.TrafficTargetStatus_AppliedTrafficPolicy,
_ *discoveryv1alpha2.TrafficTarget,
output *networkingv1alpha3spec.DestinationRule,
registerField decorators.RegisterField,
) error {
tlsSettings, err := d.translateTlsSettings(appliedPolicy.Spec)
if err != nil {
return err
}
if tlsSettings != nil {
if err := registerField(&output.TrafficPolicy.Tls, tlsSettings); err != nil {
return err
}
output.TrafficPolicy.Tls = tlsSettings
}
return nil
}
func (d *tlsDecorator) translateTlsSettings(
trafficPolicy *v1alpha2.TrafficPolicySpec,
) (*networkingv1alpha3spec.ClientTLSSettings, error) {
// If TrafficPolicy doesn't specify mTLS configuration, use global default populated upstream during initialization.
if trafficPolicy.GetMtls().GetIstio() == nil {
return nil, nil
}
istioTlsMode, err := MapIstioTlsMode(trafficPolicy.Mtls.Istio.TlsMode)
if err != nil {
return nil, err
}
return &networkingv1alpha3spec.ClientTLSSettings{
Mode: istioTlsMode,
}, nil
}
func MapIstioTlsMode(tlsMode v1alpha2.TrafficPolicySpec_MTLS_Istio_TLSmode) (networkingv1alpha3spec.ClientTLSSettings_TLSmode, error) {
switch tlsMode {
case v1alpha2.TrafficPolicySpec_MTLS_Istio_DISABLE:
return networkingv1alpha3spec.ClientTLSSettings_DISABLE, nil
case v1alpha2.TrafficPolicySpec_MTLS_Istio_SIMPLE:
return networkingv1alpha3spec.ClientTLSSettings_SIMPLE, nil
case v1alpha2.TrafficPolicySpec_MTLS_Istio_ISTIO_MUTUAL:
return networkingv1alpha3spec.ClientTLSSettings_ISTIO_MUTUAL, nil
default:
return 0, eris.Errorf("unrecognized Istio TLS mode %s", tlsMode)
}
}
|
NewTlsDecorator
|
ripemd160sum.rs
|
extern crate ripemd160;
use ripemd160::{Ripemd160, Digest};
use std::env;
use std::fs;
use std::io::{self, Read};
const BUFFER_SIZE: usize = 1024;
/// Print digest result as hex string and name pair
fn print_result(sum: &[u8], name: &str)
|
/// Compute digest value for given `Reader` and print it
/// On any error simply return without doing anything
fn process<D: Digest + Default, R: Read>(reader: &mut R, name: &str) {
let mut sh = D::default();
let mut buffer = [0u8; BUFFER_SIZE];
loop {
let n = match reader.read(&mut buffer) {
Ok(n) => n,
Err(_) => return,
};
sh.input(&buffer[..n]);
if n == 0 || n < BUFFER_SIZE {
break;
}
}
print_result(&sh.result(), name);
}
fn main() {
let args = env::args();
// Process files listed in command line arguments one by one
// If no files provided process input from stdin
if args.len() > 1 {
for path in args.skip(1) {
if let Ok(mut file) = fs::File::open(&path) {
process::<Ripemd160, _>(&mut file, &path);
}
}
} else {
process::<Ripemd160, _>(&mut io::stdin(), "-");
}
}
|
{
for byte in sum {
print!("{:02x}", byte);
}
println!("\t{}", name);
}
|
Problem_027.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from math import sqrt
def is_prime(a):
a = abs(int(a))
for i in range( 2, int(sqrt(a)) + 1 ):
if a % i == 0:
return False
return True
def num_primes(a,b):
i = 0
while True:
if not is_prime( i*(i + a) + b ):
|
else:
i += 1
return i
max_num_primes = 0
for i in range(-999,1000):
for j in range(-999,1000):
n = num_primes(i,j)
if n > max_num_primes:
max_num_primes = n
max_mult = i * j
print max_mult
|
break
|
ConflictResolutionMode.ts
|
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT license.
export enum ConflictResolutionMode {
Custom = "Custom",
LastWriterWins = "LastWriterWins",
|
}
|
|
point-mutations.rs
|
extern crate point_mutations as dna;
#[test]
fn test_no_difference_between_empty_strands() {
assert_eq!(dna::hamming_distance("", ""), 0);
}
#[test]
#[ignore]
|
}
#[test]
#[ignore]
fn test_complete_hamming_distance_in_small_strand() {
assert_eq!(dna::hamming_distance("ACT", "GGA"), 3);
}
#[test]
#[ignore]
fn test_small_hamming_distance_in_the_middle_somewhere() {
assert_eq!(dna::hamming_distance("GGACG", "GGTCG"), 1);
}
#[test]
#[ignore]
fn test_larger_distance() {
assert_eq!(dna::hamming_distance("ACCAGGG", "ACTATGG"), 2);
}
|
fn test_no_difference_between_identical_strands() {
assert_eq!(dna::hamming_distance("GGACTGA", "GGACTGA"), 0);
|
all_test.go
|
// Copyright (C) 2012-2016 Miquel Sabaté Solà <[email protected]>
// This file is licensed under the MIT license.
// See the LICENSE file.
package user_agent
import (
"fmt"
"testing"
)
// Slice that contains all the tests. Each test is contained in a struct
// that groups the title of the test, the User-Agent string to be tested and the expected value.
var uastrings = []struct {
title string
ua string
expected string
}{
// Bots
{
title: "GoogleBot",
ua: "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
expected: "Mozilla:5.0 Browser:Googlebot-2.1 Bot:true Mobile:false",
},
{
title: "GoogleBotSmartphone",
ua: "Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
expected: "Mozilla:5.0 Browser:Googlebot-2.1 Bot:true Mobile:true",
},
{
title: "BingBot",
ua: "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)",
expected: "Mozilla:5.0 Browser:bingbot-2.0 Bot:true Mobile:false",
},
{
title: "BaiduBot",
ua: "Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)",
expected: "Mozilla:5.0 Browser:Baiduspider-2.0 Bot:true Mobile:false",
},
{
title: "Twitterbot",
ua: "Twitterbot",
expected: "Browser:Twitterbot Bot:true Mobile:false",
},
{
title: "YahooBot",
ua: "Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)",
expected: "Mozilla:5.0 Browser:Yahoo! Slurp Bot:true Mobile:false",
},
{
title: "FacebookExternalHit",
ua: "facebookexternalhit/1.1 (+http://www.facebook.com/externalhit_uatext.php)",
expected: "Browser:facebookexternalhit-1.1 Bot:true Mobile:false",
},
{
title: "FacebookPlatform",
ua: "facebookplatform/1.0 (+http://developers.facebook.com)",
expected: "Browser:facebookplatform-1.0 Bot:true Mobile:false",
},
{
title: "FaceBot",
ua: "Facebot",
expected: "Browser:Facebot Bot:true Mobile:false",
},
{
title: "NutchCVS",
ua: "NutchCVS/0.8-dev (Nutch; http://lucene.apache.org/nutch/bot.html; [email protected])",
expected: "Browser:NutchCVS Bot:true Mobile:false",
},
{
title: "MJ12bot",
ua: "Mozilla/5.0 (compatible; MJ12bot/v1.2.4; http://www.majestic12.co.uk/bot.php?+)",
expected: "Mozilla:5.0 Browser:MJ12bot-v1.2.4 Bot:true Mobile:false",
},
{
title: "MJ12bot",
ua: "MJ12bot/v1.0.8 (http://majestic12.co.uk/bot.php?+)",
expected: "Browser:MJ12bot Bot:true Mobile:false",
},
{
title: "AhrefsBot",
ua: "Mozilla/5.0 (compatible; AhrefsBot/4.0; +http://ahrefs.com/robot/)",
expected: "Mozilla:5.0 Browser:AhrefsBot-4.0 Bot:true Mobile:false",
},
// Internet Explorer
{
title: "IE10",
ua: "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)",
expected: "Mozilla:5.0 Platform:Windows OS:Windows 8 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false",
},
{
title: "Tablet",
ua: "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.2; ARM; Trident/6.0; Touch; .NET4.0E; .NET4.0C; Tablet PC 2.0)",
expected: "Mozilla:4.0 Platform:Windows OS:Windows 8 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false",
},
{
title: "Touch",
ua: "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; ARM; Trident/6.0; Touch)",
expected: "Mozilla:5.0 Platform:Windows OS:Windows 8 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false",
},
{
title: "Phone",
ua: "Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0; SAMSUNG; SGH-i917)",
expected: "Mozilla:4.0 Platform:Windows OS:Windows Phone OS 7.0 Browser:Internet Explorer-7.0 Engine:Trident Bot:false Mobile:true",
},
{
title: "IE6",
ua: "Mozilla/4.0 (compatible; MSIE6.0; Windows NT 5.0; .NET CLR 1.1.4322)",
expected: "Mozilla:4.0 Platform:Windows OS:Windows 2000 Browser:Internet Explorer-6.0 Engine:Trident Bot:false Mobile:false",
},
{
title: "IE8Compatibility",
ua: "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.3; MS-RTC LM 8)",
expected: "Mozilla:4.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-8.0 Engine:Trident Bot:false Mobile:false",
},
{
title: "IE10Compatibility",
ua: "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; InfoPath.3; MS-RTC LM 8)",
expected: "Mozilla:4.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-10.0 Engine:Trident Bot:false Mobile:false",
},
{
title: "IE11Win81",
ua: "Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko",
expected: "Mozilla:5.0 Platform:Windows OS:Windows 8.1 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false",
},
{
title: "IE11Win7",
ua: "Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko",
expected: "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false",
},
{
title: "IE11b32Win7b64",
ua: "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
expected: "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false",
},
{
title: "IE11b32Win7b64MDDRJS",
ua: "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; MDDRJS; rv:11.0) like Gecko",
expected: "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Internet Explorer-11.0 Engine:Trident Bot:false Mobile:false",
},
{
title: "IE11Compatibility",
ua: "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; Trident/7.0)",
expected: "Mozilla:4.0 Platform:Windows OS:Windows 8.1 Browser:Internet Explorer-7.0 Engine:Trident Bot:false Mobile:false",
},
// Microsoft Edge
{
title: "EdgeDesktop",
ua: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240",
expected: "Mozilla:5.0 Platform:Windows OS:Windows 10 Browser:Edge-12.10240 Engine:EdgeHTML Bot:false Mobile:false",
},
{
title: "EdgeMobile",
ua: "Mozilla/5.0 (Windows Phone 10.0; Android 4.2.1; DEVICE INFO) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Mobile Safari/537.36 Edge/12.10240",
expected: "Mozilla:5.0 Platform:Windows OS:Windows Phone 10.0 Browser:Edge-12.10240 Engine:EdgeHTML Bot:false Mobile:true",
},
// Gecko
{
title: "FirefoxMac",
ua: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0b8) Gecko/20100101 Firefox/4.0b8",
expected: "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10.6 Browser:Firefox-4.0b8 Engine:Gecko-20100101 Bot:false Mobile:false",
},
{
title: "FirefoxMacLoc",
ua: "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13",
expected: "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10.6 Localization:en-US Browser:Firefox-3.6.13 Engine:Gecko-20101203 Bot:false Mobile:false",
},
{
title: "FirefoxLinux",
ua: "Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0",
expected: "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Firefox-17.0 Engine:Gecko-20100101 Bot:false Mobile:false",
},
{
title: "FirefoxWin",
ua: "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14",
expected: "Mozilla:5.0 Platform:Windows OS:Windows XP Localization:en-US Browser:Firefox-2.0.0.14 Engine:Gecko-20080404 Bot:false Mobile:false",
},
{
title: "Firefox29Win7",
ua: "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:29.0) Gecko/20100101 Firefox/29.0",
expected: "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Firefox-29.0 Engine:Gecko-20100101 Bot:false Mobile:false",
},
{
title: "CaminoMac",
ua: "Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en; rv:1.8.1.14) Gecko/20080409 Camino/1.6 (like Firefox/2.0.0.14)",
expected: "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X Localization:en Browser:Camino-1.6 Engine:Gecko-20080409 Bot:false Mobile:false",
},
{
title: "Iceweasel",
ua: "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Iceweasel/2.0 (Debian-2.0+dfsg-1)",
expected: "Mozilla:5.0 Platform:X11 OS:Linux i686 Localization:en-US Browser:Iceweasel-2.0 Engine:Gecko-20061024 Bot:false Mobile:false",
},
{
title: "SeaMonkey",
ua: "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.1.4) Gecko/20091017 SeaMonkey/2.0",
expected: "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10.6 Localization:en-US Browser:SeaMonkey-2.0 Engine:Gecko-20091017 Bot:false Mobile:false",
},
{
title: "AndroidFirefox",
ua: "Mozilla/5.0 (Android; Mobile; rv:17.0) Gecko/17.0 Firefox/17.0",
expected: "Mozilla:5.0 Platform:Mobile OS:Android Browser:Firefox-17.0 Engine:Gecko-17.0 Bot:false Mobile:true",
},
{
title: "AndroidFirefoxTablet",
ua: "Mozilla/5.0 (Android; Tablet; rv:26.0) Gecko/26.0 Firefox/26.0",
expected: "Mozilla:5.0 Platform:Tablet OS:Android Browser:Firefox-26.0 Engine:Gecko-26.0 Bot:false Mobile:true",
},
{
title: "FirefoxOS",
ua: "Mozilla/5.0 (Mobile; rv:26.0) Gecko/26.0 Firefox/26.0",
expected: "Mozilla:5.0 Platform:Mobile OS:FirefoxOS Browser:Firefox-26.0 Engine:Gecko-26.0 Bot:false Mobile:true",
},
{
title: "FirefoxOSTablet",
ua: "Mozilla/5.0 (Tablet; rv:26.0) Gecko/26.0 Firefox/26.0",
expected: "Mozilla:5.0 Platform:Tablet OS:FirefoxOS Browser:Firefox-26.0 Engine:Gecko-26.0 Bot:false Mobile:true",
},
{
title: "FirefoxWinXP",
ua: "Mozilla/5.0 (Windows NT 5.2; rv:31.0) Gecko/20100101 Firefox/31.0",
expected: "Mozilla:5.0 Platform:Windows OS:Windows XP x64 Edition Browser:Firefox-31.0 Engine:Gecko-20100101 Bot:false Mobile:false",
},
{
title: "FirefoxMRA",
ua: "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:24.0) Gecko/20130405 MRA 5.5 (build 02842) Firefox/24.0 (.NET CLR 3.5.30729)",
expected: "Mozilla:5.0 Platform:Windows OS:Windows XP Localization:en-US Browser:Firefox-24.0 Engine:Gecko-20130405 Bot:false Mobile:false",
},
// Opera
{
title: "OperaMac",
ua: "Opera/9.27 (Macintosh; Intel Mac OS X; U; en)",
expected: "Platform:Macintosh OS:Intel Mac OS X Localization:en Browser:Opera-9.27 Engine:Presto Bot:false Mobile:false",
},
{
title: "OperaWin",
ua: "Opera/9.27 (Windows NT 5.1; U; en)",
expected: "Platform:Windows OS:Windows XP Localization:en Browser:Opera-9.27 Engine:Presto Bot:false Mobile:false",
},
{
title: "OperaWinNoLocale",
ua: "Opera/9.80 (Windows NT 5.1) Presto/2.12.388 Version/12.10",
expected: "Platform:Windows OS:Windows XP Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false",
},
{
title: "OperaWin2Comment",
ua: "Opera/9.80 (Windows NT 6.0; WOW64) Presto/2.12.388 Version/12.15",
expected: "Platform:Windows OS:Windows Vista Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false",
},
{
title: "OperaMinimal",
ua: "Opera/9.80",
expected: "Browser:Opera-9.80 Engine:Presto Bot:false Mobile:false",
},
{
title: "OperaFull",
ua: "Opera/9.80 (Windows NT 6.0; U; en) Presto/2.2.15 Version/10.10",
expected: "Platform:Windows OS:Windows Vista Localization:en Browser:Opera-9.80 Engine:Presto-2.2.15 Bot:false Mobile:false",
},
{
title: "OperaLinux",
ua: "Opera/9.80 (X11; Linux x86_64) Presto/2.12.388 Version/12.10",
expected: "Platform:X11 OS:Linux x86_64 Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false",
},
{
title: "OperaAndroid",
ua: "Opera/9.80 (Android 4.2.1; Linux; Opera Mobi/ADR-1212030829) Presto/2.11.355 Version/12.10",
expected: "Platform:Android 4.2.1 OS:Linux Browser:Opera-9.80 Engine:Presto-2.11.355 Bot:false Mobile:true",
},
{
title: "OperaNested",
ua: "Opera/9.80 (Windows NT 5.1; MRA 6.0 (build 5831)) Presto/2.12.388 Version/12.10",
expected: "Platform:Windows OS:Windows XP Browser:Opera-9.80 Engine:Presto-2.12.388 Bot:false Mobile:false",
},
{
title: "OperaMRA",
ua: "Opera/9.80 (Windows NT 6.1; U; MRA 5.8 (build 4139); en) Presto/2.9.168 Version/11.50",
expected: "Platform:Windows OS:Windows 7 Localization:en Browser:Opera-9.80 Engine:Presto-2.9.168 Bot:false Mobile:false",
},
// Other
{
title: "Empty",
ua: "",
expected: "Bot:false Mobile:false",
},
{
title: "Nil",
ua: "nil",
expected: "Browser:nil Bot:false Mobile:false",
},
{
title: "Compatible",
ua: "Mozilla/4.0 (compatible)",
expected: "Browser:Mozilla-4.0 Bot:false Mobile:false",
},
{
title: "Mozilla",
ua: "Mozilla/5.0",
expected: "Browser:Mozilla-5.0 Bot:false Mobile:false",
},
{
title: "Amaya",
ua: "amaya/9.51 libwww/5.4.0",
expected: "Browser:amaya-9.51 Engine:libwww-5.4.0 Bot:false Mobile:false",
},
{
title: "Rails",
ua: "Rails Testing",
expected: "Browser:Rails Engine:Testing Bot:false Mobile:false",
},
{
title: "Python",
ua: "Python-urllib/2.7",
expected: "Browser:Python-urllib-2.7 Bot:false Mobile:false",
},
{
title: "Curl",
ua: "curl/7.28.1",
expected: "Browser:curl-7.28.1 Bot:false Mobile:false",
},
// WebKit
{
title: "ChromeLinux",
ua: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.97 Safari/537.11",
expected: "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Chrome-23.0.1271.97 Engine:AppleWebKit-537.11 Bot:false Mobile:false",
},
{
title: "ChromeWin7",
ua: "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.168 Safari/535.19",
expected: "Mozilla:5.0 Platform:Windows OS:Windows 7 Browser:Chrome-18.0.1025.168 Engine:AppleWebKit-535.19 Bot:false Mobile:false",
},
{
title: "ChromeMinimal",
ua: "Mozilla/5.0 AppleWebKit/534.10 Chrome/8.0.552.215 Safari/534.10",
expected: "Mozilla:5.0 Browser:Chrome-8.0.552.215 Engine:AppleWebKit-534.10 Bot:false Mobile:false",
},
{
title: "ChromeMac",
ua: "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.552.231 Safari/534.10",
expected: "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10_6_5 Localization:en-US Browser:Chrome-8.0.552.231 Engine:AppleWebKit-534.10 Bot:false Mobile:false",
},
{
title: "SafariMac",
ua: "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_3; en-us) AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16",
expected: "Mozilla:5.0 Platform:Macintosh OS:Intel Mac OS X 10_6_3 Localization:en-us Browser:Safari-5.0 Engine:AppleWebKit-533.16 Bot:false Mobile:false",
},
{
title: "SafariWin",
ua: "Mozilla/5.0 (Windows; U; Windows NT 5.1; en) AppleWebKit/526.9 (KHTML, like Gecko) Version/4.0dp1 Safari/526.8",
expected: "Mozilla:5.0 Platform:Windows OS:Windows XP Localization:en Browser:Safari-4.0dp1 Engine:AppleWebKit-526.9 Bot:false Mobile:false",
},
{
title: "iPhone7",
ua: "Mozilla/5.0 (iPhone; CPU iPhone OS 7_0_3 like Mac OS X) AppleWebKit/537.51.1 (KHTML, like Gecko) Version/7.0 Mobile/11B511 Safari/9537.53",
expected: "Mozilla:5.0 Platform:iPhone OS:CPU iPhone OS 7_0_3 like Mac OS X Browser:Safari-7.0 Engine:AppleWebKit-537.51.1 Bot:false Mobile:true",
},
{
title: "iPhone",
ua: "Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/4A102 Safari/419",
expected: "Mozilla:5.0 Platform:iPhone OS:CPU like Mac OS X Localization:en Browser:Safari-3.0 Engine:AppleWebKit-420.1 Bot:false Mobile:true",
},
{
title: "iPod",
ua: "Mozilla/5.0 (iPod; U; CPU like Mac OS X; en) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/4A102 Safari/419",
expected: "Mozilla:5.0 Platform:iPod OS:CPU like Mac OS X Localization:en Browser:Safari-3.0 Engine:AppleWebKit-420.1 Bot:false Mobile:true",
},
{
title: "iPad",
ua: "Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B367 Safari/531.21.10",
expected: "Mozilla:5.0 Platform:iPad OS:CPU OS 3_2 like Mac OS X Localization:en-us Browser:Safari-4.0.4 Engine:AppleWebKit-531.21.10 Bot:false Mobile:true",
},
{
title: "webOS",
ua: "Mozilla/5.0 (webOS/1.4.0; U; en-US) AppleWebKit/532.2 (KHTML, like Gecko) Version/1.0 Safari/532.2 Pre/1.1",
expected: "Mozilla:5.0 Platform:webOS OS:Palm Localization:en-US Browser:webOS-1.0 Engine:AppleWebKit-532.2 Bot:false Mobile:true",
},
{
title: "Android",
ua: "Mozilla/5.0 (Linux; U; Android 1.5; de-; HTC Magic Build/PLAT-RC33) AppleWebKit/528.5+ (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
expected: "Mozilla:5.0 Platform:Linux OS:Android 1.5 Localization:de- Browser:Android-3.1.2 Engine:AppleWebKit-528.5+ Bot:false Mobile:true",
},
{
title: "BlackBerry",
ua: "Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, Like Gecko) Version/6.0.0.141 Mobile Safari/534.1+",
expected: "Mozilla:5.0 Platform:BlackBerry OS:BlackBerry 9800 Localization:en Browser:BlackBerry-6.0.0.141 Engine:AppleWebKit-534.1+ Bot:false Mobile:true",
},
{
title: "BB10",
ua: "Mozilla/5.0 (BB10; Touch) AppleWebKit/537.3+ (KHTML, like Gecko) Version/10.0.9.388 Mobile Safari/537.3+",
expected: "Mozilla:5.0 Platform:BlackBerry OS:BlackBerry Browser:BlackBerry-10.0.9.388 Engine:AppleWebKit-537.3+ Bot:false Mobile:true",
},
{
title: "Ericsson",
ua: "Mozilla/5.0 (SymbianOS/9.4; U; Series60/5.0 Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 Safari/525",
expected: "Mozilla:5.0 Platform:Symbian OS:SymbianOS/9.4 Browser:Symbian-3.0 Engine:AppleWebKit-525 Bot:false Mobile:true",
},
{
title: "ChromeAndroid",
ua: "Mozilla/5.0 (Linux; Android 4.2.1; Galaxy Nexus Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Mobile Safari/535.19",
expected: "Mozilla:5.0 Platform:Linux OS:Android 4.2.1 Browser:Chrome-18.0.1025.166 Engine:AppleWebKit-535.19 Bot:false Mobile:true",
},
{
title: "WebkitNoPlatform",
ua: "Mozilla/5.0 (en-us) AppleWebKit/525.13 (KHTML, like Gecko; Google Web Preview) Version/3.1 Safari/525.13",
expected: "Mozilla:5.0 Platform:en-us Localization:en-us Browser:Safari-3.1 Engine:AppleWebKit-525.13 Bot:false Mobile:false",
},
{
title: "OperaWebkitMobile",
ua: "Mozilla/5.0 (Linux; Android 4.2.2; Galaxy Nexus Build/JDQ39) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.58 Mobile Safari/537.31 OPR/14.0.1074.57453",
expected: "Mozilla:5.0 Platform:Linux OS:Android 4.2.2 Browser:Opera-14.0.1074.57453 Engine:AppleWebKit-537.31 Bot:false Mobile:true",
},
{
title: "OperaWebkitDesktop",
ua: "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.58 Safari/537.31 OPR/14.0.1074.57453",
expected: "Mozilla:5.0 Platform:X11 OS:Linux x86_64 Browser:Opera-14.0.1074.57453 Engine:AppleWebKit-537.31 Bot:false Mobile:false",
},
{
title: "ChromeNothingAfterU",
ua: "Mozilla/5.0 (Linux; U) AppleWebKit/537.4 (KHTML, like Gecko) Chrome/22.0.1229.79 Safari/537.4",
expected: "Mozilla:5.0 Platform:Linux OS:Linux Browser:Chrome-22.0.1229.79 Engine:AppleWebKit-537.4 Bot:false Mobile:false",
},
{
title: "SafariOnSymbian",
ua: "Mozilla/5.0 (SymbianOS/9.1; U; [en-us]) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
expected: "Mozilla:5.0 Platform:Symbian OS:SymbianOS/9.1 Browser:Symbian-413 Engine:AppleWebKit-413 Bot:false Mobile:true",
},
// Dalvik
{
title: "Dalvik - Dell:001DL",
ua: "Dalvik/1.2.0 (Linux; U; Android 2.2.2; 001DL Build/FRG83G)",
expected: "Mozilla:5.0 Platform:Linux OS:Android 2.2.2 Bot:false Mobile:true",
},
{
title: "Dalvik - HTC:001HT",
ua: "Dalvik/1.4.0 (Linux; U; Android 2.3.3; 001HT Build/GRI40)",
expected: "Mozilla:5.0 Platform:Linux OS:Android 2.3.3 Bot:false Mobile:true",
},
{
title: "Dalvik - ZTE:009Z",
ua: "Dalvik/1.4.0 (Linux; U; Android 2.3.4; 009Z Build/GINGERBREAD)",
expected: "Mozilla:5.0 Platform:Linux OS:Android 2.3.4 Bot:false Mobile:true",
},
{
title: "Dalvik - A850",
ua: "Dalvik/1.6.0 (Linux; U; Android 4.2.2; A850 Build/JDQ39) Configuration/CLDC-1.1; Opera Mini/att/4.2",
expected: "Mozilla:5.0 Platform:Linux OS:Android 4.2.2 Bot:false Mobile:true",
},
{
title: "Dalvik - Asus:T00Q",
ua: "Dalvik/1.6.0 (Linux; U; Android 4.4.2; ASUS_T00Q Build/KVT49L)/CLDC-1.1",
expected: "Mozilla:5.0 Platform:Linux OS:Android 4.4.2 Bot:false Mobile:true",
},
{
title: "Dalvik - W2430",
ua: "Dalvik/1.6.0 (Linux; U; Android 4.0.4; W2430 Build/IMM76D)014; Profile/MIDP-2.1 Configuration/CLDC-1",
expected: "Mozilla:5.0 Platform:Linux OS:Android 4.0.4 Bot:false Mobile:true",
},
}
// Internal: beautify the UserAgent reference into a string so it can be
// tested later on.
//
// ua - a UserAgent reference.
//
// Returns a string that contains the beautified representation.
func beautify(ua *UserAgent) (s string) {
if len(ua.Mozilla()) > 0 {
s += "Mozilla:" + ua.Mozilla() + " "
}
if len(ua.Platform()) > 0 {
s += "Platform:" + ua.Platform() + " "
}
if len(ua.OS()) > 0 {
s += "OS:" + ua.OS() + " "
}
if len(ua.Localization()) > 0 {
s += "Localization:" + ua.Localization() + " "
}
str1, str2 := ua.Browser()
if len(str1) > 0 {
s += "Browser:" + str1
if len(str2) > 0 {
s += "-" + str2 + " "
} else {
s += " "
}
}
str1, str2 = ua.Engine()
if len(str1) > 0 {
s += "Engine:" + str1
if len(str2) > 0 {
s += "-" + str2 + " "
} else {
s += " "
}
}
s += "Bot:" + fmt.Sprintf("%v", ua.Bot()) + " "
s += "Mobile:" + fmt.Sprintf("%v", ua.Mobile())
return s
}
// The test suite.
func TestUserAgent(t *testing.T) {
for _, tt := range uastrings {
ua := New(tt.ua)
got := beautify(ua)
if tt.expected != got {
t.Errorf("\nTest %v\ngot: %q\nexpected %q\n", tt.title, got, tt.expected)
}
}
}
// Benchmark: it parses each User-Agent string on the uastrings slice b.N times.
func Be
|
*testing.B) {
for i := 0; i < b.N; i++ {
b.StopTimer()
for _, tt := range uastrings {
ua := new(UserAgent)
b.StartTimer()
ua.Parse(tt.ua)
}
}
}
|
nchmarkUserAgent(b
|
range.go
|
package circuits
import (
"github.com/consensys/gnark/frontend"
"github.com/consensys/gurvy"
)
type rangeCheckConstantCircuit struct {
X frontend.Variable
Y frontend.Variable `gnark:",public"`
}
func (circuit *rangeCheckConstantCircuit) Define(curveID gurvy.ID, cs *frontend.ConstraintSystem) error {
c1 := cs.Mul(circuit.X, circuit.Y)
c2 := cs.Mul(c1, circuit.Y)
c3 := cs.Add(circuit.X, circuit.Y)
cs.AssertIsLessOrEqual(c3, 161) // c3 is from a linear expression only
cs.AssertIsLessOrEqual(c2, 161)
return nil
}
func rangeCheckConstant() {
var circuit, good, bad, public rangeCheckConstantCircuit
r1cs, err := frontend.Compile(gurvy.UNKNOWN, &circuit)
if err != nil {
panic(err)
}
good.X.Assign(10)
good.Y.Assign(4)
bad.X.Assign(11)
bad.Y.Assign(4)
public.Y.Assign(4)
addEntry("range_constant", r1cs, &good, &bad, &public)
}
type rangeCheckCircuit struct {
X frontend.Variable
Y, Bound frontend.Variable `gnark:",public"`
}
func (circuit *rangeCheckCircuit) Define(curveID gurvy.ID, cs *frontend.ConstraintSystem) error {
c1 := cs.Mul(circuit.X, circuit.Y)
c2 := cs.Mul(c1, circuit.Y)
c3 := cs.Add(circuit.X, circuit.Y)
cs.AssertIsLessOrEqual(c2, circuit.Bound)
cs.AssertIsLessOrEqual(c3, circuit.Bound) // c3 is from a linear expression only
return nil
}
func rangeCheck() {
var circuit, good, bad, public rangeCheckCircuit
r1cs, err := frontend.Compile(gurvy.UNKNOWN, &circuit)
if err != nil {
panic(err)
}
good.X.Assign(10)
good.Y.Assign(4)
good.Bound.Assign(161)
bad.X.Assign(11)
bad.Y.Assign(4)
bad.Bound.Assign(161)
public.Y.Assign(4)
public.Bound.Assign(161)
addEntry("range", r1cs, &good, &bad, &public)
}
func init()
|
{
rangeCheckConstant()
rangeCheck()
}
|
|
uexpect.py
|
# Copyright (c) 2019 Vitaliy Zakaznikov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pty
import time
import sys
import re
from threading import Thread, Event
from subprocess import Popen
from Queue import Queue, Empty
class TimeoutError(Exception):
def __init__(self, timeout):
self.timeout = timeout
def __str__(self):
return 'Timeout %.3fs' % float(self.timeout)
class ExpectTimeoutError(Exception):
def __init__(self, pattern, timeout, buffer):
self.pattern = pattern
self.timeout = timeout
self.buffer = buffer
def __str__(self):
s = 'Timeout %.3fs ' % float(self.timeout)
if self.pattern:
s += 'for %s ' % repr(self.pattern.pattern)
if self.buffer:
s += 'buffer %s ' % repr(self.buffer[:])
s += 'or \'%s\'' % ','.join(['%x' % ord(c) for c in self.buffer[:]])
return s
class IO(object):
class EOF(object):
pass
class Timeout(object):
pass
EOF = EOF
TIMEOUT = Timeout
class Logger(object):
def __init__(self, logger, prefix=''):
self._logger = logger
self._prefix = prefix
def write(self, data):
self._logger.write(('\n' + data).replace('\n','\n' + self._prefix))
def flush(self):
self._logger.flush()
def __init__(self, process, master, queue, reader):
self.process = process
self.master = master
self.queue = queue
self.buffer = None
self.before = None
self.after = None
self.match = None
self.pattern = None
self.reader = reader
self._timeout = None
self._logger = None
self._eol = ''
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def logger(self, logger=None, prefix=''):
if logger:
self._logger = self.Logger(logger, prefix=prefix)
return self._logger
def timeout(self, timeout=None):
if timeout:
self._timeout = timeout
return self._timeout
def eol(self, eol=None):
if eol:
self._eol = eol
return self._eol
def close(self, force=True):
self.reader['kill_event'].set()
os.system('pkill -TERM -P %d' % self.process.pid)
if force:
self.process.kill()
else:
self.process.terminate()
os.close(self.master)
if self._logger:
self._logger.write('\n')
self._logger.flush()
def send(self, data, eol=None):
if eol is None:
eol = self._eol
return self.write(data + eol)
def write(self, data):
return os.write(self.master, data)
def expect(self, pattern, timeout=None, escape=False):
self.match = None
self.before = None
self.after = None
if escape:
pattern = re.escape(pattern)
pattern = re.compile(pattern)
if timeout is None:
timeout = self._timeout
timeleft = timeout
while True:
start_time = time.time()
if self.buffer is not None:
self.match = pattern.search(self.buffer, 0)
if self.match is not None:
self.after = self.buffer[self.match.start():self.match.end()]
self.before = self.buffer[:self.match.start()]
self.buffer = self.buffer[self.match.end():]
break
if timeleft < 0:
break
try:
data = self.read(timeout=timeleft, raise_exception=True)
except TimeoutError:
if self._logger:
self._logger.write((self.buffer or '') + '\n')
self._logger.flush()
exception = ExpectTimeoutError(pattern, timeout, self.buffer)
self.buffer = None
raise exception
timeleft -= (time.time() - start_time)
if data:
self.buffer = (self.buffer + data) if self.buffer else data
if self._logger:
self._logger.write((self.before or '') + (self.after or ''))
self._logger.flush()
if self.match is None:
exception = ExpectTimeoutError(pattern, timeout, self.buffer)
self.buffer = None
raise exception
return self.match
def
|
(self, timeout=0, raise_exception=False):
data = ''
timeleft = timeout
try:
while timeleft >= 0 :
start_time = time.time()
data += self.queue.get(timeout=timeleft)
if data:
break
timeleft -= (time.time() - start_time)
except Empty:
if data:
return data
if raise_exception:
raise TimeoutError(timeout)
pass
if not data and raise_exception:
raise TimeoutError(timeout)
return data
def spawn(command):
master, slave = pty.openpty()
process = Popen(command, preexec_fn=os.setsid, stdout=slave, stdin=slave, stderr=slave, bufsize=1)
os.close(slave)
queue = Queue()
reader_kill_event = Event()
thread = Thread(target=reader, args=(process, master, queue, reader_kill_event))
thread.daemon = True
thread.start()
return IO(process, master, queue, reader={'thread':thread, 'kill_event':reader_kill_event})
def reader(process, out, queue, kill_event):
while True:
try:
data = os.read(out, 65536)
queue.put(data)
except:
if kill_event.is_set():
break
raise
|
read
|
sitemap.xml.js
|
import React from "react";
import getSlugs from '@utils/getSlugs'
class Sitemap extends React.Component {
static async getInitialProps(ctx) {
console.log("ctx: ");
console.log(ctx)
const baseUrl = {
development: "http://localhost:3000",
production: "https://andenacitelli.com",
}[process.env.NODE_ENV];
const staticPages = [
"",
"blog"
].map((staticPagePath) => {
return `${baseUrl}/${staticPagePath}`;
});
// Dynamic pages
// TODO: Set change dates for blog to be the actual modification dates
const slugs = ((context) => {
return getSlugs(context)
})(require.context('../posts', true, /\.md$/))
const dynamicPages = slugs.map((dynamicPagePath) => {
return `${baseUrl}/blog/${dynamicPagePath}`;
})
const pages = staticPages.concat(dynamicPages);
// TODO: Set priorities for blog pages to be lower
const sitemap = `<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
${pages
.map((url) => {
return `
<url>
<loc>${url}</loc>
<lastmod>${new Date().toISOString()}</lastmod>
<changefreq>monthly</changefreq>
<priority>1.0</priority>
</url>
`;
})
.join("")}
</urlset>
`;
res.setHeader("Content-Type", "text/xml");
res.write(sitemap);
res.end();
|
return {
props: {},
};
};
};
export default Sitemap;
| |
httpmockserver_test.go
|
package httpmockserver_test
import (
"bytes"
"github.com/stretchr/testify/assert"
"github.com/ybbus/httpmockserver"
"net/http"
"testing"
)
func TestMockServer_EXPECT(t *testing.T) {
check := assert.New(t)
tests := []struct {
Description string
Run func(mockServer *httpmockserver.MockServer, url string)
}{
{
Description: "simple get on /hello",
Run: func(mockServer *httpmockserver.MockServer, url string) {
mockServer.EXPECT().Get("/hello").Response(200)
res, err := Get(url+"/hello", nil)
check.NoError(err)
check.Equal(200, res.StatusCode)
},
},
{
Description: "simple get on /hello with header",
Run: func(mockServer *httpmockserver.MockServer, url string) {
mockServer.EXPECT().Get("/hello").Header("Test", "123").Response(200)
res, err := Get(url+"/hello", map[string]string{"Test": "123"})
check.NoError(err)
check.Equal(200, res.StatusCode)
},
},
{
Description: "default calls",
Run: func(mockServer *httpmockserver.MockServer, url string) {
mockServer.DEFAULT().AnyRequest().Response(201)
mockServer.EXPECT().Get("/hello").Header("Test", "123").Response(200)
res, err := Get(url+"/hello", map[string]string{"Test": "123"})
check.NoError(err)
|
check.Equal(200, res.StatusCode)
res, err = Get(url+"/hello", map[string]string{"Test": "123"})
check.NoError(err)
check.Equal(201, res.StatusCode)
res, err = Post(url+"/test", map[string]string{"Test": "123"}, []byte("Hello World"))
check.NoError(err)
check.Equal(201, res.StatusCode)
},
},
{
Description: "min max calls",
Run: func(mockServer *httpmockserver.MockServer, url string) {
mockServer.EXPECT().Get("/min").MinTimes(1).Response(200)
mockServer.EXPECT().Get("/max").MinTimes(2).Response(200)
mockServer.EXPECT().Get("/times").Times(3).Response(200)
Get(url+"/min", nil)
Get(url+"/max", nil)
Get(url+"/max", nil)
Get(url+"/max", nil)
Get(url+"/times", nil)
Get(url+"/times", nil)
Get(url+"/times", nil)
},
},
}
for _, test := range tests {
func() {
server := httpmockserver.New(t)
defer server.Shutdown()
test.Run(server, server.URL())
server.Finish()
}()
}
}
func Get(url string, headers map[string]string) (*http.Response, error) {
c := http.Client{}
req, err := http.NewRequest("GET", url, nil)
if err != nil {
panic(err)
}
for k, v := range headers {
req.Header.Set(k, v)
}
return c.Do(req)
}
func Post(url string, headers map[string]string, body []byte) (*http.Response, error) {
c := http.Client{}
req, err := http.NewRequest("POST", url, bytes.NewReader(body))
if err != nil {
panic(err)
}
for k, v := range headers {
req.Header.Set(k, v)
}
return c.Do(req)
}
| |
geo.js
|
/* callback function for getCurrentLocation */
var locationCallback = null;
/**
*
* @param {Object} _lat
* @param {Object} _lng
* @param {Object} _callback
*/
function
|
(_lat, _lng, _callback) {
var title;
Ti.Geolocation.purpose = "Wiley Alloy App Demo";
// callback method converting lat lng into a location/address
Ti.Geolocation.reverseGeocoder(_lat, _lng, function(_data) {
if (_data.success) {
Ti.API.debug("reverseGeocoder " + JSON.stringify(_data, null, 2));
var place = _data.places[0];
if (place.city === "") {
title = place.address;
} else {
title = place.street + " " + place.city;
}
} else {
title = "No Address Found: " + _lat + ", " + _lng;
}
_callback(title);
});
}
/**
*
* @param {Object} _location
*/
function locationCallbackHandler(_location) {
// later on when you no longer want to listen;
Ti.Geolocation.removeEventListener('location', locationCallbackHandler);
if (!_location.error && _location && _location.coords) {
var lat, lng;
Ti.API.debug("locationCallback " + JSON.stringify(_location, null, 2));
lat = _location.coords.latitude;
lng = _location.coords.longitude;
reverseGeocoder(lat, lng, function(_title) {
locationCallback({
coords : _location.coords,
title : _title
}, null);
locationCallback = null;
});
} else {
alert('Location Services Error: ' + _location.error);
locationCallback(null, _location.error);
}
}
/**
*
* @param {Object} _callback
*/
exports.getCurrentLocation = function(_callback) {
if (!Ti.Geolocation.getLocationServicesEnabled()) {
alert('Location Services are not enabled');
_callback(null, 'Location Services are not enabled');
return;
}
// save in global for use in locationCallbackHandler
locationCallback = _callback;
Ti.Geolocation.purpose = "Wiley Alloy App Demo";
Ti.Geolocation.accuracy = Ti.Geolocation.ACCURACY_HIGH;
Ti.Geolocation.distanceFilter = 10;
Ti.Geolocation.addEventListener('location', locationCallbackHandler);
};
exports.calculateMapRegion = function(_annotations) {
var latMax, latMin, lngMax, lngMin;
for (var c = 0; c < _annotations.length; c++) {
var latitude = _annotations[c].latitude;
var longitude = _annotations[c].longitude;
latMax = Math.max(latMax || latitude, latitude);
latMin = Math.min(latMin || latitude, latitude);
lngMax = Math.max(lngMax || longitude, longitude);
lngMin = Math.min(lngMin || longitude, longitude);
}
//create the map boundary area values
var bndLat = (latMax + latMin) / 2;
var bndLng = (lngMax + lngMin) / 2;
var bndLatDelta = latMax - latMin + 0.01;
var bndLngDelta = lngMax - lngMin + 0.01;
//create the map region definition for the boundaries containing the sites
return mapRegionSites = {
latitude : bndLat,
longitude : bndLng,
animate : true,
latitudeDelta : bndLatDelta,
longitudeDelta : bndLngDelta
};
};
|
reverseGeocoder
|
e-shape-action-runtime-transform-move-forward-or-backward.ts
|
/*
* Copyright (C) 2019 Toshiba Corporation
* SPDX-License-Identifier: Apache-2.0
*/
import { EShape } from "../e-shape";
import { EShapeRuntime, EShapeRuntimeReset } from "../e-shape-runtime";
import { EShapeActionRuntimeTransformMove } from "./e-shape-action-runtime-transform-move";
import { EShapeActionValueTransformMove } from "./e-shape-action-value-transform-move";
export class EShapeActionRuntimeTransformMoveForwardOrBackward extends EShapeActionRuntimeTransformMove {
constructor( value: EShapeActionValueTransformMove ) {
super( value, EShapeRuntimeReset.POSITION );
}
execute( shape: EShape, runtime: EShapeRuntime, time: number ): void {
if( this.condition( shape, time ) ) {
|
const writtenPositionX = ( (runtime.written & EShapeRuntimeReset.POSITION_X) !== 0 );
const writtenPositionY = ( (runtime.written & EShapeRuntimeReset.POSITION_Y) !== 0 );
const oldPositionX = ( writtenPositionX ? position.x : runtime.x );
const oldPositionY = ( writtenPositionY ? position.y : runtime.y );
shape.updateTransform();
const localTransform = shape.transform.localTransform;
position.set( oldPositionX - localTransform.c * amount, oldPositionY - localTransform.d * amount );
runtime.written |= this.reset;
}
}
}
|
const amount = this.amount( shape, time );
const transform = shape.transform;
const position = transform.position;
|
docs.go
|
// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
// This file was generated by swaggo/swag at
// 2019-07-17 15:54:04.765591008 +0300 EEST m=+0.039056976
package docs
import (
"bytes"
"encoding/json"
"github.com/alecthomas/template"
"github.com/swaggo/swag"
)
var doc = `{
"schemes": {{ marshal .Schemes }},
"swagger": "2.0",
"info": {
"description": "This is a sample server Petstore server.",
"title": "Swagger Example API",
"termsOfService": "http://swagger.io/terms/",
"contact": {
"name": "API Support",
"url": "http://www.swagger.io/support",
"email": "[email protected]"
},
"license": {
"name": "Apache 2.0",
"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
},
"version": "1.0"
},
"host": "petstore.swagger.io",
"basePath": "/v2",
"paths": {
"/file/upload": {
"post": {
"description": "Upload file",
"consumes": [
"multipart/form-data"
],
"produces": [
"application/json"
],
"summary": "Upload file",
"operationId": "file.upload",
"parameters": [
{
"type": "file",
"description": "this is a test file",
"name": "file",
"in": "formData",
"required": true
}
],
"responses": {
"200": {
"description": "ok",
"schema": {
"type": "string"
}
},
"400": {
"description": "We need ID!!",
"schema": {
"$ref": "#/definitions/web.APIError"
}
},
"404": {
"description": "Can not find ID",
"schema": {
"$ref": "#/definitions/web.APIError"
}
}
}
}
},
"/testapi/get-string-by-int/{some_id}": {
"get": {
"description": "get string by ID",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"summary": "Add a new pet to the store",
"operationId": "get-string-by-int",
"parameters": [
{
"type": "integer",
"format": "int64",
"description": "Some ID",
"name": "some_id",
"in": "path",
"required": true
},
{
"description": "Some ID",
"name": "some_id",
"in": "body",
"required": true,
"schema": {
"type": "object",
"$ref": "#/definitions/web.Pet"
}
}
],
"responses": {
"200": {
"description": "ok",
"schema": {
"type": "string"
}
},
"400": {
"description": "We need ID!!",
"schema": {
"$ref": "#/definitions/web.APIError"
}
},
"404": {
"description": "Can not find ID",
"schema": {
"$ref": "#/definitions/web.APIError"
}
}
}
}
},
"/testapi/get-struct-array-by-string/{some_id}": {
"get": {
"security": [
{
"ApiKeyAuth": []
},
{
"BasicAuth": []
},
{
"OAuth2Application": [
"write"
]
},
{
"OAuth2Implicit": [
"read",
"admin"
]
},
{
"OAuth2AccessCode": [
"read"
]
},
{
"OAuth2Password": [
"admin"
]
}
],
"description": "get struct array by ID",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"operationId": "get-struct-array-by-string",
"parameters": [
{
"type": "string",
"description": "Some ID",
"name": "some_id",
"in": "path",
"required": true
},
{
"enum": [
1,
2,
3
],
"type": "integer",
"description": "Category",
"name": "category",
"in": "query",
"required": true
},
{
"minimum": 0,
"type": "integer",
"default": 0,
"description": "Offset",
"name": "offset",
"in": "query",
"required": true
},
{
"maximum": 50,
"type": "integer",
"default": 10,
"description": "Limit",
"name": "limit",
"in": "query",
"required": true
},
{
"maxLength": 50,
"minLength": 1,
"type": "string",
"default": "\"\"",
"description": "q",
"name": "q",
"in": "query",
"required": true
}
],
"responses": {
"200": {
"description": "ok",
"schema": {
"type": "string"
}
},
"400": {
"description": "We need ID!!",
"schema": {
"$ref": "#/definitions/web.APIError"
}
},
"404": {
"description": "Can not find ID",
"schema": {
"$ref": "#/definitions/web.APIError"
}
}
}
}
}
},
"definitions": {
"web.APIError": {
"type": "object",
"properties": {
"createdAt": {
"type": "string"
},
"errorCode": {
"type": "integer"
},
"errorMessage": {
"type": "string"
}
}
},
"web.Pet": {
"type": "object",
"properties": {
"category": {
"type": "object",
"properties": {
"id": {
"type": "integer",
"example": 1
},
"name": {
"type": "string",
"example": "category_name"
},
"photoURLs": {
"type": "array",
"format": "url",
"items": {
"type": "string"
},
"example": [
"http://test/image/1.jpg",
"http://test/image/2.jpg"
]
},
"smallCategory": {
"type": "object",
"properties": {
"id": {
"type": "integer",
"example": 1
},
"name": {
"type": "string",
"example": "detail_category_name"
},
"photoURLs": {
"type": "array",
"items": {
"type": "string"
},
"example": [
"http://test/image/1.jpg",
"http://test/image/2.jpg"
]
}
}
}
}
},
"data": {
"type": "object"
},
"decimal": {
"type": "number"
},
"id": {
"type": "integer",
"format": "int64",
"example": 1
},
"isAlive": {
"type": "boolean",
"example": true
},
"name": {
"type": "string",
"example": "poti"
},
"pets": {
"type": "array",
"items": {
"$ref": "#/definitions/web.Pet2"
}
},
"pets2": {
"type": "array",
"items": {
"$ref": "#/definitions/web.Pet2"
}
},
"photoURLs": {
"type": "array",
"items": {
"type": "string"
},
"example": [
"http://test/image/1.jpg",
"http://test/image/2.jpg"
]
},
"price": {
"type": "number",
"example": 3.25
},
"status": {
"type": "string"
},
"tags": {
"type": "array",
"items": {
"$ref": "#/definitions/web.Tag"
}
},
"uuid": {
"type": "string"
}
}
},
"web.Pet2": {
"type": "object",
"properties": {
"deletedAt": {
"type": "string"
},
"id": {
"type": "integer"
},
"middleName": {
"type": "string"
}
}
},
"web.RevValue": {
"type": "object",
"properties": {
"data": {
"type": "integer"
},
"err": {
"type": "integer"
},
"status": {
"type": "boolean"
}
}
},
"web.Tag": {
"type": "object",
"properties": {
"id": {
"type": "integer",
"format": "int64"
},
"name": {
"type": "string"
},
"pets": {
"type": "array",
"items": {
"$ref": "#/definitions/web.Pet"
}
}
}
}
},
"securityDefinitions": {
"ApiKeyAuth": {
"type": "apiKey",
"name": "Authorization",
"in": "header"
},
"BasicAuth": {
"type": "basic"
},
"OAuth2AccessCode": {
"type": "oauth2",
"flow": "accessCode",
"authorizationUrl": "https://example.com/oauth/authorize",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information"
}
},
"OAuth2Application": {
"type": "oauth2",
"flow": "application",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information",
"write": " Grants write access"
}
},
"OAuth2Implicit": {
"type": "oauth2",
"flow": "implicit",
"authorizationUrl": "https://example.com/oauth/authorize",
"scopes": {
"admin": " Grants read and write access to administrative information",
"write": " Grants write access"
}
},
"OAuth2Password": {
"type": "oauth2",
"flow": "password",
"tokenUrl": "https://example.com/oauth/token",
"scopes": {
"admin": " Grants read and write access to administrative information",
"read": " Grants read access",
"write": " Grants write access"
}
}
}
}`
type swaggerInfo struct {
Version string
Host string
BasePath string
Schemes []string
Title string
Description string
}
// SwaggerInfo holds exported Swagger Info so clients can modify it
var SwaggerInfo = swaggerInfo{Schemes: []string{}}
type s struct{}
func (s *s) ReadDoc() string {
t, err := template.New("swagger_info").Funcs(template.FuncMap{
"marshal": func(v interface{}) string {
a, _ := json.Marshal(v)
return string(a)
},
}).Parse(doc)
if err != nil {
return doc
}
var tpl bytes.Buffer
if err := t.Execute(&tpl, SwaggerInfo); err != nil {
return doc
}
return tpl.String()
}
func
|
() {
swag.Register(swag.Name, &s{})
}
|
init
|
simplify.rs
|
//! A number of passes which remove various redundancies in the CFG.
//!
//! The `SimplifyCfg` pass gets rid of unnecessary blocks in the CFG, whereas the `SimplifyLocals`
//! gets rid of all the unnecessary local variable declarations.
//!
//! The `SimplifyLocals` pass is kinda expensive and therefore not very suitable to be run often.
//! Most of the passes should not care or be impacted in meaningful ways due to extra locals
//! either, so running the pass once, right before codegen, should suffice.
//!
//! On the other side of the spectrum, the `SimplifyCfg` pass is considerably cheap to run, thus
//! one should run it after every pass which may modify CFG in significant ways. This pass must
//! also be run before any analysis passes because it removes dead blocks, and some of these can be
//! ill-typed.
//!
//! The cause of this typing issue is typeck allowing most blocks whose end is not reachable have
//! an arbitrary return type, rather than having the usual () return type (as a note, typeck's
//! notion of reachability is in fact slightly weaker than MIR CFG reachability - see #31617). A
//! standard example of the situation is:
//!
//! ```rust
//! fn example() {
//! let _a: char = { return; };
|
//! }
//! ```
//!
//! Here the block (`{ return; }`) has the return type `char`, rather than `()`, but the MIR we
//! naively generate still contains the `_a = ()` write in the unreachable block "after" the
//! return.
use crate::MirPass;
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::mir::coverage::*;
use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
use smallvec::SmallVec;
use std::borrow::Cow;
use std::convert::TryInto;
pub struct SimplifyCfg {
label: String,
}
impl SimplifyCfg {
pub fn new(label: &str) -> Self {
SimplifyCfg { label: format!("SimplifyCfg-{}", label) }
}
}
pub fn simplify_cfg<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
CfgSimplifier::new(body).simplify();
remove_dead_blocks(tcx, body);
// FIXME: Should probably be moved into some kind of pass manager
body.basic_blocks_mut().raw.shrink_to_fit();
}
impl<'tcx> MirPass<'tcx> for SimplifyCfg {
fn name(&self) -> Cow<'_, str> {
Cow::Borrowed(&self.label)
}
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
debug!("SimplifyCfg({:?}) - simplifying {:?}", self.label, body.source);
simplify_cfg(tcx, body);
}
}
pub struct CfgSimplifier<'a, 'tcx> {
basic_blocks: &'a mut IndexVec<BasicBlock, BasicBlockData<'tcx>>,
pred_count: IndexVec<BasicBlock, u32>,
}
impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> {
pub fn new(body: &'a mut Body<'tcx>) -> Self {
let mut pred_count = IndexVec::from_elem(0u32, body.basic_blocks());
// we can't use mir.predecessors() here because that counts
// dead blocks, which we don't want to.
pred_count[START_BLOCK] = 1;
for (_, data) in traversal::preorder(body) {
if let Some(ref term) = data.terminator {
for &tgt in term.successors() {
pred_count[tgt] += 1;
}
}
}
let basic_blocks = body.basic_blocks_mut();
CfgSimplifier { basic_blocks, pred_count }
}
pub fn simplify(mut self) {
self.strip_nops();
// Vec of the blocks that should be merged. We store the indices here, instead of the
// statements itself to avoid moving the (relatively) large statements twice.
// We do not push the statements directly into the target block (`bb`) as that is slower
// due to additional reallocations
let mut merged_blocks = Vec::new();
loop {
let mut changed = false;
for bb in self.basic_blocks.indices() {
if self.pred_count[bb] == 0 {
continue;
}
debug!("simplifying {:?}", bb);
let mut terminator =
self.basic_blocks[bb].terminator.take().expect("invalid terminator state");
for successor in terminator.successors_mut() {
self.collapse_goto_chain(successor, &mut changed);
}
let mut inner_changed = true;
merged_blocks.clear();
while inner_changed {
inner_changed = false;
inner_changed |= self.simplify_branch(&mut terminator);
inner_changed |= self.merge_successor(&mut merged_blocks, &mut terminator);
changed |= inner_changed;
}
let statements_to_merge =
merged_blocks.iter().map(|&i| self.basic_blocks[i].statements.len()).sum();
if statements_to_merge > 0 {
let mut statements = std::mem::take(&mut self.basic_blocks[bb].statements);
statements.reserve(statements_to_merge);
for &from in &merged_blocks {
statements.append(&mut self.basic_blocks[from].statements);
}
self.basic_blocks[bb].statements = statements;
}
self.basic_blocks[bb].terminator = Some(terminator);
}
if !changed {
break;
}
}
}
/// This function will return `None` if
/// * the block has statements
/// * the block has a terminator other than `goto`
/// * the block has no terminator (meaning some other part of the current optimization stole it)
fn take_terminator_if_simple_goto(&mut self, bb: BasicBlock) -> Option<Terminator<'tcx>> {
match self.basic_blocks[bb] {
BasicBlockData {
ref statements,
terminator:
ref mut terminator @ Some(Terminator { kind: TerminatorKind::Goto { .. }, .. }),
..
} if statements.is_empty() => terminator.take(),
// if `terminator` is None, this means we are in a loop. In that
// case, let all the loop collapse to its entry.
_ => None,
}
}
/// Collapse a goto chain starting from `start`
fn collapse_goto_chain(&mut self, start: &mut BasicBlock, changed: &mut bool) {
// Using `SmallVec` here, because in some logs on libcore oli-obk saw many single-element
// goto chains. We should probably benchmark different sizes.
let mut terminators: SmallVec<[_; 1]> = Default::default();
let mut current = *start;
while let Some(terminator) = self.take_terminator_if_simple_goto(current) {
let target = match terminator {
Terminator { kind: TerminatorKind::Goto { target }, .. } => target,
_ => unreachable!(),
};
terminators.push((current, terminator));
current = target;
}
let last = current;
*start = last;
while let Some((current, mut terminator)) = terminators.pop() {
let target = match terminator {
Terminator { kind: TerminatorKind::Goto { ref mut target }, .. } => target,
_ => unreachable!(),
};
*changed |= *target != last;
*target = last;
debug!("collapsing goto chain from {:?} to {:?}", current, target);
if self.pred_count[current] == 1 {
// This is the last reference to current, so the pred-count to
// to target is moved into the current block.
self.pred_count[current] = 0;
} else {
self.pred_count[*target] += 1;
self.pred_count[current] -= 1;
}
self.basic_blocks[current].terminator = Some(terminator);
}
}
// merge a block with 1 `goto` predecessor to its parent
fn merge_successor(
&mut self,
merged_blocks: &mut Vec<BasicBlock>,
terminator: &mut Terminator<'tcx>,
) -> bool {
let target = match terminator.kind {
TerminatorKind::Goto { target } if self.pred_count[target] == 1 => target,
_ => return false,
};
debug!("merging block {:?} into {:?}", target, terminator);
*terminator = match self.basic_blocks[target].terminator.take() {
Some(terminator) => terminator,
None => {
// unreachable loop - this should not be possible, as we
// don't strand blocks, but handle it correctly.
return false;
}
};
merged_blocks.push(target);
self.pred_count[target] = 0;
true
}
// turn a branch with all successors identical to a goto
fn simplify_branch(&mut self, terminator: &mut Terminator<'tcx>) -> bool {
match terminator.kind {
TerminatorKind::SwitchInt { .. } => {}
_ => return false,
};
let first_succ = {
if let Some(&first_succ) = terminator.successors().next() {
if terminator.successors().all(|s| *s == first_succ) {
let count = terminator.successors().count();
self.pred_count[first_succ] -= (count - 1) as u32;
first_succ
} else {
return false;
}
} else {
return false;
}
};
debug!("simplifying branch {:?}", terminator);
terminator.kind = TerminatorKind::Goto { target: first_succ };
true
}
fn strip_nops(&mut self) {
for blk in self.basic_blocks.iter_mut() {
blk.statements.retain(|stmt| !matches!(stmt.kind, StatementKind::Nop))
}
}
}
pub fn remove_dead_blocks<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let reachable = traversal::reachable_as_bitset(body);
let num_blocks = body.basic_blocks().len();
if num_blocks == reachable.count() {
return;
}
let basic_blocks = body.basic_blocks_mut();
let mut replacements: Vec<_> = (0..num_blocks).map(BasicBlock::new).collect();
let mut used_blocks = 0;
for alive_index in reachable.iter() {
let alive_index = alive_index.index();
replacements[alive_index] = BasicBlock::new(used_blocks);
if alive_index != used_blocks {
// Swap the next alive block data with the current available slot. Since
// alive_index is non-decreasing this is a valid operation.
basic_blocks.raw.swap(alive_index, used_blocks);
}
used_blocks += 1;
}
if tcx.sess.instrument_coverage() {
save_unreachable_coverage(basic_blocks, used_blocks);
}
basic_blocks.raw.truncate(used_blocks);
for block in basic_blocks {
for target in block.terminator_mut().successors_mut() {
*target = replacements[target.index()];
}
}
}
/// Some MIR transforms can determine at compile time that a sequences of
/// statements will never be executed, so they can be dropped from the MIR.
/// For example, an `if` or `else` block that is guaranteed to never be executed
/// because its condition can be evaluated at compile time, such as by const
/// evaluation: `if false { ... }`.
///
/// Those statements are bypassed by redirecting paths in the CFG around the
/// `dead blocks`; but with `-Z instrument-coverage`, the dead blocks usually
/// include `Coverage` statements representing the Rust source code regions to
/// be counted at runtime. Without these `Coverage` statements, the regions are
/// lost, and the Rust source code will show no coverage information.
///
/// What we want to show in a coverage report is the dead code with coverage
/// counts of `0`. To do this, we need to save the code regions, by injecting
/// `Unreachable` coverage statements. These are non-executable statements whose
/// code regions are still recorded in the coverage map, representing regions
/// with `0` executions.
fn save_unreachable_coverage(
basic_blocks: &mut IndexVec<BasicBlock, BasicBlockData<'_>>,
first_dead_block: usize,
) {
let has_live_counters = basic_blocks.raw[0..first_dead_block].iter().any(|live_block| {
live_block.statements.iter().any(|statement| {
if let StatementKind::Coverage(coverage) = &statement.kind {
matches!(coverage.kind, CoverageKind::Counter { .. })
} else {
false
}
})
});
if !has_live_counters {
// If there are no live `Counter` `Coverage` statements anymore, don't
// move dead coverage to the `START_BLOCK`. Just allow the dead
// `Coverage` statements to be dropped with the dead blocks.
//
// The `generator::StateTransform` MIR pass can create atypical
// conditions, where all live `Counter`s are dropped from the MIR.
//
// At least one Counter per function is required by LLVM (and necessary,
// to add the `function_hash` to the counter's call to the LLVM
// intrinsic `instrprof.increment()`).
return;
}
// Retain coverage info for dead blocks, so coverage reports will still
// report `0` executions for the uncovered code regions.
let mut dropped_coverage = Vec::new();
for dead_block in basic_blocks.raw[first_dead_block..].iter() {
for statement in dead_block.statements.iter() {
if let StatementKind::Coverage(coverage) = &statement.kind {
if let Some(code_region) = &coverage.code_region {
dropped_coverage.push((statement.source_info, code_region.clone()));
}
}
}
}
let start_block = &mut basic_blocks[START_BLOCK];
for (source_info, code_region) in dropped_coverage {
start_block.statements.push(Statement {
source_info,
kind: StatementKind::Coverage(Box::new(Coverage {
kind: CoverageKind::Unreachable,
code_region: Some(code_region),
})),
})
}
}
pub struct SimplifyLocals;
impl<'tcx> MirPass<'tcx> for SimplifyLocals {
fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
sess.mir_opt_level() > 0
}
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
trace!("running SimplifyLocals on {:?}", body.source);
simplify_locals(body, tcx);
}
}
pub fn simplify_locals<'tcx>(body: &mut Body<'tcx>, tcx: TyCtxt<'tcx>) {
// First, we're going to get a count of *actual* uses for every `Local`.
let mut used_locals = UsedLocals::new(body);
// Next, we're going to remove any `Local` with zero actual uses. When we remove those
// `Locals`, we're also going to subtract any uses of other `Locals` from the `used_locals`
// count. For example, if we removed `_2 = discriminant(_1)`, then we'll subtract one from
// `use_counts[_1]`. That in turn might make `_1` unused, so we loop until we hit a
// fixedpoint where there are no more unused locals.
remove_unused_definitions(&mut used_locals, body);
// Finally, we'll actually do the work of shrinking `body.local_decls` and remapping the `Local`s.
let map = make_local_map(&mut body.local_decls, &used_locals);
// Only bother running the `LocalUpdater` if we actually found locals to remove.
if map.iter().any(Option::is_none) {
// Update references to all vars and tmps now
let mut updater = LocalUpdater { map, tcx };
updater.visit_body(body);
body.local_decls.shrink_to_fit();
}
}
/// Construct the mapping while swapping out unused stuff out from the `vec`.
fn make_local_map<V>(
local_decls: &mut IndexVec<Local, V>,
used_locals: &UsedLocals,
) -> IndexVec<Local, Option<Local>> {
let mut map: IndexVec<Local, Option<Local>> = IndexVec::from_elem(None, &*local_decls);
let mut used = Local::new(0);
for alive_index in local_decls.indices() {
// `is_used` treats the `RETURN_PLACE` and arguments as used.
if !used_locals.is_used(alive_index) {
continue;
}
map[alive_index] = Some(used);
if alive_index != used {
local_decls.swap(alive_index, used);
}
used.increment_by(1);
}
local_decls.truncate(used.index());
map
}
/// Keeps track of used & unused locals.
struct UsedLocals {
increment: bool,
arg_count: u32,
use_count: IndexVec<Local, u32>,
}
impl UsedLocals {
/// Determines which locals are used & unused in the given body.
fn new(body: &Body<'_>) -> Self {
let mut this = Self {
increment: true,
arg_count: body.arg_count.try_into().unwrap(),
use_count: IndexVec::from_elem(0, &body.local_decls),
};
this.visit_body(body);
this
}
/// Checks if local is used.
///
/// Return place and arguments are always considered used.
fn is_used(&self, local: Local) -> bool {
trace!("is_used({:?}): use_count: {:?}", local, self.use_count[local]);
local.as_u32() <= self.arg_count || self.use_count[local] != 0
}
/// Updates the use counts to reflect the removal of given statement.
fn statement_removed(&mut self, statement: &Statement<'_>) {
self.increment = false;
// The location of the statement is irrelevant.
let location = Location { block: START_BLOCK, statement_index: 0 };
self.visit_statement(statement, location);
}
/// Visits a left-hand side of an assignment.
fn visit_lhs(&mut self, place: &Place<'_>, location: Location) {
if place.is_indirect() {
// A use, not a definition.
self.visit_place(place, PlaceContext::MutatingUse(MutatingUseContext::Store), location);
} else {
// A definition. The base local itself is not visited, so this occurrence is not counted
// toward its use count. There might be other locals still, used in an indexing
// projection.
self.super_projection(
place.as_ref(),
PlaceContext::MutatingUse(MutatingUseContext::Projection),
location,
);
}
}
}
impl<'tcx> Visitor<'tcx> for UsedLocals {
fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
match statement.kind {
StatementKind::LlvmInlineAsm(..)
| StatementKind::CopyNonOverlapping(..)
| StatementKind::Retag(..)
| StatementKind::Coverage(..)
| StatementKind::FakeRead(..)
| StatementKind::AscribeUserType(..) => {
self.super_statement(statement, location);
}
StatementKind::Nop => {}
StatementKind::StorageLive(_local) | StatementKind::StorageDead(_local) => {}
StatementKind::Assign(box (ref place, ref rvalue)) => {
self.visit_lhs(place, location);
self.visit_rvalue(rvalue, location);
}
StatementKind::SetDiscriminant { ref place, variant_index: _ } => {
self.visit_lhs(place, location);
}
}
}
fn visit_local(&mut self, local: &Local, _ctx: PlaceContext, _location: Location) {
if self.increment {
self.use_count[*local] += 1;
} else {
assert_ne!(self.use_count[*local], 0);
self.use_count[*local] -= 1;
}
}
}
/// Removes unused definitions. Updates the used locals to reflect the changes made.
fn remove_unused_definitions(used_locals: &mut UsedLocals, body: &mut Body<'_>) {
// The use counts are updated as we remove the statements. A local might become unused
// during the retain operation, leading to a temporary inconsistency (storage statements or
// definitions referencing the local might remain). For correctness it is crucial that this
// computation reaches a fixed point.
let mut modified = true;
while modified {
modified = false;
for data in body.basic_blocks_mut() {
// Remove unnecessary StorageLive and StorageDead annotations.
data.statements.retain(|statement| {
let keep = match &statement.kind {
StatementKind::StorageLive(local) | StatementKind::StorageDead(local) => {
used_locals.is_used(*local)
}
StatementKind::Assign(box (place, _)) => used_locals.is_used(place.local),
StatementKind::SetDiscriminant { ref place, .. } => {
used_locals.is_used(place.local)
}
_ => true,
};
if !keep {
trace!("removing statement {:?}", statement);
modified = true;
used_locals.statement_removed(statement);
}
keep
});
}
}
}
struct LocalUpdater<'tcx> {
map: IndexVec<Local, Option<Local>>,
tcx: TyCtxt<'tcx>,
}
impl<'tcx> MutVisitor<'tcx> for LocalUpdater<'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
fn visit_local(&mut self, l: &mut Local, _: PlaceContext, _: Location) {
*l = self.map[*l].unwrap();
}
}
| |
cif.js
|
"use strict";
/**
* @ngdoc controller
* @name materialsCloudApp.controller:CifDetailsCtrl
* @description
*
* Controller for displaying CIF structures.
*
* #### File location: app/scripts/controllers/explore/nodedetails/data/cif.js
*
* @requires $scope : scope object for this page
* @requires $window : rezise event of window object is used to resize structure
* @requires nodeVisualization : service used to visualize structure
* @requires $timeout : used to add delay
* @requires CONFIG : materials cloud configuration file
* @requires nodeService : service used to request node data from server
*
*/
app.controller('CifDetailsCtrl',
["$scope", "$window", "nodeVisualization", "$timeout", "CONFIG", "nodeService", "utils",
function ($scope, $window, nodeVisualization, $timeout, CONFIG, nodeService, utils) {
/**
* @ngdoc
* @name materialsCloudApp.controller:CifDetailsCtrl#attributes
* @propertyOf materialsCloudApp.controller:CifDetailsCtrl
*
* @description
* stores the structure attributes received from the server.
*/
$scope.attributes = {};
$scope.strInfo = {};
/**
* Toggle the interaction of structure visualizer on double click event
*/
$scope.enableCifInteraction = true;
$scope.toggleCifVisInteraction = function(){
if ($scope.enableCifInteraction){
// enable interaction here
$("#str-overlay").css("display", "none");
$("#structure-content").css('pointer-events', 'auto');
$scope.enableCifInteraction = false;
}
else{
// disable interaction here
$("#str-overlay").css("display", "table");
$("#structure-content").css('pointer-events', 'none');
$scope.enableCifInteraction = true;
}
};
$scope.structureViewer = null;
$scope.jsmolAppletName = "";
$scope.crystalCif = "";
$scope.supercell = [1, 1, 1];
$scope.selectedAxesIdx = 1;
/**
* Get node visualization data from the server using nodeService
* and display structure using nodeVisualization.
*/
nodeService.getMetadata("DATA", $scope.nodeId, "VISUALIZATION_CIF", $scope.profileRestEndPoint)
.then(function (response) {
|
// download options
var base = $scope.profileRestEndPoint + CONFIG.REST_API.NODE[$scope.selectedProfileInfo.REST_API_MAJOR_VERSION.toUpperCase()]["DATA"] + "/" + $scope.nodeId
+ CONFIG.REST_API.NODE[$scope.selectedProfileInfo.REST_API_MAJOR_VERSION.toUpperCase()]["DOWNLOAD"] + "?format=";
$scope.downloadOptions = [
{"name": "CIF", link: base + "cif"}
];
// display crystal
if (response["str_viz_info"]["format"] == "chemdoodle") {
if (typeof response["str_viz_info"]["data"] === 'string' || response["str_viz_info"]["data"] instanceof String) {
var crystalCif = JSON.parse(response["str_viz_info"]["data"]);
} else {
var crystalCif = response["str_viz_info"]["data"];
}
// display crystal
$timeout(function () {
$scope.structureviewer = null;
$scope.structureviewer = nodeVisualization.chemDoodleCrystal(
crystalCif, "cif", "crystal", "structure-content", $scope.structureviewer);
$scope.toggleCifVisInteraction();
});
} else {
$scope.jsmolAppletName = "jmolApplet";
//get metadata
$timeout(function () {
$scope.crystalCif = response["str_viz_info"]["data"];
});
}
} else {
$scope.jsmolAppletName = "jmolApplet";
//get metadata
$timeout(function () {
$scope.crystalCif = response["data"];
});
}
});
if ($scope.selectedProfileInfo.REST_API_MAJOR_VERSION == 'v4') {
/**
* Get cif download options from the server using nodeService.
*/
nodeService.getDownloadFormats("DATA", $scope.profileRestEndPoint)
.then(function (response) {
response = response.data.data["data.cif.CifData.|"];
// download options
var base = $scope.profileRestEndPoint + CONFIG.REST_API.NODE[$scope.selectedProfileInfo.REST_API_MAJOR_VERSION.toUpperCase()]["DATA"]
+ "/" + $scope.nodeId + CONFIG.REST_API.NODE[$scope.selectedProfileInfo.REST_API_MAJOR_VERSION.toUpperCase()]["DOWNLOAD"] + "?download_format=";
$scope.downloadOptions = [];
angular.forEach(response, function (value) {
$scope.downloadOptions.push({"name": value.toUpperCase(), link: base + value});
});
});
}
/**
* Resize the structure on window resize event.
*/
angular.element($window).bind('resize', function(){
if ($scope.structureviewer) {
var parentcanvas = document.getElementById('structure-content');
var the_width = parentcanvas.offsetWidth -10;
var the_height = parentcanvas.offsetHeight -10;
$scope.structureviewer.resize(the_width, the_height);
}
});
}
]
);
|
response = response.data.data[CONFIG.DATA_PATH[$scope.selectedProfileInfo.REST_API_MAJOR_VERSION.toUpperCase()]["VISUALIZATION"]];
if ($scope.selectedProfileInfo.REST_API_MAJOR_VERSION == 'v2' || $scope.selectedProfileInfo.REST_API_MAJOR_VERSION == 'v3') {
|
libstdc++.a-gdb.py
|
# -*- python -*-
# Copyright (C) 2009-2020 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/scratch/jenkins/workspace/tpp-freedom-tools/tpp03--build-binary-packages--parameterized/obj/x86_64-w64-mingw32/install/riscv64-unknown-elf-gcc-10.2.0-2020.12.8-x86_64-w64-mingw32/share/gcc-10.2.0/python'
libdir = '/scratch/jenkins/workspace/tpp-freedom-tools/tpp03--build-binary-packages--parameterized/obj/x86_64-w64-mingw32/install/riscv64-unknown-elf-gcc-10.2.0-2020.12.8-x86_64-w64-mingw32/riscv64-unknown-elf/lib/rv32emac/ilp32e'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
|
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
|
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
|
App.tsx
|
import { useEffect, useState } from "react";
import { Button } from "./components/Button";
import { MovieCard } from "./components/MovieCard";
import { SideBar } from "./components/SideBar";
import { Content } from "./components/Content";
import { api } from "./services/api";
import "./styles/global.scss";
import "./styles/sidebar.scss";
import "./styles/content.scss";
interface GenreResponseProps {
id: number;
name: "action" | "comedy" | "documentary" | "drama" | "horror" | "family";
title: string;
}
export function App() {
const [selectedGenreId, setSelectedGenreId] = useState(1);
const [selectedGenre, setSelectedGenre] = useState<GenreResponseProps>(
{} as GenreResponseProps
);
useEffect(() => {
api.get<GenreResponseProps>(`genres/${selectedGenreId}`).then((response) => {
setSelectedGenre(response.data);
});
}, [selectedGenreId]);
function handleClickButton(id: number) {
setSelectedGenreId(id);
|
<SideBar
selectedGenreId={selectedGenreId}
handleClickButton={handleClickButton}
/>
<Content
selectedGenre={selectedGenre}
selectedGenreId={selectedGenreId}
/>
</div>
);
}
|
}
return (
<div style={{ display: "flex", flexDirection: "row" }}>
|
action.rs
|
use std::convert::TryInto;
use crate::{
data::{query, DatabasePool, Transaction},
web::api::ApiKey,
Clip, ServiceError, ShortCode,
};
use super::ask::{GetClip, NewClip, UpdateClip};
pub async fn begin_transaction(pool: &DatabasePool) -> Result<Transaction<'_>, ServiceError> {
Ok(pool.begin().await?)
}
pub async fn end_transaction(transaction: Transaction<'_>) -> Result<(), ServiceError> {
Ok(transaction.commit().await?)
}
pub async fn increase_hit_count(
shortcode: &ShortCode,
hits: u32,
pool: &DatabasePool,
) -> Result<(), ServiceError> {
Ok(query::increase_hit_count(shortcode, hits, pool).await?)
}
|
Ok(query::new_clip(req, pool).await?.try_into()?)
}
pub async fn update_clip(req: UpdateClip, pool: &DatabasePool) -> Result<Clip, ServiceError> {
Ok(query::update_clip(req, pool).await?.try_into()?)
}
pub async fn get_clip(req: GetClip, pool: &DatabasePool) -> Result<Clip, ServiceError> {
let user_password = req.password.clone();
let clip: Clip = query::get_clip(req, pool).await?.try_into()?;
if clip.password.has_password() && clip.password != user_password {
Err(ServiceError::PermissionError("Invalid password".to_owned()))
} else {
Ok(clip)
}
}
pub async fn generate_api_key(pool: &DatabasePool) -> Result<ApiKey, ServiceError> {
let api_key = ApiKey::default();
Ok(query::save_api_key(api_key, pool).await?)
}
pub async fn revoke_api_key(
api_key: ApiKey,
pool: &DatabasePool,
) -> Result<query::RevocationStatus, ServiceError> {
Ok(query::revoke_api_key(api_key, pool).await?)
}
pub async fn api_key_is_valid(api_key: ApiKey, pool: &DatabasePool) -> Result<bool, ServiceError> {
Ok(query::api_key_is_valid(api_key, pool).await?)
}
pub async fn delete_expired(pool: &DatabasePool) -> Result<u64, ServiceError> {
Ok(query::delete_expired(pool).await?)
}
|
pub async fn new_clip(req: NewClip, pool: &DatabasePool) -> Result<Clip, ServiceError> {
|
container_manager_linux.go
|
// +build linux
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/golang/glog"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fs"
"github.com/opencontainers/runc/libcontainer/configs"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
kubefeatures "k8s.io/kubernetes/pkg/features"
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
"k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin"
cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/kubelet/status"
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
utilfile "k8s.io/kubernetes/pkg/util/file"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/procfs"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilversion "k8s.io/kubernetes/pkg/util/version"
)
const (
// The percent of the machine memory capacity. The value is used to calculate
// docker memory resource container's hardlimit to workaround docker memory
// leakage issue. Please see kubernetes/issues/9881 for more detail.
DockerMemoryLimitThresholdPercent = 70
// The minimum memory limit allocated to docker container: 150Mi
MinDockerMemoryLimit = 150 * 1024 * 1024
dockerProcessName = "docker"
dockerPidFile = "/var/run/docker.pid"
containerdProcessName = "docker-containerd"
containerdPidFile = "/run/docker/libcontainerd/docker-containerd.pid"
)
var (
// The docker version in which containerd was introduced.
containerdAPIVersion = utilversion.MustParseGeneric("1.23")
)
// A non-user container tracked by the Kubelet.
type systemContainer struct {
// Absolute name of the container.
name string
// CPU limit in millicores.
cpuMillicores int64
// Function that ensures the state of the container.
// m is the cgroup manager for the specified container.
ensureStateFunc func(m *fs.Manager) error
// Manager for the cgroups of the external container.
manager *fs.Manager
}
func newSystemCgroups(containerName string) *systemContainer {
return &systemContainer{
name: containerName,
manager: createManager(containerName),
}
}
type containerManagerImpl struct {
sync.RWMutex
cadvisorInterface cadvisor.Interface
mountUtil mount.Interface
NodeConfig
status Status
// External containers being managed.
systemContainers []*systemContainer
qosContainers QOSContainersInfo
// Tasks that are run periodically
periodicTasks []func()
// Holds all the mounted cgroup subsystems
subsystems *CgroupSubsystems
nodeInfo *v1.Node
// Interface for cgroup management
cgroupManager CgroupManager
// Capacity of this node.
capacity v1.ResourceList
// Absolute cgroupfs path to a cgroup that Kubelet needs to place all pods under.
// This path include a top level container for enforcing Node Allocatable.
cgroupRoot string
// Event recorder interface.
recorder record.EventRecorder
// Interface for QoS cgroup management
qosContainerManager QOSContainerManager
// Interface for exporting and allocating devices reported by device plugins.
devicePluginManager deviceplugin.Manager
// Interface for CPU affinity management.
cpuManager cpumanager.Manager
}
type features struct {
cpuHardcapping bool
}
var _ ContainerManager = &containerManagerImpl{}
// checks if the required cgroups subsystems are mounted.
// As of now, only 'cpu' and 'memory' are required.
// cpu quota is a soft requirement.
func validateSystemRequirements(mountUtil mount.Interface) (features, error) {
const (
cgroupMountType = "cgroup"
localErr = "system validation failed"
)
var (
cpuMountPoint string
f features
)
mountPoints, err := mountUtil.List()
if err != nil {
return f, fmt.Errorf("%s - %v", localErr, err)
}
expectedCgroups := sets.NewString("cpu", "cpuacct", "cpuset", "memory")
for _, mountPoint := range mountPoints {
if mountPoint.Type == cgroupMountType {
for _, opt := range mountPoint.Opts {
if expectedCgroups.Has(opt) {
expectedCgroups.Delete(opt)
}
if opt == "cpu" {
cpuMountPoint = mountPoint.Path
}
}
}
}
if expectedCgroups.Len() > 0 {
return f, fmt.Errorf("%s - Following Cgroup subsystem not mounted: %v", localErr, expectedCgroups.List())
}
// Check if cpu quota is available.
// CPU cgroup is required and so it expected to be mounted at this point.
periodExists, err := utilfile.FileExists(path.Join(cpuMountPoint, "cpu.cfs_period_us"))
if err != nil {
glog.Errorf("failed to detect if CPU cgroup cpu.cfs_period_us is available - %v", err)
}
quotaExists, err := utilfile.FileExists(path.Join(cpuMountPoint, "cpu.cfs_quota_us"))
if err != nil {
glog.Errorf("failed to detect if CPU cgroup cpu.cfs_quota_us is available - %v", err)
}
if quotaExists && periodExists {
f.cpuHardcapping = true
}
return f, nil
}
// TODO(vmarmol): Add limits to the system containers.
// Takes the absolute name of the specified containers.
// Empty container name disables use of the specified container.
func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface, nodeConfig NodeConfig, failSwapOn bool, devicePluginEnabled bool, recorder record.EventRecorder) (ContainerManager, error) {
subsystems, err := GetCgroupSubsystems()
if err != nil {
return nil, fmt.Errorf("failed to get mounted cgroup subsystems: %v", err)
}
// Check whether swap is enabled. The Kubelet does not support running with swap enabled.
swapData, err := ioutil.ReadFile("/proc/swaps")
if err != nil {
return nil, err
}
swapData = bytes.TrimSpace(swapData) // extra trailing \n
swapLines := strings.Split(string(swapData), "\n")
// If there is more than one line (table headers) in /proc/swaps, swap is enabled and we should
// error out unless --fail-swap-on is set to false.
if failSwapOn && len(swapLines) > 1 {
return nil, fmt.Errorf("Running with swap on is not supported, please disable swap! or set --fail-swap-on flag to false. /proc/swaps contained: %v", swapLines)
}
var capacity = v1.ResourceList{}
// It is safe to invoke `MachineInfo` on cAdvisor before logically initializing cAdvisor here because
// machine info is computed and cached once as part of cAdvisor object creation.
// But `RootFsInfo` and `ImagesFsInfo` are not available at this moment so they will be called later during manager starts
machineInfo, err := cadvisorInterface.MachineInfo()
if err != nil {
return nil, err
}
capacity = cadvisor.CapacityFromMachineInfo(machineInfo)
cgroupRoot := nodeConfig.CgroupRoot
cgroupManager := NewCgroupManager(subsystems, nodeConfig.CgroupDriver)
// Check if Cgroup-root actually exists on the node
if nodeConfig.CgroupsPerQOS {
// this does default to / when enabled, but this tests against regressions.
if nodeConfig.CgroupRoot == "" {
return nil, fmt.Errorf("invalid configuration: cgroups-per-qos was specified and cgroup-root was not specified. To enable the QoS cgroup hierarchy you need to specify a valid cgroup-root")
}
// we need to check that the cgroup root actually exists for each subsystem
// of note, we always use the cgroupfs driver when performing this check since
// the input is provided in that format.
// this is important because we do not want any name conversion to occur.
if !cgroupManager.Exists(CgroupName(cgroupRoot)) {
return nil, fmt.Errorf("invalid configuration: cgroup-root %q doesn't exist: %v", cgroupRoot, err)
}
glog.Infof("container manager verified user specified cgroup-root exists: %v", cgroupRoot)
// Include the top level cgroup for enforcing node allocatable into cgroup-root.
// This way, all sub modules can avoid having to understand the concept of node allocatable.
cgroupRoot = path.Join(cgroupRoot, defaultNodeAllocatableCgroupName)
}
glog.Infof("Creating Container Manager object based on Node Config: %+v", nodeConfig)
qosContainerManager, err := NewQOSContainerManager(subsystems, cgroupRoot, nodeConfig, cgroupManager)
if err != nil {
return nil, err
}
cm := &containerManagerImpl{
cadvisorInterface: cadvisorInterface,
mountUtil: mountUtil,
NodeConfig: nodeConfig,
subsystems: subsystems,
cgroupManager: cgroupManager,
capacity: capacity,
cgroupRoot: cgroupRoot,
recorder: recorder,
qosContainerManager: qosContainerManager,
}
glog.Infof("Creating device plugin manager: %t", devicePluginEnabled)
if devicePluginEnabled {
cm.devicePluginManager, err = deviceplugin.NewManagerImpl()
} else {
cm.devicePluginManager, err = deviceplugin.NewManagerStub()
}
if err != nil {
return nil, err
}
// Initialize CPU manager
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUManager) {
cm.cpuManager, err = cpumanager.NewManager(
nodeConfig.ExperimentalCPUManagerPolicy,
nodeConfig.ExperimentalCPUManagerReconcilePeriod,
machineInfo,
cm.GetNodeAllocatableReservation(),
nodeConfig.KubeletRootDir,
)
if err != nil {
glog.Errorf("failed to initialize cpu manager: %v", err)
return nil, err
}
}
return cm, nil
}
// NewPodContainerManager is a factory method returns a PodContainerManager object
// If qosCgroups are enabled then it returns the general pod container manager
// otherwise it returns a no-op manager which essentially does nothing
func (cm *containerManagerImpl) NewPodContainerManager() PodContainerManager {
if cm.NodeConfig.CgroupsPerQOS {
return &podContainerManagerImpl{
qosContainersInfo: cm.GetQOSContainersInfo(),
subsystems: cm.subsystems,
cgroupManager: cm.cgroupManager,
}
}
return &podContainerManagerNoop{
cgroupRoot: CgroupName(cm.cgroupRoot),
}
}
func (cm *containerManagerImpl) InternalContainerLifecycle() InternalContainerLifecycle {
return &internalContainerLifecycleImpl{cm.cpuManager}
}
// Create a cgroup container manager.
func createManager(containerName string) *fs.Manager
|
type KernelTunableBehavior string
const (
KernelTunableWarn KernelTunableBehavior = "warn"
KernelTunableError KernelTunableBehavior = "error"
KernelTunableModify KernelTunableBehavior = "modify"
)
// setupKernelTunables validates kernel tunable flags are set as expected
// depending upon the specified option, it will either warn, error, or modify the kernel tunable flags
func setupKernelTunables(option KernelTunableBehavior) error {
desiredState := map[string]int{
utilsysctl.VmOvercommitMemory: utilsysctl.VmOvercommitMemoryAlways,
utilsysctl.VmPanicOnOOM: utilsysctl.VmPanicOnOOMInvokeOOMKiller,
utilsysctl.KernelPanic: utilsysctl.KernelPanicRebootTimeout,
utilsysctl.KernelPanicOnOops: utilsysctl.KernelPanicOnOopsAlways,
utilsysctl.RootMaxKeys: utilsysctl.RootMaxKeysSetting,
utilsysctl.RootMaxBytes: utilsysctl.RootMaxBytesSetting,
}
sysctl := utilsysctl.New()
errList := []error{}
for flag, expectedValue := range desiredState {
val, err := sysctl.GetSysctl(flag)
if err != nil {
errList = append(errList, err)
continue
}
if val == expectedValue {
continue
}
switch option {
case KernelTunableError:
errList = append(errList, fmt.Errorf("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val))
case KernelTunableWarn:
glog.V(2).Infof("Invalid kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
case KernelTunableModify:
glog.V(2).Infof("Updating kernel flag: %v, expected value: %v, actual value: %v", flag, expectedValue, val)
err = sysctl.SetSysctl(flag, expectedValue)
if err != nil {
errList = append(errList, err)
}
}
}
return utilerrors.NewAggregate(errList)
}
func (cm *containerManagerImpl) setupNode(activePods ActivePodsFunc) error {
f, err := validateSystemRequirements(cm.mountUtil)
if err != nil {
return err
}
if !f.cpuHardcapping {
cm.status.SoftRequirements = fmt.Errorf("CPU hardcapping unsupported")
}
b := KernelTunableModify
if cm.GetNodeConfig().ProtectKernelDefaults {
b = KernelTunableError
}
if err := setupKernelTunables(b); err != nil {
return err
}
// Setup top level qos containers only if CgroupsPerQOS flag is specified as true
if cm.NodeConfig.CgroupsPerQOS {
if err := cm.createNodeAllocatableCgroups(); err != nil {
return err
}
err = cm.qosContainerManager.Start(cm.getNodeAllocatableAbsolute, activePods)
if err != nil {
return fmt.Errorf("failed to initialize top level QOS containers: %v", err)
}
}
// Enforce Node Allocatable (if required)
if err := cm.enforceNodeAllocatableCgroups(); err != nil {
return err
}
systemContainers := []*systemContainer{}
if cm.ContainerRuntime == "docker" {
// With the docker-CRI integration, dockershim will manage the cgroups
// and oom score for the docker processes.
// In the future, NodeSpec should mandate the cgroup that the
// runtime processes need to be in. For now, we still check the
// cgroup for docker periodically, so that kubelet can recognize
// the cgroup for docker and serve stats for the runtime.
// TODO(#27097): Fix this after NodeSpec is clearly defined.
cm.periodicTasks = append(cm.periodicTasks, func() {
glog.V(4).Infof("[ContainerManager]: Adding periodic tasks for docker CRI integration")
cont, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
if err != nil {
glog.Error(err)
return
}
glog.V(2).Infof("[ContainerManager]: Discovered runtime cgroups name: %s", cont)
cm.Lock()
defer cm.Unlock()
cm.RuntimeCgroupsName = cont
})
}
if cm.SystemCgroupsName != "" {
if cm.SystemCgroupsName == "/" {
return fmt.Errorf("system container cannot be root (\"/\")")
}
cont := newSystemCgroups(cm.SystemCgroupsName)
cont.ensureStateFunc = func(manager *fs.Manager) error {
return ensureSystemCgroups("/", manager)
}
systemContainers = append(systemContainers, cont)
}
if cm.KubeletCgroupsName != "" {
cont := newSystemCgroups(cm.KubeletCgroupsName)
allowAllDevices := true
manager := fs.Manager{
Cgroups: &configs.Cgroup{
Parent: "/",
Name: cm.KubeletCgroupsName,
Resources: &configs.Resources{
AllowAllDevices: &allowAllDevices,
},
},
}
cont.ensureStateFunc = func(_ *fs.Manager) error {
return ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, &manager)
}
systemContainers = append(systemContainers, cont)
} else {
cm.periodicTasks = append(cm.periodicTasks, func() {
if err := ensureProcessInContainerWithOOMScore(os.Getpid(), qos.KubeletOOMScoreAdj, nil); err != nil {
glog.Error(err)
return
}
cont, err := getContainer(os.Getpid())
if err != nil {
glog.Errorf("failed to find cgroups of kubelet - %v", err)
return
}
cm.Lock()
defer cm.Unlock()
cm.KubeletCgroupsName = cont
})
}
cm.systemContainers = systemContainers
return nil
}
func getContainerNameForProcess(name, pidFile string) (string, error) {
pids, err := getPidsForProcess(name, pidFile)
if err != nil {
return "", fmt.Errorf("failed to detect process id for %q - %v", name, err)
}
if len(pids) == 0 {
return "", nil
}
cont, err := getContainer(pids[0])
if err != nil {
return "", err
}
return cont, nil
}
func (cm *containerManagerImpl) GetNodeConfig() NodeConfig {
cm.RLock()
defer cm.RUnlock()
return cm.NodeConfig
}
func (cm *containerManagerImpl) GetMountedSubsystems() *CgroupSubsystems {
return cm.subsystems
}
func (cm *containerManagerImpl) GetQOSContainersInfo() QOSContainersInfo {
return cm.qosContainerManager.GetQOSContainersInfo()
}
func (cm *containerManagerImpl) UpdateQOSCgroups() error {
return cm.qosContainerManager.UpdateCgroups()
}
func (cm *containerManagerImpl) Status() Status {
cm.RLock()
defer cm.RUnlock()
return cm.status
}
func (cm *containerManagerImpl) Start(node *v1.Node,
activePods ActivePodsFunc,
sourcesReady config.SourcesReady,
podStatusProvider status.PodStatusProvider,
runtimeService internalapi.RuntimeService) error {
// Initialize CPU manager
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUManager) {
cm.cpuManager.Start(cpumanager.ActivePodsFunc(activePods), podStatusProvider, runtimeService)
}
// cache the node Info including resource capacity and
// allocatable of the node
cm.nodeInfo = node
// Ensure that node allocatable configuration is valid.
if err := cm.validateNodeAllocatable(); err != nil {
return err
}
// Setup the node
if err := cm.setupNode(activePods); err != nil {
return err
}
// Don't run a background thread if there are no ensureStateFuncs.
hasEnsureStateFuncs := false
for _, cont := range cm.systemContainers {
if cont.ensureStateFunc != nil {
hasEnsureStateFuncs = true
break
}
}
if hasEnsureStateFuncs {
// Run ensure state functions every minute.
go wait.Until(func() {
for _, cont := range cm.systemContainers {
if cont.ensureStateFunc != nil {
if err := cont.ensureStateFunc(cont.manager); err != nil {
glog.Warningf("[ContainerManager] Failed to ensure state of %q: %v", cont.name, err)
}
}
}
}, time.Minute, wait.NeverStop)
}
if len(cm.periodicTasks) > 0 {
go wait.Until(func() {
for _, task := range cm.periodicTasks {
if task != nil {
task()
}
}
}, 5*time.Minute, wait.NeverStop)
}
// Local storage filesystem information from `RootFsInfo` and `ImagesFsInfo` is available at a later time
// depending on the time when cadvisor manager updates container stats. Therefore use a go routine to keep
// retrieving the information until it is available.
stopChan := make(chan struct{})
go wait.Until(func() {
if err := cm.setFsCapacity(); err != nil {
glog.Errorf("[ContainerManager]: %v", err)
return
}
close(stopChan)
}, time.Second, stopChan)
// Starts device plugin manager.
if err := cm.devicePluginManager.Start(deviceplugin.ActivePodsFunc(activePods), sourcesReady); err != nil {
return err
}
return nil
}
func (cm *containerManagerImpl) setFsCapacity() error {
rootfs, err := cm.cadvisorInterface.RootFsInfo()
if err != nil {
return fmt.Errorf("Fail to get rootfs information %v", err)
}
cm.Lock()
for rName, rCap := range cadvisor.EphemeralStorageCapacityFromFsInfo(rootfs) {
cm.capacity[rName] = rCap
}
cm.Unlock()
return nil
}
// TODO: move the GetResources logic to PodContainerManager.
func (cm *containerManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error) {
opts := &kubecontainer.RunContainerOptions{}
// Allocate should already be called during predicateAdmitHandler.Admit(),
// just try to fetch device runtime information from cached state here
devOpts := cm.devicePluginManager.GetDeviceRunContainerOptions(pod, container)
if devOpts == nil {
return opts, nil
}
opts.Devices = append(opts.Devices, devOpts.Devices...)
opts.Mounts = append(opts.Mounts, devOpts.Mounts...)
opts.Envs = append(opts.Envs, devOpts.Envs...)
return opts, nil
}
func (cm *containerManagerImpl) UpdatePluginResources(node *schedulercache.NodeInfo, attrs *lifecycle.PodAdmitAttributes) error {
return cm.devicePluginManager.Allocate(node, attrs)
}
func (cm *containerManagerImpl) SystemCgroupsLimit() v1.ResourceList {
cpuLimit := int64(0)
// Sum up resources of all external containers.
for _, cont := range cm.systemContainers {
cpuLimit += cont.cpuMillicores
}
return v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
cpuLimit,
resource.DecimalSI),
}
}
func isProcessRunningInHost(pid int) (bool, error) {
// Get init pid namespace.
initPidNs, err := os.Readlink("/proc/1/ns/pid")
if err != nil {
return false, fmt.Errorf("failed to find pid namespace of init process")
}
glog.V(10).Infof("init pid ns is %q", initPidNs)
processPidNs, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", pid))
if err != nil {
return false, fmt.Errorf("failed to find pid namespace of process %q", pid)
}
glog.V(10).Infof("Pid %d pid ns is %q", pid, processPidNs)
return initPidNs == processPidNs, nil
}
func getPidFromPidFile(pidFile string) (int, error) {
file, err := os.Open(pidFile)
if err != nil {
return 0, fmt.Errorf("error opening pid file %s: %v", pidFile, err)
}
defer file.Close()
data, err := ioutil.ReadAll(file)
if err != nil {
return 0, fmt.Errorf("error reading pid file %s: %v", pidFile, err)
}
pid, err := strconv.Atoi(string(data))
if err != nil {
return 0, fmt.Errorf("error parsing %s as a number: %v", string(data), err)
}
return pid, nil
}
func getPidsForProcess(name, pidFile string) ([]int, error) {
if len(pidFile) == 0 {
return procfs.PidOf(name)
}
pid, err := getPidFromPidFile(pidFile)
if err == nil {
return []int{pid}, nil
}
// Try to lookup pid by process name
pids, err2 := procfs.PidOf(name)
if err2 == nil {
return pids, nil
}
// Return error from getPidFromPidFile since that should have worked
// and is the real source of the problem.
glog.V(4).Infof("unable to get pid from %s: %v", pidFile, err)
return []int{}, err
}
// Ensures that the Docker daemon is in the desired container.
// Temporarily export the function to be used by dockershim.
// TODO(yujuhong): Move this function to dockershim once kubelet migrates to
// dockershim as the default.
func EnsureDockerInContainer(dockerAPIVersion *utilversion.Version, oomScoreAdj int, manager *fs.Manager) error {
type process struct{ name, file string }
dockerProcs := []process{{dockerProcessName, dockerPidFile}}
if dockerAPIVersion.AtLeast(containerdAPIVersion) {
dockerProcs = append(dockerProcs, process{containerdProcessName, containerdPidFile})
}
var errs []error
for _, proc := range dockerProcs {
pids, err := getPidsForProcess(proc.name, proc.file)
if err != nil {
errs = append(errs, fmt.Errorf("failed to get pids for %q: %v", proc.name, err))
continue
}
// Move if the pid is not already in the desired container.
for _, pid := range pids {
if err := ensureProcessInContainerWithOOMScore(pid, oomScoreAdj, manager); err != nil {
errs = append(errs, fmt.Errorf("errors moving %q pid: %v", proc.name, err))
}
}
}
return utilerrors.NewAggregate(errs)
}
func ensureProcessInContainerWithOOMScore(pid int, oomScoreAdj int, manager *fs.Manager) error {
if runningInHost, err := isProcessRunningInHost(pid); err != nil {
// Err on the side of caution. Avoid moving the docker daemon unless we are able to identify its context.
return err
} else if !runningInHost {
// Process is running inside a container. Don't touch that.
glog.V(2).Infof("pid %d is not running in the host namespaces", pid)
return nil
}
var errs []error
if manager != nil {
cont, err := getContainer(pid)
if err != nil {
errs = append(errs, fmt.Errorf("failed to find container of PID %d: %v", pid, err))
}
if cont != manager.Cgroups.Name {
err = manager.Apply(pid)
if err != nil {
errs = append(errs, fmt.Errorf("failed to move PID %d (in %q) to %q: %v", pid, cont, manager.Cgroups.Name, err))
}
}
}
// Also apply oom-score-adj to processes
oomAdjuster := oom.NewOOMAdjuster()
glog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid)
if err := oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err != nil {
glog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err)
errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d: %v", oomScoreAdj, pid, err))
}
return utilerrors.NewAggregate(errs)
}
// getContainer returns the cgroup associated with the specified pid.
// It enforces a unified hierarchy for memory and cpu cgroups.
// On systemd environments, it uses the name=systemd cgroup for the specified pid.
func getContainer(pid int) (string, error) {
cgs, err := cgroups.ParseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", pid))
if err != nil {
return "", err
}
cpu, found := cgs["cpu"]
if !found {
return "", cgroups.NewNotFoundError("cpu")
}
memory, found := cgs["memory"]
if !found {
return "", cgroups.NewNotFoundError("memory")
}
// since we use this container for accounting, we need to ensure its a unified hierarchy.
if cpu != memory {
return "", fmt.Errorf("cpu and memory cgroup hierarchy not unified. cpu: %s, memory: %s", cpu, memory)
}
// on systemd, every pid is in a unified cgroup hierarchy (name=systemd as seen in systemd-cgls)
// cpu and memory accounting is off by default, users may choose to enable it per unit or globally.
// users could enable CPU and memory accounting globally via /etc/systemd/system.conf (DefaultCPUAccounting=true DefaultMemoryAccounting=true).
// users could also enable CPU and memory accounting per unit via CPUAccounting=true and MemoryAccounting=true
// we only warn if accounting is not enabled for CPU or memory so as to not break local development flows where kubelet is launched in a terminal.
// for example, the cgroup for the user session will be something like /user.slice/user-X.slice/session-X.scope, but the cpu and memory
// cgroup will be the closest ancestor where accounting is performed (most likely /) on systems that launch docker containers.
// as a result, on those systems, you will not get cpu or memory accounting statistics for kubelet.
// in addition, you would not get memory or cpu accounting for the runtime unless accounting was enabled on its unit (or globally).
if systemd, found := cgs["name=systemd"]; found {
if systemd != cpu {
glog.Warningf("CPUAccounting not enabled for pid: %d", pid)
}
if systemd != memory {
glog.Warningf("MemoryAccounting not enabled for pid: %d", pid)
}
return systemd, nil
}
return cpu, nil
}
// Ensures the system container is created and all non-kernel threads and process 1
// without a container are moved to it.
//
// The reason of leaving kernel threads at root cgroup is that we don't want to tie the
// execution of these threads with to-be defined /system quota and create priority inversions.
//
func ensureSystemCgroups(rootCgroupPath string, manager *fs.Manager) error {
// Move non-kernel PIDs to the system container.
attemptsRemaining := 10
var errs []error
for attemptsRemaining >= 0 {
// Only keep errors on latest attempt.
errs = []error{}
attemptsRemaining--
allPids, err := cmutil.GetPids(rootCgroupPath)
if err != nil {
errs = append(errs, fmt.Errorf("failed to list PIDs for root: %v", err))
continue
}
// Remove kernel pids and other protected PIDs (pid 1, PIDs already in system & kubelet containers)
pids := make([]int, 0, len(allPids))
for _, pid := range allPids {
if pid == 1 || isKernelPid(pid) {
continue
}
pids = append(pids, pid)
}
glog.Infof("Found %d PIDs in root, %d of them are not to be moved", len(allPids), len(allPids)-len(pids))
// Check if we have moved all the non-kernel PIDs.
if len(pids) == 0 {
break
}
glog.Infof("Moving non-kernel processes: %v", pids)
for _, pid := range pids {
err := manager.Apply(pid)
if err != nil {
errs = append(errs, fmt.Errorf("failed to move PID %d into the system container %q: %v", pid, manager.Cgroups.Name, err))
}
}
}
if attemptsRemaining < 0 {
errs = append(errs, fmt.Errorf("ran out of attempts to create system containers %q", manager.Cgroups.Name))
}
return utilerrors.NewAggregate(errs)
}
// Determines whether the specified PID is a kernel PID.
func isKernelPid(pid int) bool {
// Kernel threads have no associated executable.
_, err := os.Readlink(fmt.Sprintf("/proc/%d/exe", pid))
return err != nil
}
// Helper for getting the docker API version.
func getDockerAPIVersion(cadvisor cadvisor.Interface) *utilversion.Version {
versions, err := cadvisor.VersionInfo()
if err != nil {
glog.Errorf("Error requesting cAdvisor VersionInfo: %v", err)
return utilversion.MustParseSemantic("0.0")
}
dockerAPIVersion, err := utilversion.ParseGeneric(versions.DockerAPIVersion)
if err != nil {
glog.Errorf("Error parsing docker version %q: %v", versions.DockerVersion, err)
return utilversion.MustParseSemantic("0.0")
}
return dockerAPIVersion
}
func (cm *containerManagerImpl) GetCapacity() v1.ResourceList {
cm.RLock()
defer cm.RUnlock()
return cm.capacity
}
func (cm *containerManagerImpl) GetDevicePluginResourceCapacity() (v1.ResourceList, []string) {
return cm.devicePluginManager.GetCapacity()
}
|
{
allowAllDevices := true
return &fs.Manager{
Cgroups: &configs.Cgroup{
Parent: "/",
Name: containerName,
Resources: &configs.Resources{
AllowAllDevices: &allowAllDevices,
},
},
}
}
|
test_atom.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
## Distributed under the terms of the Apache License 2.0
#"""
#Tests for the Atom DataFrame
##############################
#The tests here use some contrived examples.
|
#import numpy as np
#from unittest import TestCase
#from exa import units
#from exa.core.dataframe import ColumnError
#from exatomic.atom import Atom
#
#
#class TestAtom(TestCase):
# """Tests for :class:`~exatomic.atom.Atom`."""
# def test_init(self):
# """Test that the atom dataframe raises errors correctly."""
# with self.assertRaises(ColumnError):
# Atom()
|
#"""
|
MonPrereq.py
|
import os
import utils.log as log
import time
class EnableRepos(object):
def __init__(self,host, qa_username, qa_password, pool_id):
self.username = qa_username
self.password = qa_password
self.poolid = pool_id
self.host = host
self.ssh = 'ssh %s ' %host.hostname # do not forget add space after the format specifier
def enable_rhel_repo(self):
print "Subscribing to RHEL rpms"
|
self.refresh_node = " 'sudo subscription-manager refresh' "
self.attach_poolid = " 'sudo subscription-manager attach --pool=%s' " % (self.poolid)
self.enable_repo = " 'sudo subscription-manager repos --enable=rhel-7-server-rpms' "
self.yum_update = " 'sudo yum update -y' "
return self.unregister, self.register_node, self.refresh_node, self.attach_poolid, self.enable_repo, self.yum_update,
def execute(self):
print "Enabling the repos"
commands = self.enable_rhel_repo()
for command in commands:
command = self.ssh + command
log.info("Enabling RHEL repos in Mon Node")
log.debug(command)
os.system(command)
class MonFireWallSettings(object):
def __init__(self,host):
self.host = host
self.ssh = 'ssh %s ' %host.hostname # do not forget add space after the format specifier
def firewall_settings_commands(self):
print 'in Mon firewall' # format to specifiy the command: " '<command>' "
self.start_firewalld = " ' sudo systemctl start firewalld' "
self.enable_firewalld = " ' sudo systemctl enable firewalld' "
self.verify_firewalld = "' sudo systemctl status firewalld.service' "
self.open_6789 = " 'sudo firewall-cmd --zone=public --add-port=6789/tcp --permanent' "
self.saveiptables = " 'sudo firewall-cmd --reload' "
return self.start_firewalld, self.enable_firewalld, self.verify_firewalld, self.open_6789, self.saveiptables,
def execute(self):
print 'in admin execute'
commands = self.firewall_settings_commands()
for command in commands:
command = self.ssh + command
log.info('execting firewall settings in Mon node with command')
log.debug(command)
os.system(command)
class InstallNTP(object):
def __init__(self,host):
self.host = host
self.ssh = 'ssh %s ' %host.hostname # do not forget add space after the format specifier
def install_ntp_commands(self):
print 'Installing NTP'
self.install_ntp = " 'sudo yum install ntp -y' "
self.enable_ntp = " 'sudo systemctl enable ntpd.service' "
self.start_ntp = " 'sudo systemctl start ntpd' "
self.verify_ntp = " 'sudo systemctl status ntpd' "
self.ntp_sync = " 'sudo ntpq -p' "
return self.install_ntp, self. enable_ntp, self.start_ntp, self.verify_ntp, self.ntp_sync,
def execute(self):
print 'Installing NTP on Mon Node'
commands = self.install_ntp_commands()
for command in commands:
command = self.ssh + command
log.info('Installing NTP in Mon Node ')
log.debug(command)
os.system(command)
class DisableSelinux(object):
def __init__(self,host):
self.host = host
self.ssh = 'ssh %s ' %host.hostname
def disable_selinux_commands(self):
print "Disable Selinux"
self.disable_cli = " 'sudo setenforce 0'"
self.disable_config = " 'sudo sed -i s/SELINUX=enforcing/SELINUX=permissive/ /etc/selinux/config' "
return self.disable_cli, self.disable_config,
def execute(self):
print 'Disabling Selinux'
commands = self.disable_selinux_commands()
for command in commands:
command = self.ssh + command
log.info('Disabling Selinux ')
log.debug(command)
os.system(command)
class Adjustpid(object):
def __init__(self,host):
self.host = host
self.ssh = 'ssh %s ' %host.hostname
def adjust_pid_command(self):
print "adjust pid"
self.modify = " 'echo 4194303 | sudo tee /proc/sys/kernel/pid_max' "
self.save_changes = " 'sudo sysctl -p' "
return self.modify, self.save_changes,
def execute(self):
print 'adjust pid'
commands = self.adjust_pid_command()
for command in commands:
command = self.ssh + command
log.info('adjust pid')
log.debug(command)
os.system(command)
class DoMonSettings(object):
def __init__(self, mons, creds):
self.mons = mons
self.qa_username = creds['qa_username']
self.qa_password = creds['qa_password']
self.pool_id = creds['pool_id']
def do_settings(self):
log.info('in mon pre settings')
for each_mon in self.mons:
time.sleep(5)
add_repos = EnableRepos(each_mon, self.qa_username, self.qa_password, self.pool_id)
add_repos.execute()
firewall_setting = MonFireWallSettings(each_mon)
firewall_setting.execute()
install_ntp = InstallNTP(each_mon)
install_ntp.execute()
disable_selinux = DisableSelinux(each_mon)
disable_selinux.execute()
adjust_pid = Adjustpid(each_mon)
adjust_pid.execute()
|
self.unregister = " 'sudo subscription-manager unregister' "
self.register_node = " 'sudo subscription-manager register --username %s --password %s' " % (self.username, self.password)
|
api_get_snapshot.go
|
/*
Copyright 2020 Authors of Arktos.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package httpserver
import (
"k8s.io/klog"
"net/http"
"k8s.io/kubernetes/globalscheduler/pkg/scheduler/utils"
"k8s.io/kubernetes/resourcecollector/pkg/collector"
"github.com/emicklei/go-restful"
)
// Schedule get snapshot
func GetSnapshot(req *restful.Request, resp *restful.Response)
|
{
col, err := collector.GetCollector()
if err != nil {
klog.Errorf("get new collector failed, err: %s", err.Error())
utils.WriteFailedJSONResponse(resp, http.StatusInternalServerError, utils.InternalServerError())
return
}
snapshot, err := col.GetSnapshot()
if err != nil {
klog.Errorf("Collector snapshot failed! err : %s", err)
utils.WriteFailedJSONResponse(resp, http.StatusInternalServerError, utils.InternalServerError())
return
}
resp.WriteAsJson(snapshot)
}
|
|
collate_search_data.py
|
import os, glob, json, codecs
F_SEARCH = sorted(glob.glob("search_data/*"))
def
|
(f):
with codecs.open(f,'r','utf-8') as FIN:
js = json.loads(FIN.read())
return js
for f in F_SEARCH:
js = load_word_file(f)
try:
js["items"] = [item["full_name"] for item in js["items"]]
except:
print "{} looks to be collated already".format(f)
continue
with codecs.open(f,'w','utf-8') as FOUT:
FOUT.write(json.dumps(js))
print "Completed {}".format(f)
|
load_word_file
|
bitfield.rs
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Defines a consecutive sequence of bits.
#[derive(Copy, Clone)]
pub struct BitRange {
/// The first bit of the sequence.
pub start: usize,
/// The length in bits of the sequence.
pub length: usize,
}
impl BitRange {
/// Returns the first bit following a bit range.
pub fn end(self) -> usize {
self.start + self.length
}
}
/// Defines a consecutive sequence of bytes.
///
/// The bits in those bytes are ignored which essentially creates a gap in a sequence of bits. The
/// gap is necessarily at byte boundaries. This is used to ignore the user data in an entry
/// essentially providing a view of the entry information (header and footer).
#[derive(Copy, Clone)]
pub struct ByteGap {
pub start: usize,
pub length: usize,
}
/// Empty gap. All bits count.
pub const NO_GAP: ByteGap = ByteGap {
start: 0,
length: 0,
};
impl ByteGap {
/// Translates a bit to skip the gap.
fn shift(self, bit: usize) -> usize {
if bit < 8 * self.start {
bit
} else {
bit + 8 * self.length
}
}
/// Returns the slice of `data` corresponding to the gap.
pub fn slice(self, data: &[u8]) -> &[u8] {
&data[self.start..self.start + self.length]
}
}
/// Returns whether a bit is set in a sequence of bits.
///
/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that
/// are in `data` but not in `gap`.
pub fn is_zero(bit: usize, data: &[u8], gap: ByteGap) -> bool {
let bit = gap.shift(bit);
debug_assert!(bit < 8 * data.len());
data[bit / 8] & (1 << (bit % 8)) == 0
}
/// Sets a bit to zero in a sequence of bits.
///
/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that
/// are in `data` but not in `gap`.
pub fn set_zero(bit: usize, data: &mut [u8], gap: ByteGap) {
let bit = gap.shift(bit);
debug_assert!(bit < 8 * data.len());
data[bit / 8] &= !(1 << (bit % 8));
}
/// Returns a little-endian value in a sequence of bits.
///
/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that
/// are in `data` but not in `gap`. The range of bits where the value is stored in defined by
/// `range`. The value must fit in a `usize`.
pub fn get_range(range: BitRange, data: &[u8], gap: ByteGap) -> usize {
debug_assert!(range.length <= 8 * core::mem::size_of::<usize>());
let mut result = 0;
for i in 0..range.length {
if !is_zero(range.start + i, data, gap) {
result |= 1 << i;
}
}
result
}
/// Sets a little-endian value in a sequence of bits.
///
/// The sequence of bits is little-endian (both for bytes and bits) and defined by the bits that
/// are in `data` but not in `gap`. The range of bits where the value is stored in defined by
/// `range`. The bits set to 1 in `value` must also be set to `1` in the sequence of bits.
pub fn set_range(range: BitRange, data: &mut [u8], gap: ByteGap, value: usize) {
debug_assert!(range.length == 8 * core::mem::size_of::<usize>() || value < 1 << range.length);
for i in 0..range.length {
if value & 1 << i == 0 {
set_zero(range.start + i, data, gap);
}
}
}
/// Tests the `is_zero` and `set_zero` pair of functions.
#[test]
fn
|
() {
const GAP: ByteGap = ByteGap {
start: 2,
length: 1,
};
for i in 0..24 {
assert!(!is_zero(i, &[0xffu8, 0xff, 0x00, 0xff] as &[u8], GAP));
}
// Tests reading and setting a bit. The result should have all bits set to 1 except for the bit
// to test and the gap.
fn test(bit: usize, result: &[u8]) {
assert!(is_zero(bit, result, GAP));
let mut data = vec![0xff; result.len()];
// Set the gap bits to 0.
for i in 0..GAP.length {
data[GAP.start + i] = 0x00;
}
set_zero(bit, &mut data, GAP);
assert_eq!(data, result);
}
test(0, &[0xfe, 0xff, 0x00, 0xff]);
test(1, &[0xfd, 0xff, 0x00, 0xff]);
test(2, &[0xfb, 0xff, 0x00, 0xff]);
test(7, &[0x7f, 0xff, 0x00, 0xff]);
test(8, &[0xff, 0xfe, 0x00, 0xff]);
test(15, &[0xff, 0x7f, 0x00, 0xff]);
test(16, &[0xff, 0xff, 0x00, 0xfe]);
test(17, &[0xff, 0xff, 0x00, 0xfd]);
test(23, &[0xff, 0xff, 0x00, 0x7f]);
}
/// Tests the `get_range` and `set_range` pair of functions.
#[test]
fn range_ok() {
// Tests reading and setting a range. The result should have all bits set to 1 except for the
// range to test and the gap.
fn test(start: usize, length: usize, value: usize, result: &[u8], gap: ByteGap) {
let range = BitRange { start, length };
assert_eq!(get_range(range, result, gap), value);
let mut data = vec![0xff; result.len()];
for i in 0..gap.length {
data[gap.start + i] = 0x00;
}
set_range(range, &mut data, gap, value);
assert_eq!(data, result);
}
test(0, 8, 42, &[42], NO_GAP);
test(3, 12, 0b11_0101, &[0b1010_1111, 0b1000_0001], NO_GAP);
test(0, 16, 0x1234, &[0x34, 0x12], NO_GAP);
test(4, 16, 0x1234, &[0x4f, 0x23, 0xf1], NO_GAP);
let mut gap = ByteGap {
start: 1,
length: 1,
};
test(3, 12, 0b11_0101, &[0b1010_1111, 0x00, 0b1000_0001], gap);
gap.length = 2;
test(0, 16, 0x1234, &[0x34, 0x00, 0x00, 0x12], gap);
gap.start = 2;
gap.length = 1;
test(4, 16, 0x1234, &[0x4f, 0x23, 0x00, 0xf1], gap);
}
|
zero_ok
|
24_swap_nodes_in_pairs.py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def swapPairs(self, head: Optional[ListNode]) -> Optional[ListNode]:
return self.recursive(head)
def recursive(self, head):
if head is None or head.next is None:
|
current = head
successor = head.next
subproblem = head.next.next
# Make successor the new head
successor.next = current
current.next = self.recursive(subproblem)
return successor
|
return head
|
ramn28.rs
|
#[doc = "Register `RAMn28` reader"]
pub struct R(crate::R<RAMN28_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<RAMN28_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<RAMN28_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<RAMN28_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `RAMn28` writer"]
pub struct W(crate::W<RAMN28_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<RAMN28_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<RAMN28_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<RAMN28_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `DATA_BYTE_3` reader - Data byte 3 of Rx/Tx frame."]
pub struct DATA_BYTE_3_R(crate::FieldReader<u8, u8>);
impl DATA_BYTE_3_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
DATA_BYTE_3_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for DATA_BYTE_3_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `DATA_BYTE_3` writer - Data byte 3 of Rx/Tx frame."]
pub struct DATA_BYTE_3_W<'a> {
w: &'a mut W,
}
impl<'a> DATA_BYTE_3_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0xff) | (value as u32 & 0xff);
self.w
}
}
#[doc = "Field `DATA_BYTE_2` reader - Data byte 2 of Rx/Tx frame."]
pub struct DATA_BYTE_2_R(crate::FieldReader<u8, u8>);
impl DATA_BYTE_2_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
DATA_BYTE_2_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for DATA_BYTE_2_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `DATA_BYTE_2` writer - Data byte 2 of Rx/Tx frame."]
pub struct DATA_BYTE_2_W<'a> {
w: &'a mut W,
}
impl<'a> DATA_BYTE_2_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0xff << 8)) | ((value as u32 & 0xff) << 8);
self.w
}
}
#[doc = "Field `DATA_BYTE_1` reader - Data byte 1 of Rx/Tx frame."]
pub struct DATA_BYTE_1_R(crate::FieldReader<u8, u8>);
impl DATA_BYTE_1_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
DATA_BYTE_1_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for DATA_BYTE_1_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `DATA_BYTE_1` writer - Data byte 1 of Rx/Tx frame."]
pub struct DATA_BYTE_1_W<'a> {
w: &'a mut W,
}
impl<'a> DATA_BYTE_1_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0xff << 16)) | ((value as u32 & 0xff) << 16);
self.w
}
}
#[doc = "Field `DATA_BYTE_0` reader - Data byte 0 of Rx/Tx frame."]
pub struct DATA_BYTE_0_R(crate::FieldReader<u8, u8>);
impl DATA_BYTE_0_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
DATA_BYTE_0_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for DATA_BYTE_0_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
|
w: &'a mut W,
}
impl<'a> DATA_BYTE_0_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0xff << 24)) | ((value as u32 & 0xff) << 24);
self.w
}
}
impl R {
#[doc = "Bits 0:7 - Data byte 3 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_3(&self) -> DATA_BYTE_3_R {
DATA_BYTE_3_R::new((self.bits & 0xff) as u8)
}
#[doc = "Bits 8:15 - Data byte 2 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_2(&self) -> DATA_BYTE_2_R {
DATA_BYTE_2_R::new(((self.bits >> 8) & 0xff) as u8)
}
#[doc = "Bits 16:23 - Data byte 1 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_1(&self) -> DATA_BYTE_1_R {
DATA_BYTE_1_R::new(((self.bits >> 16) & 0xff) as u8)
}
#[doc = "Bits 24:31 - Data byte 0 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_0(&self) -> DATA_BYTE_0_R {
DATA_BYTE_0_R::new(((self.bits >> 24) & 0xff) as u8)
}
}
impl W {
#[doc = "Bits 0:7 - Data byte 3 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_3(&mut self) -> DATA_BYTE_3_W {
DATA_BYTE_3_W { w: self }
}
#[doc = "Bits 8:15 - Data byte 2 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_2(&mut self) -> DATA_BYTE_2_W {
DATA_BYTE_2_W { w: self }
}
#[doc = "Bits 16:23 - Data byte 1 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_1(&mut self) -> DATA_BYTE_1_W {
DATA_BYTE_1_W { w: self }
}
#[doc = "Bits 24:31 - Data byte 0 of Rx/Tx frame."]
#[inline(always)]
pub fn data_byte_0(&mut self) -> DATA_BYTE_0_W {
DATA_BYTE_0_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Embedded RAM\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [ramn28](index.html) module"]
pub struct RAMN28_SPEC;
impl crate::RegisterSpec for RAMN28_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [ramn28::R](R) reader structure"]
impl crate::Readable for RAMN28_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [ramn28::W](W) writer structure"]
impl crate::Writable for RAMN28_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets RAMn28 to value 0"]
impl crate::Resettable for RAMN28_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
|
#[doc = "Field `DATA_BYTE_0` writer - Data byte 0 of Rx/Tx frame."]
pub struct DATA_BYTE_0_W<'a> {
|
goal.js
|
import { VisFactoryProvider } from 'ui/vis/vis_factory';
import { VisSchemasProvider } from 'ui/vis/editors/default/schemas';
import { CATEGORY } from 'ui/vis/vis_category';
import gaugeTemplate from 'plugins/kbn_vislib_vis_types/editors/gauge.html';
import { vislibColorMaps } from 'ui/vislib/components/color/colormaps';
import image from './images/icon-goal.svg';
export default function GoalVisType(Private) {
const VisFactory = Private(VisFactoryProvider);
const Schemas = Private(VisSchemasProvider);
return VisFactory.createVislibVisualization({
name: 'goal',
title: 'Goal',
image,
description: 'A goal chart indicates how close you are to your final goal.',
category: CATEGORY.DATA,
visConfig: {
defaults: {
addTooltip: true,
addLegend: false,
isDisplayWarning: false,
type: 'gauge',
gauge: {
verticalSplit: false,
autoExtend: false,
percentageMode: true,
gaugeType: 'Arc',
gaugeStyle: 'Full',
backStyle: 'Full',
orientation: 'vertical',
useRanges: false,
colorSchema: 'Green to Red',
gaugeColorMode: 'None',
colorsRange: [
{ from: 0, to: 10000 }
],
invertColors: false,
labels: {
show: true,
color: 'black'
},
scale: {
show: false,
labels: false,
color: '#333',
width: 2
},
type: 'meter',
style: {
bgFill: '#000',
bgColor: false,
labelColor: false,
subText: '',
fontSize: 60,
}
}
},
},
|
gaugeTypes: ['Arc', 'Circle'],
gaugeColorMode: ['None', 'Labels', 'Background'],
scales: ['linear', 'log', 'square root'],
colorSchemas: Object.keys(vislibColorMaps),
},
optionsTemplate: gaugeTemplate,
schemas: new Schemas([
{
group: 'metrics',
name: 'metric',
title: 'Metric',
min: 1,
aggFilter: [
'!std_dev', '!geo_centroid', '!percentiles', '!percentile_ranks',
'!derivative', '!serial_diff', '!moving_avg', '!cumulative_sum', '!geo_bounds'],
defaults: [
{ schema: 'metric', type: 'count' }
]
},
{
group: 'buckets',
name: 'group',
title: 'Split Group',
min: 0,
max: 1,
aggFilter: ['!geohash_grid', '!filter']
}
])
}
});
}
|
editorConfig: {
collections: {
|
InvoicesList.js
|
import React from 'react'
import {Button, Panel} from 'react-bootstrap'
import {ItemsList} from 'components/common'
function InvoiceListItem ({id, customer, discount, total}) {
return <Panel
bsStyle='info'
header={`Invoice ${id}`}
footer={`Total price is $${total}`}
>
<p>Customer: {customer && customer.name}</p> {/*TODO: Add loading!*/}
<p>Discount is {discount}%</p>
</Panel>
}
type Props = {
data: Array
};
export default class
|
extends React.Component {
props: Props;
render () {
const {data} = this.props
return (
<ItemsList data={data} item={InvoiceListItem} />
)
}
}
|
InvoicesList
|
eval_attribution_attack.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import numpy as np
import shutil
import json
import math
import os
import sys
import time
import tensorflow as tf
import gtsrb_input
from model import Model
os.environ["CUDA_VISIBLE_DEVICES"]="0"
# Global constants
with open('config.json') as config_file:
config = json.load(config_file)
num_eval_examples = config['num_eval_examples']
epsilon = config['epsilon']
random_seed = config['np_random_seed']
model_dir = config['model_dir']
num_IG_steps = config['num_IG_steps']
k_top = config['k_top']
eval_k_top = config['eval_k_top']
saliency_type = config['saliency_type']
attribution_attack_method = config['attribution_attack_method']
attribution_attack_measure = config['attribution_attack_measure']
attribution_attack_step_size = config['attribution_attack_step_size']
attribution_attack_steps = config['attribution_attack_steps']
attribution_attack_times = config['attribution_attack_times']
data_path = config['data_path']
if saliency_type == 'ig':
from ig_attack import IntegratedGradientsAttack as SaliencyAttack
elif saliency_type == 'simple_gradient':
from simple_gradient_attack import SimpleGradientAttack as SaliencyAttack
else:
assert False, ('Unknown saliency type.')
np.random.seed(random_seed)
# Set upd the data, hyperparameters, and the model
gtsrb = gtsrb_input.GTSRBData(data_path)
reference_image = np.zeros((32,32,3))
model = Model(mode='eval', create_saliency_op=saliency_type)
saver = tf.train.Saver()
global_step = tf.contrib.framework.get_or_create_global_step()
checkpoint = tf.train.latest_checkpoint(model_dir)
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
with tf.Session(config = tf_config) as sess:
# Restore the checkpoint
saver.restore(sess, checkpoint)
test_images = gtsrb.eval_data.xs
test_labels = gtsrb.eval_data.ys
min_intersections = []
min_spearmans = []
min_kendalls = []
correct_cnt = 0
for i in range(num_eval_examples):
test_image = test_images[i]
original_label = test_labels[i]
module = SaliencyAttack(sess = sess, test_image = test_image, original_label = original_label, NET = model,
attack_method = attribution_attack_method, epsilon = epsilon,
|
attack_iters = attribution_attack_steps,
attack_times = attribution_attack_times,
alpha = attribution_attack_step_size, attack_measure = attribution_attack_measure,
reference_image = reference_image, same_label = True)
if module.status == 1:
correct_cnt += 1
intersections, spearmans, kendalls = module.iterative_attack()
idx = np.argmin(kendalls)
min_intersections.append(intersections[idx])
min_spearmans.append(spearmans[idx])
min_kendalls.append(kendalls[idx])
res_str = '{} {} '.format(i, 1)
for k in range(attribution_attack_times):
res_str += '{:.6f} {:.6f} {:.6f} '.format(intersections[k], spearmans[k], kendalls[k])
print('progress: {}/{}, {}'.format(i + 1, num_eval_examples, res_str))
else:
res_str = '{} {} '.format(i, 0)
for k in range(attribution_attack_times):
res_str += '{:.6f} {:.6f} {:.6f} '.format(0, 0, 0)
print('progress: {}/{}, prediction incorrect!'.format(i + 1, num_eval_examples))
avg_intersection = np.mean(min_intersections)
avg_spearman = np.mean(min_spearmans)
avg_kendall = np.mean(min_kendalls)
print('process {} examples'.format(num_eval_examples))
print('accuracy {}'.format(float(correct_cnt)/num_eval_examples))
print('Average top-k intersection: {:.4f}'.format(avg_intersection))
print('Average spearman rank correlation: {:.4f}'.format(avg_spearman))
print('Average kendall rank correlation: {:.4f}'.format(avg_kendall))
|
k_top = k_top, eval_k_top = eval_k_top, num_steps = num_IG_steps,
|
kronecker.py
|
import networkx as nx
import numpy as np
import torch
from torch.utils.data import Dataset
from dsloader.util import kron_graph, random_binary, make_fractional
class KroneckerDataset (Dataset):
def
|
(self, kron_iter=4, seed_size=4, fixed_seed=None, num_graphs=1, perms_per_graph=256, progress_bar=False):
self.kron_iter = kron_iter
self.seed_size = seed_size
self.num_nodes = seed_size ** (kron_iter + 1)
self.seeds = []
self.matrices = []
num_iter = range(num_graphs)
if progress_bar:
from tqdm import tqdm
num_iter = tqdm(num_iter)
for i in num_iter:
seed = random_binary(seed_size, use_sparsity=False)
self.seeds.append(seed)
if fixed_seed is not None:
k_g = kron_graph(fixed_seed, n=kron_iter).astype(np.float)
else:
k_g = kron_graph(seed, n=kron_iter).astype(np.float)
for j in range(perms_per_graph):
self.matrices.append(make_fractional(k_g, inplace=False))
def __len__(self):
return len(self.matrices)
def __getitem__(self, idx):
return torch.tensor(self.matrices[idx])
|
__init__
|
interface.go
|
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package rpcserviceiface provides an interface to enable mocking the RPC Service service client
// for testing your code.
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters.
package rpcserviceiface
import (
"github.com/Beeketing/aws-sdk-go/aws"
"github.com/Beeketing/aws-sdk-go/aws/request"
"github.com/Beeketing/aws-sdk-go/private/model/api/codegentest/service/rpcservice"
)
// RPCServiceAPI provides an interface to enable mocking the
// rpcservice.RPCService service client's API operation,
// paginators, and waiters. This make unit testing your code that calls out
// to the SDK's service client's calls easier.
//
// The best way to use this interface is so the SDK's service client's calls
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
// // myFunc uses an SDK service client to make a request to
// // RPC Service.
// func myFunc(svc rpcserviceiface.RPCServiceAPI) bool {
// // Make svc.EmptyStream request
// }
//
// func main() {
// sess := session.New()
// svc := rpcservice.New(sess)
//
// myFunc(svc)
// }
//
// In your _test.go file:
//
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockRPCServiceClient struct {
// rpcserviceiface.RPCServiceAPI
// }
// func (m *mockRPCServiceClient) EmptyStream(input *rpcservice.EmptyStreamInput) (*rpcservice.EmptyStreamOutput, error) {
// // mock response/functionality
// }
//
// func TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockRPCServiceClient{}
//
// myfunc(mockSvc)
//
// // Verify myFunc's functionality
// }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,
// and waiters. Its suggested to use the pattern above for testing, or using
// tooling to generate mocks to satisfy the interfaces.
|
EmptyStreamRequest(*rpcservice.EmptyStreamInput) (*request.Request, *rpcservice.EmptyStreamOutput)
GetEventStream(*rpcservice.GetEventStreamInput) (*rpcservice.GetEventStreamOutput, error)
GetEventStreamWithContext(aws.Context, *rpcservice.GetEventStreamInput, ...request.Option) (*rpcservice.GetEventStreamOutput, error)
GetEventStreamRequest(*rpcservice.GetEventStreamInput) (*request.Request, *rpcservice.GetEventStreamOutput)
}
var _ RPCServiceAPI = (*rpcservice.RPCService)(nil)
|
type RPCServiceAPI interface {
EmptyStream(*rpcservice.EmptyStreamInput) (*rpcservice.EmptyStreamOutput, error)
EmptyStreamWithContext(aws.Context, *rpcservice.EmptyStreamInput, ...request.Option) (*rpcservice.EmptyStreamOutput, error)
|
searchengine.py
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.8
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_searchengine', [dirname(__file__)])
except ImportError:
import _searchengine
return _searchengine
if fp is not None:
try:
_mod = imp.load_module('_searchengine', fp, pathname, description)
finally:
fp.close()
return _mod
_searchengine = swig_import_helper()
del swig_import_helper
else:
import _searchengine
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
|
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
class TSearchIndex(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, TSearchIndex, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, TSearchIndex, name)
__repr__ = _swig_repr
def __init__(self, indexLocation):
this = _searchengine.new_TSearchIndex(indexLocation)
try:
self.this.append(this)
except Exception:
self.this = this
__swig_destroy__ = _searchengine.delete_TSearchIndex
__del__ = lambda self: None
def ExecuteQuery(self, keysStr, firstResult2Return, results2Return, objectsStr):
return _searchengine.TSearchIndex_ExecuteQuery(self, keysStr, firstResult2Return, results2Return, objectsStr)
TSearchIndex_swigregister = _searchengine.TSearchIndex_swigregister
TSearchIndex_swigregister(TSearchIndex)
cvar = _searchengine.cvar
OBJ_BITS = cvar.OBJ_BITS
SEGMENT_POS_BITS = cvar.SEGMENT_POS_BITS
WEIGHT_BITS = cvar.WEIGHT_BITS
NON_SEGMENT_BITS = cvar.NON_SEGMENT_BITS
MAX_SEGMENTS_PER_OBJECT = cvar.MAX_SEGMENTS_PER_OBJECT
MAX_WORDS4QUERY = cvar.MAX_WORDS4QUERY
MAX_KEYS2CONSIDER = cvar.MAX_KEYS2CONSIDER
CRUDE_FILTER_TRIM_PROPORTION = cvar.CRUDE_FILTER_TRIM_PROPORTION
MAX_OCCURENCES2RETURN = cvar.MAX_OCCURENCES2RETURN
# This file is compatible with both classic and new-style classes.
|
return _swig_getattr_nondynamic(self, class_type, name, 0)
|
create_experiment_sample.py
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
from google.cloud import aiplatform
# [START aiplatform_sdk_create_experiment_sample]
def create_experiment_sample(
experiment_name: str,
experiment_description: str,
experiment_tensorboard: Optional[Union[str, aiplatform.Tensorboard]],
project: str,
location: str,
):
|
# [END aiplatform_sdk_create_experiment_sample]
|
aiplatform.init(
experiment_name=experiment_name,
experiment_description=experiment_description,
experiment_tensorboard=experiment_tensorboard,
project=project,
location=location,
)
|
genclasses.go
|
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bind
import (
"fmt"
"path"
"reflect"
"strings"
"unicode"
"unicode/utf8"
"github.com/danbrough/mobile/internal/importers"
"github.com/danbrough/mobile/internal/importers/java"
)
type (
// ClassGen generates Go and C stubs for Java classes so import statements
// on the form
//
//
// import "Java/classpath/to/Class"
//
// will work.
ClassGen struct {
*Printer
// JavaPkg is the Java package prefix for the generated classes. The prefix is prepended to the Go
// package name to create the full Java package name.
JavaPkg string
imported map[string]struct{}
// The list of imported Java classes
classes []*java.Class
|
// The list of Go package paths with Java interfaces inside
jpkgs []string
// For each Go package path, the list of Java classes.
typePkgs map[string][]*java.Class
// For each Go package path, the Java class with static functions
// or constants.
clsPkgs map[string]*java.Class
// goClsMap is the map of Java class names to Go type names, qualified with package name. Go types
// that implement Java classes need Super methods and Unwrap methods.
goClsMap map[string]string
// goClsImports is the list of imports of user packages that contains the Go types implementing Java
// classes.
goClsImports []string
}
)
func (g *ClassGen) isSupported(t *java.Type) bool {
switch t.Kind {
case java.Array:
// TODO: Support all array types
return t.Elem.Kind == java.Byte
default:
return true
}
}
func (g *ClassGen) isFuncSetSupported(fs *java.FuncSet) bool {
for _, f := range fs.Funcs {
if g.isFuncSupported(f) {
return true
}
}
return false
}
func (g *ClassGen) isFuncSupported(f *java.Func) bool {
for _, a := range f.Params {
if !g.isSupported(a) {
return false
}
}
if f.Ret != nil {
return g.isSupported(f.Ret)
}
return true
}
func (g *ClassGen) goType(t *java.Type, local bool) string {
if t == nil {
// interface{} is used for parameters types for overloaded methods
// where no common ancestor type exists.
return "interface{}"
}
switch t.Kind {
case java.Int:
return "int32"
case java.Boolean:
return "bool"
case java.Short:
return "int16"
case java.Char:
return "uint16"
case java.Byte:
return "byte"
case java.Long:
return "int64"
case java.Float:
return "float32"
case java.Double:
return "float64"
case java.String:
return "string"
case java.Array:
return "[]" + g.goType(t.Elem, local)
case java.Object:
name := goClsName(t.Class)
if !local {
name = "Java." + name
}
return name
default:
panic("invalid kind")
}
}
// Init initializes the class wrapper generator. Classes is the
// list of classes to wrap, goClasses is the list of Java classes
// implemented in Go.
func (g *ClassGen) Init(classes []*java.Class, goClasses []importers.Struct) {
g.goClsMap = make(map[string]string)
impMap := make(map[string]struct{})
for _, s := range goClasses {
n := s.Pkg + "." + s.Name
jn := n
if g.JavaPkg != "" {
jn = g.JavaPkg + "." + jn
}
g.goClsMap[jn] = n
if _, exists := impMap[s.PkgPath]; !exists {
impMap[s.PkgPath] = struct{}{}
g.goClsImports = append(g.goClsImports, s.PkgPath)
}
}
g.classes = classes
g.imported = make(map[string]struct{})
g.typePkgs = make(map[string][]*java.Class)
g.clsPkgs = make(map[string]*java.Class)
pkgSet := make(map[string]struct{})
for _, cls := range classes {
g.imported[cls.Name] = struct{}{}
clsPkg := strings.Replace(cls.Name, ".", "/", -1)
g.clsPkgs[clsPkg] = cls
typePkg := path.Dir(clsPkg)
g.typePkgs[typePkg] = append(g.typePkgs[typePkg], cls)
if _, exists := pkgSet[clsPkg]; !exists {
pkgSet[clsPkg] = struct{}{}
g.jpkgs = append(g.jpkgs, clsPkg)
}
if _, exists := pkgSet[typePkg]; !exists {
pkgSet[typePkg] = struct{}{}
g.jpkgs = append(g.jpkgs, typePkg)
}
}
}
// Packages return the list of Go packages to be generated.
func (g *ClassGen) Packages() []string {
return g.jpkgs
}
func (g *ClassGen) GenPackage(idx int) {
jpkg := g.jpkgs[idx]
g.Printf(gobindPreamble)
g.Printf("package %s\n\n", path.Base(jpkg))
g.Printf("import \"Java\"\n\n")
g.Printf("const _ = Java.Dummy\n\n")
for _, cls := range g.typePkgs[jpkg] {
g.Printf("type %s Java.%s\n", cls.PkgName, goClsName(cls.Name))
}
if cls, ok := g.clsPkgs[jpkg]; ok {
g.Printf("const (\n")
g.Indent()
// Constants
for _, v := range cls.Vars {
if g.isSupported(v.Type) && v.Constant() {
g.Printf("%s = %s\n", initialUpper(v.Name), v.Val)
}
}
g.Outdent()
g.Printf(")\n\n")
g.Printf("var (\n")
g.Indent()
// Functions
loop:
for _, fs := range cls.Funcs {
for _, f := range fs.Funcs {
if f.Public && g.isFuncSupported(f) {
g.Printf("%s func", fs.GoName)
g.genFuncDecl(false, fs)
g.Printf("\n")
continue loop
}
}
}
g.Printf("// Cast takes a proxy for a Java object and converts it to a %s proxy.\n", cls.Name)
g.Printf("// Cast panics if the argument is not a proxy or if the underlying object does\n")
g.Printf("// not extend or implement %s.\n", cls.Name)
g.Printf("Cast func(v interface{}) Java.%s\n", goClsName(cls.Name))
g.Outdent()
g.Printf(")\n\n")
}
}
func (g *ClassGen) GenGo() {
g.Printf(classesGoHeader)
for _, cls := range g.classes {
pkgName := strings.Replace(cls.Name, ".", "/", -1)
g.Printf("import %q\n", "Java/"+pkgName)
}
for _, imp := range g.goClsImports {
g.Printf("import %q\n", imp)
}
if len(g.classes) > 0 {
g.Printf("import \"unsafe\"\n\n")
g.Printf("import \"reflect\"\n\n")
g.Printf("import \"fmt\"\n\n")
}
g.Printf("type proxy interface { Bind_proxy_refnum__() int32 }\n\n")
g.Printf("// Suppress unused package error\n\n")
g.Printf("var _ = _seq.FromRefNum\n")
g.Printf("const _ = Java.Dummy\n\n")
g.Printf("//export initClasses\n")
g.Printf("func initClasses() {\n")
g.Indent()
g.Printf("C.init_proxies()\n")
for _, cls := range g.classes {
g.Printf("init_%s()\n", cls.JNIName)
}
g.Outdent()
g.Printf("}\n\n")
for _, cls := range g.classes {
g.genGo(cls)
}
}
func (g *ClassGen) GenH() {
g.Printf(classesHHeader)
for _, tn := range []string{"jint", "jboolean", "jshort", "jchar", "jbyte", "jlong", "jfloat", "jdouble", "nstring", "nbyteslice"} {
g.Printf("typedef struct ret_%s {\n", tn)
g.Printf(" %s res;\n", tn)
g.Printf(" jint exc;\n")
g.Printf("} ret_%s;\n", tn)
}
g.Printf("\n")
for _, cls := range g.classes {
for _, fs := range cls.AllMethods {
for _, f := range fs.Funcs {
if !g.isFuncSupported(f) {
continue
}
g.Printf("extern ")
g.genCMethodDecl("cproxy", cls.JNIName, f)
g.Printf(";\n")
if _, ok := g.goClsMap[cls.Name]; ok {
g.Printf("extern ")
g.genCMethodDecl("csuper", cls.JNIName, f)
g.Printf(";\n")
}
}
}
}
for _, cls := range g.classes {
g.genH(cls)
}
}
func (g *ClassGen) GenC() {
g.Printf(classesCHeader)
for _, cls := range g.classes {
g.Printf("static jclass class_%s;\n", cls.JNIName)
if _, ok := g.goClsMap[cls.Name]; ok {
g.Printf("static jclass sclass_%s;\n", cls.JNIName)
}
for _, fs := range cls.Funcs {
for _, f := range fs.Funcs {
if !f.Public || !g.isFuncSupported(f) {
continue
}
g.Printf("static jmethodID m_s_%s_%s;\n", cls.JNIName, f.JNIName)
}
}
for _, fs := range cls.AllMethods {
for _, f := range fs.Funcs {
if g.isFuncSupported(f) {
g.Printf("static jmethodID m_%s_%s;\n", cls.JNIName, f.JNIName)
if _, ok := g.goClsMap[cls.Name]; ok {
g.Printf("static jmethodID sm_%s_%s;\n", cls.JNIName, f.JNIName)
}
}
}
}
g.genC(cls)
}
g.Printf("\n")
g.Printf("void init_proxies() {\n")
g.Indent()
g.Printf("JNIEnv *env = go_seq_push_local_frame(%d);\n", len(g.classes))
g.Printf("jclass clazz;\n")
for _, cls := range g.classes {
g.Printf("clazz = go_seq_find_class(%q);\n", strings.Replace(cls.FindName, ".", "/", -1))
g.Printf("if (clazz != NULL) {\n")
g.Indent()
g.Printf("class_%s = (*env)->NewGlobalRef(env, clazz);\n", cls.JNIName)
if _, ok := g.goClsMap[cls.Name]; ok {
g.Printf("sclass_%s = (*env)->GetSuperclass(env, clazz);\n", cls.JNIName)
g.Printf("sclass_%s = (*env)->NewGlobalRef(env, sclass_%s);\n", cls.JNIName, cls.JNIName)
}
for _, fs := range cls.Funcs {
for _, f := range fs.Funcs {
if !f.Public || !g.isFuncSupported(f) {
continue
}
g.Printf("m_s_%s_%s = ", cls.JNIName, f.JNIName)
if f.Constructor {
g.Printf("go_seq_get_method_id(clazz, \"<init>\", %q);\n", f.Desc)
} else {
g.Printf("go_seq_get_static_method_id(clazz, %q, %q);\n", f.Name, f.Desc)
}
}
}
for _, fs := range cls.AllMethods {
for _, f := range fs.Funcs {
if g.isFuncSupported(f) {
g.Printf("m_%s_%s = go_seq_get_method_id(clazz, %q, %q);\n", cls.JNIName, f.JNIName, f.Name, f.Desc)
if _, ok := g.goClsMap[cls.Name]; ok {
g.Printf("sm_%s_%s = go_seq_get_method_id(sclass_%s, %q, %q);\n", cls.JNIName, f.JNIName, cls.JNIName, f.Name, f.Desc)
}
}
}
}
g.Outdent()
g.Printf("}\n")
}
g.Printf("go_seq_pop_local_frame(env);\n")
g.Outdent()
g.Printf("}\n\n")
for _, cls := range g.classes {
for _, fs := range cls.AllMethods {
for _, f := range fs.Funcs {
if !g.isFuncSupported(f) {
continue
}
g.genCMethodDecl("cproxy", cls.JNIName, f)
g.genCMethodBody(cls, f, false)
if _, ok := g.goClsMap[cls.Name]; ok {
g.genCMethodDecl("csuper", cls.JNIName, f)
g.genCMethodBody(cls, f, true)
}
}
}
}
}
func (g *ClassGen) GenInterfaces() {
g.Printf(classesPkgHeader)
for _, cls := range g.classes {
g.genInterface(cls)
}
}
func (g *ClassGen) genCMethodBody(cls *java.Class, f *java.Func, virtual bool) {
g.Printf(" {\n")
g.Indent()
// Add 1 for the 'this' argument
g.Printf("JNIEnv *env = go_seq_push_local_frame(%d);\n", len(f.Params)+1)
g.Printf("// Must be a Java object\n")
g.Printf("jobject _this = go_seq_from_refnum(env, this, NULL, NULL);\n")
for i, a := range f.Params {
g.genCToJava(fmt.Sprintf("a%d", i), a)
}
if f.Ret != nil {
g.Printf("%s res = ", f.Ret.JNIType())
}
g.Printf("(*env)->Call")
if virtual {
g.Printf("Nonvirtual")
}
if f.Ret != nil {
g.Printf(f.Ret.JNICallType())
} else {
g.Printf("Void")
}
g.Printf("Method(env, _this, ")
if virtual {
g.Printf("sclass_%s, sm_%s_%s", cls.JNIName, cls.JNIName, f.JNIName)
} else {
g.Printf("m_%s_%s", cls.JNIName, f.JNIName)
}
for i := range f.Params {
g.Printf(", _a%d", i)
}
g.Printf(");\n")
g.Printf("jobject _exc = go_seq_get_exception(env);\n")
g.Printf("int32_t _exc_ref = go_seq_to_refnum(env, _exc);\n")
if f.Ret != nil {
g.genCRetClear("res", f.Ret, "_exc")
g.genJavaToC("res", f.Ret)
}
g.Printf("go_seq_pop_local_frame(env);\n")
if f.Ret != nil {
g.Printf("ret_%s __res = {_res, _exc_ref};\n", f.Ret.CType())
g.Printf("return __res;\n")
} else {
g.Printf("return _exc_ref;\n")
}
g.Outdent()
g.Printf("}\n\n")
}
func initialUpper(s string) string {
if s == "" {
return ""
}
r, n := utf8.DecodeRuneInString(s)
return string(unicode.ToUpper(r)) + s[n:]
}
func (g *ClassGen) genFuncDecl(local bool, fs *java.FuncSet) {
g.Printf("(")
for i, a := range fs.Params {
if i > 0 {
g.Printf(", ")
}
g.Printf("a%d ", i)
if i == len(fs.Params)-1 && fs.Variadic {
g.Printf("...")
}
g.Printf(g.goType(a, local))
}
g.Printf(")")
if fs.Throws {
if fs.HasRet {
g.Printf(" (%s, error)", g.goType(fs.Ret, local))
} else {
g.Printf(" error")
}
} else if fs.HasRet {
g.Printf(" %s", g.goType(fs.Ret, local))
}
}
func (g *ClassGen) genC(cls *java.Class) {
for _, fs := range cls.Funcs {
for _, f := range fs.Funcs {
if !f.Public || !g.isFuncSupported(f) {
continue
}
g.genCFuncDecl(cls.JNIName, f)
g.Printf(" {\n")
g.Indent()
g.Printf("JNIEnv *env = go_seq_push_local_frame(%d);\n", len(f.Params))
for i, a := range f.Params {
g.genCToJava(fmt.Sprintf("a%d", i), a)
}
if f.Constructor {
g.Printf("jobject res = (*env)->NewObject(env")
} else if f.Ret != nil {
g.Printf("%s res = (*env)->CallStatic%sMethod(env", f.Ret.JNIType(), f.Ret.JNICallType())
} else {
g.Printf("(*env)->CallStaticVoidMethod(env")
}
g.Printf(", class_%s, m_s_%s_%s", cls.JNIName, cls.JNIName, f.JNIName)
for i := range f.Params {
g.Printf(", _a%d", i)
}
g.Printf(");\n")
g.Printf("jobject _exc = go_seq_get_exception(env);\n")
g.Printf("int32_t _exc_ref = go_seq_to_refnum(env, _exc);\n")
if f.Ret != nil {
g.genCRetClear("res", f.Ret, "_exc")
g.genJavaToC("res", f.Ret)
}
g.Printf("go_seq_pop_local_frame(env);\n")
if f.Ret != nil {
g.Printf("ret_%s __res = {_res, _exc_ref};\n", f.Ret.CType())
g.Printf("return __res;\n")
} else {
g.Printf("return _exc_ref;\n")
}
g.Outdent()
g.Printf("}\n\n")
}
}
}
func (g *ClassGen) genH(cls *java.Class) {
for _, fs := range cls.Funcs {
for _, f := range fs.Funcs {
if !f.Public || !g.isFuncSupported(f) {
continue
}
g.Printf("extern ")
g.genCFuncDecl(cls.JNIName, f)
g.Printf(";\n")
}
}
}
func (g *ClassGen) genCMethodDecl(prefix, jniName string, f *java.Func) {
if f.Ret != nil {
g.Printf("ret_%s", f.Ret.CType())
} else {
// Return only the exception, if any
g.Printf("jint")
}
g.Printf(" %s_%s_%s(jint this", prefix, jniName, f.JNIName)
for i, a := range f.Params {
g.Printf(", %s a%d", a.CType(), i)
}
g.Printf(")")
}
func (g *ClassGen) genCFuncDecl(jniName string, f *java.Func) {
if f.Ret != nil {
g.Printf("ret_%s", f.Ret.CType())
} else {
// Return only the exception, if any
g.Printf("jint")
}
g.Printf(" cproxy_s_%s_%s(", jniName, f.JNIName)
for i, a := range f.Params {
if i > 0 {
g.Printf(", ")
}
g.Printf("%s a%d", a.CType(), i)
}
g.Printf(")")
}
func (g *ClassGen) genGo(cls *java.Class) {
g.Printf("var class_%s C.jclass\n\n", cls.JNIName)
g.Printf("func init_%s() {\n", cls.JNIName)
g.Indent()
g.Printf("cls := C.CString(%q)\n", strings.Replace(cls.FindName, ".", "/", -1))
g.Printf("clazz := C.go_seq_find_class(cls)\n")
g.Printf("C.free(unsafe.Pointer(cls))\n")
// Before Go 1.11 clazz was a pointer value, an uintptr after.
g.Printf("if uintptr(clazz) == 0 {\n")
g.Printf(" return\n")
g.Printf("}\n")
g.Printf("class_%s = clazz\n", cls.JNIName)
for _, fs := range cls.Funcs {
var supported bool
for _, f := range fs.Funcs {
if f.Public && g.isFuncSupported(f) {
supported = true
break
}
}
if !supported {
continue
}
g.Printf("%s.%s = func", cls.PkgName, fs.GoName)
g.genFuncDecl(false, fs)
g.genFuncBody(cls, fs, "cproxy_s", true)
}
g.Printf("%s.Cast = func(v interface{}) Java.%s {\n", cls.PkgName, goClsName(cls.Name))
g.Indent()
g.Printf("t := reflect.TypeOf((*proxy_class_%s)(nil))\n", cls.JNIName)
g.Printf("cv := reflect.ValueOf(v).Convert(t).Interface().(*proxy_class_%s)\n", cls.JNIName)
g.Printf("ref := C.jint(_seq.ToRefNum(cv))\n")
g.Printf("if C.go_seq_isinstanceof(ref, class_%s) != 1 {\n", cls.JNIName)
g.Printf(" panic(fmt.Errorf(\"%%T is not an instance of %%s\", v, %q))\n", cls.Name)
g.Printf("}\n")
g.Printf("return cv\n")
g.Outdent()
g.Printf("}\n")
g.Outdent()
g.Printf("}\n\n")
g.Printf("type proxy_class_%s _seq.Ref\n\n", cls.JNIName)
g.Printf("func (p *proxy_class_%s) Bind_proxy_refnum__() int32 {\n", cls.JNIName)
g.Indent()
g.Printf("return (*_seq.Ref)(p).Bind_IncNum()\n")
g.Outdent()
g.Printf("}\n\n")
for _, fs := range cls.AllMethods {
if !g.isFuncSetSupported(fs) {
continue
}
g.Printf("func (p *proxy_class_%s) %s", cls.JNIName, fs.GoName)
g.genFuncDecl(false, fs)
g.genFuncBody(cls, fs, "cproxy", false)
}
if cls.Throwable {
g.Printf("func (p *proxy_class_%s) Error() string {\n", cls.JNIName)
g.Printf(" return p.ToString()\n")
g.Printf("}\n")
}
if goName, ok := g.goClsMap[cls.Name]; ok {
g.Printf("func (p *proxy_class_%s) Super() Java.%s {\n", cls.JNIName, goClsName(cls.Name))
g.Printf(" return &super_%s{p}\n", cls.JNIName)
g.Printf("}\n\n")
g.Printf("type super_%s struct {*proxy_class_%[1]s}\n\n", cls.JNIName)
g.Printf("func (p *proxy_class_%s) Unwrap() interface{} {\n", cls.JNIName)
g.Indent()
g.Printf("goRefnum := C.go_seq_unwrap(C.jint(p.Bind_proxy_refnum__()))\n")
g.Printf("return _seq.FromRefNum(int32(goRefnum)).Get().(*%s)\n", goName)
g.Outdent()
g.Printf("}\n\n")
for _, fs := range cls.AllMethods {
if !g.isFuncSetSupported(fs) {
continue
}
g.Printf("func (p *super_%s) %s", cls.JNIName, fs.GoName)
g.genFuncDecl(false, fs)
g.genFuncBody(cls, fs, "csuper", false)
}
}
}
// genFuncBody generated a Go function body for a FuncSet. It resolves overloading dynamically,
// by inspecting the number of arguments (if the FuncSet contains varying parameter counts),
// and their types.
func (g *ClassGen) genFuncBody(cls *java.Class, fs *java.FuncSet, prefix string, static bool) {
maxp := len(fs.Funcs[0].Params)
minp := maxp
// sort the function variants into argument sizes.
buckets := make(map[int][]*java.Func)
numF := 0
for _, f := range fs.Funcs {
if !g.isFuncSupported(f) {
continue
}
numF++
n := len(f.Params)
if n < minp {
minp = n
} else if n > maxp {
maxp = n
}
buckets[n] = append(buckets[n], f)
}
g.Printf(" {\n")
g.Indent()
if len(buckets) != 1 {
// Switch over the number of arguments.
g.Printf("switch %d + len(a%d) {\n", minp, minp)
}
for i := minp; i <= maxp; i++ {
funcs := buckets[i]
if len(funcs) == 0 {
continue
}
if len(buckets) != 1 {
g.Printf("case %d:\n", i)
g.Indent()
}
for _, f := range funcs {
if len(funcs) > 1 {
g.Printf("{\n")
g.Indent()
}
var argNames []string
var preds []string
for i, a := range f.Params {
var ct *java.Type
var argName string
if i >= minp {
argName = fmt.Sprintf("a%d[%d]", minp, i-minp)
ct = fs.Params[minp]
} else {
argName = fmt.Sprintf("a%d", i)
ct = fs.Params[i]
}
if !reflect.DeepEqual(ct, a) {
g.Printf("_a%d, ok%d := %s.(%s)\n", i, i, argName, g.goType(a, false))
argName = fmt.Sprintf("_a%d", i)
preds = append(preds, fmt.Sprintf("ok%d", i))
}
argNames = append(argNames, argName)
}
if len(preds) > 0 {
g.Printf("if %s {\n", strings.Join(preds, " && "))
g.Indent()
}
for i, a := range f.Params {
g.genWrite(fmt.Sprintf("__a%d", i), argNames[i], a, modeTransient)
}
g.Printf("res := C.%s_%s_%s(", prefix, cls.JNIName, f.JNIName)
if !static {
g.Printf("C.jint(p.Bind_proxy_refnum__())")
}
for i := range f.Params {
if !static || i > 0 {
g.Printf(", ")
}
g.Printf("__a%d", i)
}
g.Printf(")\n")
g.genFuncRet(fs, f, numF > 1)
if len(preds) > 0 {
g.Outdent()
g.Printf("}\n")
}
if len(funcs) > 1 {
g.Outdent()
g.Printf("}\n")
}
}
if len(buckets) != 1 {
g.Outdent()
}
}
if len(buckets) != 1 {
g.Printf("}\n")
}
if numF > 1 {
g.Printf("panic(\"no overloaded method found for %s.%s that matched the arguments\")\n", cls.Name, fs.Name)
}
g.Outdent()
g.Printf("}\n\n")
}
func (g *ClassGen) genFuncRet(fs *java.FuncSet, f *java.Func, mustReturn bool) {
if f.Ret != nil {
g.genRead("_res", "res.res", f.Ret, modeRetained)
g.genRefRead("_exc", "res.exc", "error", "proxy_error", true)
} else {
g.genRefRead("_exc", "res", "error", "proxy_error", true)
}
if !fs.Throws {
g.Printf("if (_exc != nil) { panic(_exc) }\n")
if fs.HasRet {
if f.Ret != nil {
g.Printf("return _res\n")
} else {
// The variant doesn't return a value, but the common
// signature does. Use nil as a placeholder return value.
g.Printf("return nil\n")
}
} else if mustReturn {
// If there are overloaded variants, return here to avoid the fallback
// panic generated in genFuncBody.
g.Printf("return\n")
}
} else {
if fs.HasRet {
if f.Ret != nil {
g.Printf("return _res, _exc\n")
} else {
// As above, use a nil placeholder return value.
g.Printf("return nil, _exc\n")
}
} else {
g.Printf("return _exc\n")
}
}
}
func (g *ClassGen) genRead(to, from string, t *java.Type, mode varMode) {
switch t.Kind {
case java.Int, java.Short, java.Char, java.Byte, java.Long, java.Float, java.Double:
g.Printf("%s := %s(%s)\n", to, g.goType(t, false), from)
case java.Boolean:
g.Printf("%s := %s != C.JNI_FALSE\n", to, from)
case java.String:
g.Printf("%s := decodeString(%s)\n", to, from)
case java.Array:
if t.Elem.Kind != java.Byte {
panic("unsupported array type")
}
g.Printf("%s := toSlice(%s, %v)\n", to, from, mode == modeRetained)
case java.Object:
_, hasProxy := g.imported[t.Class]
g.genRefRead(to, from, g.goType(t, false), "proxy_class_"+flattenName(t.Class), hasProxy)
default:
panic("invalid kind")
}
}
func (g *ClassGen) genRefRead(to, from string, intfName, proxyName string, hasProxy bool) {
g.Printf("var %s %s\n", to, intfName)
g.Printf("%s_ref := _seq.FromRefNum(int32(%s))\n", to, from)
g.Printf("if %s_ref != nil {\n", to)
g.Printf(" if %s < 0 { // go object\n", from)
g.Printf(" %s = %s_ref.Get().(%s)\n", to, to, intfName)
g.Printf(" } else { // foreign object\n")
if hasProxy {
g.Printf(" %s = (*%s)(%s_ref)\n", to, proxyName, to)
} else {
g.Printf(" %s = %s_ref\n", to, to)
}
g.Printf(" }\n")
g.Printf("}\n")
}
func (g *ClassGen) genWrite(dst, v string, t *java.Type, mode varMode) {
switch t.Kind {
case java.Int, java.Short, java.Char, java.Byte, java.Long, java.Float, java.Double:
g.Printf("%s := C.%s(%s)\n", dst, t.CType(), v)
case java.Boolean:
g.Printf("%s := C.jboolean(C.JNI_FALSE)\n", dst)
g.Printf("if %s {\n", v)
g.Printf(" %s = C.jboolean(C.JNI_TRUE)\n", dst)
g.Printf("}\n")
case java.String:
g.Printf("%s := encodeString(%s)\n", dst, v)
case java.Array:
if t.Elem.Kind != java.Byte {
panic("unsupported array type")
}
g.Printf("%s := fromSlice(%s, %v)\n", dst, v, mode == modeRetained)
case java.Object:
g.Printf("var %s C.jint = _seq.NullRefNum\n", dst)
g.Printf("if %s != nil {\n", v)
g.Printf(" %s = C.jint(_seq.ToRefNum(%s))\n", dst, v)
g.Printf("}\n")
default:
panic("invalid kind")
}
}
// genCRetClear clears the result value from a JNI call if an exception was
// raised.
func (g *ClassGen) genCRetClear(v string, t *java.Type, exc string) {
g.Printf("if (%s != NULL) {\n", exc)
g.Indent()
switch t.Kind {
case java.Int, java.Short, java.Char, java.Byte, java.Long, java.Float, java.Double, java.Boolean:
g.Printf("%s = 0;\n", v)
default:
// Assume a nullable type. It will break if we missed a type.
g.Printf("%s = NULL;\n", v)
}
g.Outdent()
g.Printf("}\n")
}
func (g *ClassGen) genJavaToC(v string, t *java.Type) {
switch t.Kind {
case java.Int, java.Short, java.Char, java.Byte, java.Long, java.Float, java.Double, java.Boolean:
g.Printf("%s _%s = %s;\n", t.JNIType(), v, v)
case java.String:
g.Printf("nstring _%s = go_seq_from_java_string(env, %s);\n", v, v)
case java.Array:
if t.Elem.Kind != java.Byte {
panic("unsupported array type")
}
g.Printf("nbyteslice _%s = go_seq_from_java_bytearray(env, %s, 1);\n", v, v)
case java.Object:
g.Printf("jint _%s = go_seq_to_refnum(env, %s);\n", v, v)
default:
panic("invalid kind")
}
}
func (g *ClassGen) genCToJava(v string, t *java.Type) {
switch t.Kind {
case java.Int, java.Short, java.Char, java.Byte, java.Long, java.Float, java.Double, java.Boolean:
g.Printf("%s _%s = %s;\n", t.JNIType(), v, v)
case java.String:
g.Printf("jstring _%s = go_seq_to_java_string(env, %s);\n", v, v)
case java.Array:
if t.Elem.Kind != java.Byte {
panic("unsupported array type")
}
g.Printf("jbyteArray _%s = go_seq_to_java_bytearray(env, %s, 0);\n", v, v)
case java.Object:
g.Printf("jobject _%s = go_seq_from_refnum(env, %s, NULL, NULL);\n", v, v)
default:
panic("invalid kind")
}
}
func goClsName(n string) string {
return initialUpper(strings.Replace(n, ".", "_", -1))
}
func (g *ClassGen) genInterface(cls *java.Class) {
g.Printf("type %s interface {\n", goClsName(cls.Name))
g.Indent()
// Methods
for _, fs := range cls.AllMethods {
if !g.isFuncSetSupported(fs) {
continue
}
g.Printf(fs.GoName)
g.genFuncDecl(true, fs)
g.Printf("\n")
}
if goName, ok := g.goClsMap[cls.Name]; ok {
g.Printf("Super() %s\n", goClsName(cls.Name))
g.Printf("// Unwrap returns the Go object this Java instance\n")
g.Printf("// is wrapping.\n")
g.Printf("// The return value is a %s, but the delclared type is\n", goName)
g.Printf("// interface{} to avoid import cycles.\n")
g.Printf("Unwrap() interface{}\n")
}
if cls.Throwable {
g.Printf("Error() string\n")
}
g.Outdent()
g.Printf("}\n\n")
}
// Flatten java class names. "java.package.Class$Inner" is converted to
// "java_package_Class_Inner"
func flattenName(n string) string {
return strings.Replace(strings.Replace(n, ".", "_", -1), "$", "_", -1)
}
var (
classesPkgHeader = gobindPreamble + `
package Java
// Used to silence this package not used errors
const Dummy = 0
`
classesCHeader = gobindPreamble + `
#include <jni.h>
#include "seq.h"
#include "classes.h"
`
classesHHeader = gobindPreamble + `
#include <jni.h>
#include "seq.h"
extern void init_proxies();
`
javaImplHeader = gobindPreamble
classesGoHeader = gobindPreamble + `
package main
/*
#include <stdlib.h> // for free()
#include <jni.h>
#include "seq.h"
#include "classes.h"
*/
import "C"
import (
"Java"
_seq "github.com/danbrough/mobile/bind/seq"
)
`
)
| |
automation.go
|
// *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20190101preview
import (
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// The security automation resource.
type Automation struct {
pulumi.CustomResourceState
// A collection of the actions which are triggered if all the configured rules evaluations, within at least one rule set, are true.
Actions pulumi.ArrayOutput `pulumi:"actions"`
// The security automation description.
Description pulumi.StringPtrOutput `pulumi:"description"`
// Entity tag is used for comparing two or more entities from the same requested resource.
Etag pulumi.StringPtrOutput `pulumi:"etag"`
// Indicates whether the security automation is enabled.
IsEnabled pulumi.BoolPtrOutput `pulumi:"isEnabled"`
// Kind of the resource
Kind pulumi.StringPtrOutput `pulumi:"kind"`
// Location where the resource is stored
Location pulumi.StringPtrOutput `pulumi:"location"`
// Resource name
Name pulumi.StringOutput `pulumi:"name"`
// A collection of scopes on which the security automations logic is applied. Supported scopes are the subscription itself or a resource group under that subscription. The automation will only apply on defined scopes.
Scopes AutomationScopeResponseArrayOutput `pulumi:"scopes"`
// A collection of the source event types which evaluate the security automation set of rules.
Sources AutomationSourceResponseArrayOutput `pulumi:"sources"`
// A list of key value pairs that describe the resource.
Tags pulumi.StringMapOutput `pulumi:"tags"`
// Resource type
Type pulumi.StringOutput `pulumi:"type"`
}
// NewAutomation registers a new resource with the given unique name, arguments, and options.
func NewAutomation(ctx *pulumi.Context,
name string, args *AutomationArgs, opts ...pulumi.ResourceOption) (*Automation, error)
|
// GetAutomation gets an existing Automation resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetAutomation(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *AutomationState, opts ...pulumi.ResourceOption) (*Automation, error) {
var resource Automation
err := ctx.ReadResource("azure-nextgen:security/v20190101preview:Automation", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering Automation resources.
type automationState struct {
// A collection of the actions which are triggered if all the configured rules evaluations, within at least one rule set, are true.
Actions []interface{} `pulumi:"actions"`
// The security automation description.
Description *string `pulumi:"description"`
// Entity tag is used for comparing two or more entities from the same requested resource.
Etag *string `pulumi:"etag"`
// Indicates whether the security automation is enabled.
IsEnabled *bool `pulumi:"isEnabled"`
// Kind of the resource
Kind *string `pulumi:"kind"`
// Location where the resource is stored
Location *string `pulumi:"location"`
// Resource name
Name *string `pulumi:"name"`
// A collection of scopes on which the security automations logic is applied. Supported scopes are the subscription itself or a resource group under that subscription. The automation will only apply on defined scopes.
Scopes []AutomationScopeResponse `pulumi:"scopes"`
// A collection of the source event types which evaluate the security automation set of rules.
Sources []AutomationSourceResponse `pulumi:"sources"`
// A list of key value pairs that describe the resource.
Tags map[string]string `pulumi:"tags"`
// Resource type
Type *string `pulumi:"type"`
}
type AutomationState struct {
// A collection of the actions which are triggered if all the configured rules evaluations, within at least one rule set, are true.
Actions pulumi.ArrayInput
// The security automation description.
Description pulumi.StringPtrInput
// Entity tag is used for comparing two or more entities from the same requested resource.
Etag pulumi.StringPtrInput
// Indicates whether the security automation is enabled.
IsEnabled pulumi.BoolPtrInput
// Kind of the resource
Kind pulumi.StringPtrInput
// Location where the resource is stored
Location pulumi.StringPtrInput
// Resource name
Name pulumi.StringPtrInput
// A collection of scopes on which the security automations logic is applied. Supported scopes are the subscription itself or a resource group under that subscription. The automation will only apply on defined scopes.
Scopes AutomationScopeResponseArrayInput
// A collection of the source event types which evaluate the security automation set of rules.
Sources AutomationSourceResponseArrayInput
// A list of key value pairs that describe the resource.
Tags pulumi.StringMapInput
// Resource type
Type pulumi.StringPtrInput
}
func (AutomationState) ElementType() reflect.Type {
return reflect.TypeOf((*automationState)(nil)).Elem()
}
type automationArgs struct {
// A collection of the actions which are triggered if all the configured rules evaluations, within at least one rule set, are true.
Actions []interface{} `pulumi:"actions"`
// The security automation name.
AutomationName string `pulumi:"automationName"`
// The security automation description.
Description *string `pulumi:"description"`
// Entity tag is used for comparing two or more entities from the same requested resource.
Etag *string `pulumi:"etag"`
// Indicates whether the security automation is enabled.
IsEnabled *bool `pulumi:"isEnabled"`
// Kind of the resource
Kind *string `pulumi:"kind"`
// Location where the resource is stored
Location *string `pulumi:"location"`
// The name of the resource group within the user's subscription. The name is case insensitive.
ResourceGroupName string `pulumi:"resourceGroupName"`
// A collection of scopes on which the security automations logic is applied. Supported scopes are the subscription itself or a resource group under that subscription. The automation will only apply on defined scopes.
Scopes []AutomationScope `pulumi:"scopes"`
// A collection of the source event types which evaluate the security automation set of rules.
Sources []AutomationSource `pulumi:"sources"`
// A list of key value pairs that describe the resource.
Tags map[string]string `pulumi:"tags"`
}
// The set of arguments for constructing a Automation resource.
type AutomationArgs struct {
// A collection of the actions which are triggered if all the configured rules evaluations, within at least one rule set, are true.
Actions pulumi.ArrayInput
// The security automation name.
AutomationName pulumi.StringInput
// The security automation description.
Description pulumi.StringPtrInput
// Entity tag is used for comparing two or more entities from the same requested resource.
Etag pulumi.StringPtrInput
// Indicates whether the security automation is enabled.
IsEnabled pulumi.BoolPtrInput
// Kind of the resource
Kind pulumi.StringPtrInput
// Location where the resource is stored
Location pulumi.StringPtrInput
// The name of the resource group within the user's subscription. The name is case insensitive.
ResourceGroupName pulumi.StringInput
// A collection of scopes on which the security automations logic is applied. Supported scopes are the subscription itself or a resource group under that subscription. The automation will only apply on defined scopes.
Scopes AutomationScopeArrayInput
// A collection of the source event types which evaluate the security automation set of rules.
Sources AutomationSourceArrayInput
// A list of key value pairs that describe the resource.
Tags pulumi.StringMapInput
}
func (AutomationArgs) ElementType() reflect.Type {
return reflect.TypeOf((*automationArgs)(nil)).Elem()
}
|
{
if args == nil || args.AutomationName == nil {
return nil, errors.New("missing required argument 'AutomationName'")
}
if args == nil || args.ResourceGroupName == nil {
return nil, errors.New("missing required argument 'ResourceGroupName'")
}
if args == nil {
args = &AutomationArgs{}
}
var resource Automation
err := ctx.RegisterResource("azure-nextgen:security/v20190101preview:Automation", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
|
formatter.go
|
/*
this file defines formatter for names and attributes used in openGauss-operator
*/
package util
import (
"fmt"
v1 "github.com/waterme7on/openGauss-operator/pkg/apis/opengausscontroller/v1"
)
func OpenGaussClusterFormatter(og *v1.OpenGauss) *openGaussClusterFormatter
|
type openGaussClusterFormatter struct {
OpenGauss *v1.OpenGauss
}
func (formatter *openGaussClusterFormatter) PersistentVolumeCLaimName() string {
return formatter.OpenGauss.Name + "-pvc"
}
func (formatter *openGaussClusterFormatter) MycatConfigMapName() string {
return formatter.OpenGauss.Name + "-mycat-cm"
}
func (formatter *openGaussClusterFormatter) MycatStatefulsetName() string {
return formatter.OpenGauss.Name + "-mycat-sts"
}
func (formatter *openGaussClusterFormatter) MycatServiceName() string {
return formatter.OpenGauss.Name + "-mycat-svc"
}
func (formatter *openGaussClusterFormatter) MycatTableConfig() string {
ret := ""
if formatter.OpenGauss.Spec.OpenGauss.Tables != nil {
for _, table := range formatter.OpenGauss.Spec.OpenGauss.Tables {
ret = fmt.Sprintf("%s%s\n", ret, table)
}
}
return ret
}
// MycatConfigMap returns mycat configs including master and replicas ip list
func (formatter *openGaussClusterFormatter) MycatHostConfig() string {
// ret := ""
// ret := fmt.Sprintf("1 %s.%s 5432\n", Master(formatter.OpenGauss).ServiceName(), formatter.OpenGauss.Namespace)
// ret = fmt.Sprintf("%s3 %s.%s 5432\n", ret, Replica(formatter.OpenGauss).ServiceName(), formatter.OpenGauss.Namespace)
ret := ""
if formatter.OpenGauss.Status != nil {
for _, ip := range formatter.OpenGauss.Status.MasterIPs {
ret = fmt.Sprintf("%s1 %s 5432\n", ret, ip)
}
for _, ip := range formatter.OpenGauss.Status.ReplicasIPs {
ret = fmt.Sprintf("%s2 %s 5432\n", ret, ip)
}
}
return ret
}
type StatefulsetFormatterInterface interface {
StatefulSetName() string
ServiceName() string
ReplConnInfo() string
ConfigMapName() string
}
func Master(og *v1.OpenGauss) StatefulsetFormatterInterface {
return &MasterFormatter{OpenGauss: og}
}
func Replica(og *v1.OpenGauss) StatefulsetFormatterInterface {
return &ReplicaFormatter{OpenGauss: og}
}
type MasterFormatter struct {
OpenGauss *v1.OpenGauss
}
func (formatter *MasterFormatter) StatefulSetName() string {
return formatter.OpenGauss.Name + "-masters"
}
func (formatter *MasterFormatter) ServiceName() string {
return formatter.OpenGauss.Name + "-master-service"
}
func (formatter *MasterFormatter) ReplConnInfo() string {
replica := Replica(formatter.OpenGauss)
replicaStatefulsetName := replica.StatefulSetName()
// workerSize := int(math.Max(float64(*formatter.OpenGauss.Spec.OpenGauss.Worker.Replicas), 1))
replInfo := ""
for i := 0; i < 1; i++ {
replInfo += fmt.Sprintf("replconninfo%d='localhost=%s-0 remotehost=%s-%d", i+1, formatter.StatefulSetName(), replicaStatefulsetName, i)
replInfo += " localport=5434 localservice=5432 remoteport=5434 remoteservice=5432'\n"
}
return replInfo
}
func (formatter *MasterFormatter) ConfigMapName() string {
return formatter.OpenGauss.Name + "-master-config"
}
type ReplicaFormatter struct {
OpenGauss *v1.OpenGauss
}
func (formatter *ReplicaFormatter) StatefulSetName() string {
return formatter.OpenGauss.Name + "-replicas"
}
func (formatter *ReplicaFormatter) ServiceName() string {
return formatter.OpenGauss.Name + "-replicas-service"
}
func (formatter *ReplicaFormatter) ReplConnInfo() string {
master := Master(formatter.OpenGauss)
masterStatefulsetName := master.StatefulSetName()
replInfo := ""
replInfo += fmt.Sprintf("replconninfo1='localhost=127.0.0.1 remotehost=%s-0", masterStatefulsetName)
replInfo += " localport=5434 localservice=5432 remoteport=5434 remoteservice=5432'\n"
// workerSize := int(math.Max(float64(*formatter.OpenGauss.Spec.OpenGauss.Worker.Replicas), 1))
// replInfo := ""
// for i := 0; i < workerSize; i++ {
// replInfo += fmt.Sprintf("replconninfo%d='localhost=%s-%d remotehost=%s-0", i+1, formatter.StatefulSetName(), i, masterStatefulsetName)
// replInfo += " localport=5434 localservice=5432 remoteport=5434 remoteservice=5432'\n"
// }
return replInfo
}
func (formatter *ReplicaFormatter) ConfigMapName() string {
return formatter.OpenGauss.Name + "-replicas-config"
}
|
{
return &openGaussClusterFormatter{
OpenGauss: og,
}
}
|
payment_data_factory.py
|
# -*- coding: utf-8 -*-
from yandex_checkout.domain.common.type_factory import TypeFactory
from yandex_checkout.domain.models.payment_data.payment_data_class_map import PaymentDataClassMap
class
|
(TypeFactory):
"""
Factory for payment data objects
"""
def __init__(self):
super(PaymentDataFactory, self).__init__(PaymentDataClassMap())
|
PaymentDataFactory
|
gaussian_estimators.py
|
from __future__ import annotations
import numpy as np
from numpy.linalg import inv, det, slogdet
class UnivariateGaussian:
"""
Class for univariate Gaussian Distribution Estimator
"""
def __init__(self, biased_var: bool = False) -> UnivariateGaussian:
"""
Estimator for univariate Gaussian mean and variance parameters
Parameters
----------
biased_var : bool, default=False
Should fitted estimator of variance be a biased or unbiased estimator
Attributes
----------
fitted_ : bool
Initialized as false indicating current estimator instance has not been fitted.
To be set as True in `UnivariateGaussian.fit` function.
mu_: float
Estimated expectation initialized as None. To be set in `UnivariateGaussian.fit`
function.
var_: float
Estimated variance initialized as None. To be set in `UnivariateGaussian.fit`
function.
"""
self.biased_ = biased_var
self.fitted_, self.mu_, self.var_ = False, None, None
def fit(self, X: np.ndarray) -> UnivariateGaussian:
"""
Estimate Gaussian expectation and variance from given samples
Parameters
----------
X: ndarray of shape (n_samples, )
Training data
Returns
-------
self : returns an instance of self.
Notes
-----
Sets `self.mu_`, `self.var_` attributes according to calculated estimation (where
estimator is either biased or unbiased). Then sets `self.fitted_` attribute to `True`
"""
self.mu_ = sum(X) / X.size
var_sum = 0
for s in X:
var_sum += (s - self.mu_) ** 2
self.var_ = (var_sum / (X.size if self.biased_ else X.size - 1)) ** 0.5
self.fitted_ = True
return self
def pdf(self, X: np.ndarray) -> np.ndarray:
"""
Calculate PDF of observations under Gaussian model with fitted estimators
Parameters
----------
X: ndarray of shape (n_samples, )
Samples to calculate PDF for
Returns
-------
pdfs: ndarray of shape (n_samples, )
Calculated values of given samples for PDF function of N(mu_, var_)
Raises
------
ValueError: In case function was called prior fitting the model
"""
if not self.fitted_:
raise ValueError(
"Estimator must first be fitted before calling `pdf` function")
pdfs = np.ndarray(X.size)
for i in range(X.size):
pdfs[i] = np.exp(
-((X[i] - self.mu_) ** 2) / 2 * self.var_) / np.sqrt(
2 * np.pi * self.var_)
return pdfs
@staticmethod
def log_likelihood(mu: float, sigma: float, X: np.ndarray) -> float:
"""
Calculate the log-likelihood of the data under a specified Gaussian model
Parameters
----------
mu : float
Expectation of Gaussian
sigma : float
Variance of Gaussian
X : ndarray of shape (n_samples, )
Samples to calculate log-likelihood with
Returns
-------
log_likelihood: float
log-likelihood calculated
"""
temp_sum = 0
for i in range(X.size):
temp_sum += (X[i] - mu) ** 2
return -(X.size / 2) * (
np.log(2 * np.pi) + np.log(sigma)) - temp_sum / (
2 * sigma)
class MultivariateGaussian:
"""
Class for multivariate Gaussian Distribution Estimator
"""
def __init__(self):
"""
Initialize an instance of multivariate Gaussian estimator
Attributes
----------
fitted_ : bool
Initialized as false indicating current estimator instance has not been fitted.
To be set as True in `MultivariateGaussian.fit` function.
mu_: ndarray of shape (n_features,)
Estimated expectation initialized as None. To be set in `MultivariateGaussian.fit`
function.
cov_: ndarray of shape (n_features, n_features)
Estimated covariance initialized as None. To be set in `MultivariateGaussian.fit`
function.
"""
self.mu_, self.cov_ = None, None
self.fitted_ = False
def fit(self, X: np.ndarray) -> MultivariateGaussian:
"""
Estimate Gaussian expectation and covariance from given samples
Parameters
----------
X: ndarray of shape (n_samples, n_features)
Training data
Returns
-------
self : returns an instance of self
Notes
-----
Sets `self.mu_`, `self.cov_` attributes according to calculated estimation.
Then sets `self.fitted_` attribute to `True`
"""
rows, cols = X.shape
self.mu_ = np.sum(X, axis=0) / rows
X_gal = np.array([X[i] - self.mu_ for i in range(rows)])
self.cov_ = np.dot(X_gal.transpose(), X_gal) / (rows - 1)
self.fitted_ = True
return self
def pdf(self, X: np.ndarray):
"""
Calculate PDF of observations under Gaussian model with fitted estimators
Parameters
----------
X: ndarray of shape (n_samples, n_features)
Samples to calculate PDF for
Returns
-------
pdfs: ndarray of shape (n_samples, )
Calculated values of given samples for PDF function of N(mu_, cov_)
Raises
------
ValueError: In case function was called prior fitting the model
"""
if not self.fitted_:
raise ValueError(
"Estimator must first be fitted before calling `pdf` function")
mahalanobis = np.einsum("bi,ij,bj->b", X-self.mu_, inv(self.cov_), X-self.mu_)
return np.exp(-.5 * mahalanobis) / \
np.sqrt((2*np.pi) ** len(X) * det(self.cov_))
@staticmethod
def log_likelihood(mu: np.ndarray, cov: np.ndarray,
X: np.ndarray) -> float:
"""
|
Calculate the log-likelihood of the data under a specified Gaussian model
Parameters
----------
mu : ndarray of shape (n_features,)
Expectation of Gaussian
cov : ndarray of shape (n_features, n_features)
covariance matrix of Gaussian
X : ndarray of shape (n_samples, n_features)
Samples to calculate log-likelihood with
Returns
-------
log_likelihood: float
log-likelihood calculated over all input data and under given parameters of Gaussian
"""
rows, cols = X.shape
X_gal = np.array([X[i] - mu for i in range(rows)])
temp_sun = 0
for i in range(rows):
temp_sun += np.linalg.multi_dot([X_gal[i].transpose(),
np.linalg.inv(cov),
X_gal[i]])
return -(X.size / 2) * (cols * np.log(2 * np.pi) + np.log(
np.linalg.det(cov))) - 0.5 * temp_sun
| |
TripitLogin.js
|
var tripitLogin = {
CONNECT_TRIPIT: 'a[rel^=connect_Tripit]',
TRIPIT_BUTTON_CONNECT: 'button[rel^=connect_Tripit]',
TRIPIT_ELEMENT_TARGET: 'connect_Tripit',
WINDOW_OPEN: 'mywindow',
WINDOW_WIDTH: 500,
WINDOW_HEIGHT: 400,
TRIPIT_DISCONNECT: 'a[rel^=disconnect_Tripit]',
TRIPIT_BUTTON_DISCONNECT: 'button[rel^=disconnect_Tripit]',
ELEMENT_TARGET_DISCONNECT: 'disconnect_Tripit',
READ_ATTRIBUTE: 'name',
ELEMENT_TARGET: 'rel',
TRIPIT_WIDTH: 'width=',
TRIPIT_HEIGHT: ',height=',
EMPTY_STRING: '',
TRIPIT_DATA: '_social/disconnect',
TRIPIT_URL_IDENTITY_VALUE: "#tripitConnectorUrl",
CLICK_EVENT_VALUE: 'click',
};
jQuery('body').on('click', 'button', function(){
var elementTarget = jQuery(this).parent();
var connectorPath = jQuery(tripitLogin.TRIPIT_URL_IDENTITY_VALUE).val();
|
var loginWindow = window.open(connectorPath, tripitLogin.WINDOW_OPEN, tripitLogin.TRIPIT_WIDTH + tripitLogin.WINDOW_WIDTH + tripitLogin.TRIPIT_HEIGHT + tripitLogin.WINDOW_HEIGHT + tripitLogin.EMPTY_STRING);
var timer = setInterval(function() {
if(loginWindow.closed) {
clearInterval(timer);
$("#socialConnectorsGrid").data("kendoGrid").dataSource.read();
refreshMessage();
}
}, 1000);
}
if (elementTarget && elementTarget.attr(tripitLogin.ELEMENT_TARGET) == tripitLogin.ELEMENT_TARGET_DISCONNECT) {
var disconnectPath = connectorPath + tripitLogin.TRIPIT_DATA;
$.ajax({
type: 'GET',
url: disconnectPath,
success: function (eventData) {
$("#socialConnectorsGrid").data("kendoGrid").dataSource.read();
var data = jQuery.parseJSON(eventData);
var type = data.type;
var message = data.message;
localStorage.setItem('type',type);
localStorage.setItem('msg',message);
refreshMessage();
}
});
}
});
|
if (elementTarget && elementTarget.attr(tripitLogin.ELEMENT_TARGET) == tripitLogin.TRIPIT_ELEMENT_TARGET) {
|
DescribeDBInstanceNetInfoRequest.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class DescribeDBInstanceNetInfoRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'DescribeDBInstanceNetInfo','rds')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Flag(self):
return self.get_query_params().get('Flag')
def set_Flag(self,Flag):
self.add_query_param('Flag',Flag)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_DBInstanceId(self):
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self,DBInstanceId):
self.add_query_param('DBInstanceId',DBInstanceId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
|
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_DBInstanceNetRWSplitType(self):
return self.get_query_params().get('DBInstanceNetRWSplitType')
def set_DBInstanceNetRWSplitType(self,DBInstanceNetRWSplitType):
self.add_query_param('DBInstanceNetRWSplitType',DBInstanceNetRWSplitType)
|
self.add_query_param('OwnerAccount',OwnerAccount)
|
create-user.js
|
const mongoose = require('mongoose');
const bcrypt = require('bcrypt');
const { Output } = require('../util/Output.js');
const Emailer = require('../util/Emailer.js');
/**
* Create a user.
*/
function post(req, res) {
var o = new Output(res);
// Input
var { email, password, name, type } = req.body;
if ([email, password, name, type].some(a => a === undefined))
return o.err('MISSING_POST').out();
// Validate input.
if (!email || email.lenth > 100 || (email.match(/@/g) || []).length != 1)
return o.err('INVALID_INPUT', 'Invalid email.').out();
if (password.length < 6 || password.length > 50)
return o.err('INVALID_INPUT', 'Invalid password.').out();
if (name.length < 2 || name.length > 50)
return o.err('INVALID_INPUT', 'Invalid name.').out();
if (![1, 2].includes(type)) // Can only create students/intructors.
return o.err('INVALID_INPUT', 'Invalid type.').out();
email = email.toLowerCase();
// Email in use?
var User = mongoose.model('User');
User.findOne({ 'email': email }, function (err, result) {
if (err) return o.err('DATABASE').out();
else if (result) return o.err('EMAIL_TAKEN').out();
else {
// Hash password.
const SALT_ROUNDS = 10;
const PASSWORD = 'TreelBcryptPassword';
bcrypt.genSalt(SALT_ROUNDS, function(err, salt) {
bcrypt.hash(password, salt, function(err, passwordHash) {
// Create user, login, and output.
newUser = new User({ email, passwordHash, name, type });
newUser.save(function (err, newUser) {
if (err) return o.err('DATABASE').out();
o.set('user', newUser).out();
return createVerification(o, newUser);
});
});
});
}
});
}
// Create a Verification and send email.
function
|
(o, user) {
// Generate unique code.
let time = new Date().valueOf().toString(36).substring(2),
rand = Math.random().toString(36).substring(5),
code = time + rand;
let Verification = mongoose.model('Verification'),
v = new Verification({ user, code });
v.isVerified = false;
v.save((err, savedV) => {
Emailer.sendVerification(user.email, savedV.code);
});
}
module.exports = { post };
|
createVerification
|
skopt_forest_main.py
|
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
|
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=Warning)
import numpy.core.umath_tests # pylint: disable=unused-import
import skopt
from guild import batch_util
from . import skopt_util
log = logging.getLogger("guild")
def main():
batch_util.init_logging()
batch_run = batch_util.batch_run()
skopt_util.handle_seq_trials(batch_run, _suggest_x)
def _suggest_x(dims, x0, y0, random_start, random_state, opts):
res = skopt.forest_minimize(
lambda *args: 0,
dims,
n_calls=1,
n_random_starts=1 if random_start else 0,
x0=x0,
y0=y0,
random_state=random_state,
kappa=opts["kappa"],
xi=opts["xi"],
)
return res.x_iters[-1], res.random_state
def gen_trials(
flags, prev_results_cb, opt_random_starts=3, opt_kappa=1.96, opt_xi=0.01, **kw
):
"""ipy interface for trials."""
return skopt_util.ipy_gen_trials(
flags,
prev_results_cb,
_suggest_x,
random_starts=opt_random_starts,
suggest_x_opts={
"kappa": opt_kappa,
"xi": opt_xi,
},
**kw
)
if __name__ == "__main__":
main()
| |
pool.go
|
package pool
import (
"errors"
"io"
"log"
"sync"
)
// 常见的并发模式实现(二):通过缓冲通道实现共享资源池
// https://laravelacademy.org/post/22060
// 定义资源池结构体
type Pool struct {
// 通过锁机制确保资源池的并发安全
m sync.Mutex
// 通过缓冲通道管理资源池,资源池大小即缓冲值
resources chan io.Closer
// 在资源池中注册新的资源
factory func() (io.Closer, error)
// 标识资源池是否关闭
closed bool
}
var ErrPoolClosed = errors.New("资源池已关闭")
// 初始化资源池
func New(fn func() (io.Closer, error), size uint) (*Pool, error) {
if size <= 0 {
return nil, errors.New("资源池容量需要大于0")
}
return &Pool{
factory: fn,
resources: make(chan io.Closer, size),
}, nil
}
// 从资源池申请资源
func (p *Pool) Acquire() (io.Closer, error) {
s
|
// 资源池为空则调用 p.factory() 方法注册新资源
log.Println("Acquire:", "新增资源")
return p.factory()
}
}
// 资源使用完成后释放
func (p *Pool) Release(r io.Closer) {
p.m.Lock()
defer p.m.Unlock()
// 资源池已关闭则支持释放资源
if p.closed {
r.Close()
return
}
// 否则将资源归还到资源池
select {
case p.resources <- r:
log.Println("Release:", "In Queue")
default:
log.Println("Release:", "Closing")
r.Close()
}
}
// 关闭资源池
func (p *Pool) Close() {
p.m.Lock()
defer p.m.Unlock()
if p.closed {
return
}
// 释放资源池和资源池中所有资源
p.closed = true
close(p.resources)
for r := range p.resources {
r.Close()
}
}
|
elect {
case r, ok := <-p.resources:
// 资源池不为空则从中获取资源
log.Println("Acquire:", "共享资源")
if !ok {
return nil, ErrPoolClosed
}
return r, nil
default:
|
scan.go
|
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"fmt"
"sync"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/sql/distsqlpb"
"github.com/cockroachdb/cockroach/pkg/sql/distsqlrun"
"github.com/cockroachdb/cockroach/pkg/sql/privilege"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/pkg/errors"
)
var scanNodePool = sync.Pool{
New: func() interface{} {
return &scanNode{}
},
}
// A scanNode handles scanning over the key/value pairs for a table and
// reconstructing them into rows.
type scanNode struct {
desc *sqlbase.ImmutableTableDescriptor
index *sqlbase.IndexDescriptor
// Set if an index was explicitly specified.
specifiedIndex *sqlbase.IndexDescriptor
specifiedIndexReverse bool
// Set if the NO_INDEX_JOIN hint was given.
noIndexJoin bool
colCfg scanColumnsConfig
// The table columns, possibly including ones currently in schema changes.
cols []sqlbase.ColumnDescriptor
// There is a 1-1 correspondence between cols and resultColumns.
resultColumns sqlbase.ResultColumns
// For each column in resultColumns, indicates if the value is
// needed (used as an optimization when the upper layer doesn't need
// all values).
// TODO(radu/knz): currently the optimization always loads the
// entire row from KV and only skips unnecessary decodes to
// Datum. Investigate whether performance is to be gained (e.g. for
// tables with wide rows) by reading only certain columns from KV
// using point lookups instead of a single range lookup for the
// entire row.
valNeededForCol util.FastIntSet
// Map used to get the index for columns in cols.
colIdxMap map[sqlbase.ColumnID]int
// The number of backfill columns among cols. These backfill
// columns are always the last columns within cols.
numBackfillColumns int
spans []roachpb.Span
reverse bool
props physicalProps
// filter that can be evaluated using only this table/index; it contains
// tree.IndexedVar leaves generated using filterVars.
filter tree.TypedExpr
filterVars tree.IndexedVarHelper
// origFilter is the original filtering expression, which might have gotten
// simplified during index selection. For example "k > 0" is converted to a
// span and the filter is nil. But we still want to deduce not-null columns
// from the original filter.
origFilter tree.TypedExpr
// if non-zero, hardLimit indicates that the scanNode only needs to provide
// this many rows (after applying any filter). It is a "hard" guarantee that
// Next will only be called this many times.
hardLimit int64
// if non-zero, softLimit is an estimation that only this many rows (after
// applying any filter) might be needed. It is a (potentially optimistic)
// "hint". If hardLimit is set (non-zero), softLimit must be unset (zero).
softLimit int64
disableBatchLimits bool
// Should be set to true if sqlbase.ParallelScans is true.
parallelScansEnabled bool
isSecondaryIndex bool
// Indicates if this scanNode will do a physical data check. This is
// only true when running SCRUB commands.
isCheck bool
// This struct must be allocated on the heap and its location stay
// stable after construction because it implements
// IndexedVarContainer and the IndexedVar objects in sub-expressions
// will link to it by reference after checkRenderStar / analyzeExpr.
// Enforce this using NoCopy.
_ util.NoCopy
// Set when the scanNode is created via the exec factory.
createdByOpt bool
// maxResults, if greater than 0, is the maximum number of results that a
// scan is guaranteed to return.
maxResults uint64
// Indicates if this scan is the source for a delete node.
isDeleteSource bool
// estimatedRowCount is the estimated number of rows that this scanNode will
// output.
estimatedRowCount uint64
}
// scanVisibility represents which table columns should be included in a scan.
type scanVisibility int8
const (
publicColumns scanVisibility = 0
// Use this to request mutation columns that are currently being
// backfilled. These columns are needed to correctly update/delete
// a row by correctly constructing ColumnFamilies and Indexes.
publicAndNonPublicColumns scanVisibility = 1
)
func (s scanVisibility) toDistSQLScanVisibility() distsqlpb.ScanVisibility {
switch s {
case publicColumns:
return distsqlpb.ScanVisibility_PUBLIC
case publicAndNonPublicColumns:
return distsqlpb.ScanVisibility_PUBLIC_AND_NOT_PUBLIC
default:
panic(fmt.Sprintf("Unknown visibility %+v", s))
}
}
// scanColumnsConfig controls the "schema" of a scan node. The zero value is the
// default: all "public" columns.
// Note that not all columns in the schema are read and decoded; that is further
// controlled by scanNode.valNeededForCol.
type scanColumnsConfig struct {
// If set, only these columns are part of the scan node schema, in this order
// (with the caveat that the addUnwantedAsHidden flag below can add more
// columns). Non public columns can only be added if allowed by the visibility
// flag below.
// If not set, then all visible columns will be part of the scan node schema,
// as specified by the visibility flag below. The addUnwantedAsHidden flag
// is ignored in this case.
wantedColumns []tree.ColumnID
// When set, the columns that are not in the wantedColumns list are added to
// the list of columns as hidden columns. Only useful in conjunction with
// wantedColumns.
addUnwantedAsHidden bool
// If visibility is set to publicAndNonPublicColumns, then mutation columns
// can be added to the list of columns.
visibility scanVisibility
}
var publicColumnsCfg = scanColumnsConfig{}
func (p *planner) Scan() *scanNode {
n := scanNodePool.Get().(*scanNode)
return n
}
// scanNode implements tree.IndexedVarContainer.
var _ tree.IndexedVarContainer = &scanNode{}
func (n *scanNode) IndexedVarEval(idx int, ctx *tree.EvalContext) (tree.Datum, error) {
panic("scanNode can't be run in local mode")
}
func (n *scanNode) IndexedVarResolvedType(idx int) *types.T {
return n.resultColumns[idx].Typ
}
func (n *scanNode) IndexedVarNodeFormatter(idx int) tree.NodeFormatter {
return (*tree.Name)(&n.resultColumns[idx].Name)
}
func (n *scanNode) startExec(params runParams) error {
panic("scanNode can't be run in local mode")
}
func (n *scanNode) Close(context.Context) {
*n = scanNode{}
scanNodePool.Put(n)
}
func (n *scanNode) Next(params runParams) (bool, error) {
panic("scanNode can't be run in local mode")
}
func (n *scanNode) Values() tree.Datums {
panic("scanNode can't be run in local mode")
}
// disableBatchLimit disables the kvfetcher batch limits. Used for index-join,
// where we scan batches of unordered spans.
func (n *scanNode) disableBatchLimit() {
n.disableBatchLimits = true
n.hardLimit = 0
n.softLimit = 0
}
// canParallelize returns true if this scanNode can be parallelized at the
// distSender level safely.
func (n *scanNode) canParallelize() bool {
// We choose only to parallelize if we are certain that no more than
// ParallelScanResultThreshold results will be returned, to prevent potential
// memory blowup.
// We can't parallelize if we have a non-zero limit hint, since DistSender
// is limited to running limited batches serially.
return n.maxResults != 0 &&
n.maxResults < distsqlrun.ParallelScanResultThreshold &&
n.limitHint() == 0 &&
n.parallelScansEnabled
}
func (n *scanNode) limitHint() int64 {
var limitHint int64
if n.hardLimit != 0 {
limitHint = n.hardLimit
if !isFilterTrue(n.filter) {
// The limit is hard, but it applies after the filter; read a multiple of
// the limit to avoid needing a second batch. The multiple should be an
// estimate for the selectivity of the filter, but we have no way of
// calculating that right now.
limitHint *= 2
}
} else {
// Like above, read a multiple of the limit when the limit is "soft".
limitHint = n.softLimit * 2
}
return limitHint
}
// Initializes a scanNode with a table descriptor.
func (n *scanNode) initTable(
ctx context.Context,
p *planner,
desc *sqlbase.ImmutableTableDescriptor,
indexFlags *tree.IndexFlags,
colCfg scanColumnsConfig,
) error {
n.desc = desc
if !p.skipSelectPrivilegeChecks {
if err := p.CheckPrivilege(ctx, n.desc, privilege.SELECT); err != nil {
return err
}
}
if indexFlags != nil {
if err := n.lookupSpecifiedIndex(indexFlags); err != nil {
return err
}
}
n.noIndexJoin = (indexFlags != nil && indexFlags.NoIndexJoin)
return n.initDescDefaults(p.curPlan.deps, colCfg)
}
func (n *scanNode) lookupSpecifiedIndex(indexFlags *tree.IndexFlags) error {
if indexFlags.Index != "" {
// Search index by name.
indexName := string(indexFlags.Index)
if indexName == n.desc.PrimaryIndex.Name {
n.specifiedIndex = &n.desc.PrimaryIndex
} else {
for i := range n.desc.Indexes {
if indexName == n.desc.Indexes[i].Name {
n.specifiedIndex = &n.desc.Indexes[i]
break
}
}
}
if n.specifiedIndex == nil {
return errors.Errorf("index %q not found", tree.ErrString(&indexFlags.Index))
}
} else if indexFlags.IndexID != 0 {
// Search index by ID.
if n.desc.PrimaryIndex.ID == sqlbase.IndexID(indexFlags.IndexID) {
n.specifiedIndex = &n.desc.PrimaryIndex
} else {
for i := range n.desc.Indexes {
if n.desc.Indexes[i].ID == sqlbase.IndexID(indexFlags.IndexID) {
n.specifiedIndex = &n.desc.Indexes[i]
break
}
}
}
if n.specifiedIndex == nil {
return errors.Errorf("index [%d] not found", indexFlags.IndexID)
}
}
if indexFlags.Direction == tree.Descending {
n.specifiedIndexReverse = true
}
return nil
}
// initCols initializes n.cols and n.numBackfillColumns according to n.desc and n.colCfg.
func (n *scanNode) initCols() error {
n.numBackfillColumns = 0
if n.colCfg.wantedColumns == nil {
// Add all active and maybe mutation columns.
if n.colCfg.visibility == publicColumns {
n.cols = n.desc.Columns
} else {
n.cols = n.desc.ReadableColumns
n.numBackfillColumns = len(n.desc.ReadableColumns) - len(n.desc.Columns)
}
return nil
}
n.cols = make([]sqlbase.ColumnDescriptor, 0, len(n.desc.ReadableColumns))
for _, wc := range n.colCfg.wantedColumns {
var c *sqlbase.ColumnDescriptor
var err error
isBackfillCol := false
if id := sqlbase.ColumnID(wc); n.colCfg.visibility == publicColumns {
c, err = n.desc.FindActiveColumnByID(id)
} else {
c, isBackfillCol, err = n.desc.FindReadableColumnByID(id)
}
if err != nil {
return err
}
n.cols = append(n.cols, *c)
if isBackfillCol {
n.numBackfillColumns++
}
}
if n.colCfg.addUnwantedAsHidden {
for i := range n.desc.Columns {
c := &n.desc.Columns[i]
found := false
for _, wc := range n.colCfg.wantedColumns {
if sqlbase.ColumnID(wc) == c.ID {
found = true
break
}
}
if !found {
col := *c
col.Hidden = true
n.cols = append(n.cols, col)
}
}
}
return nil
}
// Initializes the column structures.
func (n *scanNode) initDescDefaults(planDeps planDependencies, colCfg scanColumnsConfig) error {
n.colCfg = colCfg
n.index = &n.desc.PrimaryIndex
if err := n.initCols(); err != nil {
return err
}
// Register the dependency to the planner, if requested.
if planDeps != nil {
indexID := sqlbase.IndexID(0)
if n.specifiedIndex != nil {
indexID = n.specifiedIndex.ID
}
usedColumns := make([]sqlbase.ColumnID, len(n.cols))
for i := range n.cols {
usedColumns[i] = n.cols[i].ID
}
deps := planDeps[n.desc.ID]
deps.desc = n.desc
deps.deps = append(deps.deps, sqlbase.TableDescriptor_Reference{
IndexID: indexID,
ColumnIDs: usedColumns,
})
planDeps[n.desc.ID] = deps
}
// Set up the rest of the scanNode.
n.resultColumns = sqlbase.ResultColumnsFromColDescs(n.cols)
n.colIdxMap = make(map[sqlbase.ColumnID]int, len(n.cols))
for i, c := range n.cols {
n.colIdxMap[c.ID] = i
}
n.valNeededForCol = util.FastIntSet{}
if len(n.cols) > 0 {
n.valNeededForCol.AddRange(0, len(n.cols)-1)
}
n.filterVars = tree.MakeIndexedVarHelper(n, len(n.cols))
return nil
}
// initOrdering initializes the ordering info using the selected index. This
// must be called after index selection is performed.
func (n *scanNode) initOrdering(exactPrefix int, evalCtx *tree.EvalContext) {
if n.index == nil {
return
}
n.props = n.computePhysicalProps(n.index, exactPrefix, n.reverse, evalCtx)
}
// computePhysicalProps calculates ordering information for table columns
// assuming that:
// - we scan a given index (potentially in reverse order), and
// - the first `exactPrefix` columns of the index each have a constant value
// (see physicalProps).
func (n *scanNode) computePhysicalProps(
index *sqlbase.IndexDescriptor, exactPrefix int, reverse bool, evalCtx *tree.EvalContext,
) physicalProps {
var pp physicalProps
columnIDs, dirs := index.FullColumnIDs()
var keySet util.FastIntSet
for i, colID := range columnIDs {
idx, ok := n.colIdxMap[colID]
if !ok
|
if i < exactPrefix {
pp.addConstantColumn(idx)
} else {
dir, err := dirs[i].ToEncodingDirection()
if err != nil {
panic(err)
}
if reverse {
dir = dir.Reverse()
}
pp.addOrderColumn(idx, dir)
}
if !n.cols[idx].Nullable {
pp.addNotNullColumn(idx)
}
keySet.Add(idx)
}
// We included any implicit columns, so the columns form a (possibly weak)
// key.
pp.addWeakKey(keySet)
pp.applyExpr(evalCtx, n.origFilter)
return pp
}
|
{
panic(fmt.Sprintf("index refers to unknown column id %d", colID))
}
|
buckets_request_builder.go
|
package buckets
import (
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go"
ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be "github.com/microsoftgraph/msgraph-beta-sdk-go/models"
i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459 "github.com/microsoftgraph/msgraph-beta-sdk-go/models/odataerrors"
i8d5205674f9abadf10cf6dffe9b3020acaf534e108f0583a0e0ec9e75af82808 "github.com/microsoftgraph/msgraph-beta-sdk-go/planner/buckets/count"
)
// BucketsRequestBuilder provides operations to manage the buckets property of the microsoft.graph.planner entity.
type BucketsRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string
// The request adapter to use to execute the requests.
requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter
// Url template to use to build the URL for the current request builder
urlTemplate string
}
// BucketsRequestBuilderGetQueryParameters read-only. Nullable. Returns a collection of the specified buckets
type BucketsRequestBuilderGetQueryParameters struct {
// Include count of items
Count *bool `uriparametername:"%24count"`
// Expand related entities
Expand []string `uriparametername:"%24expand"`
// Filter items by property values
Filter *string `uriparametername:"%24filter"`
// Order items by property values
Orderby []string `uriparametername:"%24orderby"`
// Search items by search phrases
Search *string `uriparametername:"%24search"`
// Select properties to be returned
Select []string `uriparametername:"%24select"`
// Skip the first n items
Skip *int32 `uriparametername:"%24skip"`
// Show only the first n items
Top *int32 `uriparametername:"%24top"`
}
// BucketsRequestBuilderGetRequestConfiguration configuration for the request such as headers, query parameters, and middleware options.
type BucketsRequestBuilderGetRequestConfiguration struct {
// Request headers
Headers map[string]string
// Request options
Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption
// Request query parameters
QueryParameters *BucketsRequestBuilderGetQueryParameters
}
// BucketsRequestBuilderPostRequestConfiguration configuration for the request such as headers, query parameters, and middleware options.
type BucketsRequestBuilderPostRequestConfiguration struct {
// Request headers
Headers map[string]string
// Request options
Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption
}
// NewBucketsRequestBuilderInternal instantiates a new BucketsRequestBuilder and sets the default values.
func NewBucketsRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*BucketsRequestBuilder) {
m := &BucketsRequestBuilder{
}
m.urlTemplate = "{+baseurl}/planner/buckets{?%24top,%24skip,%24search,%24filter,%24count,%24orderby,%24select,%24expand}";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = urlTplParams;
m.requestAdapter = requestAdapter;
return m
}
// NewBucketsRequestBuilder instantiates a new BucketsRequestBuilder and sets the default values.
func NewBucketsRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*BucketsRequestBuilder)
|
// Count the count property
func (m *BucketsRequestBuilder) Count()(*i8d5205674f9abadf10cf6dffe9b3020acaf534e108f0583a0e0ec9e75af82808.CountRequestBuilder) {
return i8d5205674f9abadf10cf6dffe9b3020acaf534e108f0583a0e0ec9e75af82808.NewCountRequestBuilderInternal(m.pathParameters, m.requestAdapter);
}
// CreateGetRequestInformation read-only. Nullable. Returns a collection of the specified buckets
func (m *BucketsRequestBuilder) CreateGetRequestInformation()(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
return m.CreateGetRequestInformationWithRequestConfiguration(nil);
}
// CreateGetRequestInformationWithRequestConfiguration read-only. Nullable. Returns a collection of the specified buckets
func (m *BucketsRequestBuilder) CreateGetRequestInformationWithRequestConfiguration(requestConfiguration *BucketsRequestBuilderGetRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.GET
if requestConfiguration != nil {
if requestConfiguration.QueryParameters != nil {
requestInfo.AddQueryParameters(*(requestConfiguration.QueryParameters))
}
requestInfo.AddRequestHeaders(requestConfiguration.Headers)
requestInfo.AddRequestOptions(requestConfiguration.Options)
}
return requestInfo, nil
}
// CreatePostRequestInformation create new navigation property to buckets for planner
func (m *BucketsRequestBuilder) CreatePostRequestInformation(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.PlannerBucketable)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
return m.CreatePostRequestInformationWithRequestConfiguration(body, nil);
}
// CreatePostRequestInformationWithRequestConfiguration create new navigation property to buckets for planner
func (m *BucketsRequestBuilder) CreatePostRequestInformationWithRequestConfiguration(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.PlannerBucketable, requestConfiguration *BucketsRequestBuilderPostRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.POST
requestInfo.SetContentFromParsable(m.requestAdapter, "application/json", body)
if requestConfiguration != nil {
requestInfo.AddRequestHeaders(requestConfiguration.Headers)
requestInfo.AddRequestOptions(requestConfiguration.Options)
}
return requestInfo, nil
}
// Get read-only. Nullable. Returns a collection of the specified buckets
func (m *BucketsRequestBuilder) Get()(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.PlannerBucketCollectionResponseable, error) {
return m.GetWithRequestConfigurationAndResponseHandler(nil, nil);
}
// GetWithRequestConfigurationAndResponseHandler read-only. Nullable. Returns a collection of the specified buckets
func (m *BucketsRequestBuilder) GetWithRequestConfigurationAndResponseHandler(requestConfiguration *BucketsRequestBuilderGetRequestConfiguration, responseHandler i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.PlannerBucketCollectionResponseable, error) {
requestInfo, err := m.CreateGetRequestInformationWithRequestConfiguration(requestConfiguration);
if err != nil {
return nil, err
}
errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {
"4XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
"5XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
}
res, err := m.requestAdapter.SendAsync(requestInfo, ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreatePlannerBucketCollectionResponseFromDiscriminatorValue, responseHandler, errorMapping)
if err != nil {
return nil, err
}
return res.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.PlannerBucketCollectionResponseable), nil
}
// Post create new navigation property to buckets for planner
func (m *BucketsRequestBuilder) Post(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.PlannerBucketable)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.PlannerBucketable, error) {
return m.PostWithRequestConfigurationAndResponseHandler(body, nil, nil);
}
// PostWithRequestConfigurationAndResponseHandler create new navigation property to buckets for planner
func (m *BucketsRequestBuilder) PostWithRequestConfigurationAndResponseHandler(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.PlannerBucketable, requestConfiguration *BucketsRequestBuilderPostRequestConfiguration, responseHandler i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.PlannerBucketable, error) {
requestInfo, err := m.CreatePostRequestInformationWithRequestConfiguration(body, requestConfiguration);
if err != nil {
return nil, err
}
errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {
"4XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
"5XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
}
res, err := m.requestAdapter.SendAsync(requestInfo, ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreatePlannerBucketFromDiscriminatorValue, responseHandler, errorMapping)
if err != nil {
return nil, err
}
return res.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.PlannerBucketable), nil
}
|
{
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewBucketsRequestBuilderInternal(urlParams, requestAdapter)
}
|
system_tests_console.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import errno
import re
import time
import unittest
from subprocess import PIPE
import subprocess
from system_test import main_module, SkipIfNeeded, TestCase
from system_test import Qdrouterd, TIMEOUT, AsyncTestSender, AsyncTestReceiver
try:
import queue as Queue # 3.x
except ImportError:
import Queue as Queue # 2.7
from threading import Thread
from threading import Event
import uuid
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container
class ConsolePreReq(object):
@staticmethod
def is_cmd(name):
''' determine if a command is present and executes on the system '''
try:
devnull = open(os.devnull, "w")
subprocess.Popen([name], stdout=devnull, stderr=devnull).communicate()
except OSError as e:
if errno == errno.ENOENT:
return False
return True
@staticmethod
def should_skip():
try:
found_npm = ConsolePreReq.is_cmd('npm')
return not found_npm
except OSError:
return True
class ConsoleTest(TestCase):
"""Run npm console tests"""
@classmethod
def setUpClass(cls):
super(ConsoleTest, cls).setUpClass()
def router(name, mode, extra):
config = [
('router', {'mode': mode, 'id': name}),
('listener', {'role': 'normal', 'port': cls.tester.get_port()})
]
if extra:
config.extend(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
return cls.routers[-1]
cls.routers = []
interrouter_port = cls.tester.get_port()
cls.http_port = cls.tester.get_port()
cls.sender_port = cls.tester.get_port()
|
cls.receiver_port = cls.tester.get_port()
router('A', 'interior',
[('listener', {'role': 'inter-router', 'port': interrouter_port}),
('listener', {'role': 'normal', 'port': cls.sender_port}),
('listener', {'role': 'normal', 'port': cls.http_port, 'http': True})])
cls.INT_A = cls.routers[0]
cls.INT_A.listener = cls.INT_A.addresses[0]
router('B', 'interior',
[('connector', {'name': 'connectorToA', 'role': 'inter-router',
'port': interrouter_port}),
('listener', {'role': 'normal', 'port': cls.receiver_port})])
cls.INT_B = cls.routers[1]
cls.INT_B.listener = cls.INT_B.addresses[0]
cls.INT_A.wait_router_connected('B')
cls.INT_B.wait_router_connected('A')
def run_console_test(self):
address = "toB"
# create a slow receiver so that we get delayedDeliveries
receiver = AsyncSlowReceiver(self.INT_B.listener, address)
sender = AsyncStopableSender(self.INT_A.listener, address)
pret = 0
out = ''
prg = ['npm', 'test', '--', '--watchAll=false']
p = self.popen(prg,
cwd=os.path.join(os.environ.get('BUILD_DIR'), 'console'),
env=dict(os.environ, TEST_PORT="%d" % self.http_port),
stdout=PIPE,
expect=None)
out = p.communicate()[0]
pret = p.returncode
# write the output
with open('run_console_test.out', 'w') as popenfile:
popenfile.write('returncode was %s\n' % p.returncode)
popenfile.write('out was:\n')
popenfile.writelines(str(out))
sender.stop()
receiver.stop()
time.sleep(1)
assert pret == 0, \
"console test exit status %d, output:\n%s" % (pret, out)
return out
# If we are unable to run the npm command. Skip the test
@SkipIfNeeded(ConsolePreReq.should_skip(), 'Test skipped: npm command not found')
def test_console(self):
self.run_console_test()
class AsyncStopableSender(AsyncTestSender):
def __init__(self, hostport, address):
super(AsyncStopableSender, self).__init__(hostport, address, 999999999)
self._stop_thread = False
self.sent = 0
def _main(self):
self._container.start()
while self._container.process():
if self._stop_thread:
if self._conn:
self._conn.close()
self._conn = None
def on_sendable(self, event):
self._sender.send(Message(body="message %d" % self.sent))
self.sent += 1
def stop(self, timeout=TIMEOUT):
self._stop_thread = True
self._container.wakeup()
self._thread.join(timeout=TIMEOUT)
if self._thread.is_alive():
raise Exception("AsyncStopableSender did not exit")
# Based on gsim's slow_recv.py
class TimedFlow(MessagingHandler):
def __init__(self, receiver, credit):
super(TimedFlow, self).__init__()
self.receiver = receiver
self.credit = credit
def on_timer_task(self, event):
self.receiver.flow(self.credit)
class AsyncSlowReceiver(AsyncTestReceiver):
def __init__(self, hostport, target):
super(AsyncSlowReceiver, self).__init__(hostport, target, msg_args={"prefetch": 0})
def on_link_opened(self, event):
super(AsyncSlowReceiver, self).on_link_opened(event)
self.request_batch(event)
def request_batch(self, event):
event.container.schedule(1, TimedFlow(event.receiver, 10))
def check_empty(self, receiver):
return not receiver.credit and not receiver.queued
def on_link_flow(self, event):
if self.check_empty(event.receiver):
self.request_batch(event)
def on_message(self, event):
print (event.message.body)
if self.check_empty(event.receiver):
self.request_batch(event)
if __name__ == '__main__':
unittest.main(main_module())
| |
RecommendationResolvers.ts
|
/**
*
* GENERATED FILE DO NOT EDIT CHANGES WITH BE OVERWRITEN EXTEND INSTEAD
*
|
import { UseGuards } from '@nestjs/common';
import { Resolver, Mutation,Query,Info, Args, Context, Parent } from '@nestjs/graphql';
import {
Recommendation,
RecommendationResponse,
RecommendationBatchResponse,
RecommendationCountResponse,
RecommendationListResponse,
AggregateRecommendationResponse,
// RecommendationCreateInput,
// RecommendationUpdateInput,
// RecommendationUpdateManyMutationInput,
// RecommendationWhereUniqueInput,
// RecommendationWhereInput,
// RecommendationOrderByInput,
// RecommendationScalarFieldEnum,
// BatchPayload,
} from '../../models/graphql';
import {PrismaClient } from '@mechsoft/prisma-client'
import { TenantContext } from '@mechsoft/common';
import {AuthorizerGuard} from '@mechsoft/enforcer';
@Resolver((of)=>Recommendation)
@UseGuards(AuthorizerGuard)
export class RecommendationResolver {
}
|
*/
|
main.go
|
package main
import (
"context"
"fmt"
"io"
"log"
"math/rand"
"net/http"
_ "net/http/pprof"
"os"
"reflect"
"runtime/pprof"
"strings"
"sync"
"time"
"github.com/dustin/go-humanize"
"github.com/kiteco/kiteco/kite-golib/lexicalv0"
"github.com/kiteco/kiteco/kite-golib/tensorflow"
"github.com/alexflint/go-arg"
"github.com/kiteco/kiteco/kite-go/lang/lexical/lexicalcomplete/api"
"github.com/kiteco/kiteco/kite-go/lang/lexical/lexicalmodels"
"github.com/kiteco/kiteco/kite-golib/complete/data"
"github.com/kiteco/kiteco/kite-golib/kitectx"
"github.com/kiteco/kiteco/kite-golib/lexicalv0/inspect"
"github.com/kiteco/kiteco/kite-golib/lexicalv0/status"
"github.com/kiteco/kiteco/kite-golib/licensing"
)
const cursor = "$"
type arguments struct {
Batches int
BatchSize int
Language string
Local bool
MinLines int
CPUProfile string
MemProfile string
}
var (
args = arguments{
Batches: 10,
BatchSize: 1,
Language: "",
Local: false,
MinLines: 0,
}
language lexicalv0.LangGroup
)
type sample struct {
Path string
Code string
}
func getSamples() [][]sample {
codeGenerator, err := inspect.NewCodeGenerator(language, args.Local, cursor)
if err != nil {
log.Fatal(err)
}
defer codeGenerator.Close()
var batches [][]sample
for len(batches) < args.Batches+1 {
code, path, err := codeGenerator.Next()
if err != nil {
log.Fatal(err)
}
code = strings.Split(code, cursor)[0]
if strings.Count(code, "\n") < args.MinLines {
continue
}
if args.BatchSize == 1 {
batches = append(batches, []sample{{
Path: path,
Code: code,
}})
continue
}
if len(code) <= args.BatchSize {
continue
}
var batch []sample
j := rand.Intn(len(code) - args.BatchSize)
for k := 0; k < args.BatchSize; k++ {
batch = append(batch, sample{
Path: path,
Code: code[:j+k],
})
}
batches = append(batches, batch)
}
return batches
}
func main() {
arg.MustParse(&args)
language = lexicalv0.MustLangGroupFromName(args.Language)
batches := getSamples()
go http.ListenAndServe(":6060", nil)
rand.Seed(time.Now().UnixNano())
tensorflow.SetTensorflowThreadpoolSize(1)
var err error
var cpuw io.WriteCloser
var once sync.Once
if args.CPUProfile != "" {
cpuw, err = os.Create(args.CPUProfile)
if err != nil {
log.Fatalln(err)
}
defer cpuw.Close()
}
models, err := lexicalmodels.NewModels(lexicalmodels.DefaultModelOptions)
if err != nil {
log.Fatal(err)
}
completer := api.New(context.Background(), api.Options{Models: models}, licensing.Pro)
opts := api.CompleteOptions{
BlockDebug: true,
}
var start time.Time
for i, batch := range batches {
if i == 1 {
fmt.Println("warmed up!")
start = time.Now()
if args.CPUProfile != "" {
once.Do(func() {
pprof.StartCPUProfile(cpuw)
})
}
}
for _, sample := range batch {
buf := data.NewBuffer(sample.Code)
sel := data.Cursor(len(sample.Code))
request := data.APIRequest{
UMF: data.UMF{
Filename: sample.Path,
},
SelectedBuffer: buf.Select(sel),
}
completer.Complete(kitectx.Background(), opts, request, nil, nil)
}
}
fmt.Println(time.Since(start))
show("EmbedInitialContext", status.Stats.Percentiles(), status.EmbedInitialContextDuration.Values(), showDuration)
show("PartialRunQuery", status.Stats.Percentiles(), status.PartialRunQueryDuration.Values(), showDuration)
show("PartialRunOverlapDist", status.Stats.Percentiles(), status.PartialRunOverlapDist.Values(), showInt)
show("CloseState", status.Stats.Percentiles(), status.ClosePartialRunDuration.Values(), showDuration)
show("NewPredictState", status.Stats.Percentiles(), status.NewPredictStateDuration.Values(), showDuration)
show("Search", status.Stats.Percentiles(), status.SearchDuration.Values(), showDuration)
show("Prettify", status.Stats.Percentiles(), status.PrettifyDuration.Values(), showDuration)
show("FormatCompletion", status.Stats.Percentiles(), status.FormatCompletionDuration.Values(), showDuration)
show("FormatBytes", status.Stats.Percentiles(), status.FormatBytes.Values(), showBytes)
show("Completion API", api.Stats.Percentiles(), api.CompletionDuration.Values(), showDuration)
fmt.Printf("\nPRM reuse rate: %.1f%%\n", status.PartialRunReuseRate.Value())
fmt.Printf("Prediction reuse rate: %.1f%%\n", status.PredictionReuseRate.Value())
if args.CPUProfile != "" {
pprof.StopCPUProfile()
}
if args.MemProfile != "" {
memw, err := os.Create(args.MemProfile)
if err != nil {
log.Fatalln(err)
}
defer memw.Close()
if err := pprof.WriteHeapProfile(memw); err != nil {
log.Fatal("could not write memory profile: ", err)
}
}
}
type showType string
var (
showDuration showType = "duration"
showBytes showType = "bytes"
showInt showType = "int"
)
func show(label string, percentiles []float64, values []int64, t showType)
|
{
zeros := make([]int64, len(values))
if reflect.DeepEqual(values, zeros) {
fmt.Printf("=== %s: no values, skipping\n\n", label)
return
}
fmt.Printf("=== %s\n", label)
for i, percentile := range percentiles {
switch t {
case showBytes:
fmt.Printf("%dth percentile: %v\n", int(100*percentile), humanize.Bytes(uint64(values[i])))
case showDuration:
fmt.Printf("%dth percentile: %v\n", int(100*percentile), time.Duration(values[i]))
case showInt:
fmt.Printf("%dth percentile: %v\n", int(100*percentile), values[i])
}
}
fmt.Println()
}
|
|
scan.go
|
/*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scan
import (
"errors"
"fmt"
"github.com/gomodule/redigo/redis"
)
//1) 1) "mystream"
// 2) 1) 1) 1519073278252-0
// 2) 1) "foo"
// 2) "value_1"
// 2) 1) 1519073279157-0
// 2) 1) "foo"
// 2) "value_2"
type StreamElements []StreamElement
type StreamElement struct {
// Name is the stream name
Name string
// Items is the stream items (ID and list of field-value pairs)
Items []StreamItem
}
type StreamItem struct {
// ID is the item ID
ID string
// FieldValue represent the unscan list of field-value pairs
FieldValues []string
}
func ScanXReadReply(src []interface{}, dst StreamElements) (StreamElements, error) {
if dst == nil || len(dst) != len(src) {
dst = make(StreamElements, len(src))
}
for i, stream := range src {
elem, err := redis.Values(stream, nil)
if err != nil {
return nil, err
}
if len(elem) != 2 {
return nil, fmt.Errorf("unexpected stream element slice length (%d)", len(elem))
}
name, err := redis.String(elem[0], nil)
if err != nil {
return nil, err
}
dst[i].Name = name
items, err := redis.Values(elem[1], nil)
if err != nil {
return nil, err
}
if len(dst[i].Items) != len(items) {
// Reallocate
dst[i].Items = make([]StreamItem, len(items))
}
for j, rawitem := range items {
item, err := redis.Values(rawitem, nil)
if err != nil {
return nil, err
}
if len(item) != 2 {
return nil, fmt.Errorf("unexpected stream item slice length (%d)", len(elem))
}
id, err := redis.String(item[0], nil)
if err != nil {
return nil, err
}
dst[i].Items[j].ID = id
fvs, err := redis.Values(item[1], nil)
if err != nil {
return nil, err
}
if len(dst[i].Items[j].FieldValues) != len(fvs) {
// Reallocate
dst[i].Items[j].FieldValues = make([]string, len(fvs))
}
for k, rawfv := range fvs {
fv, err := redis.String(rawfv, nil)
if err != nil {
return nil, err
}
dst[i].Items[j].FieldValues[k] = fv
}
}
}
return dst, nil
}
//XINFO GROUPS mystream
//1) 1) name
//2) "mygroup"
//3) consumers
//4) (integer) 2
//5) pending
//6) (integer) 2
//7) last-delivered-id
//8) "1588152489012-0"
//1) 1) name
//2) "some-other-group"
//3) consumers
//4) (integer) 1
//5) pending
//6) (integer) 0
//7) last-delivered-id
//8) "1588152498034-0"
type StreamGroups map[string]StreamGroup
type StreamGroup struct {
// Consumers is the number of consumers in the group
Consumers int
// Pending is the number of the pending messages (not ACKED)
Pending int
// LastDeliveredId is the ID of the last delivered item
LastDeliveredId string
}
func ScanXInfoGroupReply(reply interface{}, err error) (StreamGroups, error) {
if err != nil {
return nil, err
}
groups, err := redis.Values(reply, nil)
if err != nil {
return nil, errors.New("expected a reply of type array")
}
dst := make(StreamGroups)
for _, group := range groups {
entries, err := redis.Values(group, nil)
if err != nil {
return nil, err
}
if len(entries) != 8 {
return nil, fmt.Errorf("unexpected group reply size (%d)", len(entries))
}
name, err := redis.String(entries[1], nil)
if err != nil {
return nil, err
}
consumers, err := redis.Int(entries[3], nil)
if err != nil {
return nil, err
}
pending, err := redis.Int(entries[5], nil)
if err != nil {
return nil, err
}
lastid, err := redis.String(entries[7], nil)
if err != nil {
return nil, err
}
dst[name] = StreamGroup{
Consumers: consumers,
Pending: pending,
LastDeliveredId: lastid,
}
}
return dst, nil
}
//XPENDING mystream mygroup [<start-id> <end-id> <count> [<consumer-name>]]
//1) 1) 1526569498055-0
// 2) "Bob"
// 3) (integer) 74170458
// 4) (integer) 1
//2) 1) 1526569506935-0
// 2) "Bob"
// 3) (integer) 74170458
// 4) (integer) 1
type PendingMessages []PendingMessage
type PendingMessage struct {
// MessageID is the ID of the pending message
MessageID string
// ConsumerName is the name of the consumer who was sent the pending message
ConsumerName string
// IdleTime is how much milliseconds have passed since the last time the message was delivered to the consumer
IdleTime int
// NumberOfDeliveries is the number of times that this message was delivered to the consumer
DeliveryCount int
}
func ScanXPendingReply(reply interface{}, err error) (PendingMessages, error) {
if err != nil {
return nil, err
}
pendingmessages, err := redis.Values(reply, nil)
if err != nil {
return nil, errors.New("expected a reply of type array")
}
dst := make(PendingMessages, len(pendingmessages))
for i, pendingmessage := range pendingmessages {
elem, err := redis.Values(pendingmessage, nil)
if err != nil {
return nil, err
}
if len(elem) != 4
|
msgid, err := redis.String(elem[0], nil)
if err != nil {
return nil, err
}
name, err := redis.String(elem[1], nil)
if err != nil {
return nil, err
}
idletime, err := redis.Int(elem[2], nil)
if err != nil {
return nil, err
}
delcount, err := redis.Int(elem[3], nil)
if err != nil {
return nil, err
}
dst[i] = PendingMessage{
MessageID: msgid,
ConsumerName: name,
IdleTime: idletime,
DeliveryCount: delcount,
}
}
return dst, nil
}
|
{
return nil, fmt.Errorf("unexpected pending message element slice length (%d)", len(elem))
}
|
test_AlignIO.py
|
# Copyright 2008 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for AlignIO module."""
import unittest
import warnings
from io import StringIO
from Bio import AlignIO
from Bio import SeqIO
from Bio.Align import AlignInfo
from Bio.Align import MultipleSeqAlignment
from Bio.Data import IUPACData
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
test_write_read_alignment_formats = sorted(AlignIO._FormatToWriter)
test_write_read_align_with_seq_count = test_write_read_alignment_formats + [
"fasta",
"tab",
]
class TestAlignIO_exceptions(unittest.TestCase):
t_formats = list(AlignIO._FormatToWriter) + list(SeqIO._FormatToWriter)
def test_phylip_reject_duplicate(self):
"""Check that writing duplicated IDs after truncation fails for PHYLIP."""
handle = StringIO()
sequences = [
SeqRecord(Seq("AAAA"), id="longsequencename1"),
SeqRecord(Seq("AAAA"), id="longsequencename2"),
SeqRecord(Seq("AAAA"), id="other_sequence"),
]
alignment = MultipleSeqAlignment(sequences)
with self.assertRaises(ValueError) as cm:
AlignIO.write(alignment, handle, "phylip")
self.assertEqual(
"Repeated name 'longsequen' (originally 'longsequencename2'), possibly due to truncation",
str(cm.exception),
)
def test_parsing_empty_files(self):
"""Check that parsing an empty file returns an empty list."""
for t_format in AlignIO._FormatToIterator:
handle = StringIO()
alignments = list(AlignIO.parse(handle, t_format))
self.assertEqual(alignments, [])
def test_writing_empty_files(self):
"""Check that writers can cope with no alignments."""
for t_format in self.t_formats:
handle = StringIO()
number = AlignIO.write([], handle, t_format)
self.assertEqual(number, 0)
def test_writing_not_alignments(self):
"""Check that writers reject records that are not alignments."""
path = "Clustalw/opuntia.aln"
records = list(AlignIO.read(path, "clustal"))
for t_format in self.t_formats:
handle = StringIO()
self.assertRaises(Exception, AlignIO.write, [records], handle, t_format)
class TestAlignIO_reading(unittest.TestCase):
def simple_alignment_comparison(self, alignments, alignments2, fmt):
self.assertEqual(len(alignments), len(alignments2))
for a1, a2 in zip(alignments, alignments2):
self.assertEqual(a1.get_alignment_length(), a2.get_alignment_length())
self.assertEqual(len(a1), len(a2))
for r1, r2 in zip(a1, a2):
# Check the bare minimum (ID and sequence) as
# many formats can't store more than that.
# Check the sequence:
self.assertEqual(r1.seq, r2.seq)
# Beware of different quirks and limitations in the
# valid character sets and the identifier lengths!
if fmt in ["phylip", "phylip-sequential"]:
id1 = r1.id.replace("[", "").replace("]", "")[:10]
elif fmt == "phylip-relaxed":
id1 = r1.id.replace(" ", "").replace(":", "|")
elif fmt == "clustal":
id1 = r1.id.replace(" ", "_")[:30]
elif fmt in ["stockholm", "maf"]:
id1 = r1.id.replace(" ", "_")
elif fmt == "fasta":
id1 = r1.id.split()[0]
else:
id1 = r1.id
id2 = r2.id
self.assertEqual(id1, id2)
def check_reverse_write_read(self, alignments, indent=" "):
alignments.reverse()
for fmt in test_write_read_align_with_seq_count:
records_per_alignment = len(alignments[0])
for a in alignments:
if records_per_alignment != len(a):
records_per_alignment = None
# Can we expect this format to work?
if (
not records_per_alignment
and fmt not in test_write_read_alignment_formats
):
continue
# Going to write to a handle...
handle = StringIO()
if fmt == "nexus":
with self.assertRaises(ValueError) as cm:
c = AlignIO.write(alignments, handle=handle, format=fmt)
self.assertEqual(
"We can only write one Alignment to a Nexus file.",
str(cm.exception),
)
continue
c = AlignIO.write(alignments, handle=handle, format=fmt)
self.assertEqual(c, len(alignments))
# First, try with the seq_count
if records_per_alignment:
handle.flush()
handle.seek(0)
alignments2 = list(
AlignIO.parse(
handle=handle, format=fmt, seq_count=records_per_alignment
)
)
self.simple_alignment_comparison(alignments, alignments2, fmt)
if fmt in test_write_read_alignment_formats:
# Don't need the seq_count
handle.flush()
handle.seek(0)
alignments2 = list(AlignIO.parse(handle=handle, format=fmt))
self.simple_alignment_comparison(alignments, alignments2, fmt)
# Try writing just one Alignment (not a list)
handle = StringIO()
AlignIO.write(alignments[0:1], handle, fmt)
self.assertEqual(handle.getvalue(), format(alignments[0], fmt))
def check_iterator_for_loop_handle(self, path, fmt, length, m=None):
# Try using the iterator with a for loop and a handle
with open(path) as handle:
alignments = list(AlignIO.parse(handle, format=fmt))
self.assertEqual(len(alignments), length)
if m is not None:
for alignment in alignments:
self.assertEqual(len(alignment), m)
return alignments
def check_iterator_for_loop_filename(self, path, fmt, length):
# Try using the iterator with a for loop and a filename not handle
counter = 0
for record in AlignIO.parse(path, format=fmt):
counter += 1
self.assertEqual(counter, length)
def check_iterator_next(self, path, fmt, length):
# Try using the iterator with the next() method
counter = 0
alignments = AlignIO.parse(path, format=fmt)
while True:
try:
alignment = next(alignments)
except StopIteration:
break
self.assertIsNotNone(alignment)
counter += 1
self.assertEqual(counter, length)
def check_iterator_next_and_list(self, path, fmt, length):
# Try a mixture of next() and list
counter = 0
alignments = AlignIO.parse(path, format=fmt)
alignment = next(alignments)
counter = 1
counter += len(list(alignments))
self.assertEqual(counter, length)
def check_iterator_next_for_loop(self, path, fmt, length):
# Try a mixture of next() and for loop
alignments = AlignIO.parse(path, format=fmt)
alignment = next(alignments)
counter = 1
for alignment in alignments:
counter += 1
self.assertEqual(counter, length)
def check_write_three_times_and_read(self, path, fmt, m):
with open(path) as handle:
data = handle.read()
handle = StringIO()
handle.write(data + "\n\n" + data + "\n\n" + data)
handle.seek(0)
self.assertEqual(
len(list(AlignIO.parse(handle=handle, format=fmt, seq_count=m))), 3
)
handle.close()
def check_read(self, path, fmt, m, k):
# Check Bio.AlignIO.read(...)
with open(path) as handle:
alignment = AlignIO.read(handle, format=fmt)
self.assertIsInstance(alignment, MultipleSeqAlignment)
self.assertEqual(len(alignment), m)
self.assertEqual(alignment.get_alignment_length(), k)
return alignment
def check_read_fails(self, path, fmt):
with open(path) as handle:
self.assertRaises(ValueError, AlignIO.read, handle, format=fmt)
def check_alignment_rows(self, alignment, sequences, column_annotations=None):
max_len = 40
items = []
for record in alignment:
name = record.id
sequence = record.seq
if len(sequence) > max_len:
sequence = sequence[: max_len - 6] + "..." + sequence[-3:]
item = (name, sequence)
items.append(item)
self.assertEqual(sequences, sorted(items))
if column_annotations is None:
self.assertEqual(alignment.column_annotations, {})
else:
self.assertEqual(alignment.column_annotations, column_annotations)
def check_alignment_columns(self, alignment, columns):
alignment_len = alignment.get_alignment_length()
# Compare each sequence column
for index in range(min(5, alignment_len)):
self.assertEqual(alignment[:, index], columns[index])
if alignment_len > 5:
self.assertEqual(alignment[:, -1], columns[-1])
def check_summary_simple(self, alignment):
summary = AlignInfo.SummaryInfo(alignment)
dumb_consensus = summary.dumb_consensus()
# gap_consensus = summary.gap_consensus()
def check_summary(self, alignment, molecule_type):
# Check AlignInfo.SummaryInfo likes the alignment; smoke test only
if molecule_type == "DNA":
letters = IUPACData.unambiguous_dna_letters
all_letters = IUPACData.ambiguous_dna_letters
elif molecule_type == "RNA":
letters = IUPACData.unambiguous_rna_letters
all_letters = IUPACData.ambiguous_rna_letters
elif molecule_type == "protein":
letters = IUPACData.protein_letters
all_letters = IUPACData.protein_letters
else:
raise ValueError(f"Unknown molecule type '{molecule_type}'")
summary = AlignInfo.SummaryInfo(alignment)
dumb_consensus = summary.dumb_consensus()
# gap_consensus = summary.gap_consensus()
pssm = summary.pos_specific_score_matrix()
rep_dict = summary.replacement_dictionary(skip_chars=None, letters=letters)
e_freq = 1.0 / len(letters)
all_letters = all_letters.upper() + all_letters.lower()
e_freq_table = dict.fromkeys(all_letters, e_freq)
info_content = summary.information_content(
e_freq_table=e_freq_table, chars_to_ignore=["N", "X"]
)
def check_summary_pir(self, alignment):
letters = IUPACData.unambiguous_dna_letters
summary = AlignInfo.SummaryInfo(alignment)
dumb_consensus = summary.dumb_consensus()
# gap_consensus = summary.gap_consensus()
pssm = summary.pos_specific_score_matrix()
rep_dict = summary.replacement_dictionary(skip_chars=None, letters=letters)
e_freq = 1.0 / len(letters)
all_letters = letters.upper() + letters.lower()
e_freq_table = dict.fromkeys(all_letters, e_freq)
info_content = summary.information_content(e_freq_table=e_freq_table)
def test_reading_alignments_clustal1(self):
path = "Clustalw/clustalw.aln"
self.check_iterator_for_loop_handle(path, "clustal", 1, 2)
self.check_iterator_for_loop_filename(path, "clustal", 1)
self.check_iterator_next(path, "clustal", 1)
self.check_iterator_next_and_list(path, "clustal", 1)
self.check_iterator_next_for_loop(path, "clustal", 1)
self.check_write_three_times_and_read(path, "clustal", 2)
alignment = self.check_read(path, "clustal", 2, 601)
self.check_alignment_rows(
alignment,
[
(
"gi|4959044|gb|AAD34209.1|AF069",
"MENSDSNDKGSDQSAAQRRSQMDRLDREEAFYQF...SVV",
),
(
"gi|671626|emb|CAA85685.1|",
"---------MSPQTETKASVGFKAGVKEYKLTYY...---",
),
],
{
"clustal_consensus": " * *: :: :. :* : :. : . :* :: .: ** **:... *.*** .. .:* * *: .* :* : :* .* *::. . .:: :*..* :* .* .. . : . : *. .:: : . .* . : *.: ..:: * . :: : .*. :. :. . . .* **.*.. :.. *.. . . ::* :.: .*: : * :: *** . * :. . . : *: .:: ::: .. . : : :: * * : .. :.* . ::. :: * : : * * :.. * .. * :** . .:. .. :*. ..: :. . .:* * : : * . ..*:. .** *.*... : :: :* .* ::* : :. :. : "
},
)
self.check_summary(alignment, "protein")
def test_reading_alignments_clustal2(self):
path = "Clustalw/opuntia.aln"
self.check_iterator_for_loop_handle(path, "clustal", 1, 7)
self.check_iterator_for_loop_filename(path, "clustal", 1)
self.check_iterator_next(path, "clustal", 1)
self.check_iterator_next_and_list(path, "clustal", 1)
self.check_iterator_next_for_loop(path, "clustal", 1)
self.check_write_three_times_and_read(path, "clustal", 7)
alignment = self.check_read(path, "clustal", 7, 156)
self.check_alignment_columns(
alignment,
["TTTTTTT", "AAAAAAA", "TTTTTTT", "AAAAAAA", "CCCCCCC", "AAAAAAA"],
)
self.check_summary(alignment, "DNA")
def test_reading_alignments_clustal3(self):
path = "Clustalw/hedgehog.aln"
self.check_iterator_for_loop_handle(path, "clustal", 1, 5)
self.check_iterator_for_loop_filename(path, "clustal", 1)
self.check_iterator_next(path, "clustal", 1)
self.check_iterator_next_and_list(path, "clustal", 1)
self.check_iterator_next_for_loop(path, "clustal", 1)
self.check_write_three_times_and_read(path, "clustal", 5)
alignment = self.check_read(path, "clustal", 5, 447)
self.check_alignment_columns(
alignment, ["M----", "F----", "N----", "L----", "V----", "---SS"]
)
self.check_summary(alignment, "protein")
def test_reading_alignments_clustal4(self):
path = "Clustalw/odd_consensus.aln"
self.check_iterator_for_loop_handle(path, "clustal", 1, 2)
self.check_iterator_for_loop_filename(path, "clustal", 1)
self.check_iterator_next(path, "clustal", 1)
self.check_iterator_next_and_list(path, "clustal", 1)
self.check_iterator_next_for_loop(path, "clustal", 1)
self.check_write_three_times_and_read(path, "clustal", 2)
alignment = self.check_read(path, "clustal", 2, 687)
self.check_alignment_rows(
alignment,
[
("AT3G20900.1-CDS", "----------------------------------...TAG"),
("AT3G20900.1-SEQ", "ATGAACAAAGTAGCGAGGAAGAACAAAACATCAG...TAG"),
],
{
"clustal_consensus": " * * *** ***** * * ** *******************************************************************************************************************************************************************************"
},
)
self.check_summary(alignment, "DNA")
def test_reading_alignments_clustal5(self):
path = "Clustalw/protein.aln"
self.check_iterator_for_loop_handle(path, "clustal", 1, 20)
self.check_iterator_for_loop_filename(path, "clustal", 1)
self.check_iterator_next(path, "clustal", 1)
self.check_iterator_next_and_list(path, "clustal", 1)
self.check_iterator_next_for_loop(path, "clustal", 1)
self.check_write_three_times_and_read(path, "clustal", 20)
alignment = self.check_read(path, "clustal", 20, 411)
self.check_alignment_columns(
alignment,
[
"-M------------------",
"-T------------------",
"-V------------------",
"-L-----------------M",
"-E---------------MMS",
"-------------------T",
],
)
self.check_summary(alignment, "protein")
def test_reading_alignments_clustal6(self):
path = "Clustalw/promals3d.aln"
self.check_iterator_for_loop_handle(path, "clustal", 1, 20)
self.check_iterator_for_loop_filename(path, "clustal", 1)
self.check_iterator_next(path, "clustal", 1)
self.check_iterator_next_and_list(path, "clustal", 1)
self.check_iterator_next_for_loop(path, "clustal", 1)
self.check_write_three_times_and_read(path, "clustal", 20)
alignment = self.check_read(path, "clustal", 20, 414)
self.check_alignment_columns(
alignment,
[
"MMMMMMMMMMMMMMMM-M--",
"-----------------T--",
"-----------------V--",
"-----------------L--",
"-S---------------E--",
"-T------------------",
],
)
self.check_summary(alignment, "protein")
def test_reading_alignments_fasta(self):
path = "GFF/multi.fna" # Trivial nucleotide alignment
self.check_iterator_for_loop_handle(path, "fasta", 1, 3)
self.check_iterator_for_loop_filename(path, "fasta", 1)
self.check_iterator_next(path, "fasta", 1)
self.check_iterator_next_and_list(path, "fasta", 1)
self.check_iterator_next_for_loop(path, "fasta", 1)
self.check_write_three_times_and_read(path, "fasta", 3)
alignment = self.check_read(path, "fasta", 3, 8)
self.check_alignment_rows(
alignment,
[("test1", "ACGTCGCG"), ("test2", "GGGGCCCC"), ("test3", "AAACACAC")],
)
self.check_summary(alignment, "DNA")
def test_reading_alignments_nexus1(self):
path = "Nexus/test_Nexus_input.nex"
self.check_iterator_for_loop_handle(path, "nexus", 1, 9)
self.check_iterator_for_loop_filename(path, "nexus", 1)
self.check_iterator_next(path, "nexus", 1)
self.check_iterator_next_and_list(path, "nexus", 1)
self.check_iterator_next_for_loop(path, "nexus", 1)
alignment = self.check_read(path, "nexus", 9, 48)
self.check_alignment_columns(
alignment,
[
"AAAAAAAAc",
"-----c?tc",
"CCCCCCCCc",
"--c-?a-tc",
"GGGGGGGGc",
"tt--?ag?c",
],
)
self.check_summary_simple(alignment)
def test_reading_alignments_nexus2(self):
path = "Nexus/codonposset.nex"
self.check_iterator_for_loop_handle(path, "nexus", 1, 2)
self.check_iterator_for_loop_filename(path, "nexus", 1)
self.check_iterator_next(path, "nexus", 1)
self.check_iterator_next_and_list(path, "nexus", 1)
self.check_iterator_next_for_loop(path, "nexus", 1)
alignment = self.check_read(path, "nexus", 2, 22)
self.check_alignment_rows(
alignment,
[
("Aegotheles", "AAAAAGGCATTGTGGTGGGAAT"),
("Aerodramus", "?????????TTGTGGTGGGAAT"),
],
)
self.check_summary_simple(alignment)
def test_reading_alignments_msf1(self):
path = "msf/DOA_prot.msf"
with self.assertRaisesRegex(
ValueError,
"GCG MSF header said alignment length 62, "
"but 11 of 12 sequences said Len: 250",
):
AlignIO.read(path, "msf")
def test_reading_alignments_msf2(self):
path = "msf/W_prot.msf"
with warnings.catch_warnings(record=True) as w:
self.check_iterator_for_loop_handle(path, "msf", 1, 11)
self.check_iterator_for_loop_filename(path, "msf", 1)
self.check_iterator_next(path, "msf", 1)
self.check_iterator_next_and_list(path, "msf", 1)
self.check_iterator_next_for_loop(path, "msf", 1)
alignment = self.check_read(path, "msf", 11, 99)
warning_msgs = {str(_.message) for _ in w}
self.assertIn(
"One of more alignment sequences were truncated and have been gap padded",
warning_msgs,
)
self.check_alignment_columns(
alignment,
[
"GGGGGGGGGGG",
"LLLLLLLLLLL",
"TTTTTTTTTTT",
"PPPPPPPPPPP",
"FFFFFFSSSSS",
# ...
"LLLLLL----L",
],
)
self.check_summary_simple(alignment)
def test_reading_alignments_stockholm1(self):
path = "Stockholm/simple.sth"
self.check_iterator_for_loop_handle(path, "stockholm", 1, 2)
self.check_iterator_for_loop_filename(path, "stockholm", 1)
self.check_iterator_next(path, "stockholm", 1)
self.check_iterator_next_and_list(path, "stockholm", 1)
self.check_iterator_next_for_loop(path, "stockholm", 1)
self.check_write_three_times_and_read(path, "stockholm", 2)
alignment = self.check_read(path, "stockholm", 2, 104)
self.check_alignment_rows(
alignment,
[
("AE007476.1", "AAAAUUGAAUAUCGUUUUACUUGUUUAU-GUCGU...GAU"),
("AP001509.1", "UUAAUCGAGCUCAACACUCUUCGUAUAUCCUC-U...UGU"),
],
{
"secondary_structure": ".................<<<<<<<<...<<<<<<<........>>>>>>>........<<<<<<<.......>>>>>>>..>>>>>>>>..............."
},
)
self.check_summary(alignment, "RNA")
def test_reading_alignments_stockholm2(self):
path = "Stockholm/funny.sth"
self.check_iterator_for_loop_handle(path, "stockholm", 1, 6)
self.check_iterator_for_loop_filename(path, "stockholm", 1)
self.check_iterator_next(path, "stockholm", 1)
self.check_iterator_next_and_list(path, "stockholm", 1)
self.check_iterator_next_for_loop(path, "stockholm", 1)
self.check_write_three_times_and_read(path, "stockholm", 6)
alignment = self.check_read(path, "stockholm", 6, 43)
self.check_alignment_columns(
alignment, ["MMMEEE", "TQIVVV", "CHEMMM", "RVALLL", "ASDTTT", "SYSEEE"]
)
self.check_summary(alignment, "protein")
def test_reading_alignments_phylip1(self):
path = "Phylip/reference_dna.phy"
self.check_iterator_for_loop_handle(path, "phylip", 1, 6)
self.check_iterator_for_loop_filename(path, "phylip", 1)
self.check_iterator_next(path, "phylip", 1)
self.check_iterator_next_and_list(path, "phylip", 1)
self.check_iterator_next_for_loop(path, "phylip", 1)
self.check_write_three_times_and_read(path, "phylip", 6)
alignment = self.check_read(path, "phylip", 6, 13)
self.check_alignment_columns(
alignment, ["CCTTCG", "GGAAAG", "ATAAAC", "TTTTAA", "GAGGAG", "CTTTTC"]
)
self.check_summary(alignment, "DNA")
def test_reading_alignments_phylip2(self):
path = "Phylip/reference_dna2.phy"
self.check_iterator_for_loop_handle(path, "phylip", 1, 6)
self.check_iterator_for_loop_filename(path, "phylip", 1)
self.check_iterator_next(path, "phylip", 1)
self.check_iterator_next_and_list(path, "phylip", 1)
self.check_iterator_next_for_loop(path, "phylip", 1)
self.check_write_three_times_and_read(path, "phylip", 6)
alignment = self.check_read(path, "phylip", 6, 39)
self.check_alignment_columns(
alignment, ["CCTTCG", "GGAAAG", "ATAAAC", "TTTTAA", "GAGGAG", "CTTTTC"]
)
self.check_summary(alignment, "DNA")
def test_reading_alignments_phylip3(self):
path = "Phylip/hennigian.phy"
self.check_iterator_for_loop_handle(path, "phylip", 1, 10)
self.check_iterator_for_loop_filename(path, "phylip", 1)
self.check_iterator_next(path, "phylip", 1)
self.check_iterator_next_and_list(path, "phylip", 1)
self.check_iterator_next_for_loop(path, "phylip", 1)
self.check_write_three_times_and_read(path, "phylip", 10)
alignment = self.check_read(path, "phylip", 10, 40)
self.check_alignment_columns(
alignment,
[
"CCCCCAAAAA",
"AAAAACCCCC",
"CCCAAAAAAA",
"AAACCAAAAA",
"CCAAAAAAAA",
"AAAAAAAAAA",
],
)
self.check_summary(alignment, "DNA")
def test_reading_alignments_phylip4(self):
path = "Phylip/horses.phy"
self.check_iterator_for_loop_handle(path, "phylip", 1, 10)
self.check_iterator_for_loop_filename(path, "phylip", 1)
self.check_iterator_next(path, "phylip", 1)
self.check_iterator_next_and_list(path, "phylip", 1)
self.check_iterator_next_for_loop(path, "phylip", 1)
self.check_write_three_times_and_read(path, "phylip", 10)
alignment = self.check_read(path, "phylip", 10, 40)
self.check_alignment_columns(
alignment,
[
"AACCCCCCCC",
"AAAACCCCCC",
"AAAAAAAAAC",
"ACAAAAAAAA",
"ACACCCCCCC",
"AAAAAAAAAA",
],
)
self.check_summary(alignment, "DNA")
def test_reading_alignments_phylip5(self):
path = "Phylip/random.phy"
self.check_iterator_for_loop_handle(path, "phylip", 1, 10)
self.check_iterator_for_loop_filename(path, "phylip", 1)
self.check_iterator_next(path, "phylip", 1)
self.check_iterator_next_and_list(path, "phylip", 1)
self.check_iterator_next_for_loop(path, "phylip", 1)
self.check_write_three_times_and_read(path, "phylip", 10)
alignment = self.check_read(path, "phylip", 10, 40)
self.check_alignment_columns(
alignment,
[
"CAAAACAAAC",
"AACAACCACC",
"CAAAACAAAA",
"ACAACACACA",
"CCAAAACCAA",
"AAAAAAAAAA",
],
)
self.check_summary(alignment, "DNA")
def test_reading_alignments_phylip6(self):
path = "Phylip/interlaced.phy"
self.check_iterator_for_loop_handle(path, "phylip", 1, 3)
self.check_iterator_for_loop_filename(path, "phylip", 1)
self.check_iterator_next(path, "phylip", 1)
self.check_iterator_next_and_list(path, "phylip", 1)
self.check_iterator_next_for_loop(path, "phylip", 1)
self.check_write_three_times_and_read(path, "phylip", 3)
alignment = self.check_read(path, "phylip", 3, 384)
self.check_alignment_rows(
alignment,
[
("ALEU_HORVU", "MAHARVLLLALAVLATAAVAVASSSSFADSNPIR...VAA"),
("CATH_HUMAN", "------MWATLPLLCAGAWLLGV--------PVC...PLV"),
("CYS1_DICDI", "-----MKVILLFVLAVFTVFVSS-----------...I--"),
],
)
self.check_summary(alignment, "protein")
def test_reading_alignments_phylip7(self):
path = "Phylip/interlaced2.phy"
self.check_iterator_for_loop_handle(path, "phylip", 1, 4)
self.check_iterator_for_loop_filename(path, "phylip", 1)
self.check_iterator_next(path, "phylip", 1)
self.check_iterator_next_and_list(path, "phylip", 1)
self.check_iterator_next_for_loop(path, "phylip", 1)
self.check_write_three_times_and_read(path, "phylip", 4)
alignment = self.check_read(path, "phylip", 4, 131)
self.check_alignment_rows(
alignment,
[
("IXI_234", "TSPASIRPPAGPSSRPAMVSSRRTRPSPPGPRRP...SHE"),
("IXI_235", "TSPASIRPPAGPSSR---------RPSPPGPRRP...SHE"),
("IXI_236", "TSPASIRPPAGPSSRPAMVSSR--RPSPPPPRRP...SHE"),
("IXI_237", "TSPASLRPPAGPSSRPAMVSSRR-RPSPPGPRRP...SHE"),
],
)
self.check_summary(alignment, "protein")
def test_reading_alignments_phylip8(self):
|
def test_reading_alignments_phylip9(self):
path = "Phylip/sequential.phy"
self.check_iterator_for_loop_handle(path, "phylip-sequential", 1, 3)
self.check_iterator_for_loop_filename(path, "phylip-sequential", 1)
self.check_iterator_next(path, "phylip-sequential", 1)
self.check_iterator_next_and_list(path, "phylip-sequential", 1)
self.check_iterator_next_for_loop(path, "phylip-sequential", 1)
self.check_write_three_times_and_read(path, "phylip-sequential", 3)
alignment = self.check_read(path, "phylip-sequential", 3, 384)
self.check_alignment_rows(
alignment,
[
("ALEU_HORVU", "MAHARVLLLALAVLATAAVAVASSSSFADSNPIR...VAA"),
("CATH_HUMAN", "------MWATLPLLCAGAWLLGV--------PVC...PLV"),
("CYS1_DICDI", "-----MKVILLFVLAVFTVFVSS-----------...I--"),
],
)
self.check_summary(alignment, "protein")
def test_reading_alignments_phylip10(self):
path = "Phylip/sequential2.phy"
self.check_iterator_for_loop_handle(path, "phylip-sequential", 1, 4)
self.check_iterator_for_loop_filename(path, "phylip-sequential", 1)
self.check_iterator_next(path, "phylip-sequential", 1)
self.check_iterator_next_and_list(path, "phylip-sequential", 1)
self.check_iterator_next_for_loop(path, "phylip-sequential", 1)
self.check_write_three_times_and_read(path, "phylip-sequential", 4)
alignment = self.check_read(path, "phylip-sequential", 4, 131)
self.check_alignment_rows(
alignment,
[
("IXI_234", "TSPASIRPPAGPSSRPAMVSSRRTRPSPPGPRRP...SHE"),
("IXI_235", "TSPASIRPPAGPSSR---------RPSPPGPRRP...SHE"),
("IXI_236", "TSPASIRPPAGPSSRPAMVSSR--RPSPPPPRRP...SHE"),
("IXI_237", "TSPASLRPPAGPSSRPAMVSSRR-RPSPPGPRRP...SHE"),
],
)
self.check_summary(alignment, "protein")
def test_reading_alignments_emboss1(self):
path = "Emboss/alignret.txt"
self.check_iterator_for_loop_handle(path, "emboss", 1, 4)
self.check_iterator_for_loop_filename(path, "emboss", 1)
self.check_iterator_next(path, "emboss", 1)
self.check_iterator_next_and_list(path, "emboss", 1)
self.check_iterator_next_for_loop(path, "emboss", 1)
alignment = self.check_read(path, "emboss", 4, 131)
self.check_alignment_rows(
alignment,
[
("IXI_234", "TSPASIRPPAGPSSRPAMVSSRRTRPSPPGPRRP...SHE"),
("IXI_235", "TSPASIRPPAGPSSR---------RPSPPGPRRP...SHE"),
("IXI_236", "TSPASIRPPAGPSSRPAMVSSR--RPSPPPPRRP...SHE"),
("IXI_237", "TSPASLRPPAGPSSRPAMVSSRR-RPSPPGPRRP...SHE"),
],
)
self.check_summary(alignment, "protein")
def test_reading_alignments_emboss2(self):
path = "Emboss/needle.txt"
alignments = self.check_iterator_for_loop_handle(path, "emboss", 5, 2)
self.check_iterator_for_loop_filename(path, "emboss", 5)
self.check_iterator_next(path, "emboss", 5)
self.check_iterator_next_and_list(path, "emboss", 5)
self.check_iterator_next_for_loop(path, "emboss", 5)
self.check_read_fails(path, "emboss")
# Show the alignment
self.assertEqual(alignments[0].get_alignment_length(), 124)
self.assertEqual(alignments[1].get_alignment_length(), 119)
self.assertEqual(alignments[2].get_alignment_length(), 120)
self.assertEqual(alignments[3].get_alignment_length(), 118)
self.assertEqual(alignments[4].get_alignment_length(), 125)
self.check_alignment_rows(
alignments[0],
[
("gi|94968718|receiver", "-VLLADDHALVRRGFRLMLED--DPEIEIVAEAG...GET"),
("ref_rec", "KILIVDD----QYGIRILLNEVFNKEGYQTFQAA...---"),
],
)
self.check_alignment_rows(
alignments[1],
[
("gi|94968761|receiver", "-ILIVDDEANTLASLSRAFRLAGHEATVCDNAVR...LKR"),
("ref_rec", "KILIVDDQYGIRILLNEVFNKEGYQTFQAANGLQ...---"),
],
)
self.check_alignment_rows(
alignments[2],
[
("gi|94967506|receiver", "LHIVVVDDDPGTCVYIESVFAELGHTCKSFVRPE...HKE"),
("ref_rec", "-KILIVDDQYGIRILLNEVFNKEGYQTFQAANGL...---"),
],
)
self.check_alignment_rows(
alignments[3],
[
("gi|94970045|receiver", "-VLLVEDEEALRAAAGDFLETRGYKIMTARDGTE...EVL"),
("ref_rec", "KILIVDDQYGIRILLNEVFNKEGYQTFQAANGLQ...DAV"),
],
)
self.check_alignment_rows(
alignments[4],
[
("gi|94970041|receiver", "TVLLVEDEEGVRKLVRGILSRQGYHVLEATSGEE...KRQ"),
("ref_rec", "KILIVDDQYGIRILLNEVFNKEGYQTFQAANGLQ...---"),
],
)
self.check_summary(alignments[0], "protein")
self.check_reverse_write_read(alignments)
def test_reading_alignments_emboss3(self):
path = "Emboss/needle_asis.txt"
self.check_iterator_for_loop_handle(path, "emboss", 1, 2)
self.check_iterator_for_loop_filename(path, "emboss", 1)
self.check_iterator_next(path, "emboss", 1)
self.check_iterator_next_and_list(path, "emboss", 1)
self.check_iterator_next_for_loop(path, "emboss", 1)
alignment = self.check_read(path, "emboss", 2, 3653)
self.check_alignment_rows(
alignment,
[
("asis", "----------------------------------...GAA"),
("asis", "TATTTTTTGGATTTTTTTCTAGATTTTCTAGGTT...GAA"),
],
)
self.check_summary(alignment, "DNA")
def test_reading_alignments_emboss4(self):
path = "Emboss/water.txt"
self.check_iterator_for_loop_handle(path, "emboss", 1, 2)
self.check_iterator_for_loop_filename(path, "emboss", 1)
self.check_iterator_next(path, "emboss", 1)
self.check_iterator_next_and_list(path, "emboss", 1)
self.check_iterator_next_for_loop(path, "emboss", 1)
alignment = self.check_read(path, "emboss", 2, 131)
self.check_alignment_rows(
alignment,
[
("IXI_234", "TSPASIRPPAGPSSRPAMVSSRRTRPSPPGPRRP...SHE"),
("IXI_235", "TSPASIRPPAGPSSR---------RPSPPGPRRP...SHE"),
],
)
self.check_summary(alignment, "protein")
def test_reading_alignments_emboss5(self):
path = "Emboss/water2.txt"
self.check_iterator_for_loop_handle(path, "emboss", 1, 2)
self.check_iterator_for_loop_filename(path, "emboss", 1)
self.check_iterator_next(path, "emboss", 1)
self.check_iterator_next_and_list(path, "emboss", 1)
self.check_iterator_next_for_loop(path, "emboss", 1)
alignment = self.check_read(path, "emboss", 2, 18)
self.check_alignment_rows(
alignment, [("asis", "CGTTTGAGT-CTGGGATG"), ("asis", "CGTTTGAGTACTGGGATG")]
)
self.check_summary(alignment, "DNA")
def test_reading_alignments_emboss6(self):
path = "Emboss/matcher_simple.txt"
self.check_iterator_for_loop_handle(path, "emboss", 1, 2)
self.check_iterator_for_loop_filename(path, "emboss", 1)
self.check_iterator_next(path, "emboss", 1)
self.check_iterator_next_and_list(path, "emboss", 1)
self.check_iterator_next_for_loop(path, "emboss", 1)
alignment = self.check_read(path, "emboss", 2, 16)
self.check_alignment_rows(
alignment,
[("AF069992_1", "GPPPQSPDENRAGESS"), ("CAA85685.1", "GVPPEEAGAAVAAESS")],
)
self.check_summary(alignment, "protein")
def test_reading_alignments_emboss7(self):
path = "Emboss/matcher_pair.txt"
alignments = self.check_iterator_for_loop_handle(path, "emboss", 5, 2)
self.check_iterator_for_loop_filename(path, "emboss", 5)
self.check_iterator_next(path, "emboss", 5)
self.check_iterator_next_and_list(path, "emboss", 5)
self.check_iterator_next_for_loop(path, "emboss", 5)
self.check_read_fails(path, "emboss")
self.assertEqual(alignments[0].get_alignment_length(), 145)
self.assertEqual(alignments[1].get_alignment_length(), 13)
self.assertEqual(alignments[2].get_alignment_length(), 18)
self.assertEqual(alignments[3].get_alignment_length(), 10)
self.assertEqual(alignments[4].get_alignment_length(), 10)
self.check_alignment_rows(
alignments[0],
[
("HBA_HUMAN", "LSPADKTNVKAAWGKVGAHAGEYGAEALERMFLS...SKY"),
("HBB_HUMAN", "LTPEEKSAVTALWGKV--NVDEVGGEALGRLLVV...HKY"),
],
)
self.check_alignment_rows(
alignments[1],
[("HBA_HUMAN", "KKVADALTNAVAH"), ("HBB_HUMAN", "QKVVAGVANALAH")],
)
self.check_alignment_rows(
alignments[2],
[("HBA_HUMAN", "KLRVDPVNFKLLSHCLLV"), ("HBB_HUMAN", "KVNVDEVGGEALGRLLVV")],
)
self.check_alignment_rows(
alignments[3], [("HBA_HUMAN", "LSALSDLHAH"), ("HBB_HUMAN", "LGAFSDGLAH")]
)
self.check_alignment_rows(
alignments[4], [("HBA_HUMAN", "VKAAWGKVGA"), ("HBB_HUMAN", "VQAAYQKVVA")]
)
self.check_summary(alignments[0], "protein")
self.check_reverse_write_read(alignments)
def test_reading_alignments_emboss8(self):
path = "Emboss/emboss_pair_aln_full_blank_line.txt"
self.check_iterator_for_loop_handle(path, "emboss", 1, 2)
self.check_iterator_for_loop_filename(path, "emboss", 1)
self.check_iterator_next(path, "emboss", 1)
self.check_iterator_next_and_list(path, "emboss", 1)
self.check_iterator_next_for_loop(path, "emboss", 1)
alignment = self.check_read(path, "emboss", 2, 1450)
self.check_alignment_rows(
alignment,
[
(
"hg38_chrX_131691529_131830643_47210_48660",
"GGCAGGTGCATAGCTTGAGCCTAGGAGTTCAAGT...AAA",
),
(
"mm10_chrX_50555743_50635321_27140_27743",
"G--------------------------TTCAAGG...AAA",
),
],
)
self.check_summary(alignment, "DNA")
def test_reading_alignments_fasta_m10_1(self):
path = "Fasta/output001.m10"
alignments = self.check_iterator_for_loop_handle(path, "fasta-m10", 4, 2)
self.check_iterator_for_loop_filename(path, "fasta-m10", 4)
self.check_iterator_next(path, "fasta-m10", 4)
self.check_iterator_next_and_list(path, "fasta-m10", 4)
self.check_iterator_next_for_loop(path, "fasta-m10", 4)
self.check_read_fails(path, "fasta-m10")
self.assertEqual(alignments[0].get_alignment_length(), 108)
self.check_alignment_rows(
alignments[0],
[
(
"gi|10955263|ref|NP_052604.1|",
"SGSNT-RRRAISRPVRLTAEED---QEIRKRAAE...LSR",
),
(
"gi|152973457|ref|YP_001338508.1|",
"AGSGAPRRRGSGLASRISEQSEALLQEAAKHAAE...LSR",
),
],
)
self.assertEqual(alignments[1].get_alignment_length(), 64)
self.check_alignment_rows(
alignments[1],
[
(
"gi|10955263|ref|NP_052604.1|",
"AAECGKTVSGFLRAAALGKKVNSLTDDRVLKEV-...AIT",
),
(
"gi|152973588|ref|YP_001338639.1|",
"ASRQGCTVGG--KMDSVQDKASDKDKERVMKNIN...TLT",
),
],
)
self.assertEqual(alignments[2].get_alignment_length(), 38)
self.check_alignment_rows(
alignments[2],
[
(
"gi|10955264|ref|NP_052605.1|",
"MKKDKKYQIEAIKNKDKTLFIVYATDIYSPSEFFSKIE",
),
(
"gi|152973462|ref|YP_001338513.1|",
"IKKDLGVSFLKLKNREKTLIVDALKKKYPVAELLSVLQ",
),
],
)
self.assertEqual(alignments[3].get_alignment_length(), 43)
self.check_alignment_rows(
alignments[3],
[
(
"gi|10955265|ref|NP_052606.1|",
"SELHSKLPKSIDKIHEDIKKQLSC-SLIMKKIDV...TYC",
),
(
"gi|152973545|ref|YP_001338596.1|",
"SRINSDVARRIPGIHRDPKDRLSSLKQVEEALDM...EYC",
),
],
)
self.check_summary(alignments[0], "protein")
self.check_reverse_write_read(alignments)
def test_reading_alignments_fasta_m10_2(self):
path = "Fasta/output002.m10"
alignments = self.check_iterator_for_loop_handle(path, "fasta-m10", 6, 2)
self.check_iterator_for_loop_filename(path, "fasta-m10", 6)
self.check_iterator_next(path, "fasta-m10", 6)
self.check_iterator_next_and_list(path, "fasta-m10", 6)
self.check_iterator_next_for_loop(path, "fasta-m10", 6)
self.check_read_fails(path, "fasta-m10")
self.assertEqual(alignments[0].get_alignment_length(), 88)
self.check_alignment_rows(
alignments[0],
[
(
"gi|10955263|ref|NP_052604.1|",
"SGSNTRRRAISRPVR--LTAEEDQEIRKRAAECG...AEV",
),
(
"gi|162139799|ref|NP_309634.2|",
"SQRSTRRKPENQPTRVILFNKPYDVLPQFTDEAG...VQV",
),
],
)
self.assertEqual(alignments[1].get_alignment_length(), 53)
self.check_alignment_rows(
alignments[1],
[
(
"gi|10955263|ref|NP_052604.1|",
"EIRKRAAECGKTVSGFLRAAA-LGKKV----NSL...KKL",
),
(
"gi|15831859|ref|NP_310632.1|",
"EIKPRGTSKGEAIAAFMQEAPFIGRTPVFLGDDL...VKI",
),
],
)
self.assertEqual(alignments[2].get_alignment_length(), 92)
self.check_alignment_rows(
alignments[2],
[
(
"gi|10955264|ref|NP_052605.1|",
"SEFFSKIESDLKKKKSKGDVFFDLIIPNG-----...ATS",
),
(
"gi|15829419|ref|NP_308192.1|",
"TELNSELAKAMKVDAQRG-AFVSQVLPNSSAAKA...QSS",
),
],
)
self.assertEqual(alignments[5].get_alignment_length(), 157)
self.check_alignment_rows(
alignments[5],
[
(
"gi|10955265|ref|NP_052606.1|",
"QYIMTTSNGDRVRAKIYKRGSIQFQGKYLQIASL...REI",
),
(
"gi|15833861|ref|NP_312634.1|",
"EFIRLLSDHDQFEKDQISELTVAANALKLEVAK-...KKV",
),
],
)
self.check_summary(alignments[0], "protein")
self.check_reverse_write_read(alignments)
def test_reading_alignments_fasta_m10_3(self):
path = "Fasta/output003.m10"
alignments = self.check_iterator_for_loop_handle(path, "fasta-m10", 3, 2)
self.check_iterator_for_loop_filename(path, "fasta-m10", 3)
self.check_iterator_next(path, "fasta-m10", 3)
self.check_iterator_next_and_list(path, "fasta-m10", 3)
self.check_iterator_next_for_loop(path, "fasta-m10", 3)
self.check_read_fails(path, "fasta-m10")
self.assertEqual(alignments[0].get_alignment_length(), 55)
self.check_alignment_rows(
alignments[0],
[
(
"gi|10955263|ref|NP_052604.1|",
"VRLTAEEDQ--EIRKRAAECG-KTVSGFLRAAAL...LGA",
),
(
"gi|152973837|ref|YP_001338874.1|",
"ISISNNKDQYEELQKEQGERDLKTVDQLVRIAAA...IAA",
),
],
)
self.assertEqual(alignments[1].get_alignment_length(), 22)
self.check_alignment_rows(
alignments[1],
[
("gi|10955265|ref|NP_052606.1|", "DDRANLFEFLSEEGITITEDNN"),
("gi|152973840|ref|YP_001338877.1|", "DDAEHLFRTLSSR-LDALQDGN"),
],
)
self.assertEqual(alignments[2].get_alignment_length(), 63)
self.check_alignment_rows(
alignments[2],
[
(
"gi|10955264|ref|NP_052605.1|",
"VYTSFN---GEKFSSYTLNKVTKTDEYNDLSELS...KGI",
),
(
"gi|152973841|ref|YP_001338878.1|",
"VFGSFEQPKGEHLSGQVSEQ--RDTAFADQNEQV...QAM",
),
],
)
self.check_summary(alignments[0], "protein")
self.check_reverse_write_read(alignments)
def test_reading_alignments_fasta_m10_4(self):
path = "Fasta/output004.m10"
self.check_iterator_for_loop_handle(path, "fasta-m10", 1, 2)
self.check_iterator_for_loop_filename(path, "fasta-m10", 1)
self.check_iterator_next(path, "fasta-m10", 1)
self.check_iterator_next_and_list(path, "fasta-m10", 1)
self.check_iterator_next_for_loop(path, "fasta-m10", 1)
alignment = self.check_read(path, "fasta-m10", 2, 102)
self.check_alignment_rows(
alignment,
[
(
"ref|NC_002127.1|:c1351-971",
"AAAAAAGATAAAAAATATCAAATAGAAGCAATAA...TCA",
),
(
"ref|NC_002695.1|:1970775-1971404",
"AGAGAAAATAAAACAAGTAATAAAATATTAATGG...ACA",
),
],
)
self.check_summary(alignment, "DNA")
def test_reading_alignments_fasta_m10_5(self):
path = "Fasta/output005.m10"
self.check_iterator_for_loop_handle(path, "fasta-m10", 1, 2)
self.check_iterator_for_loop_filename(path, "fasta-m10", 1)
self.check_iterator_next(path, "fasta-m10", 1)
self.check_iterator_next_and_list(path, "fasta-m10", 1)
self.check_iterator_next_for_loop(path, "fasta-m10", 1)
alignment = self.check_read(path, "fasta-m10", 2, 110)
self.check_alignment_rows(
alignment,
[
(
"gi|10955264|ref|NP_052605.1|",
"IKNKDKTLFIVYAT-DIYSPSEFFSKIESDLKKK...LSK",
),
(
"gi|10955282|ref|NP_052623.1|",
"IKDELPVAFCSWASLDLECEVKYINDVTSLYAKD...MSE",
),
],
)
self.check_summary(alignment, "protein")
def test_reading_alignments_fasta_m10_6(self):
path = "Fasta/output006.m10"
self.check_iterator_for_loop_handle(path, "fasta-m10", 1, 2)
self.check_iterator_for_loop_filename(path, "fasta-m10", 1)
self.check_iterator_next(path, "fasta-m10", 1)
self.check_iterator_next_and_list(path, "fasta-m10", 1)
self.check_iterator_next_for_loop(path, "fasta-m10", 1)
alignment = self.check_read(path, "fasta-m10", 2, 131)
self.check_alignment_rows(
alignment,
[
(
"gi|116660610|gb|EG558221.1|EG558221",
"GCAACGCTTCAAGAACTGGAATTAGGAACCGTGA...CAT",
),
("query", "GCAACGCTTCAAGAACTGGAATTAGGAACCGTGA...CAT"),
],
)
self.check_summary(alignment, "DNA")
def test_reading_alignments_fasta_m10_7(self):
path = "Fasta/output007.m10"
alignments = self.check_iterator_for_loop_handle(path, "fasta-m10", 9, 2)
self.check_iterator_for_loop_filename(path, "fasta-m10", 9)
self.check_iterator_next(path, "fasta-m10", 9)
self.check_iterator_next_and_list(path, "fasta-m10", 9)
self.check_iterator_next_for_loop(path, "fasta-m10", 9)
self.check_read_fails(path, "fasta-m10")
self.assertEqual(alignments[0].get_alignment_length(), 108)
self.check_alignment_rows(
alignments[0],
[
(
"gi|10955263|ref|NP_052604.1|",
"SGSNT-RRRAISRPVRLTAEED---QEIRKRAAE...LSR",
),
(
"gi|152973457|ref|YP_001338508.1|",
"AGSGAPRRRGSGLASRISEQSEALLQEAAKHAAE...LSR",
),
],
)
self.assertEqual(alignments[1].get_alignment_length(), 64)
self.check_alignment_rows(
alignments[1],
[
(
"gi|10955263|ref|NP_052604.1|",
"AAECGKTVSGFLRAAALGKKVNSLTDDRVLKEV-...AIT",
),
(
"gi|152973588|ref|YP_001338639.1|",
"ASRQGCTVGG--KMDSVQDKASDKDKERVMKNIN...TLT",
),
],
)
self.assertEqual(alignments[2].get_alignment_length(), 45)
self.check_alignment_rows(
alignments[2],
[
(
"gi|10955263|ref|NP_052604.1|",
"EIRKRAAECGKTVSGFLRAAA-----LGKKVNSL...VMR",
),
(
"gi|152973480|ref|YP_001338531.1|",
"ELVKLIADMGISVRALLRKNVEPYEELGLEEDKF...MLQ",
),
],
)
self.assertEqual(alignments[8].get_alignment_length(), 64)
self.check_alignment_rows(
alignments[8],
[
(
"gi|10955265|ref|NP_052606.1|",
"ISGTYKGIDFLIKLMPSGGNTTIGRASGQNNTYF...FSD",
),
(
"gi|152973505|ref|YP_001338556.1|",
"IDGVITAFD-LRTGMNISKDKVVAQIQGMDPVW-...YPD",
),
],
)
self.check_summary(alignments[0], "protein")
self.check_reverse_write_read(alignments)
def test_reading_alignments_fasta_m10_8(self):
path = "Fasta/output008.m10"
alignments = self.check_iterator_for_loop_handle(path, "fasta-m10", 12, 2)
self.check_iterator_for_loop_filename(path, "fasta-m10", 12)
self.check_iterator_next(path, "fasta-m10", 12)
self.check_iterator_next_and_list(path, "fasta-m10", 12)
self.check_iterator_next_for_loop(path, "fasta-m10", 12)
self.check_read_fails(path, "fasta-m10")
self.assertEqual(alignments[0].get_alignment_length(), 65)
self.check_alignment_rows(
alignments[0],
[
(
"gi|283855822|gb|GQ290312.1|",
"IPHQLPHALRHRPAQEAAHASQLHPAQPGCGQPL...GLL",
),
("sp|Q9NSY1|BMP2K_HUMAN", "LQHRHPHQQQQQQQQQQQQQQQQQQQQQQQQQQQ...QML"),
],
)
self.assertEqual(alignments[1].get_alignment_length(), 201)
self.check_alignment_rows(
alignments[1],
[
(
"gi|57163782|ref|NM_001009242.1|",
"GPELLRALLQQNGCGTQPLRVPTVLPG*AMAVLH...QKS",
),
("sp|Q9NSY1|BMP2K_HUMAN", "GPEIL---LGQ-GPPQQPPQQHRVLQQLQQGDWR...NRS"),
],
)
self.assertEqual(alignments[2].get_alignment_length(), 348)
self.check_alignment_rows(
alignments[2],
[
(
"gi|57163782|ref|NM_001009242.1|",
"MNGTEGPNFYVPFSNKTGVVRSPFEYPQYYLAEP...APA",
),
("sp|P08100|OPSD_HUMAN", "MNGTEGPNFYVPFSNATGVVRSPFEYPQYYLAEP...APA"),
],
)
self.assertEqual(alignments[11].get_alignment_length(), 31)
self.check_alignment_rows(
alignments[11],
[
("gi|283855822|gb|GQ290312.1|", "SQQIRNATTMMMTMRVTSFSAFWVVADSCCW"),
("sp|P08100|OPSD_HUMAN", "AQQQESATTQKAEKEVTRMVIIMVIAFLICW"),
],
)
self.check_summary(alignments[0], "protein")
self.check_reverse_write_read(alignments)
def test_reading_alignments_ig(self):
path = "IntelliGenetics/VIF_mase-pro.txt"
self.check_iterator_for_loop_handle(path, "ig", 1, 16)
self.check_iterator_for_loop_filename(path, "ig", 1)
self.check_iterator_next(path, "ig", 1)
self.check_iterator_next_and_list(path, "ig", 1)
self.check_iterator_next_for_loop(path, "ig", 1)
self.check_write_three_times_and_read(path, "ig", 16)
alignment = self.check_read(path, "ig", 16, 298)
self.check_alignment_columns(
alignment,
[
"MMMMMMMMMMMMMMMM",
"EEEEEEETEEEENEEE",
"NNNNNNNAEEEEQRKK",
"--------DEEEEE--",
"--------KKKKKK--",
"HHHHHHH-AAAAL-R-",
],
)
self.check_summary(alignment, "protein")
def test_reading_alignments_pir(self):
path = "NBRF/clustalw.pir"
self.check_iterator_for_loop_handle(path, "pir", 1, 2)
self.check_iterator_for_loop_filename(path, "pir", 1)
self.check_iterator_next(path, "pir", 1)
self.check_iterator_next_and_list(path, "pir", 1)
self.check_iterator_next_for_loop(path, "pir", 1)
self.check_write_three_times_and_read(path, "pir", 2)
alignment = self.check_read(path, "pir", 2, 2527)
self.check_alignment_rows(
alignment,
[
(
"804Angiostrongylus_cantonensis",
"----------------------------------...---",
),
(
"815Parelaphostrongylus_odocoil",
"----------------------------------...---",
),
],
)
self.check_summary_pir(alignment)
def test_reading_alignments_maf1(self):
path = "MAF/humor.maf"
alignments = self.check_iterator_for_loop_handle(path, "maf", 2, 3)
self.check_iterator_for_loop_filename(path, "maf", 2)
self.check_iterator_next(path, "maf", 2)
self.check_iterator_next_and_list(path, "maf", 2)
self.check_iterator_next_for_loop(path, "maf", 2)
self.check_read_fails(path, "maf")
self.assertEqual(alignments[0].get_alignment_length(), 5486)
self.check_alignment_rows(
alignments[0],
[
("NM_006987", "gcacagcctttactccctgactgcgtttatattc...CCG"),
("mm3", "gcacagcctttactccctgactgcgtttatattc...TTG"),
("rn3", "gcacagcctttactccctgactgcgtttatattc...CCG"),
],
)
self.assertEqual(alignments[1].get_alignment_length(), 5753)
self.check_alignment_rows(
alignments[1],
[
("NM_018289", "tttgtccatgttggtcaggctggtctcgaactcc...GGT"),
("mm3", "tttgtccatgttggtcaggctggtctcgaactcc...GGT"),
("rn3", "tttgtccatgttggtcaggctggtctcgaactcc...GGT"),
],
)
self.check_summary(alignments[1], "DNA")
self.check_reverse_write_read(alignments)
def test_reading_alignments_maf2(self):
path = "MAF/bug2453.maf"
alignments = self.check_iterator_for_loop_handle(path, "maf", 3)
self.check_iterator_for_loop_filename(path, "maf", 3)
self.check_iterator_next(path, "maf", 3)
self.check_iterator_next_and_list(path, "maf", 3)
self.check_iterator_next_for_loop(path, "maf", 3)
self.check_read_fails(path, "maf")
self.assertEqual(len(alignments[0]), 5)
self.assertEqual(alignments[0].get_alignment_length(), 42)
self.check_alignment_columns(
alignments[0], ["AAA--", "AAAAA", "AAAAA", "---T-", "GGGGG", "GGGGG"]
)
self.assertEqual(len(alignments[1]), 5)
self.assertEqual(alignments[1].get_alignment_length(), 6)
self.check_alignment_columns(
alignments[1], ["TTTTt", "AAAAa", "AAAAa", "AAAAg", "GGGGg", "AAAAa"]
)
self.assertEqual(len(alignments[2]), 4)
self.assertEqual(alignments[2].get_alignment_length(), 13)
self.check_alignment_rows(
alignments[2],
[
("baboon", "gcagctgaaaaca"),
("hg16.chr7", "gcagctgaaaaca"),
("mm4.chr6", "ACAGCTGAAAATA"),
("panTro1.chr6", "gcagctgaaaaca"),
],
)
self.check_summary(alignments[1], "DNA")
self.check_reverse_write_read(alignments)
def test_reading_alignments_maf3(self):
path = "MAF/ucsc_test.maf"
alignments = self.check_iterator_for_loop_handle(path, "maf", 3)
self.check_iterator_for_loop_filename(path, "maf", 3)
self.check_iterator_next(path, "maf", 3)
self.check_iterator_next_and_list(path, "maf", 3)
self.check_iterator_next_for_loop(path, "maf", 3)
self.check_read_fails(path, "maf")
self.assertEqual(len(alignments[0]), 5)
self.assertEqual(alignments[0].get_alignment_length(), 42)
self.check_alignment_columns(
alignments[0], ["AAA--", "AAAAA", "AAAAA", "---T-", "GGGGG", "GGGGG"]
)
self.assertEqual(len(alignments[1]), 5)
self.assertEqual(alignments[1].get_alignment_length(), 6)
self.check_alignment_columns(
alignments[1], ["TTTTt", "AAAAa", "AAAAa", "AAAAg", "GGGGg", "AAAAa"]
)
self.assertEqual(len(alignments[2]), 4)
self.assertEqual(alignments[2].get_alignment_length(), 13)
self.check_alignment_columns(
alignments[2], ["gggA", "cccC", "aaaA", "gggG", "cccC", "aaaA"]
)
self.check_summary(alignments[2], "DNA")
self.check_reverse_write_read(alignments)
def test_reading_alignments_maf4(self):
path = "MAF/ucsc_mm9_chr10.maf"
alignments = self.check_iterator_for_loop_handle(path, "maf", 48)
self.check_iterator_for_loop_filename(path, "maf", 48)
self.check_iterator_next(path, "maf", 48)
self.check_iterator_next_and_list(path, "maf", 48)
self.check_iterator_next_for_loop(path, "maf", 48)
self.check_read_fails(path, "maf")
self.assertEqual(len(alignments[0]), 2)
self.assertEqual(alignments[0].get_alignment_length(), 164)
self.check_alignment_rows(
alignments[0],
[
("mm9.chr10", "TCATAGGTATTTATTTTTAAATATGGTTTGCTTT...GTT"),
("oryCun1.scaffold_133159", "TCACAGATATTTACTATTAAATATGGTTTGTTAT...GTT"),
],
)
self.assertEqual(len(alignments[1]), 4)
self.assertEqual(alignments[1].get_alignment_length(), 466)
self.check_alignment_rows(
alignments[1],
[
("hg18.chr6", "AGTCTTCATAAGTGGAAATATAAGTTTTAATTAT...TTC"),
("mm9.chr10", "AGTCTTTCCAATGGGACCTGTGAGTCCTAACTAT...CTG"),
("panTro2.chr6", "AGTCTTCATAAGTGGAAATATAAGTTTTAATTAT...TTC"),
("ponAbe2.chr6", "AGTCTTCATAAGTGGAAATATAAGTTTTAATTAT...TTC"),
],
)
self.assertEqual(len(alignments[2]), 5)
self.assertEqual(alignments[2].get_alignment_length(), 127)
self.check_alignment_columns(
alignments[2], ["TTTTT", "GGGGG", "GGGGG", "GGGGG", "TTTTC", "CCCCC"]
)
self.assertEqual(len(alignments[47]), 6)
self.assertEqual(alignments[47].get_alignment_length(), 46)
self.check_alignment_columns(
alignments[47], ["TTTTTT", "GGGGGG", "TTTTTT", "TTTTTT", "TGGGAT", "tTTTT-"]
)
self.check_summary(alignments[47], "DNA")
self.check_reverse_write_read(alignments)
def test_reading_alignments_mauve(self):
path = "Mauve/simple.xmfa"
alignments = self.check_iterator_for_loop_handle(path, "mauve", 5)
self.check_iterator_for_loop_filename(path, "mauve", 5)
self.check_iterator_next(path, "mauve", 5)
self.check_iterator_next_and_list(path, "mauve", 5)
self.check_iterator_next_for_loop(path, "mauve", 5)
self.check_read_fails(path, "mauve")
self.assertEqual(len(alignments[0]), 2)
self.assertEqual(alignments[0].get_alignment_length(), 5670)
self.check_alignment_rows(
alignments[0],
[
("1/0-5670", "ATATTAGGTTTTTACCTACCCAGGAAAAGCCAAC...AAT"),
("2/0-5670", "ATATTAGGTTTTTACCTACCCAGGAAAAGCCAAC...AAT"),
],
)
self.assertEqual(len(alignments[1]), 2)
self.assertEqual(alignments[1].get_alignment_length(), 4420)
self.check_alignment_rows(
alignments[1],
[
("1/5670-9940", "GAACATCAGCACCTGAGTTGCTAAAGTCATTTAG...CTC"),
("2/7140-11410", "GAACATCAGCACCTGAGTTGCTAAAGTCATTTAG...CTC"),
],
)
self.assertEqual(len(alignments[2]), 1)
self.assertEqual(alignments[2].get_alignment_length(), 4970)
self.check_alignment_rows(
alignments[2],
[("1/9940-14910", "TCTACCAACCACCACAGACATCAATCACTTCTGC...GAC")],
)
self.assertEqual(len(alignments[3]), 1)
self.assertEqual(alignments[3].get_alignment_length(), 1470)
self.assertEqual(len(alignments[4]), 1)
self.assertEqual(alignments[4].get_alignment_length(), 1470)
self.check_alignment_rows(
alignments[4],
[("2/11410-12880", "ATTCGCACATAAGAATGTACCTTGCTGTAATTTA...ATA")],
)
self.check_summary(alignments[4], "DNA")
self.check_reverse_write_read(alignments)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
|
path = "ExtendedPhylip/primates.phyx"
self.check_iterator_for_loop_handle(path, "phylip-relaxed", 1, 12)
self.check_iterator_for_loop_filename(path, "phylip-relaxed", 1)
self.check_iterator_next(path, "phylip-relaxed", 1)
self.check_iterator_next_and_list(path, "phylip-relaxed", 1)
self.check_iterator_next_for_loop(path, "phylip-relaxed", 1)
self.check_write_three_times_and_read(path, "phylip-relaxed", 12)
alignment = self.check_read(path, "phylip-relaxed", 12, 898)
self.check_alignment_columns(
alignment,
[
"AAAAAAAAAAAA",
"AAAAAAAAAAAA",
"GGGGGGGGGGGG",
"TCCCCCCCCCCC",
"TTTTTTTTTTTT",
"TTTTTTTTTTTT",
],
)
self.check_summary(alignment, "DNA")
|
param_helper_test.go
|
// _ _
// __ _____ __ ___ ___ __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
// \ V V / __/ (_| |\ V /| | (_| | || __/
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2021 SeMI Technologies B.V. All rights reserved.
//
// CONTACT: [email protected]
//
package spellcheck
import (
"reflect"
"testing"
)
type fakeNearText struct {
Values []string
}
type fakeAsk struct {
Question string
}
func T
|
t *testing.T) {
type args struct {
argumentModuleParams map[string]interface{}
}
tests := []struct {
name string
args args
want string
want1 []string
wantErr bool
}{
{
name: "should get values from nearText",
args: args{
argumentModuleParams: map[string]interface{}{
"nearText": fakeNearText{Values: []string{"a", "b"}},
},
},
want: "nearText",
want1: []string{"a", "b"},
wantErr: false,
},
{
name: "should get values from ask",
args: args{
argumentModuleParams: map[string]interface{}{
"ask": fakeAsk{Question: "a"},
},
},
want: "ask",
want1: []string{"a"},
wantErr: false,
},
{
name: "should be empty",
args: args{
argumentModuleParams: map[string]interface{}{},
},
want: "",
want1: []string{},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := ¶mHelper{}
got, got1, err := p.getTexts(tt.args.argumentModuleParams)
if (err != nil) != tt.wantErr {
t.Errorf("paramHelper.getTexts() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("paramHelper.getTexts() got = %v, want %v", got, tt.want)
}
if !reflect.DeepEqual(got1, tt.want1) {
t.Errorf("paramHelper.getTexts() got1 = %v, want %v", got1, tt.want1)
}
})
}
}
|
est_paramHelper_getTexts(
|
fusion_skills.pipe.ts
|
import { ArgumentMetadata, BadRequestException, PipeTransform } from "@nestjs/common";
export class
|
implements PipeTransform {
transform(skills: any, metadata: ArgumentMetadata): any{
if(!skills || !Array.isArray(skills))
throw new BadRequestException();
skills = skills.map(skill => skill.toUpperCase())
skills = skills.join("-");
return skills;
}
}
|
FusionSkillsPipe
|
survey_extra.py
|
# -*- coding: utf-8 -*-
from django import template
register = template.Library()
@register.filter(name='times')
def
|
(value, arg):
return value * int(arg)
|
times
|
bmi_struct.rs
|
// RustでBMI肥満度判定(判定表を利用)
// BMIの判定用構造体 --- (*1)
struct BmiRange {
min: f64, // min以上
max: f64, // max未満
label: &'static str, // 判定ラベル
}
fn main() {
// 身長と体重の入力 --- (*2)
let height_cm = input("身長(cm)は? ");
let weight = input("体重(kg)は? ");
// BMIの計算
let height = height_cm / 100.0;
let bmi = weight / height.powf(2.0);
// 肥満度判定表をベクター型で用意 --- (*3)
let bmi_list = vec![
BmiRange { min: 0.0, max: 18.5, label: "低体重" },
BmiRange { min: 18.5, max: 25.0, label: "普通体重" },
BmiRange { min: 25.0, max: 30.0, label: "肥満1度" },
BmiRange { min: 30.0, max: 35.0, label: "肥満2度" },
BmiRange { min: 35.0, max: 40.0, label: "肥満3度" },
BmiRange { min: 40.0, max: 99.0, label: "肥満4度" },
];
// 肥満度判定 --- (*4)
let mut result = "不明";
|
for range in bmi_list {
if range.min <= bmi && bmi < range.max {
result = range.label;
break;
}
}
// 結果表示
println!("BMI={:.1},判定={}", bmi, result);
}
// 一行読み取ってf64で返す --- (*5)
fn input(prompt: &str) -> f64 {
println!("{}", prompt);
let mut s = String::new();
std::io::stdin().read_line(&mut s).expect("入力エラー");
s.trim().parse().expect("数値変換エラー")
}
| |
crdsecurityrule.go
|
package rest
import (
"bytes"
"encoding/json"
"errors"
"fmt"
cmetav1 "github.com/neuvector/k8s/apis/meta/v1"
"sort"
// metav1 "github.com/neuvector/k8s/apis/meta/v1"
"github.com/ghodss/yaml"
"github.com/julienschmidt/httprouter"
"github.com/neuvector/neuvector/controller/access"
"github.com/neuvector/neuvector/controller/api"
"github.com/neuvector/neuvector/controller/cache"
"github.com/neuvector/neuvector/controller/common"
"github.com/neuvector/neuvector/controller/kv"
"github.com/neuvector/neuvector/controller/nvk8sapi/nvvalidatewebhookcfg"
"github.com/neuvector/neuvector/controller/nvk8sapi/nvvalidatewebhookcfg/admission"
"github.com/neuvector/neuvector/controller/resource"
"github.com/neuvector/neuvector/controller/ruleid"
"github.com/neuvector/neuvector/share"
"github.com/neuvector/neuvector/share/cluster"
"github.com/neuvector/neuvector/share/global"
"github.com/neuvector/neuvector/share/utils"
log "github.com/sirupsen/logrus"
"github.com/spaolacci/murmur3"
"io/ioutil"
admissionv1beta1 "k8s.io/api/admission/v1beta1"
"net/http"
"path/filepath"
"reflect"
"strings"
"time"
)
type nvCrdHandler struct {
kvLocked bool
lockKey string
lock cluster.LockInterface
acc *access.AccessControl
}
type nvCrdParser struct {
}
func (h *nvCrdHandler) Init(lockKey string) {
h.lockKey = lockKey
h.acc = access.NewReaderAccessControl()
}
func (h *nvCrdHandler) AcquireLock(wait time.Duration) bool {
if h.kvLocked {
return true
}
if h.lockKey != "" {
var err error
h.lock, err = clusHelper.AcquireLock(h.lockKey, wait)
if err != nil {
e := "Failed to acquire cluster lock"
log.WithFields(log.Fields{"error": err}).Error(e)
return false
}
h.kvLocked = true
}
return true
}
func (h *nvCrdHandler) ReleaseLock() {
if !h.kvLocked {
return
}
if h.lock != nil {
clusHelper.ReleaseLock(h.lock)
}
h.kvLocked = false
}
func group2RESTConfig(group *api.RESTGroup) *api.RESTCrdGroupConfig
|
func crdConfig2groupConfig(group *api.RESTCrdGroupConfig) *api.RESTGroupConfig {
r := api.RESTGroupConfig{
Name: group.Name,
Criteria: group.Criteria,
CfgType: api.CfgTypeGround,
Comment: &group.Comment,
}
if r.Criteria != nil {
entries := *r.Criteria
sort.Slice(entries, func(i, j int) bool {
if entries[i].Key != entries[j].Key {
return entries[i].Key < entries[j].Key
} else {
return entries[i].Value < entries[j].Value
}
})
r.Criteria = &entries
}
return &r
}
func CrdDelAll(k8sKind, kvCrdKind, lockKey string, recordList map[string]*share.CLUSCrdSecurityRule) []string {
if clusHelper == nil {
clusHelper = kv.GetClusterHelper()
}
var crdHandler nvCrdHandler
crdHandler.Init(lockKey)
if !crdHandler.AcquireLock(clusterLockWait) {
return nil
}
defer crdHandler.ReleaseLock()
return crdHandler.crdDelAll(k8sKind, kvCrdKind, recordList)
}
func (h *nvCrdHandler) crdDelAll(k8sKind, kvCrdKind string, recordList map[string]*share.CLUSCrdSecurityRule) []string {
var removed []string
if recordList == nil {
recordList = clusHelper.GetCrdSecurityRuleRecordList(kvCrdKind)
}
for recordName, gw := range recordList {
tokens := strings.Split(recordName, "-")
if tokens[0] != k8sKind {
continue
}
e := fmt.Sprintf(" %s %s related config removed by cross check due to this crd removed from Kubernate\n", k8sKind, tokens[2])
removed = append(removed, e)
switch k8sKind {
case resource.NvAdmCtrlSecurityRuleKind:
h.crdDeleteAdmCtrlRules()
setAdmCtrlStateInCluster(nil, nil, nil, nil, nil, share.UserCreated)
h.crdDeleteRecord(k8sKind, recordName)
case resource.NvSecurityRuleKind, resource.NvClusterSecurityRuleKind:
h.crdDeleteRules(gw.Rules)
h.crdHandleGroupRecordDel(gw, gw.Groups, false)
h.crdDeleteRecordEx(resource.NvSecurityRuleKind, recordName, gw.ProfileName)
case resource.NvDlpSecurityRuleKind:
deleteDlpSensor(nil, gw.DlpSensor, share.ReviewTypeCRD, true, h.acc, nil)
h.crdDeleteRecord(k8sKind, recordName)
case resource.NvWafSecurityRuleKind:
deleteWafSensor(nil, gw.WafSensor, share.ReviewTypeCRD, true, h.acc, nil)
h.crdDeleteRecord(k8sKind, recordName)
}
}
return removed
}
// Create all the group and return group added
func (h *nvCrdHandler) crdHandleGroupsAdd(groups []api.RESTCrdGroupConfig, targetGroup string) ([]string, bool) {
// record the groups in a new record, then later compare with cached record to add/del
var groupAdded []string
var targetGroupDlpWAF bool
for _, group := range groups {
if group.Name == api.LearnedExternal || group.Name == api.AllHostGroup {
updateKV := false
cg, _, err := clusHelper.GetGroup(group.Name, h.acc)
if cg == nil {
log.WithFields(log.Fields{"error": err}).Error()
cg = &share.CLUSGroup{
Name: group.Name,
CfgType: share.GroundCfg,
Comment: group.Comment,
Reserved: true,
Criteria: []share.CLUSCriteriaEntry{},
}
if group.Name == api.AllHostGroup {
cg.Kind = share.GroupKindNode
cg.PolicyMode = share.PolicyModeLearn // default
cg.ProfileMode = share.PolicyModeLearn // default
cg.BaselineProfile = share.ProfileBasic // group "nodes" is always at "basic" baseline profile(not configurable by design)
} else {
cg.Kind = share.GroupKindExternal
}
updateKV = true
} else if cg.CfgType != share.GroundCfg {
cg.CfgType = share.GroundCfg // update its type
updateKV = true
}
if updateKV {
clusHelper.PutGroup(cg, false)
}
groupAdded = append(groupAdded, group.Name)
continue
}
isNvIpGroup := strings.HasPrefix(group.Name, api.LearnedSvcGroupPrefix)
cg, _, _ := clusHelper.GetGroup(group.Name, h.acc)
if cg != nil {
// group update case
updateKV := false
if isNvIpGroup {
// existing nv.ip.xxx group(learned or crd) found. promote it to crd if necessary
if cg.CfgType != share.GroundCfg {
if cg.CfgType == share.FederalCfg {
log.WithFields(log.Fields{"group": group.Name, "cfgType": cg.CfgType}).Error()
} else {
cg.CfgType = share.GroundCfg
updateKV = true
}
}
if cg.Comment != group.Comment {
cg.Comment = group.Comment
updateKV = true
}
} else {
updateKV = true
cg.Criteria = nil
cg.Kind = share.GroupKindContainer
cg.CfgType = share.GroundCfg
cg.Comment = group.Comment
cg.Criteria = make([]share.CLUSCriteriaEntry, 0, len(*group.Criteria))
for _, ct := range *group.Criteria {
cg.Criteria = append(cg.Criteria, share.CLUSCriteriaEntry{
Key: ct.Key,
Value: ct.Value,
Op: ct.Op,
})
if ct.Key == share.CriteriaKeyAddress {
cg.Kind = share.GroupKindAddress
}
}
}
if updateKV {
if err := clusHelper.PutGroup(cg, false); err != nil {
log.WithFields(log.Fields{"error": err}).Error()
// cache.AuthLog(cache.LOGEV_GROUP_CREATE_FAILED)
//return errors.New("Put groupd error")
continue
}
}
groupAdded = append(groupAdded, group.Name)
} else {
// new group add
cg = &share.CLUSGroup{
Name: group.Name,
CfgType: share.GroundCfg,
CreaterDomains: h.acc.GetAdminDomains(share.PERMS_RUNTIME_POLICIES),
Kind: share.GroupKindContainer,
Comment: group.Comment,
}
if isNvIpGroup {
cg.Kind = share.GroupKindIPService
}
if utils.DoesGroupHavePolicyMode(group.Name) {
cg.PolicyMode = cacher.GetNewServicePolicyMode()
fmt.Println("New learned svc ", group.Name, "set service as ", cg.PolicyMode)
}
cg.Criteria = make([]share.CLUSCriteriaEntry, 0, len(*group.Criteria))
for _, ct := range *group.Criteria {
if isNvIpGroup && ct.Key != share.CriteriaKeyDomain {
// when creating a new crd nv.ip.xxx group, only keep "domain" key in its criteria
continue
}
cg.Criteria = append(cg.Criteria, share.CLUSCriteriaEntry{
Key: ct.Key,
Value: ct.Value,
Op: ct.Op,
})
if ct.Key == share.CriteriaKeyAddress {
cg.Kind = share.GroupKindAddress
}
if ct.Key == share.CriteriaKeyDomain && strings.HasPrefix(group.Name, api.LearnedGroupPrefix) {
cg.Domain = ct.Value
}
}
if err := clusHelper.PutGroup(cg, true); err != nil {
log.WithFields(log.Fields{"error": err}).Error()
//return errors.New("Put groupd error")
continue
}
groupAdded = append(groupAdded, group.Name)
}
if cg.Name == targetGroup && cg.Kind == share.GroupKindContainer {
targetGroupDlpWAF = true
}
}
return groupAdded, targetGroupDlpWAF
}
func (h *nvCrdHandler) crdDeleteRules(delRules map[string]uint32) {
if len(delRules) == 0 {
return
}
txn := cluster.Transact()
defer txn.Close()
delRuleIDs := utils.NewSet()
crhs := clusHelper.GetPolicyRuleList()
crhsNew := make([]*share.CLUSRuleHead, 0, len(crhs))
for _, id := range delRules {
delRuleIDs.Add(id)
clusHelper.DeletePolicyRuleTxn(txn, id)
}
for _, crh := range crhs {
if crh.CfgType != share.GroundCfg || !delRuleIDs.Contains(crh.ID) {
crhsNew = append(crhsNew, crh)
}
}
clusHelper.PutPolicyRuleListTxn(txn, crhs)
txn.Apply()
}
func (h *nvCrdHandler) crdDeleteAdmCtrlRules() {
txn := cluster.Transact()
defer txn.Close()
for _, ruleType := range []string{api.ValidatingExceptRuleType, api.ValidatingDenyRuleType} {
arhs, _ := clusHelper.GetAdmissionRuleList(admission.NvAdmValidateType, ruleType)
ruleHead := make([]*share.CLUSRuleHead, 0, len(arhs))
for _, arh := range arhs {
if arh.CfgType == share.GroundCfg {
if arh.ID < api.StartingLocalAdmCtrlRuleID {
// default rules cannot be deleted
if r := clusHelper.GetAdmissionRule(admission.NvAdmValidateType, ruleType, arh.ID); r != nil {
if r.CfgType != share.UserCreated {
r.CfgType = share.UserCreated
clusHelper.PutAdmissionRuleTxn(txn, admission.NvAdmValidateType, ruleType, r)
}
}
ruleHead = append(ruleHead, arh)
} else {
clusHelper.DeleteAdmissionRuleTxn(txn, admission.NvAdmValidateType, ruleType, arh.ID)
}
} else {
ruleHead = append(ruleHead, arh)
}
}
clusHelper.PutAdmissionRuleListTxn(txn, admission.NvAdmValidateType, ruleType, ruleHead)
}
txn.Apply()
}
// Compare the added group with the group in the record, find the group removed from crd to delete
func findAbsentGroups(cacheRecord *share.CLUSCrdSecurityRule, groupNew []string) []string {
var groupToDel []string
for _, cur := range cacheRecord.Groups {
found := false
for _, newMember := range groupNew {
if cur == newMember {
found = true
break
}
}
if !found {
groupToDel = append(groupToDel, cur)
}
}
return groupToDel
}
func (h *nvCrdHandler) crdDeleteGroup(delGroup []string) {
for _, name := range delGroup {
cg, _, _ := clusHelper.GetGroup(name, h.acc)
if cg == nil {
log.WithFields(log.Fields{"name": name}).Error("Group doesn't exist")
continue
}
kv.DeletePolicyByGroup(name)
kv.DeleteResponseRuleByGroup(name)
clusHelper.DeleteDlpGroup(name)
clusHelper.DeleteWafGroup(name)
if err := clusHelper.DeleteGroup(name); err != nil {
log.WithFields(log.Fields{"error": err}).Error()
continue
}
}
}
func (h *nvCrdHandler) crdUpdateGroup(updateGroup []string) {
txn := cluster.Transact()
defer txn.Close()
for _, name := range updateGroup {
cg, _, _ := clusHelper.GetGroup(name, h.acc)
if cg == nil {
log.WithFields(log.Fields{"name": name}).Error("Group doesn't exist")
continue
}
if utils.IsGroupLearned(name) || name == api.LearnedExternal {
cg.CfgType = share.Learned
} else {
cg.CfgType = share.UserCreated
}
clusHelper.PutGroupTxn(txn, cg)
dlpGroup := clusHelper.GetDlpGroup(name)
if dlpGroup == nil {
log.WithFields(log.Fields{"name": name}).Error("DLP group doesn't exist")
continue
}
dlpGroup.CfgType = share.UserCreated
clusHelper.PutDlpGroupTxn(txn, dlpGroup)
wafGroup := clusHelper.GetWafGroup(name)
if wafGroup == nil {
log.WithFields(log.Fields{"name": name}).Error("WAF group doesn't exist")
continue
}
wafGroup.CfgType = share.UserCreated
clusHelper.PutWafGroupTxn(txn, wafGroup)
}
txn.Apply()
}
func (h *nvCrdHandler) crdUpdateDlpSensors() {
txn := cluster.Transact()
defer txn.Close()
defSensor := clusHelper.GetDlpSensor(share.CLUSDlpDefaultSensor)
if defSensor != nil {
var modified bool
for _, rule := range defSensor.RuleList {
if rule.CfgType == share.GroundCfg {
rule.CfgType = share.UserCreated
modified = true
}
}
if modified {
clusHelper.PutDlpSensorTxn(txn, defSensor)
}
}
for _, sensor := range clusHelper.GetAllDlpSensors() {
if sensor.CfgType == share.GroundCfg {
sensor.CfgType = share.UserCreated
clusHelper.PutDlpSensorTxn(txn, sensor)
}
}
txn.Apply()
}
func (h *nvCrdHandler) crdUpdateWafSensors() {
txn := cluster.Transact()
defer txn.Close()
defSensor := clusHelper.GetWafSensor(share.CLUSWafDefaultSensor)
if defSensor != nil {
var modified bool
for _, rule := range defSensor.RuleList {
if rule.CfgType == share.GroundCfg {
rule.CfgType = share.UserCreated
modified = true
}
}
if modified {
clusHelper.PutWafSensorTxn(txn, defSensor)
}
}
for _, sensor := range clusHelper.GetAllWafSensors() {
if sensor.CfgType == share.GroundCfg {
sensor.CfgType = share.UserCreated
clusHelper.PutWafSensorTxn(txn, sensor)
}
}
txn.Apply()
}
// Group removed from the CRD, try delete from system.
// 1. If group not created by crd then ignore process
// 2. If Group have memeber+LearnedName, then don't remove but rather change cfgtype(add to update)
// 3. If Group also exist on other crd, then ignore process
// 4. If Group have autolearned or user created policy, then change cfgtype(add to update)
func (h *nvCrdHandler) crdHandleGroupRecordDel(cacheRecord *share.CLUSCrdSecurityRule, groupsDel []string, kvOnly bool) {
if len(groupsDel) == 0 {
return
}
var groupToUpdate []string
var groupToDel []string
gwrecordlist := clusHelper.GetCrdSecurityRuleRecordList(resource.NvSecurityRuleKind)
LOOPALLDEL:
for _, cur := range groupsDel {
for recordName, gw := range gwrecordlist {
if recordName == cacheRecord.Name {
continue
}
// if the group find on another CRD, then do nothing
for _, gwgroup := range gw.Groups {
if cur == gwgroup {
continue LOOPALLDEL
}
}
}
// at this point no crd using the group
// In cfg import case can't relay on cacher as at this point, can't be sure cacher is written
// in regular case check if group was used by user created policy.
// however, in restart case, this function could be called before any group/policy kv callback is called.
// in this case the group & all policies related to it will be deleted.
if kvOnly {
groupToUpdate = append(groupToUpdate, cur)
} else {
group, _ := cacher.GetGroup(cur, "", false, h.acc)
if group != nil {
// if group exist before crd apply when delete we should not touch it
if group.CfgType != api.CfgTypeGround {
continue LOOPALLDEL
}
if strings.HasPrefix(group.Name, api.LearnedSvcGroupPrefix) {
// nv.ip crd groups are created without address criterion. address criterion is learned later.
// we delete nv.ip crd group only when its address is not learned yet
for _, ct := range group.Criteria {
if ct.Key == share.CriteriaKeyAddress {
groupToUpdate = append(groupToUpdate, cur)
continue LOOPALLDEL
}
}
}
// check other process and file proiles
if !h.crdReadyToDeleteProfiles(cacheRecord.Name, group) {
groupToUpdate = append(groupToUpdate, cur)
continue LOOPALLDEL
}
// crd created as learned group, now it has member in it. we should convert.
if strings.HasPrefix(group.Name, api.LearnedGroupPrefix) && len(group.Members) > 0 {
groupToUpdate = append(groupToUpdate, cur)
continue LOOPALLDEL
}
// crd created group but have user defined policy on it, we should convert
for _, idx := range group.PolicyRules {
if !isSecurityPolicyID(idx) {
// keep the group but change to different CFGTYPE
groupToUpdate = append(groupToUpdate, cur)
continue LOOPALLDEL
}
}
}
groupToDel = append(groupToDel, cur)
}
}
h.crdDeleteGroup(groupToDel)
h.crdUpdateGroup(groupToUpdate)
}
func (h *nvCrdHandler) crdHandleProcessProfile(group, mode string, profile *api.RESTProcessProfile, reviewType share.TReviewType) error {
var cfgType share.TCfgType = share.GroundCfg
if reviewType == share.ReviewTypeImportGroup {
cfgType = share.UserCreated
txn := cluster.Transact()
// force overwrite process profile kv key
cacher.CreateProcessProfileTxn(txn, group, mode, profile.Baseline, cfgType)
txn.Apply()
txn.Close()
}
pp := clusHelper.GetProcessProfile(group)
if pp == nil {
cacher.CreateProcessProfile(group, mode, profile.Baseline, cfgType)
pp = clusHelper.GetProcessProfile(group)
if pp == nil {
log.Error("failed to obtain profile") // failure at CreateProcessProfile()
return fmt.Errorf("failed to obtain profile")
}
}
if profile != nil {
// update mode
pp.Mode = mode
pp.Baseline = profile.Baseline
list := make([]*share.CLUSProcessProfileEntry, 0)
if reviewType == share.ReviewTypeCRD {
// remove all crd entries
for i, proc := range pp.Process {
if proc.CfgType != share.GroundCfg {
list = append(list, pp.Process[i])
}
}
}
pp.Process = list
// fill in the merge crd items
for _, proc := range profile.ProcessList {
p := &share.CLUSProcessProfileEntry{
Name: proc.Name,
Path: proc.Path,
Action: proc.Action,
CfgType: cfgType,
Uuid: ruleid.NewUuid(),
AllowFileUpdate: proc.AllowFileUpdate,
}
if ret, ok := common.MergeProcess(pp.Process, p, true); ok {
pp.Process = ret
}
}
if err := clusHelper.PutProcessProfile(group, pp); err != nil {
log.WithFields(log.Fields{"error": err}).Error()
return err
}
}
return nil
}
func (h *nvCrdHandler) crdHandleFileProfile(group, mode string, profile *api.RESTFileMonitorProfile, reviewType share.TReviewType) error {
var cfgType share.TCfgType = share.GroundCfg
bLearnedGrp := strings.HasPrefix(group, api.LearnedGroupPrefix) // exclude "nodes"
if reviewType == share.ReviewTypeImportGroup {
if bLearnedGrp {
cfgType = share.Learned
} else {
cfgType = share.UserCreated
}
txn := cluster.Transact()
// force overwrite file monitor/rule kv keys with default file rules
cacher.CreateGroupFileMonitorTxn(txn, group, mode, cfgType)
txn.Apply()
txn.Close()
}
mon, rev_m := clusHelper.GetFileMonitorProfile(group)
far, rev_a := clusHelper.GetFileAccessRule(group) // associated with "mon"
if mon == nil || far == nil {
cacher.CreateGroupFileMonitor(group, mode, cfgType)
mon, rev_m = clusHelper.GetFileMonitorProfile(group)
far, rev_a = clusHelper.GetFileAccessRule(group)
}
if far.Filters == nil {
far.Filters = make(map[string]*share.CLUSFileAccessFilterRule)
}
if far.FiltersCRD == nil {
far.FiltersCRD = make(map[string]*share.CLUSFileAccessFilterRule)
}
if profile != nil {
pmap := make(map[string]*api.RESTFileMonitorFilter, len(profile.Filters))
for _, ffp := range profile.Filters {
// local reference
pmap[ffp.Filter] = ffp
}
// deleting non-exist and updating CRD entry from the profile
for i, ffm := range mon.FiltersCRD {
ffp, ok := pmap[ffm.Filter] // same filter (primary key)
if ok {
mon.FiltersCRD[i].CustomerAdd = true
mon.FiltersCRD[i].Behavior = ffp.Behavior
mon.FiltersCRD[i].Recursive = ffp.Recursive
} else {
mon.FiltersCRD[i].Behavior = "delete" // invalid type
}
}
// rebuild current file profile
target := make([]share.CLUSFileMonitorFilter, 0)
for _, ffm := range mon.FiltersCRD {
if ffm.Behavior == "delete" {
key := utils.FilterIndexKey(ffm.Path, ffm.Regex)
delete(far.FiltersCRD, key)
// Service group: do not remove predefined filter but turn it back
predef, ok := cacher.IsPrdefineFileGroup(ffm.Filter, ffm.Recursive)
if bLearnedGrp && ok {
_, exist := far.Filters[key]
if !exist {
// log.WithFields(log.Fields{"Filter": ffm.Filter}).Debug("CRD: restore predefined")
a := &share.CLUSFileAccessFilterRule{ // restore
Apps: make([]string, 0), // empty
Behavior: share.FileAccessBehaviorMonitor,
CustomerAdd: false,
CreatedAt: time.Now().UTC(),
UpdatedAt: time.Now().UTC(),
}
far.Filters[key] = a
mon.Filters = append(mon.Filters, *predef) // restore
}
}
continue
}
target = append(target, ffm)
}
mon.FiltersCRD = target
var monFilters []share.CLUSFileMonitorFilter
var farFilters map[string]*share.CLUSFileAccessFilterRule
if reviewType == share.ReviewTypeCRD {
farFilters = far.FiltersCRD
} else if reviewType == share.ReviewTypeImportGroup {
monFilters = mon.Filters
farFilters = far.Filters
}
// adding new CRD entries
for _, ffp := range profile.Filters {
// access filter
base, regex, _ := parseFileFilter(ffp.Filter) // should be validated before
key := utils.FilterIndexKey(base, regex)
if a, exist := farFilters[key]; exist {
a.Behavior = ffp.Behavior
a.Apps = append([]string(nil), ffp.Apps...)
a.UpdatedAt = time.Now().UTC()
farFilters[key] = a
} else {
log.WithFields(log.Fields{"key": key, "behavior": ffp.Behavior}).Debug("CRD: new entry")
if reviewType == share.ReviewTypeCRD {
log.WithFields(log.Fields{"key": key, "behavior": ffp.Behavior}).Debug("CRD: new entry")
if _, exist := far.Filters[key]; exist {
if _, ok := cacher.IsPrdefineFileGroup(ffp.Filter, ffp.Recursive); ok {
for i, _ := range mon.Filters {
if mon.Filters[i].Filter == ffp.Filter && mon.Filters[i].CustomerAdd == false {
// remove the predefined from the main filters
delete(far.Filters, key)
mon.Filters = append(mon.Filters[:i], mon.Filters[i+1:]...)
break
}
}
}
}
}
a := &share.CLUSFileAccessFilterRule{
CustomerAdd: true,
Behavior: ffp.Behavior,
Apps: append([]string(nil), ffp.Apps...),
CreatedAt: time.Now().UTC(),
UpdatedAt: time.Now().UTC(),
}
farFilters[key] = a
f := &share.CLUSFileMonitorFilter{
Filter: ffp.Filter,
Path: base,
Regex: regex,
Recursive: ffp.Recursive,
Behavior: ffp.Behavior,
CustomerAdd: true,
}
monFilters = append(monFilters, *f)
}
}
if reviewType == share.ReviewTypeCRD {
mon.FiltersCRD = monFilters
far.FiltersCRD = farFilters
} else if reviewType == share.ReviewTypeImportGroup {
mon.Filters = monFilters
far.Filters = farFilters
}
}
if err := clusHelper.PutFileMonitorProfile(group, mon, rev_m); err != nil {
log.WithFields(log.Fields{"error": err, "group": group}).Error("CRD: monitor file")
return err
}
if err := clusHelper.PutFileAccessRule(group, far, rev_a); err != nil {
log.WithFields(log.Fields{"error": err, "group": group}).Error("CRD: access file")
return err
}
return nil
}
func (h *nvCrdHandler) crdHandlePolicyMode(policyModeCfg *api.RESTServiceConfig, profile_mode string) {
if policyModeCfg != nil {
grp, _, _ := clusHelper.GetGroup(policyModeCfg.Name, h.acc)
if grp == nil {
log.WithFields(log.Fields{"name": policyModeCfg.Name}).Error("Service doesn't exist or access denied")
return
}
var changed bool = false
if profile_mode != "" {
if grp.ProfileMode != profile_mode {
grp.ProfileMode = profile_mode
changed = true
}
}
if policyModeCfg.PolicyMode != nil {
if grp.PolicyMode != *policyModeCfg.PolicyMode {
grp.PolicyMode = *policyModeCfg.PolicyMode
changed = true
}
}
if changed {
log.WithFields(log.Fields{"group": grp}).Debug("CRD:")
err := configPolicyMode(grp)
if err != nil {
return
}
if err = clusHelper.PutGroup(grp, false); err != nil {
log.WithFields(log.Fields{"error": err}).Error()
return
}
}
}
}
func (h *nvCrdHandler) crdHandleRules(rules []api.RESTPolicyRuleConfig, cacheRecord *share.CLUSCrdSecurityRule) *map[string]uint32 {
if len(rules) == 0 && len(cacheRecord.Rules) == 0 {
// when there is no network rule in this crd record & we don't remember any network rule for this crd record as well,
// return empty map which means this crd rule doesn't create any crd network policy
ret := make(map[string]uint32)
return &ret
}
var cr *share.CLUSPolicyRule
crhs := clusHelper.GetPolicyRuleList()
ids := utils.NewSet()
ruleHead := make(map[uint32]*share.CLUSRuleHead)
startIdx := 0 // the idx of first non-fed rule in crhs
startFind := false
endIdx := 0 // the idx of first non-fed/non-crd in crhs
endFind := false
for i, crh := range crhs {
if crh.CfgType == share.GroundCfg {
ids.Add(crh.ID)
ruleHead[crh.ID] = crh
}
if crh.CfgType != share.FederalCfg {
if !startFind {
startIdx = i
startFind = true
}
if crh.CfgType != share.GroundCfg && !endFind {
endIdx = i
endFind = true
}
}
}
news := make([]*share.CLUSRuleHead, 0)
newRules := make(map[string]uint32, 0)
txn := cluster.Transact()
defer txn.Close()
for _, ruleConf := range rules {
if val, ok := cacheRecord.Rules[*ruleConf.Comment]; ok {
ruleConf.ID = val
} else {
ruleConf.ID = common.GetAvailablePolicyID(ids, share.GroundCfg)
cacheRecord.Rules[*ruleConf.Comment] = ruleConf.ID
ids.Add(ruleConf.ID)
}
if cr, _ = clusHelper.GetPolicyRule(ruleConf.ID); cr == nil {
cr = &share.CLUSPolicyRule{
ID: ruleConf.ID,
CreatedAt: time.Now().UTC(),
Disable: false,
CfgType: share.GroundCfg,
}
news = append(news, &share.CLUSRuleHead{
ID: ruleConf.ID,
CfgType: share.GroundCfg,
Priority: ruleConf.Priority,
})
} else {
if _, ok := ruleHead[ruleConf.ID]; ok {
ruleHead[ruleConf.ID].Priority = ruleConf.Priority
} else {
// We have issue with config lose, policy and rulehead may out of sync
news = append(news, &share.CLUSRuleHead{
ID: ruleConf.ID,
CfgType: share.GroundCfg,
Priority: ruleConf.Priority,
})
}
}
if ruleConf.From != nil {
cr.From = *ruleConf.From
}
if ruleConf.To != nil {
cr.To = *ruleConf.To
}
if ruleConf.Ports != nil {
cr.Ports = *ruleConf.Ports
}
if ruleConf.Applications != nil {
cr.Applications = appNames2IDs(*ruleConf.Applications)
}
if ruleConf.Action != nil {
cr.Action = *ruleConf.Action
}
if ruleConf.Comment != nil {
cr.Comment = *ruleConf.Comment
}
cr.LastModAt = time.Now().UTC()
cr.Priority = ruleConf.Priority
if err := clusHelper.PutPolicyRuleTxn(txn, cr); err != nil {
log.WithFields(log.Fields{"error": err}).Error()
continue
}
newRules[*ruleConf.Comment] = ruleConf.ID
}
// Check if the rules in the cacheRecord not in the newRules added, if so delete it.
for cacheName, cacheId := range cacheRecord.Rules {
if newId, ok := newRules[cacheName]; ok && newId == cacheId {
continue
}
clusHelper.DeletePolicyRuleTxn(txn, cacheId)
delete(ruleHead, cacheId)
}
// Newly added crd rules merge with existing rule by priority order
for _, v := range ruleHead {
news = append(news, v)
}
sort.Slice(news[:], func(i, j int) bool {
return news[i].Priority > news[j].Priority
})
var newPlus []*share.CLUSRuleHead // for: new CRD network rules + learned/user-created network rules
if endFind {
// there is non-fed/non-crd network rule so endIdx really means the idx of first non-fed/non-crd in crhs
newPlus = append(news, crhs[endIdx:]...)
} else {
// there is no non-fed/non-crd network rule
newPlus = news
}
crhs = append(crhs[:startIdx], newPlus...)
clusHelper.PutPolicyRuleListTxn(txn, crhs)
txn.Apply()
return &newRules
}
func (h *nvCrdHandler) crdHandleAdmCtrlRules(scope string, allAdmCtrlRules map[string][]*resource.NvCrdAdmCtrlRule, cacheRecord *share.CLUSCrdSecurityRule,
reviewType share.TReviewType) map[string]uint32 {
var rulesCount int
for _, rules := range allAdmCtrlRules {
rulesCount += len(rules)
}
if rulesCount == 0 && len(cacheRecord.AdmCtrlRules) == 0 {
// when there is no admission control rule in this crd record,
// return empty map which means this crd rule doesn't create any crd admission control rule
return make(map[string]uint32)
} else if cacheRecord.AdmCtrlRules == nil {
cacheRecord.AdmCtrlRules = make(map[string]uint32)
}
newRules := make(map[string]uint32, rulesCount)
delRules := utils.NewSet()
var cfgType share.TCfgType = share.GroundCfg
if reviewType == share.ReviewTypeImportAdmCtrl {
cfgType = share.UserCreated
}
txn := cluster.Transact()
defer txn.Close()
ids := utils.NewSet()
crdIDs := utils.NewSet()
clusArhsNew := make(map[string][]*share.CLUSRuleHead, 2)
for _, ruleType := range []string{api.ValidatingExceptRuleType, api.ValidatingDenyRuleType} {
arhs, _ := clusHelper.GetAdmissionRuleList(admission.NvAdmValidateType, ruleType)
clusArhsNew[ruleType] = arhs
for _, arh := range arhs {
ids.Add(arh.ID)
}
}
// iterate thru rules in CRD request. cacheRecord.AdmCtrlRules has the prior imported crd rules info(rule name -> rule id)
for ruleType, rules := range allAdmCtrlRules {
for _, ruleConf := range rules {
var ruleID uint32
var cr *share.CLUSAdmissionRule
ruleName := fmt.Sprintf("%s-%d", ruleType, admCtrlRuleHashFromCriteria(ruleConf.Criteria))
if ruleConf.ID > 0 && ruleConf.ID < api.StartingLocalAdmCtrlRuleID {
// yaml says it's for default rule
if cr = clusHelper.GetAdmissionRule(admission.NvAdmValidateType, ruleType, ruleConf.ID); cr != nil {
ruleID = ruleConf.ID // use existing default rule id
}
}
if ruleID == 0 {
if id, ok := cacheRecord.AdmCtrlRules[ruleName]; ok {
// found a same-name rule(same ruleType & criteria hash is the same) in cached record
if cr = clusHelper.GetAdmissionRule(admission.NvAdmValidateType, ruleType, id); cr != nil {
ruleID = id // use existing non-default rule id
}
}
}
if ruleID == 0 { // will create a new rule for it
ruleID = getAvailableRuleID(ruleTypeAdmCtrl, ids, cfgType)
}
if cr == nil && ruleID >= api.StartingLocalAdmCtrlRuleID {
cr = &share.CLUSAdmissionRule{
ID: ruleID,
Category: admission.AdmRuleCatK8s,
RuleType: ruleType,
}
}
if cr != nil {
if reviewType == share.ReviewTypeCRD {
crdIDs.Add(ruleID)
}
if !ids.Contains(ruleID) {
arhs, _ := clusArhsNew[ruleType]
arh := &share.CLUSRuleHead{
ID: ruleID,
CfgType: cfgType,
}
clusArhsNew[ruleType] = append(arhs, arh)
ids.Add(ruleID)
}
if ruleID < api.StartingLocalAdmCtrlRuleID {
// it's default rule
if reviewType == share.ReviewTypeCRD || cr.CfgType == share.UserCreated {
// default rules can be enabled/disabled when (1) thru crd or (2) they are not crd-type thru rest api import
cr.Disable = ruleConf.Disabled
}
} else {
// it's non-default rule
cr.Criteria, _ = cache.AdmCriteria2CLUS(ruleConf.Criteria)
cr.Comment = ruleConf.Comment
}
cr.CfgType = cfgType
clusHelper.PutAdmissionRuleTxn(txn, admission.NvAdmValidateType, ruleType, cr)
newRules[ruleName] = ruleID
}
}
}
if reviewType == share.ReviewTypeCRD {
// delete those rules in the cacheRecord that are not in the newRules. they will be removed from header list later.
for cacheName, cacheId := range cacheRecord.AdmCtrlRules {
if newId, ok := newRules[cacheName]; !ok || newId != cacheId {
// a crd rule is in old yaml file but not in new yaml file
ss := strings.Split(cacheName, "-") // cacheName is in the format "{ruleType}-{hash}"
if len(ss) != 2 {
log.WithFields(log.Fields{"cacheName": cacheName, "cacheId": cacheId}).Error()
} else {
ruleType := ss[0] // ss[0] is ruleType
if cacheId >= api.AdmCtrlCrdRuleIDBase && cacheId < api.AdmCtrlCrdRuleIDMax {
clusHelper.DeleteAdmissionRuleTxn(txn, admission.NvAdmValidateType, ruleType, cacheId)
delRules.Add(cacheId)
} else {
// default rule cannot be deleted
if cr := clusHelper.GetAdmissionRule(admission.NvAdmValidateType, ruleType, cacheId); cr != nil {
if cr.CfgType != share.UserCreated {
cr.CfgType = share.UserCreated
clusHelper.PutAdmissionRuleTxn(txn, admission.NvAdmValidateType, ruleType, cr)
}
}
}
}
}
}
}
for ruleType, arhsNew := range clusArhsNew {
if reviewType == share.ReviewTypeCRD {
// remove those crd rules from rule header list that was created by old yaml but not exist in new yaml
ruleHead := make([]*share.CLUSRuleHead, 0, len(arhsNew))
for _, arh := range arhsNew {
if !delRules.Contains(arh.ID) {
if reviewType == share.ReviewTypeCRD && crdIDs.Contains(arh.ID) {
arh.CfgType = cfgType
}
ruleHead = append(ruleHead, arh)
}
}
clusHelper.PutAdmissionRuleListTxn(txn, admission.NvAdmValidateType, ruleType, ruleHead)
} else {
clusHelper.PutAdmissionRuleListTxn(txn, admission.NvAdmValidateType, ruleType, arhsNew)
}
}
txn.Apply()
return newRules
}
func (h *nvCrdHandler) crdHandleAdmCtrlConfig(scope string, crdConfig *resource.NvCrdAdmCtrlConfig, cacheRecord *share.CLUSCrdSecurityRule, reviewType share.TReviewType) error {
if crdConfig == nil {
if reviewType == share.ReviewTypeCRD { // meaning do not control admission control config thru crd anymore
setAdmCtrlStateInCluster(nil, nil, nil, nil, nil, share.UserCreated)
}
return nil
}
defaultAction := share.AdmCtrlActionAllow
var cfgType share.TCfgType = share.GroundCfg
if reviewType == share.ReviewTypeImportAdmCtrl {
cfgType = share.UserCreated
}
failurePolicy := resource.IgnoreLower
status, code, origConf, cconf := setAdmCtrlStateInCluster(&crdConfig.Enable, &crdConfig.Mode, &defaultAction, &crdConfig.AdmClientMode, &failurePolicy, cfgType)
if status != http.StatusOK {
return fmt.Errorf(restErrMessage[code])
}
time.Sleep(time.Second)
if ctrlState, exist := cconf.CtrlStates[admission.NvAdmValidateType]; exist {
// we should be notified by k8s watcher later
failurePolicy := resource.Ignore
k8sResInfo := admission.ValidatingWebhookConfigInfo{
Name: resource.NvAdmValidatingName,
WebhooksInfo: []*admission.WebhookInfo{
&admission.WebhookInfo{
Name: resource.NvAdmValidatingWebhookName,
ClientConfig: admission.ClientConfig{
ClientMode: crdConfig.AdmClientMode,
ServiceName: resource.NvAdmSvcName,
Path: ctrlState.Uri,
},
FailurePolicy: failurePolicy,
TimeoutSeconds: resource.DefTimeoutSeconds,
},
&admission.WebhookInfo{
Name: resource.NvStatusValidatingWebhookName,
ClientConfig: admission.ClientConfig{
ClientMode: crdConfig.AdmClientMode,
ServiceName: resource.NvAdmSvcName,
Path: ctrlState.NvStatusUri,
},
FailurePolicy: resource.Ignore,
TimeoutSeconds: resource.DefTimeoutSeconds,
},
},
}
skip, err := admission.ConfigK8sAdmissionControl(k8sResInfo, ctrlState)
if !skip {
alog := share.CLUSEventLog{ReportedAt: time.Now().UTC()}
if err == nil {
var msgState string
if cconf.Enable {
msgState = "enabled"
} else {
msgState = "disabled"
}
alog.Event = share.CLUSEvAdmCtrlK8sConfigured
alog.Msg = fmt.Sprintf("Admission control is %s.", msgState)
} else {
alog.Event = share.CLUSEvAdmCtrlK8sConfigFailed
alog.Msg = "Failed to configure admission control state."
}
evqueue.Append(&alog)
}
if err != nil {
status, code, _, _ := setAdmCtrlStateInCluster(&origConf.Enable, &origConf.Mode, &origConf.DefaultAction, &origConf.AdmClientMode, &origConf.FailurePolicy, origConf.CfgType)
if status != http.StatusOK {
log.WithFields(log.Fields{"status": status, "code": code}).Info("Failed to revert admission control state in cluster")
}
return err
}
}
return nil
}
func (h *nvCrdHandler) crdHandleDlpGroup(name string, dlpGroupCfg *api.RESTCrdDlpGroupConfig, cfgType share.TCfgType) []string {
if dlpGroupCfg != nil {
sensors := make([]string, len(dlpGroupCfg.RepSensors))
settings := make([]*share.CLUSDlpSetting, len(dlpGroupCfg.RepSensors))
for idx, setting := range dlpGroupCfg.RepSensors {
sensors[idx] = setting.Name
settings[idx] = &share.CLUSDlpSetting{
Name: setting.Name,
Action: setting.Action,
}
}
dlpGroup := &share.CLUSDlpGroup{
Name: name,
Sensors: settings,
CfgType: cfgType,
}
dlpGroup.Status = dlpGroupCfg.Status
clusHelper.PutDlpGroup(dlpGroup, false)
return sensors
}
return nil
}
// caller must own CLUSLockPolicyKey lock
func (h *nvCrdHandler) crdHandleDlpSensor(scope string, dlpSensorConf *api.RESTDlpSensorConfig,
cacheRecord *share.CLUSCrdSecurityRule, reviewType share.TReviewType) error {
var err error
var comment string
var ruleList []api.RESTDlpRule
var cfgType share.TCfgType = share.GroundCfg
if reviewType == share.ReviewTypeImportDLP {
cfgType = share.UserCreated
}
if dlpSensorConf.Comment != nil {
comment = *dlpSensorConf.Comment
}
conf := &api.RESTDlpSensorConfig{
Name: dlpSensorConf.Name,
Rules: &ruleList,
Comment: &comment,
}
if dlpSensorConf.Rules == nil || len(*dlpSensorConf.Rules) == 0 {
ruleList = make([]api.RESTDlpRule, 0)
} else {
ruleList = make([]api.RESTDlpRule, len(*dlpSensorConf.Rules))
for idx, ruleConf := range *dlpSensorConf.Rules {
ruleList[idx] = api.RESTDlpRule{
Name: ruleConf.Name,
Patterns: ruleConf.Patterns,
CfgType: cfgTypeMap2Api[cfgType],
}
}
}
conf.Rules = &ruleList
sensor := clusHelper.GetDlpSensor(dlpSensorConf.Name)
if sensor == nil {
err = createDlpSensor(nil, conf, cfgType)
} else {
err = updateDlpSensor(nil, conf, reviewType, sensor)
}
return err
}
func (h *nvCrdHandler) crdHandleWafGroup(name string, wafGroupCfg *api.RESTCrdWafGroupConfig, cfgType share.TCfgType) []string {
if wafGroupCfg != nil {
sensors := make([]string, len(wafGroupCfg.RepSensors))
settings := make([]*share.CLUSWafSetting, len(wafGroupCfg.RepSensors))
for idx, setting := range wafGroupCfg.RepSensors {
sensors[idx] = setting.Name
settings[idx] = &share.CLUSWafSetting{
Name: setting.Name,
Action: setting.Action,
}
}
wafGroup := &share.CLUSWafGroup{
Name: name,
Sensors: settings,
CfgType: cfgType,
}
wafGroup.Status = wafGroupCfg.Status
clusHelper.PutWafGroup(wafGroup, false)
return sensors
}
return nil
}
// caller must own CLUSLockPolicyKey lock
func (h *nvCrdHandler) crdHandleWafSensor(scope string, wafSensorConf *api.RESTWafSensorConfig,
cacheRecord *share.CLUSCrdSecurityRule, reviewType share.TReviewType) error {
var err error
var comment string
var ruleList []api.RESTWafRule
var cfgType share.TCfgType = share.GroundCfg
if reviewType == share.ReviewTypeImportWAF {
cfgType = share.UserCreated
}
if wafSensorConf.Comment != nil {
comment = *wafSensorConf.Comment
}
conf := &api.RESTWafSensorConfig{
Name: wafSensorConf.Name,
Rules: &ruleList,
Comment: &comment,
}
if wafSensorConf.Rules == nil || len(*wafSensorConf.Rules) == 0 {
ruleList = make([]api.RESTWafRule, 0)
} else {
ruleList = make([]api.RESTWafRule, len(*wafSensorConf.Rules))
for idx, ruleConf := range *wafSensorConf.Rules {
ruleList[idx] = api.RESTWafRule{
Name: ruleConf.Name,
Patterns: ruleConf.Patterns,
CfgType: cfgTypeMap2Api[cfgType],
}
}
}
conf.Rules = &ruleList
sensor := clusHelper.GetWafSensor(wafSensorConf.Name)
if sensor == nil {
err = createWafSensor(nil, conf, cfgType)
} else {
err = updateWafSensor(nil, conf, reviewType, sensor)
}
return err
}
func compareCLUSCriteria(src, dst []api.RESTCriteriaEntry, selfComp bool) bool {
var dupFind bool
if len(src) != len(dst) {
return false
}
OUTER:
for _, srcC := range src {
dupFind = false
for i, dstC := range dst {
if reflect.DeepEqual(srcC, dstC) {
if !selfComp {
dst = append(dst[:i], dst[i+1:]...)
continue OUTER
} else {
if dupFind {
return false
} else {
dupFind = true
}
}
}
}
if !selfComp {
return false
}
}
return true
}
func groupNameHashFromCriteria(gCriteria []api.RESTCriteriaEntry, reviewType share.TReviewType) uint32 {
var name string
sort.Slice(gCriteria[:], func(i, j int) bool {
if gCriteria[i].Key != gCriteria[j].Key {
return gCriteria[i].Key < gCriteria[j].Key
} else {
return gCriteria[i].Value < gCriteria[j].Value
}
})
for _, criteria := range gCriteria {
if reviewType == share.ReviewTypeCRD {
name += fmt.Sprintf("%s%s%s", criteria.Key, criteria.Value, criteria.Op)
} else {
name += fmt.Sprintf("%s%s%s%d", criteria.Key, criteria.Value, criteria.Op, reviewType)
}
}
hasher := murmur3.New32()
hasher.Write([]byte(name))
return hasher.Sum32()
}
/*
rules:
. ignore Fed/external/node
. pass basic format validate
. same group in crd/importGroup appear multiple times need have same criteria/policyMode
. for crd, if group already exists:
for learned group can't modify criteria/policyMode
for regular group generate group with new name based on modified criteria
. for importGroup:
if a crd group already exists, return error
if a learned/regualr group already exists, replace it
*/
func (h *nvCrdHandler) parseCrdGroup(crdgroupCfg *api.RESTCrdGroupConfig, curGroups *[]api.RESTCrdGroupConfig, recordName string,
crdCfgRet *resource.NvSecurityParse, reviewType share.TReviewType, reviewTypeDisplay string) (string, int) {
var err int
var retMsg string
groupCfg := crdConfig2groupConfig(crdgroupCfg)
isLearnedGroupName := strings.HasPrefix(groupCfg.Name, api.LearnedGroupPrefix)
if reviewType == share.ReviewTypeImportGroup {
if isLearnedGroupName {
groupCfg.CfgType = api.CfgTypeLearned
} else {
groupCfg.CfgType = api.CfgTypeUserCreated
}
}
crdgroupCfg.OriginalName = crdgroupCfg.Name
if strings.HasPrefix(groupCfg.Name, api.FederalGroupPrefix) {
retMsg = fmt.Sprintf("%s Rule format error: Cannot use reserved name %s", reviewTypeDisplay, groupCfg.Name)
return retMsg, api.RESTErrInvalidName
} else if isLearnedGroupName {
err, msg := validateLearnGroupConfig(groupCfg)
if err > 0 {
retMsg = fmt.Sprintf("%s Rule format error: Group %s validate error %s", reviewTypeDisplay, groupCfg.Name, msg)
return retMsg, err
}
} else if strings.HasPrefix(groupCfg.Name, api.LearnedWorkloadPrefix) &&
groupCfg.Name[len(api.LearnedWorkloadPrefix):] == api.EndpointIngress {
// Learned already processed, now skip Workload:ingress
return "", 0
} else if groupCfg.Name == api.AllHostGroup || groupCfg.Name == api.LearnedExternal { // reserved group
if groupCfg.Criteria == nil || len(*groupCfg.Criteria) == 0 {
// correct criteria
*curGroups = append(*curGroups, *crdgroupCfg)
return "", 0
}
retMsg = fmt.Sprintf("%s Rule format error: %s validate error", reviewTypeDisplay, groupCfg.Name)
return retMsg, api.RESTErrInvalidName
} else if groupCfg.Name == api.AllContainerGroup { // reserved group
if len(*groupCfg.Criteria) == 1 &&
(*groupCfg.Criteria)[0].Key == "container" &&
(*groupCfg.Criteria)[0].Op == "=" &&
(*groupCfg.Criteria)[0].Value == "*" {
// correct criteria
*curGroups = append(*curGroups, *crdgroupCfg)
return "", 0
}
retMsg = fmt.Sprintf("%s Rule format error: %s validate error", reviewTypeDisplay, groupCfg.Name)
return retMsg, api.RESTErrInvalidName
} else {
err, msg := validateGroupConfig(groupCfg, true)
if err > 0 {
retMsg = fmt.Sprintf("%s Rule format error: Group %s validate error %s", reviewTypeDisplay, groupCfg.Name, msg)
return retMsg, err
}
if groupCfg.Criteria == nil || len(*groupCfg.Criteria) == 0 {
retMsg = fmt.Sprintf("%s Rule format error: Group %s must have criteria", reviewTypeDisplay, groupCfg.Name)
return retMsg, api.RESTErrInvalidRequest
} else {
if err, msg, hasAddrCT := validateGroupConfigCriteria(groupCfg, access.NewAdminAccessControl()); err > 0 {
retMsg = fmt.Sprintf("%s Rule format error: Group %s validate error %s", reviewTypeDisplay, groupCfg.Name, msg)
return retMsg, err
} else if hasAddrCT && crdCfgRet != nil && (crdCfgRet.DlpGroupCfg != nil || crdCfgRet.WafGroupCfg != nil) {
retMsg = fmt.Sprintf("%s Rule format error: Group %s with address criterion cannot have DLP/WAF policy", reviewTypeDisplay, groupCfg.Name)
return retMsg, api.RESTErrInvalidRequest
}
}
// make sure Criteria didn't duplicate
dst := append([]api.RESTCriteriaEntry(nil), *groupCfg.Criteria...)
if !compareCLUSCriteria(*groupCfg.Criteria, dst, true) {
retMsg = fmt.Sprintf("%s Rule format error: Group %s Group have duplicate/conflict Criteria", reviewTypeDisplay, groupCfg.Name)
log.WithFields(log.Fields{"name": groupCfg.Name}).Error(retMsg)
err = api.RESTErrDuplicateName
return retMsg, err
}
}
// If non-nv.ip.xxx group already added in this crd and have different criteria, reject.
// For nv.ip.xxx group, we don't compare its criteria because it may be from existing learned nv.ip.xxx group
isNvIpGroup := strings.HasPrefix(groupCfg.Name, api.LearnedSvcGroupPrefix)
for _, g := range *curGroups {
if !isNvIpGroup && (g.OriginalName == groupCfg.Name) {
e := "Group already added"
log.WithFields(log.Fields{"name": groupCfg.Name}).Error(e)
dst := append([]api.RESTCriteriaEntry(nil), *groupCfg.Criteria...)
if !compareCLUSCriteria(*g.Criteria, dst, false) {
retMsg = fmt.Sprintf("%s Rule format error: Group %s Group added with different Criteria", reviewTypeDisplay, groupCfg.Name)
log.WithFields(log.Fields{"name": groupCfg.Name}).Error(retMsg)
err = api.RESTErrInvalidRequest
return retMsg, err
}
crdgroupCfg.Name = g.Name
return "", 0
}
}
acc := access.NewReaderAccessControl()
// for non-nv.ip.xxx groups:
// 1. If group already exist and have different criteria, use hashed name to create new one if avialable otherwise faile.
// 2. if group already exist and have same criteria, keep the original name.
// 3. If group didn't exist yet and by hash name group exist, use hashed name to continue.
// 4. If group didn't exist yet and by hash name group also not exist, use original name to create group.
// for nv.ip.xxx groups, because we only import 'domain' key in its criteria, theoretically there is no need to create nv.ip.xxx-<hash> group:
// 1. If the learned group already exists, promote it to crd later.
// 2. If the crd group already exists, keep the existing crd group unchanged.
// 3. If group doesn't exist yet, create a crd nv.ip.xxx group that has "domain" key(if applicable) in criteria (i.e. drop "address" & other criteria).
if g, _, _ := clusHelper.GetGroup(groupCfg.Name, acc); g != nil {
if g.Kind != share.GroupKindContainer && (crdCfgRet != nil && (crdCfgRet.DlpGroupCfg != nil || crdCfgRet.WafGroupCfg != nil)) {
retMsg = fmt.Sprintf("%s Rule format error: Group %s cannot have DLP/WAF policy", reviewTypeDisplay, groupCfg.Name)
return retMsg, api.RESTErrInvalidRequest
}
rg_criteria := criteria2REST(g.Criteria)
if isNvIpGroup {
// found existing nv.ip.xxx group(learned or crd). use its criteria as this group's criteria.
crdgroupCfg.Criteria = &rg_criteria
} else {
if !compareCLUSCriteria(*groupCfg.Criteria, rg_criteria, false) {
hashval := groupNameHashFromCriteria(*groupCfg.Criteria, reviewType)
var newName string
if reviewType == share.ReviewTypeCRD {
newName = fmt.Sprintf("%s-%s", groupCfg.Name, fmt.Sprint(hashval))
} else {
newName = fmt.Sprintf("%s-%s-%d", groupCfg.Name, fmt.Sprint(hashval), reviewType)
}
// Make sure alternative group name is avialiable for non-nv.ip.xxx group
if variation_g, _, _ := clusHelper.GetGroup(newName, acc); variation_g != nil {
vrg_criteria := criteria2REST(variation_g.Criteria)
if !compareCLUSCriteria(*groupCfg.Criteria, vrg_criteria, false) {
retMsg = fmt.Sprintf("%s Rule format error: Group %s and alternative name %s both taken", reviewTypeDisplay, groupCfg.Name, newName)
return retMsg, api.RESTErrInvalidName
}
}
if reviewType == share.ReviewTypeCRD {
if g.CfgType != share.GroundCfg {
// conflict with user created group, use hash name create new one if available
crdgroupCfg.Name = newName
} else {
gwrecordlist := clusHelper.GetCrdSecurityRuleRecordList(resource.NvSecurityRuleKind)
LOOPALL:
for _, gw := range gwrecordlist {
if recordName == gw.Name {
// if only conflict with own crd, then it is update. keep the name
continue LOOPALL
}
// conflict with group in other crd, use hash name to create new one
// or if the hash name already using, then keep use it.
for _, gwgroup := range gw.Groups {
if gwgroup == groupCfg.Name || gwgroup == newName {
crdgroupCfg.Name = newName
break LOOPALL
}
}
}
}
} else if reviewType == share.ReviewTypeImportGroup {
// imported group cannot override CRD group
if g.CfgType == share.GroundCfg {
crdgroupCfg.Name = newName
}
}
} else if reviewType == share.ReviewTypeImportGroup {
// if there is a CRD group with the same name, create a new group because we may need to update process profile/file monitor later
if g.CfgType == share.GroundCfg {
hashval := groupNameHashFromCriteria(*groupCfg.Criteria, reviewType)
crdgroupCfg.Name = fmt.Sprintf("%s-%s-%d", groupCfg.Name, fmt.Sprint(hashval), reviewType)
}
}
}
} else {
// new group add
if isNvIpGroup {
// when creating a new nv.ip.xxx group, only keep "domain" key in its criteria. hopefully it should be learned later
crdGroupCriteria := crdgroupCfg.Criteria
criteria := make([]api.RESTCriteriaEntry, 0, 1)
if crdGroupCriteria != nil {
for _, ct := range *crdGroupCriteria {
if ct.Key == share.CriteriaKeyDomain {
criteria = append(criteria, ct)
break
}
}
}
crdgroupCfg.Criteria = &criteria
} else {
if reviewType == share.ReviewTypeCRD {
hashval := groupNameHashFromCriteria(*groupCfg.Criteria, reviewType)
newName := fmt.Sprintf("%s-%s", groupCfg.Name, fmt.Sprint(hashval))
// the group based on variation
// 1. if already created then it is duplicated create, so keep the variation
// 2. if not exist, then use the original name to create the group
if variation_g, _, _ := clusHelper.GetGroup(newName, acc); variation_g != nil {
crdgroupCfg.Name = newName
}
}
}
}
crdgroupCfg.Comment = crdgroupCfg.Comment
*curGroups = append(*curGroups, *crdgroupCfg)
return "", err
}
func (h *nvCrdHandler) parseCrdFwRule(from, to, recordName string, ruleDetail resource.NvSecurityRuleDetail, ruleSet utils.Set,
reviewType share.TReviewType) (api.RESTPolicyRuleConfig, string, int) {
var buffer bytes.Buffer
ruleCfg := api.RESTPolicyRuleConfig{
Ports: &ruleDetail.Ports,
Applications: &ruleDetail.Applications,
Action: &ruleDetail.Action,
Comment: &ruleDetail.Name,
}
ruleCfg.From = &from
ruleCfg.To = &to
if reviewType == share.ReviewTypeCRD {
if ruleCfg.Comment == nil || *ruleCfg.Comment == "" {
buffer.WriteString(" rule need name")
return ruleCfg, buffer.String(), 1
}
if ruleSet.Contains(*ruleCfg.Comment) {
buffer.WriteString(fmt.Sprintf("Duplicated rule name: %s", *ruleCfg.Comment))
return ruleCfg, buffer.String(), 1
}
ruleCfg.Priority = ruleDetail.Priority
}
if err := validateRestPolicyRuleConfig(&ruleCfg); err != nil {
log.WithFields(log.Fields{"error": err}).Error()
buffer.WriteString(err.Error())
return ruleCfg, buffer.String(), 1
}
return ruleCfg, "", 0
}
func (h *nvCrdHandler) validateCrdProcessRules(baseline string, rules []*api.RESTProcessProfileEntry) (string, int) {
var buffer bytes.Buffer
ruleSet := utils.NewSet()
errCnt := 0
for i, r := range rules {
r.Name = strings.TrimSpace(r.Name)
r.Path = strings.TrimSpace(r.Path)
r.Action = strings.TrimSpace(r.Action)
msg := fmt.Sprintf("Name:%v, Path:%v, Action:%v", r.Name, r.Path, r.Action)
if r.Path == "*" || r.Path == "" {
// possibly wildcard
} else if strings.HasSuffix(r.Path, "/") || !strings.HasPrefix(r.Path, "/") || strings.ContainsAny(r.Path, "<>") || strings.Count(r.Path, "*") > 1 {
buffer.WriteString(fmt.Sprintf(" validate error: process[%s], invalid path format: %s \n", r.Name, r.Path))
errCnt++
}
if r.Path != "" {
path := r.Path
r.Path = filepath.Clean(r.Path)
if r.Path == "." || r.Path == "/" {
buffer.WriteString(fmt.Sprintf(" validate error: process[%s], unknown path format: %s[%s] \n", r.Name, path, r.Path))
errCnt++
}
}
if r.Name == "" {
if r.Path == "*" || r.Path == "" || r.Path == "." || r.Path == "/" {
buffer.WriteString(fmt.Sprintf(" validate error: process needs a name: Name: %s \n", msg))
errCnt++
} else {
index := strings.LastIndexByte(r.Path, '/')
r.Name = r.Path[index+1:]
// log.WithFields(log.Fields{"name": r.Name, "path": r.Path}).Debug("CRD: patch Name")
}
}
if r.Name == "*" && r.Path == "" {
buffer.WriteString(fmt.Sprintf(" validate error: process needs a non-empty path: Name: %s \n", msg))
errCnt++
}
key := fmt.Sprintf("%s:%s:%s", r.Name, r.Path, r.Action)
if ruleSet.Contains(key) {
buffer.WriteString(fmt.Sprintf(" Duplicated process rule entry: : %s \n", msg))
errCnt++
} else {
ruleSet.Add(key)
}
// avoid deny all entry
if r.Name == "*" && (r.Path == "*" || r.Path == "/*") && r.Action == share.PolicyActionDeny {
buffer.WriteString(fmt.Sprintf(" invalid process entry: deny all: %s \n", msg))
errCnt++
}
// update final values
rules[i].Name = r.Name
rules[i].Path = r.Path
rules[i].Action = r.Action
}
return buffer.String(), errCnt
}
func (h *nvCrdHandler) validateCrdFileRules(rules []*api.RESTFileMonitorFilter) (string, int) {
var buffer bytes.Buffer
ruleSet := utils.NewSet()
errCnt := 0
for i, r := range rules {
flt := r.Filter
r.Filter = strings.TrimSpace(r.Filter)
r.Behavior = strings.TrimSpace(r.Behavior)
r.Filter = filepath.Clean(r.Filter)
if r.Filter == "." || r.Filter == "/" {
buffer.WriteString(fmt.Sprintf(" validate error: filter: %s[%s] \n", flt, r.Filter))
errCnt++
} else {
_, _, ok := parseFileFilter(r.Filter)
if !ok {
buffer.WriteString(fmt.Sprintf(" validate error: unsupported filter: %s[%s] \n", flt, r.Filter))
errCnt++
}
}
apps := make([]string, 0, len(r.Apps))
for _, app := range r.Apps {
apps = append(apps, strings.TrimSpace(app))
}
key := fmt.Sprintf("%s:%s:%v", r.Filter, r.Behavior, r.Recursive)
if ruleSet.Contains(key) {
buffer.WriteString(fmt.Sprintf(" Duplicated file rule entry: : %s \n", key))
errCnt++
} else {
ruleSet.Add(key)
}
// update final values
rules[i].Filter = r.Filter
rules[i].Behavior = r.Behavior
rules[i].Apps = apps
}
return buffer.String(), errCnt
}
func (h *nvCrdHandler) validateCrdDlpWafGroup(spec *resource.NvSecurityRuleSpec) (string, int) {
var errCnt int
var buffer bytes.Buffer
if spec.DlpGroup != nil {
for _, s := range spec.DlpGroup.Settings {
if s.Name == share.CLUSDlpDefaultSensor {
buffer.WriteString(fmt.Sprintf(" validate error: cannot use reserved sensor name%s \n", s.Name))
errCnt++
}
if s.Action != share.PolicyActionAllow && s.Action != share.PolicyActionDeny {
buffer.WriteString(fmt.Sprintf(" validate error: action %s \n", s.Action))
errCnt++
}
}
}
if spec.WafGroup != nil {
for _, s := range spec.WafGroup.Settings {
if s.Name == share.CLUSWafDefaultSensor {
buffer.WriteString(fmt.Sprintf(" validate error: cannot use reserved sensor name%s \n", s.Name))
errCnt++
}
if s.Action != share.PolicyActionAllow && s.Action != share.PolicyActionDeny {
buffer.WriteString(fmt.Sprintf(" validate error: action %s \n", s.Action))
errCnt++
}
}
}
return buffer.String(), errCnt
}
// for CRD & group import
func (h *nvCrdHandler) parseCurCrdContent(gfwrule *resource.NvSecurityRule, reviewType share.TReviewType,
reviewTypeDisplay string) (*resource.NvSecurityParse, int, string, string) {
var buffer bytes.Buffer
var errNo int
var errMsg, ruleNs string
var crdCfgRet resource.NvSecurityParse
// var GroupCfgs []api.RESTCrdGroupConfig
// var RuleCfgs []api.RESTPolicyRuleConfig
var ruleCfg api.RESTPolicyRuleConfig
var recordName string
ruleSet := utils.NewSet()
// var groupName string
errCount := 0
if gfwrule == nil || gfwrule.Metadata == nil || gfwrule.Metadata.Name == nil {
errMsg := fmt.Sprintf("%s file format error: validation error", reviewTypeDisplay)
return nil, 1, errMsg, ""
}
if reviewType == share.ReviewTypeCRD {
if *gfwrule.Kind == resource.NvClusterSecurityRuleKind {
ruleNs = "default"
} else {
ruleNs = *gfwrule.Metadata.Namespace
}
recordName = fmt.Sprintf("%s-%s-%s", *gfwrule.Kind, ruleNs, *gfwrule.Metadata.Name)
} else {
ruleNs = *gfwrule.Metadata.Namespace
recordName = gfwrule.Spec.Target.Selector.Name
}
// 1. Get the DLP/WAF group settings
errMsg, errNo = h.validateCrdDlpWafGroup(&gfwrule.Spec)
if errNo > 0 {
buffer.WriteString(errMsg)
errCount += errNo
} else {
if gfwrule.Spec.DlpGroup != nil {
crdCfgRet.DlpGroupCfg = &api.RESTCrdDlpGroupConfig{
Status: gfwrule.Spec.DlpGroup.Status,
RepSensors: gfwrule.Spec.DlpGroup.Settings,
}
}
if gfwrule.Spec.WafGroup != nil {
crdCfgRet.WafGroupCfg = &api.RESTCrdWafGroupConfig{
Status: gfwrule.Spec.WafGroup.Status,
RepSensors: gfwrule.Spec.WafGroup.Settings,
}
}
}
// 2. Get the target group and do validation. crdCfgRet.GroupCfgs collects all the mentioned groups in this security rule.
errMsg, errNo = h.parseCrdGroup(&gfwrule.Spec.Target.Selector, &crdCfgRet.GroupCfgs, recordName, &crdCfgRet, reviewType, reviewTypeDisplay)
if errNo > 0 {
errCount++
return nil, errCount, errMsg, recordName
}
// if the rule was for certain namespace, then the target must belong to same namespace.
// neuvector namespace was used for general in/export
if *gfwrule.Kind == resource.NvSecurityRuleKind {
for _, ct := range *gfwrule.Spec.Target.Selector.Criteria {
if ct.Key == share.CriteriaKeyDomain {
if ct.Op == share.CriteriaOpEqual && ct.Value == ruleNs {
goto targetpass
}
}
}
errMsg = fmt.Sprintf("%s Rule format error: SecurityRule in nameSpace %s need target group %s belong to it",
reviewTypeDisplay, ruleNs, gfwrule.Spec.Target.Selector.Name)
errCount++
return nil, errCount, errMsg, recordName
}
targetpass:
crdCfgRet.TargetName = gfwrule.Spec.Target.Selector.Name
policyModeCfg := gfwrule.Spec.Target.PolicyMode
if utils.DoesGroupHavePolicyMode(gfwrule.Spec.Target.Selector.Name) {
if policyModeCfg != nil {
if *policyModeCfg != share.PolicyModeLearn &&
*policyModeCfg != share.PolicyModeEvaluate &&
*policyModeCfg != share.PolicyModeEnforce {
errMsg = fmt.Sprintf("%s Rule format error: Target group %s invalide policy mode %s",
reviewTypeDisplay, gfwrule.Spec.Target.Selector.Name, *policyModeCfg)
errCount++
return nil, errCount, errMsg, recordName
} else {
pmode := api.RESTServiceConfig{
Name: gfwrule.Spec.Target.Selector.Name,
PolicyMode: policyModeCfg,
}
crdCfgRet.PolicyModeCfg = &pmode
}
} else {
//tmp := share.PolicyModeLearn
tmp := cacher.GetNewServicePolicyMode()
pmode := api.RESTServiceConfig{
Name: gfwrule.Spec.Target.Selector.Name,
PolicyMode: &tmp,
}
crdCfgRet.PolicyModeCfg = &pmode
}
} else {
if policyModeCfg != nil && (*policyModeCfg != "" && *policyModeCfg != share.PolicyModeUnavailable) {
errMsg = fmt.Sprintf("%s Rule format error: Target group %s does not support policy mode",
reviewTypeDisplay, gfwrule.Spec.Target.Selector.Name)
errCount++
return nil, errCount, errMsg, recordName
}
}
//Pare target group done.
// 3. Get the ingress policy and From Group, the target group will be used as To Group
for _, ruleDetail := range gfwrule.Spec.IngressRule {
errMsg, errNo = h.parseCrdGroup(&ruleDetail.Selector, &crdCfgRet.GroupCfgs, recordName, nil, reviewType, reviewTypeDisplay)
if errNo > 0 {
errCount++
return nil, errCount, errMsg, recordName
}
ruleCfg, errMsg, errNo = h.parseCrdFwRule(ruleDetail.Selector.Name, gfwrule.Spec.Target.Selector.Name,
recordName, ruleDetail, ruleSet, reviewType)
if errNo > 0 {
buffer.WriteString(errMsg)
errCount++
continue
}
if reviewType == share.ReviewTypeCRD {
ruleSet.Add(*ruleCfg.Comment)
}
crdCfgRet.RuleCfgs = append(crdCfgRet.RuleCfgs, ruleCfg)
}
// 4. Get the egress policy and To Group, the target group will be used as From Group
for _, ruleDetail := range gfwrule.Spec.EgressRule {
errMsg, errNo = h.parseCrdGroup(&ruleDetail.Selector, &crdCfgRet.GroupCfgs, recordName, nil, reviewType, reviewTypeDisplay)
if errNo > 0 {
errCount++
return nil, errCount, errMsg, recordName
}
ruleCfg, errMsg, errNo = h.parseCrdFwRule(gfwrule.Spec.Target.Selector.Name, ruleDetail.Selector.Name,
recordName, ruleDetail, ruleSet, reviewType)
if errNo > 0 {
buffer.WriteString(errMsg)
errCount++
continue
}
if reviewType == share.ReviewTypeCRD {
ruleSet.Add(*ruleCfg.Comment)
}
crdCfgRet.RuleCfgs = append(crdCfgRet.RuleCfgs, ruleCfg)
}
// 5. Get process and file profiles
if gfwrule.Spec.Target.Selector.Name != "" && utils.HasGroupProfiles(gfwrule.Spec.Target.Selector.Name) {
// Process profile
mode := "" // user-created group
if gfwrule.Spec.Target.PolicyMode != nil {
mode = *gfwrule.Spec.Target.PolicyMode
}
baseline := share.ProfileZeroDrift
if utils.DoesGroupHavePolicyMode(gfwrule.Spec.Target.Selector.Name) {
if gfwrule.Spec.ProcessProfile != nil && gfwrule.Spec.ProcessProfile.Baseline != nil {
blValue := *gfwrule.Spec.ProcessProfile.Baseline
if blValue == share.ProfileBasic {
baseline = share.ProfileBasic
} else if blValue != share.ProfileDefault && blValue != share.ProfileShield && blValue != share.ProfileZeroDrift {
errMsg = fmt.Sprintf("%s Rule format error: invalid baseline %s", reviewTypeDisplay, blValue)
buffer.WriteString(errMsg)
errCount += errNo
}
}
}
pprofile := api.RESTProcessProfile{
Group: gfwrule.Spec.Target.Selector.Name,
Baseline: baseline,
Mode: mode,
ProcessList: make([]*api.RESTProcessProfileEntry, 0, len(gfwrule.Spec.ProcessRule)),
}
for _, pp := range gfwrule.Spec.ProcessRule {
p := &api.RESTProcessProfileEntry{
Name: pp.Name,
Path: pp.Path,
Action: pp.Action,
AllowFileUpdate: pp.AllowFileUpdate,
}
pprofile.ProcessList = append(pprofile.ProcessList, p)
}
// the contents will be justified
errMsg, errNo = h.validateCrdProcessRules(pprofile.Baseline, pprofile.ProcessList)
if errNo > 0 {
buffer.WriteString(errMsg)
errCount += errNo
}
crdCfgRet.ProcessProfileCfg = &pprofile
// File profile
fprofile := api.RESTFileMonitorProfile{
Group: gfwrule.Spec.Target.Selector.Name,
Filters: make([]*api.RESTFileMonitorFilter, 0, len(gfwrule.Spec.FileRule)),
}
if gfwrule.Spec.Target.Selector.Name == api.AllHostGroup {
if len(gfwrule.Spec.FileRule) > 0 {
errMsg = fmt.Sprintf(" %s Rule file format error: profile is not supported for \"nodes\"", reviewTypeDisplay)
buffer.WriteString(errMsg)
errCount += errNo
}
} else {
for _, ff := range gfwrule.Spec.FileRule {
f := &api.RESTFileMonitorFilter{
Filter: ff.Filter,
Recursive: ff.Recursive,
Behavior: ff.Behavior,
Apps: ff.App,
}
fprofile.Filters = append(fprofile.Filters, f)
}
// the contents will be justified
errMsg, errNo = h.validateCrdFileRules(fprofile.Filters)
if errNo > 0 {
buffer.WriteString(errMsg)
errCount += errNo
}
}
crdCfgRet.FileProfileCfg = &fprofile
}
return &crdCfgRet, errCount, buffer.String(), recordName
}
func admCtrlRuleHashFromCriteria(rCriteria []*api.RESTAdmRuleCriterion) uint32 {
if len(rCriteria) == 0 {
return 0
}
var name string
for _, criteria := range rCriteria {
if len(criteria.SubCriteria) > 1 {
sort.Slice(criteria.SubCriteria[:], func(i, j int) bool {
if criteria.SubCriteria[i].Name != criteria.SubCriteria[j].Name {
return criteria.SubCriteria[i].Name < criteria.SubCriteria[j].Name
} else if criteria.SubCriteria[i].Op != criteria.SubCriteria[j].Op {
return criteria.SubCriteria[i].Op < criteria.SubCriteria[j].Op
} else {
return criteria.SubCriteria[i].Value < criteria.SubCriteria[j].Value
}
})
}
}
sort.Slice(rCriteria[:], func(i, j int) bool {
if rCriteria[i].Name != rCriteria[j].Name {
return rCriteria[i].Name < rCriteria[j].Name
} else if rCriteria[i].Op != rCriteria[j].Op {
return rCriteria[i].Op < rCriteria[j].Op
} else if rCriteria[i].Value != rCriteria[j].Value {
return rCriteria[i].Value < rCriteria[j].Value
} else {
if len(rCriteria[i].SubCriteria) != len(rCriteria[j].SubCriteria) {
return len(rCriteria[i].SubCriteria) < len(rCriteria[j].SubCriteria)
} else {
for idx, subCrit1 := range rCriteria[i].SubCriteria {
subCrit2 := rCriteria[j].SubCriteria[idx]
if subCrit1.Name != subCrit2.Name {
return subCrit1.Name < subCrit2.Name
} else if subCrit1.Op != subCrit2.Op {
return subCrit1.Op < subCrit2.Op
} else {
return subCrit1.Value < subCrit2.Value
}
}
}
}
return false
})
for _, criteria := range rCriteria {
name += fmt.Sprintf("%s%s%s", criteria.Name, criteria.Value, criteria.Op)
if len(criteria.SubCriteria) > 0 {
name += fmt.Sprintf("-%d", admCtrlRuleHashFromCriteria(criteria.SubCriteria))
}
}
hasher := murmur3.New32()
hasher.Write([]byte(name))
return hasher.Sum32()
}
// for CRD admission control import
func (h *nvCrdHandler) parseCurCrdAdmCtrlContent(admCtrlSecRule *resource.NvAdmCtrlSecurityRule, reviewType share.TReviewType,
reviewTypeDisplay string) (*resource.NvSecurityParse, int, string, string) {
if admCtrlSecRule == nil || admCtrlSecRule.Metadata == nil || admCtrlSecRule.Metadata.Name == nil {
errMsg := fmt.Sprintf("%s file format error: validation error", reviewTypeDisplay)
return nil, 1, errMsg, ""
}
name := *admCtrlSecRule.Metadata.Name
if reviewType == share.ReviewTypeCRD {
if name != share.ScopeLocal { // for crd, metadata name must be "local". if it's not, ignore it
return nil, 0, "", ""
}
}
var buffer bytes.Buffer
errCount := 0
crdCfgRet := &resource.NvSecurityParse{}
recordName := fmt.Sprintf("%s-default-%s", *admCtrlSecRule.Kind, name)
if admCtrlSecRule.Spec.Config != nil {
// Get the admission control config
cfg := admCtrlSecRule.Spec.Config
if cfg.Enable == nil || cfg.Mode == nil || cfg.AdmClientMode == nil ||
(*cfg.Mode != share.AdmCtrlModeMonitor && *cfg.Mode != share.AdmCtrlModeProtect) ||
(*cfg.AdmClientMode != share.AdmClientModeSvc && *cfg.AdmClientMode != share.AdmClientModeUrl) {
errMsg := fmt.Sprintf("%s file format error: validation error in %s", reviewTypeDisplay, name)
return nil, 1, errMsg, recordName
}
crdCfgRet.AdmCtrlCfg = &resource.NvCrdAdmCtrlConfig{
Enable: *cfg.Enable,
Mode: *cfg.Mode,
AdmClientMode: *cfg.AdmClientMode,
}
}
if len(admCtrlSecRule.Spec.Rules) > 0 {
crdRuleIDs := utils.NewSet()
crdCfgRet.AdmCtrlRulesCfg = make(map[string][]*resource.NvCrdAdmCtrlRule)
admRuleTypes := []string{api.ValidatingExceptRuleType, api.ValidatingDenyRuleType}
admRuleOptions := make(map[string]*api.RESTAdmCatOptions, len(admRuleTypes))
admRulesCfg := make(map[string][]*resource.NvCrdAdmCtrlRule, len(admRuleTypes))
for _, ruleType := range admRuleTypes {
admRuleOptions[ruleType] = nvsysadmission.GetAdmRuleTypeOptions(ruleType)
admRulesCfg[ruleType] = make([]*resource.NvCrdAdmCtrlRule, 0, len(admCtrlSecRule.Spec.Rules))
}
// Get the admission control rules
acc := access.NewAdminAccessControl()
for idx, crdRule := range admCtrlSecRule.Spec.Rules {
var errMsg string
if (crdRule.Action == nil || (*crdRule.Action != api.ValidatingAllowRuleType && *crdRule.Action != api.ValidatingDenyRuleType)) ||
len(crdRule.Criteria) == 0 {
errMsg := fmt.Sprintf("%s file format error: validation error in %s", reviewTypeDisplay, name)
return nil, 1, errMsg, recordName
}
crdRuleType := *crdRule.Action
if crdRuleType == api.ValidatingAllowRuleType {
crdRuleType = api.ValidatingExceptRuleType
}
var err error
var criteria []*share.CLUSAdmRuleCriterion
if criteria, err = cache.AdmCriteria2CLUS(crdRule.Criteria); err == nil {
options, _ := admRuleOptions[crdRuleType]
err = validateAdmCtrlCriteria(criteria, options.K8sOptions.RuleOptions)
}
if err != nil {
errMsg = fmt.Sprintf("%s Rule format error: Rule #%d in %s validatation error %s", reviewTypeDisplay, idx, name, err.Error())
} else {
if crdRule.ID != nil && *crdRule.ID > 0 && *crdRule.ID < api.StartingLocalAdmCtrlRuleID {
// if it's for default rule, default rule can only be enabled/disabled
if rule, err := cacher.GetAdmissionRule(admission.NvAdmValidateType, crdRuleType, *crdRule.ID, acc); err == nil {
if !reflect.DeepEqual(rule.Criteria, crdRule.Criteria) ||
(reviewType == share.ReviewTypeImportAdmCtrl && rule.CfgType == api.CfgTypeGround) {
errMsg = fmt.Sprintf("%s Rule error: Default rule(id=%d) cannot be modified", reviewTypeDisplay, *crdRule.ID)
}
} else {
errMsg = fmt.Sprintf("%s Rule error: Default rule(id=%d) not found", reviewTypeDisplay, *crdRule.ID)
}
}
if errMsg == "" {
ruleCfg := &resource.NvCrdAdmCtrlRule{
RuleType: crdRuleType,
Criteria: crdRule.Criteria,
}
if crdRule.ID != nil && *crdRule.ID < api.StartingLocalAdmCtrlRuleID {
ruleCfg.ID = *crdRule.ID
}
if crdRule.Comment != nil {
ruleCfg.Comment = *crdRule.Comment
}
if crdRule.Disabled != nil {
ruleCfg.Disabled = *crdRule.Disabled
}
rulesCfg, _ := admRulesCfg[crdRuleType]
admRulesCfg[crdRuleType] = append(rulesCfg, ruleCfg)
crdRuleIDs.Add(crdRule.ID)
}
}
if errMsg != "" {
buffer.WriteString(errMsg)
errCount++
continue
}
}
for ruleType, rulesCfg := range admRulesCfg {
if len(rulesCfg) > 0 {
crdCfgRet.AdmCtrlRulesCfg[ruleType] = rulesCfg
}
}
}
return crdCfgRet, errCount, buffer.String(), recordName
}
// for CRD DLP sensor import
func (h *nvCrdHandler) parseCurCrdDlpContent(dlpSecRule *resource.NvDlpSecurityRule, reviewType share.TReviewType,
reviewTypeDisplay string) (*resource.NvSecurityParse, int, string, string) {
if dlpSecRule == nil || dlpSecRule.Metadata == nil || dlpSecRule.Metadata.Name == nil {
errMsg := fmt.Sprintf("%s file format error: validation error", reviewTypeDisplay)
return nil, 1, errMsg, ""
}
var cfgType string = api.CfgTypeUserCreated
if reviewType == share.ReviewTypeCRD {
cfgType = api.CfgTypeGround
}
var buffer bytes.Buffer
errCount := 0
crdCfgRet := &resource.NvSecurityParse{}
name := *dlpSecRule.Metadata.Name
recordName := fmt.Sprintf("%s-default-%s", *dlpSecRule.Kind, name)
if dlpSecRule.Spec.Sensor != nil {
sensor := dlpSecRule.Spec.Sensor
if sensor.Name != name {
errMsg := fmt.Sprintf("%s file format error: mismatched name in sensor and metadata %s", reviewTypeDisplay, name)
return nil, 1, errMsg, recordName
}
if !isObjectNameValid(sensor.Name) {
errMsg := fmt.Sprintf("%s file format error: invalid characters in name %s", reviewTypeDisplay, name)
return nil, 1, errMsg, recordName
}
if sensor.Name == share.CLUSDlpDefaultSensor || strings.HasPrefix(sensor.Name, api.FederalGroupPrefix) {
errMsg := fmt.Sprintf("%s file format error: cannot create sensor with reserved name %s", reviewTypeDisplay, name)
return nil, 1, errMsg, recordName
}
if cs, _ := cacher.GetDlpSensor(sensor.Name, access.NewReaderAccessControl()); cs != nil && cs.Predefine {
errMsg := fmt.Sprintf("%s file format error: cannot modify predefined sensor %s", reviewTypeDisplay, name)
return nil, 1, errMsg, recordName
}
if sensor.Comment != nil && len(*sensor.Comment) > api.DlpRuleCommentMaxLen {
errMsg := fmt.Sprintf("%s file format error: comment exceed max %d characters!", reviewTypeDisplay, api.DlpRuleCommentMaxLen)
return nil, 1, errMsg, recordName
}
ruleList := make([]api.RESTDlpRule, len(sensor.RuleList))
for idx, rule := range sensor.RuleList {
ruleList[idx] = api.RESTDlpRule{
Name: *rule.Name,
Patterns: rule.Patterns,
CfgType: cfgType,
}
}
if err := validateDlpRuleConfig(ruleList); err != nil {
errMsg := fmt.Sprintf("%s file format error: %s", reviewTypeDisplay, err.Error())
return nil, 1, errMsg, recordName
}
crdCfgRet.DlpSensorCfg = &api.RESTDlpSensorConfig{
Name: sensor.Name,
Comment: sensor.Comment,
Rules: &ruleList,
}
} else {
crdCfgRet.DlpSensorCfg = &api.RESTDlpSensorConfig{
Name: *dlpSecRule.Metadata.Name,
}
}
return crdCfgRet, errCount, buffer.String(), recordName
}
// for CRD WAF sensor import
func (h *nvCrdHandler) parseCurCrdWafContent(wafSecRule *resource.NvWafSecurityRule, reviewType share.TReviewType,
reviewTypeDisplay string) (*resource.NvSecurityParse, int, string, string) {
if wafSecRule == nil || wafSecRule.Metadata == nil || wafSecRule.Metadata.Name == nil {
errMsg := fmt.Sprintf("%s file format error: validation error", reviewTypeDisplay)
return nil, 1, errMsg, ""
}
var cfgType string = api.CfgTypeUserCreated
if reviewType == share.ReviewTypeCRD {
cfgType = api.CfgTypeGround
}
var buffer bytes.Buffer
errCount := 0
crdCfgRet := &resource.NvSecurityParse{}
name := *wafSecRule.Metadata.Name
recordName := fmt.Sprintf("%s-default-%s", *wafSecRule.Kind, name)
if wafSecRule.Spec.Sensor != nil {
sensor := wafSecRule.Spec.Sensor
if sensor.Name != name {
errMsg := fmt.Sprintf("%s file format error: mismatched name in sensor and metadata %s", reviewTypeDisplay, name)
return nil, 1, errMsg, recordName
}
if !isObjectNameValid(sensor.Name) {
errMsg := fmt.Sprintf("%s file format error: invalid characters in name %s", reviewTypeDisplay, name)
return nil, 1, errMsg, recordName
}
if sensor.Name == share.CLUSWafDefaultSensor || strings.HasPrefix(sensor.Name, api.FederalGroupPrefix) {
errMsg := fmt.Sprintf("%s file format error: cannot create sensor with reserved name %s", reviewTypeDisplay, name)
return nil, 1, errMsg, recordName
}
if cs, _ := cacher.GetWafSensor(sensor.Name, access.NewReaderAccessControl()); cs != nil && cs.Predefine {
errMsg := fmt.Sprintf("%s file format error: cannot modify predefined sensor %s", reviewTypeDisplay, name)
return nil, 1, errMsg, recordName
}
if sensor.Comment != nil && len(*sensor.Comment) > api.DlpRuleCommentMaxLen {
errMsg := fmt.Sprintf("%s file format error: comment exceed max %d characters!", reviewTypeDisplay, api.DlpRuleCommentMaxLen)
return nil, 1, errMsg, recordName
}
ruleList := make([]api.RESTWafRule, len(sensor.RuleList))
for idx, rule := range sensor.RuleList {
ruleList[idx] = api.RESTWafRule{
Name: *rule.Name,
Patterns: rule.Patterns,
CfgType: cfgType,
}
}
if err := validateWafRuleConfig(ruleList); err != nil {
errMsg := fmt.Sprintf("%s file format error: %s", reviewTypeDisplay, err.Error())
return nil, 1, errMsg, recordName
}
crdCfgRet.WafSensorCfg = &api.RESTWafSensorConfig{
Name: sensor.Name,
Comment: sensor.Comment,
Rules: &ruleList,
}
} else {
crdCfgRet.WafSensorCfg = &api.RESTWafSensorConfig{
Name: *wafSecRule.Metadata.Name,
}
}
return crdCfgRet, errCount, buffer.String(), recordName
}
// Process the group and network rule list get from the crd. caller must own CLUSLockPolicyKey lock
func (h *nvCrdHandler) crdGFwRuleProcessRecord(crdCfgRet *resource.NvSecurityParse, kind, recordName string) {
crdRecord := clusHelper.GetCrdSecurityRuleRecord(kind, recordName)
if crdRecord == nil {
crdRecord = &share.CLUSCrdSecurityRule{
Name: recordName,
Groups: make([]string, 0),
Rules: make(map[string]uint32),
}
}
groupNew, targetGroupDlpWAF := h.crdHandleGroupsAdd(crdCfgRet.GroupCfgs, crdCfgRet.TargetName)
absentGroup := findAbsentGroups(crdRecord, groupNew)
h.crdHandleGroupRecordDel(crdRecord, absentGroup, false)
log.WithFields(log.Fields{"name": recordName, "target": crdCfgRet.TargetName, "targetDlpWAF": targetGroupDlpWAF}).Debug()
var profile_mode string
if crdCfgRet.ProcessProfileCfg != nil {
// nodes, containers, service or user-defined groups
profile_mode = crdCfgRet.ProcessProfileCfg.Mode
crdRecord.ProfileName = crdCfgRet.TargetName
crdRecord.ProfileMode = profile_mode
crdRecord.ProcessProfile = share.CLUSCrdProcessProfile{Baseline: crdCfgRet.ProcessProfileCfg.Baseline}
crdRecord.ProcessRules = h.crdGetProcessRules(crdCfgRet.ProcessProfileCfg)
crdRecord.FileRules = h.crdGetFileRules(crdCfgRet.FileProfileCfg)
}
// handle rule part of crd
ruleNew := h.crdHandleRules(crdCfgRet.RuleCfgs, crdRecord)
crdRecord.Groups = groupNew
crdRecord.Rules = *ruleNew
if targetGroupDlpWAF {
if crdCfgRet.DlpGroupCfg == nil {
crdCfgRet.DlpGroupCfg = &api.RESTCrdDlpGroupConfig{RepSensors: make([]api.RESTCrdDlpGroupSetting, 0)}
}
if crdCfgRet.WafGroupCfg == nil {
crdCfgRet.WafGroupCfg = &api.RESTCrdWafGroupConfig{RepSensors: make([]api.RESTCrdWafGroupSetting, 0)}
}
crdRecord.DlpGroupSensors = h.crdHandleDlpGroup(crdCfgRet.TargetName, crdCfgRet.DlpGroupCfg, share.GroundCfg)
crdRecord.WafGroupSensors = h.crdHandleWafGroup(crdCfgRet.TargetName, crdCfgRet.WafGroupCfg, share.GroundCfg)
}
clusHelper.PutCrdSecurityRuleRecord(kind, recordName, crdRecord)
if crdRecord.ProfileName != "" {
profile_mode = h.crdRebuildGroupProfiles(crdRecord.ProfileName, nil, share.ReviewTypeCRD)
}
h.crdHandlePolicyMode(crdCfgRet.PolicyModeCfg, profile_mode)
}
// Process the admission control rule list get from the crd. caller must own CLUSLockAdmCtrlKey lock
func (h *nvCrdHandler) crdAdmCtrlRuleRecord(crdCfgRet *resource.NvSecurityParse, kind, recordName string) {
crdRecord := clusHelper.GetCrdSecurityRuleRecord(kind, recordName)
if crdRecord == nil {
crdRecord = &share.CLUSCrdSecurityRule{
Name: recordName,
AdmCtrlRules: make(map[string]uint32),
}
}
log.WithFields(log.Fields{"name": recordName}).Debug()
// handle admission control rule part of crd
ruleNew := h.crdHandleAdmCtrlRules(share.ScopeLocal, crdCfgRet.AdmCtrlRulesCfg, crdRecord, share.ReviewTypeCRD)
crdRecord.AdmCtrlRules = ruleNew
h.crdHandleAdmCtrlConfig(share.ScopeLocal, crdCfgRet.AdmCtrlCfg, crdRecord, share.ReviewTypeCRD)
clusHelper.PutCrdSecurityRuleRecord(kind, recordName, crdRecord)
}
// Process DLP sensor get from the crd. caller must own CLUSLockPolicyKey lock
func (h *nvCrdHandler) crdDlpSensorRecord(crdCfgRet *resource.NvSecurityParse, kind, recordName string) {
crdRecord := clusHelper.GetCrdSecurityRuleRecord(kind, recordName)
if crdRecord == nil {
crdRecord = &share.CLUSCrdSecurityRule{
Name: recordName,
DlpSensor: crdCfgRet.DlpSensorCfg.Name,
}
}
log.WithFields(log.Fields{"name": recordName}).Debug()
// handle dlp part of crd (dlp sensor definition, not per-group's sensors association)
h.crdHandleDlpSensor(share.ScopeLocal, crdCfgRet.DlpSensorCfg, crdRecord, share.ReviewTypeCRD)
clusHelper.PutCrdSecurityRuleRecord(kind, recordName, crdRecord)
}
// Process WAF sensor get from the crd. caller must own CLUSLockPolicyKey lock
func (h *nvCrdHandler) crdWafSensorRecord(crdCfgRet *resource.NvSecurityParse, kind, recordName string) {
crdRecord := clusHelper.GetCrdSecurityRuleRecord(kind, recordName)
if crdRecord == nil {
crdRecord = &share.CLUSCrdSecurityRule{
Name: recordName,
WafSensor: crdCfgRet.WafSensorCfg.Name,
}
}
log.WithFields(log.Fields{"name": recordName}).Debug()
// handle waf part of crd (waf sensor definition, not per-group's sensors association)
h.crdHandleWafSensor(share.ScopeLocal, crdCfgRet.WafSensorCfg, crdRecord, share.ReviewTypeCRD)
clusHelper.PutCrdSecurityRuleRecord(kind, recordName, crdRecord)
}
// for CRD only
func (h *nvCrdHandler) parseCrdContent(raw []byte) (*resource.NvSecurityParse, int, string, string, string) {
var crdCfgRet *resource.NvSecurityParse
var secRulePartial resource.NvSecurityRulePartial
var gfwrule resource.NvSecurityRule
var admCtrlSecRule resource.NvAdmCtrlSecurityRule
var dlpSecRule resource.NvDlpSecurityRule
var wafSecRule resource.NvWafSecurityRule
var buffer bytes.Buffer
var errMsg, recordName string
var kind string
var err error
if err = json.Unmarshal(raw, &secRulePartial); err == nil {
if secRulePartial.Metadata == nil || secRulePartial.Metadata.Name == nil {
err = fmt.Errorf("no metadata name")
} else {
kind = *secRulePartial.Kind
switch kind {
case resource.NvSecurityRuleKind, resource.NvClusterSecurityRuleKind:
err = json.Unmarshal(raw, &gfwrule)
case resource.NvAdmCtrlSecurityRuleKind:
err = json.Unmarshal(raw, &admCtrlSecRule)
case resource.NvDlpSecurityRuleKind:
err = json.Unmarshal(raw, &dlpSecRule)
case resource.NvWafSecurityRuleKind:
err = json.Unmarshal(raw, &wafSecRule)
default:
err = errors.New("unsupported Kubernetese resource kind")
}
}
}
// var groupName string
errCount := 0
if err != nil {
buffer.WriteString(" CRD Rule format error: ")
buffer.WriteString(err.Error())
errMsg = buffer.String()
errCount++
} else {
switch kind {
case resource.NvSecurityRuleKind, resource.NvClusterSecurityRuleKind:
crdCfgRet, errCount, errMsg, recordName = h.parseCurCrdContent(&gfwrule, share.ReviewTypeCRD, share.ReviewTypeDisplayCRD)
case resource.NvAdmCtrlSecurityRuleKind:
crdCfgRet, errCount, errMsg, recordName = h.parseCurCrdAdmCtrlContent(&admCtrlSecRule, share.ReviewTypeCRD, share.ReviewTypeDisplayCRD)
case resource.NvDlpSecurityRuleKind:
crdCfgRet, errCount, errMsg, recordName = h.parseCurCrdDlpContent(&dlpSecRule, share.ReviewTypeCRD, share.ReviewTypeDisplayCRD)
case resource.NvWafSecurityRuleKind:
crdCfgRet, errCount, errMsg, recordName = h.parseCurCrdWafContent(&wafSecRule, share.ReviewTypeCRD, share.ReviewTypeDisplayCRD)
}
}
if errCount > 0 {
var err1 error
log.Printf("CRD validate fail : %s\n", errMsg)
switch kind {
case resource.NvSecurityRuleKind:
err1 = global.ORCH.DeleteResource(resource.RscTypeCrdSecurityRule, &gfwrule)
case resource.NvClusterSecurityRuleKind:
r := resource.NvClusterSecurityRule(gfwrule)
err1 = global.ORCH.DeleteResource(resource.RscTypeCrdClusterSecurityRule, &r)
case resource.NvAdmCtrlSecurityRuleKind:
err1 = global.ORCH.DeleteResource(resource.RscTypeCrdAdmCtrlSecurityRule, &admCtrlSecRule)
case resource.NvDlpSecurityRuleKind:
err1 = global.ORCH.DeleteResource(resource.RscTypeCrdDlpSecurityRule, &dlpSecRule)
case resource.NvWafSecurityRuleKind:
err1 = global.ORCH.DeleteResource(resource.RscTypeCrdWafSecurityRule, &wafSecRule)
}
if err1 != nil {
log.WithFields(log.Fields{"error": err1}).Error(recordName)
}
}
return crdCfgRet, errCount, errMsg, recordName, kind
}
func (h *nvCrdHandler) crdGFwRuleHandler(req *admissionv1beta1.AdmissionRequest) {
if clusHelper == nil {
clusHelper = kv.GetClusterHelper()
}
var detail []string
switch req.Operation {
case "DELETE":
var recordName string
var ruleNs string = "default"
var kind string = req.Kind.Kind
if req.Kind.Kind == resource.NvSecurityRuleKind || req.Kind.Kind == resource.NvClusterSecurityRuleKind {
kind = resource.NvSecurityRuleKind
if req.Kind.Kind == resource.NvSecurityRuleKind {
ruleNs = req.Namespace
}
}
recordName = fmt.Sprintf("%s-%s-%s", req.Kind.Kind, ruleNs, req.Name)
crdRecord := clusHelper.GetCrdSecurityRuleRecord(kind, recordName)
if crdRecord != nil {
log.WithFields(log.Fields{"name": req.Name, "kind": req.Kind.Kind, "ns": req.Namespace}).Info("deleting CRD ...")
switch req.Kind.Kind {
case resource.NvAdmCtrlSecurityRuleKind:
h.crdDeleteAdmCtrlRules()
setAdmCtrlStateInCluster(nil, nil, nil, nil, nil, share.UserCreated)
h.crdDeleteRecord(req.Kind.Kind, recordName)
case resource.NvDlpSecurityRuleKind:
deleteDlpSensor(nil, crdRecord.DlpSensor, share.ReviewTypeCRD, true, h.acc, nil)
h.crdDeleteRecord(req.Kind.Kind, recordName)
case resource.NvWafSecurityRuleKind:
deleteWafSensor(nil, crdRecord.WafSensor, share.ReviewTypeCRD, true, h.acc, nil)
h.crdDeleteRecord(req.Kind.Kind, recordName)
case resource.NvSecurityRuleKind, resource.NvClusterSecurityRuleKind:
h.crdDeleteRules(crdRecord.Rules)
h.crdHandleGroupRecordDel(crdRecord, crdRecord.Groups, false)
h.crdDeleteRecordEx(resource.NvSecurityRuleKind, recordName, crdRecord.ProfileName)
}
e := fmt.Sprintf("CustomResourceDefinition %s", req.Kind.Kind)
msg := fmt.Sprintf("%s deleted.", recordName)
detail = append(detail, msg)
k8sResourceLog(share.CLUSEvCrdRemoved, e, detail)
log.WithFields(log.Fields{"crdName": recordName}).Info("CRD deleted")
}
case "CREATE", "UPDATE":
var kind string
var errCount int
var err, recordName string
var crdCfgRet *resource.NvSecurityParse
log.WithFields(log.Fields{"name": req.Name, "kind": req.Kind.Kind, "ns": req.Namespace}).Info("processing CRD ...")
// First parse the crd content, validate for error and generate final list if no error
crdCfgRet, errCount, err, recordName, kind = h.parseCrdContent(req.Object.Raw)
if errCount == 0 {
// process the parse result.
switch kind {
case resource.NvSecurityRuleKind, resource.NvClusterSecurityRuleKind:
h.crdGFwRuleProcessRecord(crdCfgRet, resource.NvSecurityRuleKind, recordName)
case resource.NvAdmCtrlSecurityRuleKind:
if crdCfgRet != nil { // for NvAdmissionControlSecurityRule resource objects with metadata name other than "local", ignore them
h.crdAdmCtrlRuleRecord(crdCfgRet, kind, recordName)
}
case resource.NvDlpSecurityRuleKind:
h.crdDlpSensorRecord(crdCfgRet, kind, recordName)
case resource.NvWafSecurityRuleKind:
h.crdWafSensorRecord(crdCfgRet, kind, recordName)
default:
errCount = 1
err = "unsupported Kubernetese resource kind"
}
}
if errCount > 0 {
e := fmt.Sprintf("CRD %s Removed", recordName)
detail = append(detail, err)
k8sResourceLog(share.CLUSEvCrdErrDetected, e, detail)
log.WithFields(log.Fields{"crdName": recordName}).Error("Failed to add CRD")
} else {
e := fmt.Sprintf("CRD %s Processed", recordName)
detail = append(detail, recordName)
k8sResourceLog(share.CLUSEvCrdImported, e, detail)
log.WithFields(log.Fields{"crdName": recordName}).Info("CRD processed")
}
}
return
}
func isExportSkipGroupName(name string, acc *access.AccessControl) (bool, *api.RESTGroup) {
// allow group with prefix "nv.ip."
if strings.HasPrefix(name, api.LearnedHostPrefix) {
return true, nil
} else if strings.HasPrefix(name, api.LearnedWorkloadPrefix) {
if name[len(api.LearnedWorkloadPrefix):] != api.EndpointIngress {
return true, nil
}
return false, nil
} else {
group, _ := cacher.GetGroup(name, "", false, acc)
if group == nil || group.CfgType == api.CfgTypeFederal {
return true, nil
}
return false, group
}
}
func exportAttachRule(rule *api.RESTPolicyRule, useFrom bool, acc *access.AccessControl, cnt int) *resource.NvSecurityRuleDetail {
var detail resource.NvSecurityRuleDetail
var group *api.RESTGroup
var skip bool
detail.Applications = rule.Applications
detail.Ports = rule.Ports
detail.Action = rule.Action
detail.Priority = rule.Priority
if useFrom {
if skip, group = isExportSkipGroupName(rule.From, acc); skip {
e := "Skip special group export"
log.WithFields(log.Fields{"name": rule.From}).Error(e)
return nil
}
detail.Name = fmt.Sprintf("%s-ingress-%d", rule.To, cnt)
detail.Selector.Name = rule.From
} else {
if skip, group = isExportSkipGroupName(rule.To, acc); skip {
e := "Skip special group export"
log.WithFields(log.Fields{"name": rule.To}).Error(e)
return nil
}
detail.Name = fmt.Sprintf("%s-egress-%d", rule.To, cnt)
detail.Selector.Name = rule.To
}
if group != nil {
detail.Selector.Criteria = &group.Criteria
}
return &detail
}
func handlerGroupCfgExport(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
log.WithFields(log.Fields{"URL": r.URL.String()}).Debug()
defer r.Body.Close()
var data []byte
var inCount, eCount int
var group *api.RESTGroup
var skip bool
policy_ids := utils.NewSet()
acc, _ := getAccessControl(w, r, access.AccessOPRead) // handlerGroupCfgExport() is used for both GET/POST so we force the op to be AccessOPRead for access control
if acc == nil {
return
}
body, _ := ioutil.ReadAll(r.Body)
var rconf api.RESTGroupExport
err := json.Unmarshal(body, &rconf)
if err != nil {
log.WithFields(log.Fields{"error": err}).Error("Request error")
restRespError(w, http.StatusBadRequest, api.RESTErrInvalidRequest)
return
}
apiVersion := resource.NvSecurityRuleVersion
filename := "cfgExport.yaml"
resp := resource.NvSecurityRuleList{
Kind: &resource.NvListKind,
ApiVersion: &apiVersion,
}
lock, err := clusHelper.AcquireLock(share.CLUSLockPolicyKey, clusterLockWait)
if err != nil {
e := "Failed to acquire cluster lock"
log.WithFields(log.Fields{"error": err}).Error(e)
return
}
defer clusHelper.ReleaseLock(lock)
for _, gname := range rconf.Groups {
if skip, group = isExportSkipGroupName(gname, acc); skip {
e := "Skip special group export"
log.WithFields(log.Fields{"name": gname}).Error(e)
continue
}
if group == nil || group.CfgType == api.CfgTypeFederal {
continue
}
tgroup := group2RESTConfig(group)
kindName := dns1123NameChg(gname)
targetNs := ""
targetKind := resource.NvClusterSecurityRuleKind
apiversion := fmt.Sprintf("%s/%s", common.OEMClusterSecurityRuleGroup, resource.NvClusterSecurityRuleVersion)
for _, ct := range group.Criteria {
if ct.Key == share.CriteriaKeyDomain {
targetNs = ct.Value
targetKind = resource.NvSecurityRuleKind
apiversion = fmt.Sprintf("%s/%s", common.OEMSecurityRuleGroup, resource.NvSecurityRuleVersion)
}
}
resptmp := resource.NvSecurityRule{
//Kind: &resource.NvSecurityRuleKind,
Kind: &targetKind,
Metadata: &cmetav1.ObjectMeta{
Name: &kindName,
Namespace: &targetNs,
},
ApiVersion: &apiversion,
Spec: resource.NvSecurityRuleSpec{
Target: resource.NvSecurityTarget{
Selector: *tgroup,
},
IngressRule: make([]resource.NvSecurityRuleDetail, 0),
EgressRule: make([]resource.NvSecurityRuleDetail, 0),
ProcessRule: make([]resource.NvSecurityProcessRule, 0),
FileRule: make([]resource.NvSecurityFileRule, 0),
},
}
// If Learned group add the policy mode in crd
if utils.DoesGroupHavePolicyMode(gname) {
if rconf.PolicyMode != "" {
resptmp.Spec.Target.PolicyMode = &rconf.PolicyMode
} else {
resptmp.Spec.Target.PolicyMode = &group.PolicyMode
}
} else {
resptmp.Spec.Target.PolicyMode = func() *string { b := share.PolicyModeUnavailable; return &b }()
}
// export process and file profiles
exportProcessRule(gname, &(resptmp.Spec), acc)
if gname != api.AllHostGroup { // TODO: skip file for now
exportFileRule(gname, &(resptmp.Spec), acc)
}
// export group's dlp/waf data
if group.Kind == share.GroupKindContainer {
exportDlpWafGroup(gname, &resptmp, acc)
}
for _, idx := range group.PolicyRules {
if policy_ids.Contains(idx) {
continue
}
policy_ids.Add(idx)
rule, _ := cacher.GetPolicyRule(idx, acc)
if rule != nil {
if rule.To == gname {
detail := exportAttachRule(rule, true, acc, inCount)
if detail != nil {
resptmp.Spec.IngressRule = append(resptmp.Spec.IngressRule, *detail)
inCount = inCount + 1
}
} else {
detail := exportAttachRule(rule, false, acc, eCount)
if detail != nil {
resptmp.Spec.EgressRule = append(resptmp.Spec.EgressRule, *detail)
eCount = eCount + 1
}
}
}
}
resp.Items = append(resp.Items, &resptmp)
}
// for all the group in the From/To , if learned group we also need export it's policymode
// We don't know the default policy mode in other system so in current system just export
// tell the browser the returned content should be downloaded
w.Header().Set("Content-Disposition", "Attachment; filename="+filename)
w.Header().Set("Content-Encoding", "gzip")
w.WriteHeader(http.StatusOK)
json_data, _ := json.MarshalIndent(resp, "", " ")
data, _ = yaml.JSONToYAML(json_data)
data = utils.GzipBytes(data)
w.Write(data)
}
func (h *nvCrdHandler) crdDeleteRecord(kind, recordName string) {
if recordName != "" {
clusHelper.DeleteCrdSecurityRuleRecord(kind, recordName)
}
}
func (h *nvCrdHandler) crdDeleteRecordEx(kind, recordName, profileName string) {
h.crdDeleteRecord(kind, recordName)
if profileName == "" {
return
}
mode := h.crdRebuildGroupProfiles(profileName, nil, share.ReviewTypeCRD)
if utils.DoesGroupHavePolicyMode(profileName) && mode != "" {
policy_mode := &api.RESTServiceConfig{Name: profileName, PolicyMode: &mode}
h.crdHandlePolicyMode(policy_mode, mode)
}
}
//// utility functions for process and file profiles
func (h *nvCrdHandler) crdGetProcessRules(profile *api.RESTProcessProfile) []share.CLUSCrdProcessRule {
rules := make([]share.CLUSCrdProcessRule, 0)
for _, proc := range profile.ProcessList {
r := &share.CLUSCrdProcessRule{
Name: proc.Name,
Path: proc.Path,
Action: proc.Action,
AllowFileUpdate: proc.AllowFileUpdate,
}
rules = append(rules, *r)
}
return rules
}
func (h *nvCrdHandler) crdGetFileRules(profile *api.RESTFileMonitorProfile) []share.CLUSCrdFileRule {
rules := make([]share.CLUSCrdFileRule, 0)
for _, ffp := range profile.Filters {
fr := &share.CLUSCrdFileRule{
Filter: ffp.Filter,
Recursive: ffp.Recursive,
Behavior: ffp.Behavior,
App: ffp.Apps,
}
rules = append(rules, *fr)
}
return rules
}
func exportProcessRule(group string, secRule *resource.NvSecurityRuleSpec, acc *access.AccessControl) bool {
log.WithFields(log.Fields{"name": group}).Debug()
if profile, err := cacher.GetProcessProfile(group, acc); err == nil {
if utils.DoesGroupHavePolicyMode(group) {
baseline := share.ProfileZeroDrift
if profile.Baseline == share.ProfileBasic {
baseline = share.ProfileBasic
}
secRule.ProcessProfile = &resource.NvSecurityProcessProfile{Baseline: &baseline}
secRule.ProcessProfile.Baseline = &baseline
}
dupChecker := utils.NewSet()
for _, gproc := range profile.ProcessList {
key := fmt.Sprintf("%s::%s::%s", gproc.Name, gproc.Path, gproc.Action)
if !dupChecker.Contains(key) {
dupChecker.Add(key)
//
r := &resource.NvSecurityProcessRule{
Name: gproc.Name,
Path: gproc.Path,
Action: gproc.Action,
AllowFileUpdate: gproc.AllowFileUpdate,
}
secRule.ProcessRule = append(secRule.ProcessRule, *r)
}
}
return true
}
log.WithFields(log.Fields{"name": group}).Debug("failed")
return false
}
func exportFileRule(group string, rules *resource.NvSecurityRuleSpec, acc *access.AccessControl) bool {
log.WithFields(log.Fields{"name": group}).Debug()
// not include predefined list
if profile, err := cacher.GetFileMonitorProfile(group, acc, false); err == nil {
dupChecker := utils.NewSet()
for _, ff := range profile.Filters {
key := fmt.Sprintf("%s::%v::%s", ff.Filter, ff.Recursive, ff.Behavior)
if !dupChecker.Contains(key) {
dupChecker.Add(key)
//
r := &resource.NvSecurityFileRule{
Filter: ff.Filter,
Recursive: ff.Recursive,
Behavior: ff.Behavior,
App: ff.Apps,
}
rules.FileRule = append(rules.FileRule, *r)
}
}
return true
} else {
log.WithFields(log.Fields{"name": group, "err": err}).Error()
}
return false
}
func exportDlpWafGroup(group string, secRule *resource.NvSecurityRule, acc *access.AccessControl) {
log.WithFields(log.Fields{"name": group}).Debug()
if dlpGroup, err := cacher.GetDlpGroup(group, acc); err == nil {
settings := make([]api.RESTCrdDlpGroupSetting, len(dlpGroup.Sensors))
for idx, s := range dlpGroup.Sensors {
settings[idx] = api.RESTCrdDlpGroupSetting{
Name: s.Name,
Action: s.Action,
}
}
secRule.Spec.DlpGroup = &resource.NvSecurityDlpGroup{
Status: dlpGroup.Status,
Settings: settings,
}
} else {
log.WithFields(log.Fields{"name": group, "err": err}).Error("dlp")
}
if wafGroup, err := cacher.GetWafGroup(group, acc); err == nil {
settings := make([]api.RESTCrdWafGroupSetting, len(wafGroup.Sensors))
for idx, s := range wafGroup.Sensors {
settings[idx] = api.RESTCrdWafGroupSetting{
Name: s.Name,
Action: s.Action,
}
}
secRule.Spec.WafGroup = &resource.NvSecurityWafGroup{
Status: wafGroup.Status,
Settings: settings,
}
} else {
log.WithFields(log.Fields{"name": group, "err": err}).Error("waf")
}
}
func (h *nvCrdHandler) crdReadyToDeleteProfiles(targetCrdName string, group *api.RESTGroup) bool {
crdName := targetCrdName
profileName := group.Name
// log.WithFields(log.Fields{"crdName": crdName, "profileName": profileName}).Debug("CRD:")
// valid profile name?
if profileName == "" {
// log.WithFields(log.Fields{"crdName": crdName}).Debug("CRD: no profile")
return true
}
// can not removed default group: "nodes" and "containers"
if profileName == api.AllHostGroup || profileName == api.AllContainerGroup || profileName == api.LearnedExternal {
return false
}
// any local process rules?
if pp := clusHelper.GetProcessProfile(profileName); pp != nil {
for _, proc := range pp.Process {
if proc.CfgType != share.GroundCfg {
log.WithFields(log.Fields{"crdName": crdName, "name": profileName, "proc": proc}).Debug("CRD: local")
return false
}
}
}
// any local file rules?
if mon, _ := clusHelper.GetFileMonitorProfile(profileName); mon != nil {
for _, flt := range mon.Filters { // non-CRD
if f, ok := cacher.IsPrdefineFileGroup(flt.Filter, flt.Recursive); ok {
if f.CustomerAdd {
log.WithFields(log.Fields{"crdName": crdName, "name": profileName, "flt": f}).Debug("CRD: local")
return false
}
}
}
}
// other CRD rules, which matched the profile
records := clusHelper.GetCrdSecurityRuleRecordList(resource.NvSecurityRuleKind)
for _, record := range records {
if (record.Name == crdName) || (record.ProfileName != profileName) {
continue
}
if len(record.ProcessRules) > 0 {
log.WithFields(log.Fields{"crdName": crdName, "name": profileName, "prule_cnt": len(record.ProcessRules)}).Debug("CRD: other")
return false
}
if len(record.FileRules) > 0 {
log.WithFields(log.Fields{"crdName": crdName, "name": profileName, "frule_cnt": len(record.FileRules)}).Debug("CRD: other")
return false
}
}
return true
}
// Get highest CRD security level in related crd records
func (h *nvCrdHandler) crdGetProfileSecurityLevel(profileName string, records map[string]*share.CLUSCrdSecurityRule) string {
mode := ""
if utils.DoesGroupHavePolicyMode(profileName) {
for _, record := range records {
if record.ProfileName != profileName {
continue
}
switch record.ProfileMode {
case share.PolicyModeEnforce:
log.WithFields(log.Fields{"name": record.Name}).Debug("CRD: decision")
mode = record.ProfileMode
case share.PolicyModeEvaluate:
log.WithFields(log.Fields{"name": record.Name}).Debug("CRD: decision ...")
mode = record.ProfileMode
case share.PolicyModeLearn:
if mode == "" {
mode = record.ProfileMode
}
}
// highest level
if mode == share.PolicyModeEnforce {
break
}
}
// no more related crd record, restore as system default
if mode == "" {
mode = cacher.GetNewServicePolicyMode()
}
}
return mode
}
// rebuild group process and file profiles from CRD records
func (h *nvCrdHandler) crdRebuildGroupProfiles(groupName string, records map[string]*share.CLUSCrdSecurityRule, reviewType share.TReviewType) string {
if grp, _, err := clusHelper.GetGroup(groupName, h.acc); grp == nil || err != nil {
log.WithFields(log.Fields{"groupName": groupName}).Debug("not existed")
return ""
}
if records == nil {
records = clusHelper.GetCrdSecurityRuleRecordList(resource.NvSecurityRuleKind)
}
baseline := share.ProfileZeroDrift
mode := h.crdGetProfileSecurityLevel(groupName, records)
procs := make(map[string]*share.CLUSCrdProcessRule, 0)
files := make(map[string]*share.CLUSCrdFileRule, 0)
for _, record := range records {
if record.ProfileName != groupName {
continue
}
if record.ProcessProfile.Baseline == share.ProfileBasic {
baseline = share.ProfileBasic
}
// collecting process rules
for i, pr := range record.ProcessRules {
key := fmt.Sprintf("%s::%s::%s", pr.Name, pr.Path, pr.Action)
procs[key] = &(record.ProcessRules[i])
}
// collecting file rules
for i, fr := range record.FileRules {
key := fmt.Sprintf("%s::%v::%s", fr.Filter, fr.Recursive, fr.Behavior)
if ffr, ok := files[key]; ok {
apps := utils.NewSet()
for _, app := range ffr.App { // from map
apps.Add(app)
}
for _, app := range fr.App { // from new crd
apps.Add(app)
}
ffr.App = apps.ToStringSlice()
} else { // new entry
files[key] = &(record.FileRules[i])
}
}
}
////
pprofile := &api.RESTProcessProfile{
Group: groupName,
Baseline: baseline,
Mode: mode,
ProcessList: make([]*api.RESTProcessProfileEntry, 0),
}
/// from map to slices
for _, pp := range procs {
ppa := &api.RESTProcessProfileEntry{
Name: pp.Name,
Path: pp.Path,
Action: pp.Action,
AllowFileUpdate: pp.AllowFileUpdate,
}
pprofile.ProcessList = append(pprofile.ProcessList, ppa)
}
////
fprofile := &api.RESTFileMonitorProfile{
Group: groupName,
Filters: make([]*api.RESTFileMonitorFilter, 0),
}
/// from map to slices
for _, ff := range files {
ffa := &api.RESTFileMonitorFilter{
Filter: ff.Filter,
Recursive: ff.Recursive,
Behavior: ff.Behavior,
Apps: ff.App,
}
fprofile.Filters = append(fprofile.Filters, ffa)
}
// update process rules
h.crdHandleProcessProfile(groupName, mode, pprofile, reviewType)
// update file rules
h.crdHandleFileProfile(groupName, mode, fprofile, reviewType)
return mode
}
// kvOnly: true means the checking is triggered by kv change(ex: import). false means the check is triggered by k8s(ex: startup)
func CrossCheckCrd(kind, rscType, kvCrdKind, lockKey string, kvOnly bool) error {
if clusHelper == nil {
clusHelper = kv.GetClusterHelper()
}
var err error
var objs []interface{}
var imported, deleted []string
gwrecordlist := clusHelper.GetCrdSecurityRuleRecordList(kvCrdKind)
objs, err = global.ORCH.ListResource(rscType)
if err != nil {
log.WithFields(log.Fields{"rscType": rscType, "err": err}).Error()
return err
}
var crdHandler nvCrdHandler
crdHandler.Init(lockKey)
if !crdHandler.AcquireLock(clusterLockWait) {
return nil
}
acc := access.NewAdminAccessControl()
switch kind {
case resource.NvSecurityRuleKind, resource.NvClusterSecurityRuleKind:
if len(gwrecordlist) == 0 && kvOnly {
// crd records in policy cofiguration export may be missing(4.2.2-) or different from what are configured in k8s.
// So we first revert crd groups & remove crd policies in kv and then parse the crd rules in k8s(based on objs) again
// In this way we are sure the final crd groups/policies are exactly what's configured in k8s
delRules := make(map[string]uint32, 4)
// for crd network policy
for _, crh := range clusHelper.GetPolicyRuleList() {
if isSecurityPolicyID(crh.ID) {
delRules[fmt.Sprintf("%d", crh.ID)] = crh.ID
}
}
crdHandler.crdDeleteRules(delRules)
// for crd groups
groupToUpdate := make([]string, 0, 4)
for _, cg := range clusHelper.GetAllGroups(share.ScopeLocal, acc) {
if cg.CfgType == share.GroundCfg {
groupToUpdate = append(groupToUpdate, cg.Name)
}
}
crdHandler.crdUpdateGroup(groupToUpdate)
for _, gName := range groupToUpdate {
crdHandler.crdDeleteRecordEx(kvCrdKind, "", gName)
}
}
case resource.NvAdmCtrlSecurityRuleKind:
// crd records in admission control rules export may be different from what are configured in k8s.
// So we first remove crd admission control rules in kv and then parse the crd rules in k8s(based on objs) again
// In this way we are sure the final crd admission control rules are exactly what's configured in k8s
crdHandler.crdDeleteAdmCtrlRules()
setAdmCtrlStateInCluster(nil, nil, nil, nil, nil, share.UserCreated)
case resource.NvDlpSecurityRuleKind:
crdHandler.crdUpdateDlpSensors()
case resource.NvWafSecurityRuleKind:
crdHandler.crdUpdateWafSensors()
}
crdHandler.ReleaseLock()
for _, obj := range objs {
var crdCfgRet *resource.NvSecurityParse
var err, recordname string
var errCount int
if !crdHandler.AcquireLock(clusterLockWait) {
continue
}
if kind == resource.NvSecurityRuleKind {
r := obj.(*resource.NvSecurityRule)
crdCfgRet, errCount, err, recordname = crdHandler.parseCurCrdContent(r, share.ReviewTypeCRD, share.ReviewTypeDisplayCRD)
if errCount > 0 {
log.WithFields(log.Fields{"error": err}).Error()
e := fmt.Sprintf(" %s in namespace %s deleted due to error: %s \n", *r.Metadata.Name,
*r.Metadata.Namespace, err)
deleted = append(deleted, e)
global.ORCH.DeleteResource(resource.RscTypeCrdSecurityRule, r)
} else {
e := fmt.Sprintf(" %s in namespace %s detected and imported\n ", *r.Metadata.Name, *r.Metadata.Namespace)
imported = append(imported, e)
delete(gwrecordlist, recordname)
crdHandler.crdGFwRuleProcessRecord(crdCfgRet, resource.NvSecurityRuleKind, recordname)
}
} else {
var metadataName string
switch kind {
case resource.NvClusterSecurityRuleKind:
r1 := obj.(*resource.NvClusterSecurityRule)
metadataName = *r1.Metadata.Name
r := resource.NvSecurityRule(*r1)
crdCfgRet, errCount, err, recordname = crdHandler.parseCurCrdContent(&r, share.ReviewTypeCRD, share.ReviewTypeDisplayCRD)
case resource.NvAdmCtrlSecurityRuleKind:
r := obj.(*resource.NvAdmCtrlSecurityRule)
metadataName = *r.Metadata.Name
crdCfgRet, errCount, err, recordname = crdHandler.parseCurCrdAdmCtrlContent(r, share.ReviewTypeCRD, share.ReviewTypeDisplayCRD)
case resource.NvDlpSecurityRuleKind:
r := obj.(*resource.NvDlpSecurityRule)
metadataName = *r.Metadata.Name
crdCfgRet, errCount, err, recordname = crdHandler.parseCurCrdDlpContent(r, share.ReviewTypeCRD, share.ReviewTypeDisplayCRD)
case resource.NvWafSecurityRuleKind:
r := obj.(*resource.NvWafSecurityRule)
metadataName = *r.Metadata.Name
crdCfgRet, errCount, err, recordname = crdHandler.parseCurCrdWafContent(r, share.ReviewTypeCRD, share.ReviewTypeDisplayCRD)
}
if errCount > 0 {
if kind == resource.NvSecurityRuleKind || kind == resource.NvClusterSecurityRuleKind || metadataName == "" {
// for admission control crd resource, if metaname is empty, delete the crd resource
log.WithFields(log.Fields{"error": err, "name": metadataName}).Error()
e := fmt.Sprintf(" %s deleted due to error: %s \n", metadataName, err)
deleted = append(deleted, e)
global.ORCH.DeleteResource(rscType, obj)
}
} else {
switch kind {
case resource.NvClusterSecurityRuleKind:
crdHandler.crdGFwRuleProcessRecord(crdCfgRet, resource.NvSecurityRuleKind, recordname)
case resource.NvAdmCtrlSecurityRuleKind:
if crdCfgRet != nil { // for NvAdmissionControlSecurityRule resource objects with metadata name other than "local", ignore them
crdHandler.crdAdmCtrlRuleRecord(crdCfgRet, kind, recordname)
}
case resource.NvDlpSecurityRuleKind:
crdHandler.crdDlpSensorRecord(crdCfgRet, kind, recordname)
case resource.NvWafSecurityRuleKind:
crdHandler.crdWafSensorRecord(crdCfgRet, kind, recordname)
}
e := fmt.Sprintf(" %s detected and imported ", metadataName)
imported = append(imported, e)
delete(gwrecordlist, recordname)
}
}
crdHandler.ReleaseLock()
time.Sleep(1 * time.Second)
}
if len(imported) > 0 {
e := fmt.Sprintf("CustomResourceDefinition %s detected and imported", kind)
k8sResourceLog(share.CLUSEvCrdImported, e, imported)
}
if len(deleted) > 0 {
e := fmt.Sprintf("CustomResourceDefinition %s detected Error and deleted", kind)
k8sResourceLog(share.CLUSEvCrdErrDetected, e, deleted)
}
if len(gwrecordlist) > 0 {
if crdHandler.AcquireLock(clusterLockWait) {
removed := crdHandler.crdDelAll(kind, kvCrdKind, gwrecordlist)
crdHandler.ReleaseLock()
if len(removed) > 0 {
e := fmt.Sprintf("CustomResourceDefinition %s cross check", kind)
k8sResourceLog(share.CLUSEvCrdRemoved, e, removed)
}
}
}
return nil
}
|
{
criteria := make([]api.RESTCriteriaEntry, len(group.Criteria))
for i, crt := range group.Criteria {
criteria[i] = api.RESTCriteriaEntry{
Key: crt.Key, Value: crt.Value, Op: crt.Op,
}
}
r := api.RESTCrdGroupConfig{
Name: group.Name,
Comment: group.Comment,
Criteria: &criteria,
}
return &r
}
|
verify_cast5.go
|
package main
import (
"bufio"
"bytes"
"code.google.com/p/go.crypto/cast5"
"crypto/cipher"
"encoding/hex"
"fmt"
"os"
"strings"
)
func unhexlify(s string) []byte {
bytes, err := hex.DecodeString(s)
if err != nil {
panic(err)
}
return bytes
}
type VectorArgs struct {
count string
key string
iv string
plaintext string
ciphertext string
}
type VectorVerifier interface {
validate(count string, key, iv, plaintext, expected_ciphertext []byte)
}
type ofbVerifier struct{}
func (o ofbVerifier) validate(count string, key, iv, plaintext, expected_ciphertext []byte) {
block, err := cast5.NewCipher(key)
if err != nil {
panic(err)
}
ciphertext := make([]byte, len(plaintext))
stream := cipher.NewOFB(block, iv)
stream.XORKeyStream(ciphertext, plaintext)
if !bytes.Equal(ciphertext, expected_ciphertext) {
panic(fmt.Errorf("vector mismatch @ COUNT = %s:\n %s != %s\n",
count,
hex.EncodeToString(expected_ciphertext),
hex.EncodeToString(ciphertext)))
}
}
type cbcVerifier struct{}
func (o cbcVerifier) validate(count string, key, iv, plaintext, expected_ciphertext []byte) {
block, err := cast5.NewCipher(key)
if err != nil {
panic(err)
}
ciphertext := make([]byte, len(plaintext))
mode := cipher.NewCBCEncrypter(block, iv)
mode.CryptBlocks(ciphertext, plaintext)
if !bytes.Equal(ciphertext, expected_ciphertext) {
panic(fmt.Errorf("vector mismatch @ COUNT = %s:\n %s != %s\n",
count,
hex.EncodeToString(expected_ciphertext),
hex.EncodeToString(ciphertext)))
}
}
type cfbVerifier struct{}
func (o cfbVerifier) validate(count string, key, iv, plaintext, expected_ciphertext []byte) {
block, err := cast5.NewCipher(key)
if err != nil {
panic(err)
}
ciphertext := make([]byte, len(plaintext))
stream := cipher.NewCFBEncrypter(block, iv)
stream.XORKeyStream(ciphertext, plaintext)
if !bytes.Equal(ciphertext, expected_ciphertext) {
panic(fmt.Errorf("vector mismatch @ COUNT = %s:\n %s != %s\n",
count,
hex.EncodeToString(expected_ciphertext),
hex.EncodeToString(ciphertext)))
}
}
type ctrVerifier struct{}
func (o ctrVerifier) validate(count string, key, iv, plaintext, expected_ciphertext []byte) {
block, err := cast5.NewCipher(key)
if err != nil {
panic(err)
}
ciphertext := make([]byte, len(plaintext))
stream := cipher.NewCTR(block, iv)
stream.XORKeyStream(ciphertext, plaintext)
if !bytes.Equal(ciphertext, expected_ciphertext) {
panic(fmt.Errorf("vector mismatch @ COUNT = %s:\n %s != %s\n",
count,
hex.EncodeToString(expected_ciphertext),
hex.EncodeToString(ciphertext)))
}
}
func validateVectors(verifier VectorVerifier, filename string) {
vectors, err := os.Open(filename)
if err != nil {
panic(err)
}
defer vectors.Close()
var segments []string
var vector *VectorArgs
scanner := bufio.NewScanner(vectors)
for scanner.Scan() {
segments = strings.Split(scanner.Text(), " = ")
switch {
case strings.ToUpper(segments[0]) == "COUNT":
if vector != nil {
verifier.validate(vector.count,
unhexlify(vector.key),
unhexlify(vector.iv),
unhexlify(vector.plaintext),
unhexlify(vector.ciphertext))
}
vector = &VectorArgs{count: segments[1]}
case strings.ToUpper(segments[0]) == "IV":
vector.iv = segments[1][:16]
case strings.ToUpper(segments[0]) == "KEY":
vector.key = segments[1]
case strings.ToUpper(segments[0]) == "PLAINTEXT":
|
case strings.ToUpper(segments[0]) == "CIPHERTEXT":
vector.ciphertext = segments[1]
}
}
}
func main() {
validateVectors(ofbVerifier{},
"tests/hazmat/primitives/vectors/ciphers/CAST5/cast5-ofb.txt")
fmt.Println("OFB OK.")
validateVectors(cfbVerifier{},
"tests/hazmat/primitives/vectors/ciphers/CAST5/cast5-cfb.txt")
fmt.Println("CFB OK.")
validateVectors(cbcVerifier{},
"tests/hazmat/primitives/vectors/ciphers/CAST5/cast5-cbc.txt")
fmt.Println("CBC OK.")
validateVectors(ctrVerifier{},
"tests/hazmat/primitives/vectors/ciphers/CAST5/cast5-ctr.txt")
fmt.Println("CTR OK.")
}
|
vector.plaintext = segments[1]
|
group.py
|
from fineract.objects.fineract_object import DataFineractObject
from fineract.objects.types import Type
class Group(DataFineractObject):
"""
This class represents a Group.
"""
def __repr__(self):
|
def _init_attributes(self):
self.id = None
self.account_no = None
self.external_id = None
self.name = None
self.status = None
self.active = None
self.activation_date = None
self.office_id = None
self.office_name = None
self.hierarchy = None
def _use_attributes(self, attributes):
self.id = attributes.get('id', None)
self.account_no = attributes.get('accountNo', None)
self.external_id = attributes.get('externalId', None)
self.name = attributes.get('name', None)
self.status = self._make_fineract_object(GroupStatus, attributes.get('status', None))
self.active = attributes.get('active', None)
self.activation_date = self._make_date_object(attributes.get('activationDate', None))
self.office_id = attributes.get('officeId', None)
self.office_name = attributes.get('officeName', None)
self.hierarchy = attributes.get('hierarchy', None)
def add_members(self, members_list):
params = {
'clientMembers': members_list
}
data = self.request_handler.make_request(
'POST',
'/groups/{}?command=associateClients'.format(self.id),
json=params
)
return data['groupId'] == self.id
def remove_members(self, members_list):
params = {
'clientMembers': members_list
}
data = self.request_handler.make_request(
'POST',
'/groups/{}?command=disassociateClients'.format(self.id),
json=params
)
return data['groupId'] == self.id
@classmethod
def create(cls, request_handler, name, office_id, active=True, activation_date=None):
"""Create a group
:param request_handler:
:param name:
:param office_id:
:param active:
:param activation_date:
:rtype: :class:`fineract.objects.group.Group`
"""
data = {
'name': name,
'officeId': office_id,
'active': active,
'activationDate': activation_date or cls._get_current_date()
}
res = request_handler.make_request(
'POST',
'/groups',
json=data
)
group_id = res['groupId']
return cls(request_handler,
request_handler.make_request(
'GET',
'/groups/{}'.format(group_id)
), False)
@classmethod
def get_group_by_name(cls, request_handler, name):
"""Get a group by name
:param request_handler:
:param name:
:rtype: :class:`fineract.objects.group.Group`
"""
data = request_handler.make_request(
'GET',
'/groups'
)
if data:
for item in data:
if item['name'] == name:
print(item)
return cls(request_handler, item, False)
return None
class GroupStatus(Type):
"""
This class represents a Group status.
"""
pass
|
return self.get__repr__({'group_id': self.id})
|
user.go
|
//go:build linux
// +build linux
package systemd
import (
"bufio"
"bytes"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
dbus "github.com/godbus/dbus/v5"
"github.com/opencontainers/runc/libcontainer/userns"
"github.com/pkg/errors"
)
// newUserSystemdDbus creates a connection for systemd user-instance.
func newUserSystemdDbus() (*systemdDbus.Conn, error) {
addr, err := DetectUserDbusSessionBusAddress()
if err != nil {
return nil, err
}
uid, err := DetectUID()
if err != nil {
return nil, err
}
return systemdDbus.NewConnection(func() (*dbus.Conn, error) {
conn, err := dbus.Dial(addr)
if err != nil {
return nil, errors.Wrapf(err, "error while dialing %q", addr)
}
methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(uid))}
err = conn.Auth(methods)
if err != nil {
conn.Close()
return nil, errors.Wrapf(err, "error while authenticating connection, address=%q, UID=%d", addr, uid)
}
if err = conn.Hello(); err != nil {
conn.Close()
return nil, errors.Wrapf(err, "error while sending Hello message, address=%q, UID=%d", addr, uid)
}
return conn, nil
})
}
// DetectUID detects UID from the OwnerUID field of `busctl --user status`
// if running in userNS. The value corresponds to sd_bus_creds_get_owner_uid(3) .
//
// Otherwise returns os.Getuid() .
func DetectUID() (int, error)
|
// DetectUserDbusSessionBusAddress returns $DBUS_SESSION_BUS_ADDRESS if set.
// Otherwise returns "unix:path=$XDG_RUNTIME_DIR/bus" if $XDG_RUNTIME_DIR/bus exists.
// Otherwise parses the value from `systemctl --user show-environment` .
func DetectUserDbusSessionBusAddress() (string, error) {
if env := os.Getenv("DBUS_SESSION_BUS_ADDRESS"); env != "" {
return env, nil
}
if xdr := os.Getenv("XDG_RUNTIME_DIR"); xdr != "" {
busPath := filepath.Join(xdr, "bus")
if _, err := os.Stat(busPath); err == nil {
busAddress := "unix:path=" + busPath
return busAddress, nil
}
}
b, err := exec.Command("systemctl", "--user", "--no-pager", "show-environment").CombinedOutput()
if err != nil {
return "", errors.Wrapf(err, "could not execute `systemctl --user --no-pager show-environment`, output=%q", string(b))
}
scanner := bufio.NewScanner(bytes.NewReader(b))
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if strings.HasPrefix(s, "DBUS_SESSION_BUS_ADDRESS=") {
return strings.TrimPrefix(s, "DBUS_SESSION_BUS_ADDRESS="), nil
}
}
return "", errors.New("could not detect DBUS_SESSION_BUS_ADDRESS from `systemctl --user --no-pager show-environment`. Make sure you have installed the dbus-user-session or dbus-daemon package and then run: `systemctl --user start dbus`")
}
|
{
if !userns.RunningInUserNS() {
return os.Getuid(), nil
}
b, err := exec.Command("busctl", "--user", "--no-pager", "status").CombinedOutput()
if err != nil {
return -1, errors.Wrapf(err, "could not execute `busctl --user --no-pager status`: %q", string(b))
}
scanner := bufio.NewScanner(bytes.NewReader(b))
for scanner.Scan() {
s := strings.TrimSpace(scanner.Text())
if strings.HasPrefix(s, "OwnerUID=") {
uidStr := strings.TrimPrefix(s, "OwnerUID=")
i, err := strconv.Atoi(uidStr)
if err != nil {
return -1, errors.Wrapf(err, "could not detect the OwnerUID: %s", s)
}
return i, nil
}
}
if err := scanner.Err(); err != nil {
return -1, err
}
return -1, errors.New("could not detect the OwnerUID")
}
|
datum_invariants_test.go
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tree
import (
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeofday"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAllTypesCastableToString(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
for _, typ := range types.Scalar {
if ok, _, _ := isCastDeepValid(typ, types.String); !ok {
t.Errorf("%s is not castable to STRING, all types should be", typ)
}
}
}
func TestAllTypesCastableFromString(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
for _, typ := range types.Scalar {
if ok, _, _ := isCastDeepValid(types.String, typ); !ok {
t.Errorf("%s is not castable from STRING, all types should be", typ)
}
}
}
func
|
(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
pacificTimeZone := int32(7 * 60 * 60)
sydneyTimeZone := int32(-10 * 60 * 60)
sydneyFixedZone := time.FixedZone("otan@sydney", -int(sydneyTimeZone))
// kiwiFixedZone is 2 hours ahead of Sydney.
kiwiFixedZone := time.FixedZone("otan@auckland", -int(sydneyTimeZone)+2*60*60)
ddate, err := NewDDateFromTime(time.Date(2019, time.November, 22, 0, 0, 0, 0, time.UTC))
require.NoError(t, err)
testCases := []struct {
desc string
left Datum
right Datum
location *time.Location
expected int
}{
{
desc: "same DTime are equal",
left: MakeDTime(timeofday.New(12, 0, 0, 0)),
right: MakeDTime(timeofday.New(12, 0, 0, 0)),
expected: 0,
},
{
desc: "same DTimeTZ are equal",
left: NewDTimeTZFromOffset(timeofday.New(22, 0, 0, 0), sydneyTimeZone),
right: NewDTimeTZFromOffset(timeofday.New(22, 0, 0, 0), sydneyTimeZone),
expected: 0,
},
{
desc: "DTime and DTimeTZ both UTC, and so are equal",
left: MakeDTime(timeofday.New(12, 0, 0, 0)),
right: NewDTimeTZFromOffset(timeofday.New(12, 0, 0, 0), 0),
expected: 0,
},
{
desc: "DTime and DTimeTZ both Sydney time, and so are equal",
left: MakeDTime(timeofday.New(12, 0, 0, 0)),
right: NewDTimeTZFromOffset(timeofday.New(12, 0, 0, 0), sydneyTimeZone),
location: sydneyFixedZone,
expected: 0,
},
{
desc: "DTimestamp and DTimestampTZ (Sydney) equal in Sydney zone",
left: MustMakeDTimestamp(time.Date(2019, time.November, 22, 10, 0, 0, 0, time.UTC), time.Microsecond),
right: MustMakeDTimestampTZ(time.Date(2019, time.November, 22, 10, 0, 0, 0, sydneyFixedZone), time.Microsecond),
location: sydneyFixedZone,
expected: 0,
},
{
desc: "DTimestamp and DTimestampTZ (Sydney) equal in Sydney+2 zone",
left: MustMakeDTimestamp(time.Date(2019, time.November, 22, 12, 0, 0, 0, time.UTC), time.Microsecond),
right: MustMakeDTimestampTZ(time.Date(2019, time.November, 22, 10, 0, 0, 0, sydneyFixedZone), time.Microsecond),
location: kiwiFixedZone,
expected: 0,
},
{
desc: "Date and DTimestampTZ (Sydney) equal in Sydney zone",
left: ddate,
right: MustMakeDTimestampTZ(time.Date(2019, time.November, 22, 0, 0, 0, 0, sydneyFixedZone), time.Microsecond),
location: sydneyFixedZone,
expected: 0,
},
{
desc: "Date and DTimestampTZ (Sydney) equal in Sydney+2 zone",
left: ddate,
right: MustMakeDTimestampTZ(time.Date(2019, time.November, 21, 22, 0, 0, 0, sydneyFixedZone), time.Microsecond),
location: kiwiFixedZone,
expected: 0,
},
{
desc: "equal wall clock time for DTime and DTimeTZ, with TimeTZ ahead",
left: MakeDTime(timeofday.New(12, 0, 0, 0)),
right: NewDTimeTZFromOffset(timeofday.New(22, 0, 0, 0), sydneyTimeZone),
expected: 1,
},
{
desc: "equal wall clock time for DTime and DTimeTZ, with TimeTZ behind",
left: MakeDTime(timeofday.New(12, 0, 0, 0)),
right: NewDTimeTZFromOffset(timeofday.New(5, 0, 0, 0), pacificTimeZone),
expected: -1,
},
{
desc: "equal wall clock time for DTime and DTimeTZ, with TimeTZ ahead",
left: NewDTimeTZFromOffset(timeofday.New(22, 0, 0, 0), sydneyTimeZone),
right: NewDTimeTZFromOffset(timeofday.New(5, 0, 0, 0), pacificTimeZone),
expected: -1,
},
{
desc: "wall clock time different for DTimeTZ and DTimeTZ",
left: NewDTimeTZFromOffset(timeofday.New(23, 0, 0, 0), sydneyTimeZone),
right: NewDTimeTZFromOffset(timeofday.New(5, 0, 0, 0), pacificTimeZone),
expected: 1,
},
}
for _, tc := range testCases {
t.Run(
tc.desc,
func(t *testing.T) {
ctx := &EvalContext{
SessionData: &sessiondata.SessionData{
DataConversion: sessiondata.DataConversionConfig{
Location: tc.location,
},
},
}
assert.Equal(t, tc.expected, compareTimestamps(ctx, tc.left, tc.right))
assert.Equal(t, -tc.expected, compareTimestamps(ctx, tc.right, tc.left))
},
)
}
}
|
TestCompareTimestamps
|
generated.rs
|
// GENERATED CODE DO NOT MODIFY!
// Any manual changes to this file will be overwritten!
use crate::{geometry::Size, mono_font::MonoFont};
/// 6x13 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGAAAACcCAAAAAClo/GGAAAIX0lEQVR4nO2Wi5Ibtw5E4///aOd0N0ByXtKMdjepcrlFHDRAkKPHOvf++ueH9ckDfnNIcUtjzid+/3KKbAGrFHfktX7/GgMezQOK1Bi96FDQcprUnvK1XjzA2HpyAynzJqDtqdgYW/gSpzjXl2pjeKktWX0SmfpMbIwtfKtO8RQXjVJ7Z4HBjKLVU+FYkYqoh/wcVrlW285osavcVliuorrPqcBT1ZIorM5osavcVuyVSzEQqxeGOooNV833gMcqdEcp5Sp1CJbOKkVx4aqlw7vLQeXn0knFVpvOLIb5Kf0RD8j3ZebHwZCohicZldgMd+KAVdkzHPAfCH8noKIIWEhUiMpNGqtmw07gP3ZJeiknCjzTpogwLCJMbBnJJ3ajyQpAAuMxCMMiQt4Du01C1kUqpR51ICWmRG3lehpktQR76ESEilWugW5RdjSVcSzJHm6jObQtXQmbw4HEpVDv0Z5MsJwdzSnVfUo+1DFlUiJUCCwi1HImwgTkGpRCmk47qvLkUAUtmjC+SMD4hbVxnllfka4IrvRq71v0Jzxg+aGw8iS3i9sf33MkeRyJio6orAi92dssAFkAZgE0mU4HIrEIUVmhP1toXynMAhKp/Z4diDQW4uosanXyAGpIkQUkUvs9OxApUwpxfakm5aWCR/nZTKbkvbgTui9PUZUS7y8GcWJ5qSb5xSZz9PBsIFL7Qe6Ccg5EypQidD0mSXiMiiwgkdpPptOBSGMh0TNcCvFOMg4vALMAmkxniSxCVFasZJ9+FbYYERSziyjo4Al7KDho0Cpvsss29mf19wGSv+vS6i3/OLRI/lWgUjy7Kq/JiEYpAsnGsHr0wDWuyHmR5RzJZMCyS71SUk6sZIGicqLpzHJInsv0Skk5sZIFisqJ5swlf486pZ14ff2QegRbAYnM5kpjn1o+kq6OK0vKS3hDqFQTQbFzOJRpd32SJSktUVt6OdVEUOwcDmWabl5x4lnkJTcDVSpjTPkA8vcuT9jbgdVrmDWDJtuylo0xtVS7nfviOePo4rEUXf2Y/osHfPzV3JIuH08YpjRr/0+XNXut6syRjTbNGh0aNYZl7TKJZXXeatPcj4x6mI0tdafzVt3kb4r3gqFBCklQnoDy/GOAeJDEJgsPqEljBpJbtKlOya6Oxbej6UTPXpTn5ZnUwPIdaYnKCrU9TRLomAlE0nUSlsgrTgRSikllhYichHQ6ECk9WyKvOBFIFLwRiDtSC+njdMeBSFm2RF5xIrB8vW+QL5KgPGExgyXcV/ZSdIcI08HeFUccU1Ss13q3v8rvaCM+N3yptwNf1fc8YHySYYb29UfKD7EQsKxKJ+qJG8ropP6OlKVKJ+qJG8roJMGKlr99PFU8E2p4ju4VmWEIR95wqkdDpqG9ljOxkmXYGzolYaUuS5pmaxJlxIWQvqiX3DbOOFVHJi0ZhUF4SyCRHXobJClp5RRfC6CvlE2R8BLYjgGVGNC0jJS8cmoMaas8D5PhDkNdBSCxHCKDHu0aFKd0pjTde2lWUeJtQDTM0IcP4KbDVee6N/UFPXqAvwC+evHl0WW3M+dAc7RVdwheRsrlgkXsEGyyjV1Ejwb7LEu5I4g4zguDAzvpmu5XoonTITJL3uGGHVo+GTdQcBNkN/sttlgYVIkmrgYNprS5tgDL0vlsETGL2GVhUCWauEzm5hT4VJLq8pxnkdj1oG06gXpSJQZwGiWzkK0SaI5xbaYXT0Ijc3+PVqIpp0dDAsoDpWptMxfj2MVQokrOCqlSN0yQMx1FwIq4uy2Fpxctu50ZAhbjs00Fmkv/rp6feKg/7QH1VZszn+m4l85KCzd/On5ebKgMIv4qxpBVZ+g7o3SKyw3+y2M5isYq3bNruhQUVsxKy7ezHMFIQ68foFcVzUoiSx+LK/Cw6pZGuIC+3JQ6MMEASCdUA4msSMVR6jLOnN0cj7Sh5F3lApGVfcvVQeoS3AwvVQ9gCgzvrNYraSgh7sUH0A21WYms0EsZSDV6EEO8F2XchXqPO5RdyMuMB1xpDorvpBmdwMrburgWY0edNq/0ZvbsrrPetR4NfyK+qXqGs39RLIUhD1Z1w9nzmVRIzrPvTCcEXmBPJv2jqQAh8AIukAm8gMqXYoYIE8/EgRx6xfkASPCnC7bUBgMHslGnX9CAXHTJznvW0Xf0T4BXBU7YeU8Oy7whc8qc0eemOJId4UAdfk9eGb9m5z3r6DsmK0RlsGHnPX0gx16RT+LvwiAdqQ0N70m+Lz3uqZ4c+eT+Rw/4SOMBfM98j1U6628fpg+mKFiWsyd7JhzyT0Srto0QeIFixAH5NEzgBVw4kJ+rDa3bYtYHX4n3wIgm9VcE8nCCHZkLWp6q+bdkGS5uM6/411TWb4DhTbmVjXOyCnqpcyTLkNeLy3GsfmXjnB15xe1ZwVepW7UMb/MhYPwpWYKPYE45I9T/8SL39i0+fcD4OAoKmQtG8VfULQ5Rb8b2iXL4vh6OP9f//wB9j18RP2xd4Jzr+HnUB1BrVZfOnh+TlvPsO9MJgRcoZnQjt0PgBVwgE3gBlT8qniN8C8GRLNxb6tN2PmfiSH+COwSMc4jgv1pgS3aII+v575jMmUt23lNH+a7eMS8duWLiyLrgHQU8409Jvi895ameHPnk/kcP+Ej9/f6YuDv3+zksy5nfV0wfbNS1syd7JpTc4TBRhRIIgRcokkAp3gRewIUDuSNLuqce7HyldZ8nG/yDp2t/ycheV3DmkrotnkIpjfJX5Pb8szPVueJyW6U75HqkSq+496x0hx15xZ2Qtw+HV0rxlhX98TFHKtiVYckrpVTYXzKK9+ETIh7gR0h6wEPlkitpl+uhsgJ8q/z5dDWy/xcpYWAVWMm1cAAAAABJRU5ErkJggg==" alt="Font6x13 font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font6x13;
impl MonoFont for Font6x13 {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/6x13.raw");
const FONT_IMAGE_WIDTH: u32 = 96;
const CHARACTER_SIZE: Size = Size::new(6, 13);
const BASELINE: Option<i32> = Some(10);
|
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 7x14 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHAAAACoCAAAAAAd5jXVAAAKnElEQVR4nOWWB5IcRxIEj/9/NM89Iqu6e9T2LkDQ7C6YITJLzQoA/Os/fxiPD/79n78owgfsDQZ5G2w+dtNUKayfxRyldBVTl0f/dsPfhM9g07GHpkphubvGgDCeKbIt/psenGsUOobbWV5W7wFdfQMXj2W7BY9zQd/iYTK9TUJwxKTIttdw8Vi22+AJWwwFvAp2K479JpbQ5nfIGjVIu3Ccryk0b7/C+V6wHRxzsTp7MjVIO9hbGfoYvV+EF9IVtKgg+RaSzOQ1ska9AjeztK5RV7FQHPlI1/yALsmi/eDyIA2cYmGwmuXBNB7BBh1WZdF+wAHbGiuUXX2w8vILLsM0Ea/7iNn1Gl2sPuJ5OpPIT+Ed8hkvpjOK/En8Cw/2F2N9ydONgfypELSEo9M7026gO/N7VyKoaQ/3gJTcmnQyVh9wDCdpcJ3gPYJJUhozVQINtkyS0roqxseCZhU+n1ghxudwGWgMAtOQ0joT9erC6vT3JzQUcwJtSE6rdmZ5RSfucI9pK9fMiQg4eYu8Oi05E07X9DO64YxMIo/XLEtokVenJVckGDvhcdI++nDNSWE+vTZdrKsRSGntAUf0dGSQgAweTkRq8kiQ0jqLwNZsFONjwcpxTy3Fx9KyOpumG8Or6TD0QBcPP6UE6rcil37CF8u/H/8XD/bHjmIqFm83DWEsrRra2IJ0M+xA9X+JMoRu1B3Z0ZNmMEaZWDsbpLT2AaFloo9B6mQ6j8G01GsDJ4OU1j4gcJMdiRCDDKFlh9tjaSm+CSbGMUjRmNCzyYogcJkNCcVEhkwoJs8PgiQ8BvcQaLMmiNMnMLQhoZjIkAnF5PnBbUCDFI0J1TgDs5aeKSCwYEcixGBKpsNtYoZOKTqTpGhMaAxSWvuA4C10JEIMpmQ6/dRSrw2cDFJa+whsmdgYgymZLnJZvBplYsfZIKW1VyhNUmw0SepkrPD8bs0kolaNEVjG2roW2rRFUNMaktrFIDP++8P433zQb+jCkX4L/Dn1pybaxToALAJavWtYfDq3GL8wkYhXAyKl0YjVUs+W/RVIvTVuismmwSkW6wT1bLNfgdTJIKfRsrsq28CRBus8dbJQxBVInQxy2sWYAYtsA0carPNUDbiFjNYUSJ2sY/V8oHr4ERY4D3oFbDGp1hRI1cSMwcWqhx9hgfOZcI2RIjo5qQKpmjCqXV26bPsRFjifCcYN0JuWZVwp2RFdziJ+6LIjLN/I3UCD1Mk6jpTsj+IdzGoFLN9h+UYOCu/pFaWd0+qlY5kiyyViJsGksQPn9mnx+ziumDQG5jOtNuCD/yJya27xpaMDaR4e/BPwY/zRR3ns/OI5F5fJqdnxHCgC2OEJzk+rpzg4T/wxrG7Pz4EigB2e8DB/3nievMsbe7jDE/a8n17F4mO4RodX+VL4YkBbFRCo/qobFAoDaTVC0JXZ8M4ERskMezIgtJoVkQmkNOYLrK8pxcAkKa29QmlhRRC4oh1ZEQyzk9KYF+TWJEYmOQfbK5QWVgTBnenIimCYnZTGPCBKahnQIEUDGrgHM8uKILSaFZEJpDTmAVFSb62bUCVZVgSB0pIVsYaUxrzIKlPATB/DNToctcHNsiIIFC4TEEhp0HPMv4mefTzHhCJ8hTt7HtBP+gimz8MXuLXpd+K3P9ivtN/e5v5QYbD8DXrwG5gDY3Ge5QfcngQ/wAPfwhwYi/fv3faHv4EHvoU5MBb3i5NFXmfKErh0bEXxnPrKRA4i27YfYOuceGFzQIHUW+OmWmTb9gPrBHWyUMQVSNWEUcWY4IYRsPwIC9kNqWXBhJhSsj+qExCVitM92PYD2Q2pZUVTVCnZH13Ooo65LsbHth/IE5BaBjR5iFwislMiaJKYMLb9AAcB0/raYJdE8DpA21V+xS2ypEezAycg27Yf2E+AU7wNzlCEwTRj2w+ct5/iffC1XU6171XNVzwN/mn84EE+t8f4Iigsna69Akuuaaf/xaA/8hX8ilSyRRm2TJJi3zOcy2J55wf4OLt1RVZGB+wDDAzYC7hyrC3f6HVoywS70QzGApbWNorBM9aWYrlzM6sO22BtBINObfWAJrdBagbYCU66JpY7J/uPpf86o4w0mjhamGWxbyOc7AxHx3C5c3IexOix1dKrg7UY0MyegJZGXR49bVnuXvLDC+wlqDoB2RawliN4EpIhIAR0LKxuuXPy9UFVVrZtB9wGLm2bYw+zuVUsd07OgwqgXWCI1hRZmOSCJ9MdQ5MslneugmygiMXkmlv04tqlT9u9AaOj2WHhtPEfwT97+wv8Gw/yLUYXphvDY78Pj78fRGqZznDAh/DAA2YLi/HAGX9F1VSMsWAyd29Mp8kNr7wMAqex05ozujHaZaxB56SFzh+GwlEXL+gIpQgB0S+plnPtWJIXdAx2WMhBh5GN6VytANxwtWIHxuaqRhGECXIOMoxsXDr31PRa1U6VRXtXDThauDKkGFzQEftzAkex+S5OpzOLF86O7gp3Sn7VYh/BRdlNYbg2nWdl0f4VXJF3HvQNlI34fqwd1BfavwIrOcEFRAbvMft6IMZDFB3CUigYgdVdsdceHrw0ATsd8QSx380M8ByFab8A21Dw5YOf4G545wjbUMDnXRHs8U30dF4lgYat+YaUlQfsWUNUWQwaogqcBGr9GPO9U8LUI1wBtTk3rCy7rDn4GmyMCj4LAn8GX1U/WsGrlCZdYvHbtvyTcX+VQUxSTE3og+3wwnIQUh9MYJYDuo+2wwtbd1A3jKchx+b0G9vhheU8pL42kIRjwA48mqvqK+s1N83qsTn9xnZ4YesO6oZRJklxhwl9sB1eWA5C6oZRJklxhwl9sB1eWA5C6ob5za0wEGm+Zbz5bfAy+kP84OgvvXec5Rr/gsZoQMNW1tDwhJkGDVFFNl2x+vn3gXA2v++c7ZoiT+DBNarNgWHlClfU7+P2uf0qJ+Yf4JYJOv1oBZvpoWe+NIFTGUFgS9F8NHYKEqdghtQHAxxL/MGDlIezX+4hpN6apOAC0ZqFtyZXyTV4NsgOdNFOl2QE2ny0fHmmTCjia4NsQjepOGQFgTYf7c8/CFPSmsGz+dEiIO5CVlqT7hgOjBTNa3PbEoBTrnAUXbT7ZP7tQuaw3QxfG45GNOAvzQ/Rd7+PHx77Fx78Ke49OD+G3wF+CWDQsJXvGVqmHuBC0BBVZAXUowqcBGr+LrHA7yCdIiuPmGFtzg3tz3pac/BH4avqXcO16d6ZQateLN87SN2w1O7e2rp89GIUV9w37jSZczf1bLO89GLZQN0yVHDM02JNrgbdv/Ri7OGmu0aZOPzRQFP1YusO6oatryDxvbHnrBdzA/iucfKTGbTqxahvIwd/ih8c/aX3funsj+DPmSL9IeR3wPd41X8RMBrQsJU1NLxiTxqiijynGOTLy4P+ocYIZ+virCnyBD7GGtXmwPAkNbg++new93P2Bvb2IJmRH81G/8Lwqp0ZUh8MJEkFMDXdMXYLEidhhtRNW07dNoFRcoZ3bTl124QGrQ5fGGQTumi3FqmbxjHBcScU8bV1E1Ii6FqkbpoHRyD11tyEmiSCmnUnpd1Hw6vpNPyl4RGAE1DqZ7jxZ9CX2JV3tHTUz3DjQd5wU3X5fwHbU0wwfHd6/gAAAABJRU5ErkJggg==" alt="Font7x14Bold font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font7x14Bold;
impl MonoFont for Font7x14Bold {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/7x14Bold.raw");
const FONT_IMAGE_WIDTH: u32 = 112;
const CHARACTER_SIZE: Size = Size::new(7, 14);
const BASELINE: Option<i32> = Some(11);
const UNDERLINE_OFFSET: i32 = 11 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 8x13 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAACcCAAAAACSQfz3AAAKG0lEQVR4nO2Xi3Yi2xFDc///oyd7q+o8GhpojOd6kjWySlLVedBgxjf55z8/jD/qAX5Vg8EN3WnWxGqSIgCH1/Drn31nH8Pg5hT+6x86uDCbDm0c6PAaVx4gLywwiI/704kOZT5vnrj7Z2DHvod2w7hoPgAjAwJ3zLbDeMJuH8MNcMJ+Ay3E565qe7ww2woo9Ng8KG57JnbWQAYTOZD18RmUslA+0fMtFG7aW9RypFGTCVoI+gEQaPVgoru2idv+iF4tPQU7IG+4d/myDGBqQ7dtt+CC40L2RUBbMGa3qLlq5cKEhbSRE9z+yjyPUqLtN2K80sJhsucfwd8H8AHGt6R/N3xJHGgJwJXVkzAUdMCmn8ELGjPWfjomELfB1Agoz0pFMAMgw4Of4TDuJuZ/jDpABEQjw2rFEsOF2VJgvAUMX6FQbXR/gJZCxamQ4jOLMyskDoH9UWXb8qygVE9qIB49QCWO4OZUN+NBAB2gTY/no+rx9LYdY4R7YaUWkJcqMIKpgJBVvKU0AoYfcDccA50L0U2mBWTeGOiZvXFJaQQMPyLTdU9aYYAYBLu2R8osBcZTIBYZVuorViCOIA5NtuRDRQEhPgImcmb00xUw+7LNY8HYC24i/HZ479CXuLbrN+LvA/AAfGk0g7a1CW1x0/AEjVJs7Wmo9TcJijIUxrPt8IeIgodS4CxxcATGWwLTXqAMhbvPIO8dxBEYnPqSwETBuMjHq1gOoZ7Ah4HUQM4ScQQGp74kMPHfDFRHtPpJQKRTA7HowKfhcRKDODLnVFwB9sdJpbxTYAbYxkimBn7YPC5yYZSIIzDYnCOI/ZLAxK2ojsQmOViOJGQvdCCXgzgCg4MjMN4SmCgYF2Uo3L0DiCNwlgJnid23dzIkelugDIW7o3yQ3EPGkW7JFKEtbmJLgcHhk8NYJre10AJ3AhTGs+DfgR/G3wf4Ax7A7wI2vhQxPaGXEzKbX6ZH3gfoDLSkoFoxR6J3IPDUW0rXCz1yBOjW0ELlsbewXgjee1ApikALAhQeSsQj00SiYjXqAgXee1ApikALAnW8o+EmpHX5nha4CcVqFRl9gu4QRBFoTaYLbrztLGzoO0AcWW8EJEZKldpAFSkVHG36SsN31B0i90w2kiKlSjaii6mjghlmbDugLhB4roazNilVsitaP4zphrSCGUDl0iPqBYP+nadXuidEb3uVgiAaybZsNGABS3u74WbExn8JPg/wP8c/iz/iAf69z/0Eee3DExyagcOQX9/WHpY2zLn/u+0x7tfmwR370Gw1Vjwmq7DSCe7XTrfvwz2Dm3ZizVc6wbbGJ2vL35b46PX8ucGrVwC9822/KpJYSrW6qefGmuCmDeztXXK6M13xHK2xajQlgYmCIoY0ez/U1Ubf3AsyiCNZZt0stFGbBCb2o8JOmRydrhZ6IJeDOAILI/QrpF0SmCgoYsjk6HS14MCPx7KB053pEQu0lS8JTKNakcnR6WrDd8T05ksSd65Xr4Du0yK2ox+l9JwEUBjPwt2X8A14R8kRjqyLuL7zDvUO7sH8bPwAb2z9Pfj/foD+ZdQ3ohtCfxELKz3Etvs9jIPDK/CvjB9CYaWH2Ha/h3FweAUETtj1h6OxtPe6Fj8WBCg8OpYD6UTb9AM8wJw1OLMF4ymgbQWBSsFUSVsuE+nEnGyowwqMp8TY3r221SRdNlZUWqe3TT+AOyjX4MyNtKPXLESjisS5bSVx6yts8A5+a65obrFATOmCKURT/TFJ/Hh+heErbOjDAzaW4EKWAUHXshaJjioBONtAeqLOEHRzCw7s40PzHnI08ha+7wF8i1z3Lt4/8c345AHqO++nBvGLtx33bbGuAdP35RpGIzE4fdTx2D3ckyKzk3QLVjLNLqpQKRqZ1qArijIU3oBJsTEDqKn/uJMibQhMDcX30x7yh0SHMZG3cI8/xGAGUNOxyj1xQ9JYICAxOFCrFBwF7+A+f4jBDKCmc7VNh3mO2+1wwGP+kOiyBIGtgwSHfnC1JGYANWUHPwkIwCEo3Xw/zbA4QISNPdUPMZgB9JQnTejW3oGmk6JipcopcvbRwMIMFa3GSmtcnvdHhPEW0I7BBV55bxmsDcPFcd8WD7vYxi/rsDzXpx+Xv4ZvuOIz/H2AkwcYv9rhpBGe4sGuMZ4+rw1sjiM2wuVJO/hu1rkj5i0zBKObPq8tp3AISkGHthvw+tvfkQ09xKAFQenyFQIaBhCU8grlpBF2XHwABcaD4SvFrXpFLk6bMcDhQhqEfU8fgA0V6GFKkaxoYYbcRQ1kwK6eun1DVhXmWgI6wLm9Bezir75jduIGqFFmEGnU5BGy2hJ7gHolwCYo1oHy9FCUXkEOtMRegD1QYBDkjWPpoVEt+OS14RyetkhXHoAtMOBUYgRUD4nbA7wCL5uDSdob4GR0vMP01RIu4smLPll6iQ+OTuT9fBUfHP0e+ADzE+zQ70hzQkHg4Ay1Cjr0PjoYb3SqdRsKjYtl0Jo74XSGfH0SYsAxWAaXt07H4Cj0MnhppI4ntHwEL8jF4JLzkhCU4pT3PHX2PnATmzAFvvIiKH3tK516X4fAK47x7GRC8MpXOnWu0xB4xXl1DoH0YAweOans3LkYfcM5BUHpa1/p1Ps6BF5xDIIoAoNHvtKp9wUIvOKYnwXNZoanPvbdO+nr4GE+xidXfMfrf/QA34L1AP6dy3uKBJ1cKXUw6gAHVqGTJwAdFG0HsBh1sQ8Yg3asmXVCaqIvIESnY3D5QQHniLVofRWerUe7Bl7Xl61z/itATDF8bcCeOqWEkdk/9FEKtCog9SZw7YKnoJUp7Oaxj1KK/GmlHPhTzvoFVzxJpCnayFOnVKykWwOWLkTAa1c4jToq2shTb+EUINF5AUqDaKpeCXviKUihRRt56i2t7f4fEwMFcX4lOt0rl+W8iTimwFNvaW3PA+Dk9Kp9Btgzn3Bg/8q9HJTqLPgr+BTjwi/is9PfgP+pB8jX7bvhleu3WKlfSHNAweV3mNMOHgR0MF7ocFyGyUE5Cq3aatC7PUEtbwaXt24Gp8OfBQ/AP0ftqvM5QHL3F1x95BbwwxCXHCmO+NRbwKlTPukb3lbHQXoGj5wdCjh1NmoIvOIYBKWvfaVTr+v8dVx0bJLBa28Bp07xWXntVfchPEZgQL1w7YmTvg4e52N8csV3vP5HD/At4AF4I/Cn4CvXdyrmk1iFTi4AzQkF77GGnTwA8k83GF6gg4x6AzJPIKAdg1atG/TWhdG3Y1CPGZBWQIDVEb6A7dQWX+FmK53/IDX+eeujf+kD3Xvxq1LG6xAVBwncQ9AxBT53rI+Q6fUXpbg3jiEzyLd8gMibUpKflgJ3n0G+5S0oXnxdvHtdgfYVFPiWt3gJUvNnxUb+K8K+3k+wrzBmke5f+sDocS587ALnE2NAFnmAD8Gdl5B9CAzi3/EAl8Eb953nhUX1/wV4+4UVLMr/4gAAAABJRU5ErkJggg==" alt="Font8x13 font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font8x13;
impl MonoFont for Font8x13 {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/8x13.raw");
const FONT_IMAGE_WIDTH: u32 = 128;
const CHARACTER_SIZE: Size = Size::new(8, 13);
const BASELINE: Option<i32> = Some(10);
const UNDERLINE_OFFSET: i32 = 10 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 9x18 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJAAAADYCAAAAAAhJjVlAAANuklEQVR4nO2bi5blOG4E7f//6HFEAqAo1X123Z4t25UNZAZBUlI/pmfOnrP//V8/TL8f9EynD/qnlyQFvKTjaFPHn+l0dZ5EUjtIOEHKgZLL6Cu8Ky6ebrLGk9QGvB75GawXsKEewJvy3ummA0VSB9Bb3fugSTQ4ecBDeep0zsEuX458NwUxKcIoBpu2QRCrmsEz5RB1KJNdDChznczaqNzUO6qQM0gCEw9VZ+ylGm1iQAXmaK0Jsrm1rRo5JBgKHPVomzhzad/Vdk2szCfSM1lihkeFnugzNXioPhK7Kw5RwJDvECg4O0vbOohVzeCh5kR5a4ZLDKgOCuIKkALpQwxx9BCGRj2ZE+WtGS4xoEzl99hYuctNa7ngq3LvpJqsKx1PtS48Uh/quK2Hm+jx7uf17Huebf/7+v2gZ/r9oGf6kR9UfzWh9c9kTwgluiC3iUgaCBKNDd5S3aJ4E7UmiCUFqKba7AU6CGWBUbWigJs6bx0riOKHHuOVOHJRhjp7s1cbqCyW2WW3dN2YdRI77lexVWo+gkqyQgtULXAq2YYnTlB5aJ/AxwfVGi8V94Nc2BgiD0DZQcwCrBs7DmALN6MsZla0XoS1RwsDmh0FNFt1EhQAodBFvX/RTJP92Fi7OiiYN3g4qgmLWHtFTC0469Z4ZpW3Hr0BGp4sssvaK2JqwUU9336GPRmgAQqW8HPaUUCzQxQg4UkKQA28N3kA1NC6rl3VaTzyjVHPiKhuHhNooBIoIgukC4hDalsV2v+G6m0v6MVj39XL3/PyuX9Nvx/0TL8f9Ew/8YP4SyuJ/klkUBMRMMwCcUFmFbjGGrFAshNWmh0FNHsMffm3fQe+bO8ytUCikm3tUVCzy5RvsgEmmq0xzoJyrRO4ZkcBzY7OoNll7VEQo2aleJPyVqxa8+Q//qi1TuCaHQU0O7oDsfYoyHMNIRGod2argC3MBVuGS8qxCmAqoGUL3kFHmVxmjWzhKCsV0Gh2CzJh6ecYLFGNy+wooNnRCbioMYm1R0H2DSHRoFXHauKX8DmGSyrjMjsKaHZ0Bs0ua4+CGDUrFdDsmK3lJJ8Yr1E6ZkcBzY7OoNll7Qk7ZpepgGbHbK1JBTT7sL3L1AKJSra1J+zD7Cig2TFbi6NkWwYkEgHDLBA912ImZ6KxRixClbjmAAlr0oAh/2L8Yfr9oGf6/aBn+v2gZ/p3Pqj/jtnk5DQY+VcTqj1QINAiIFnQZsfsmK1RnNXrFgBhK0qzmGx5rTpbVLLMLmuv0OyYHbM1ag6qQAxNqma/OtA6HpSO2TG71ZjQ7Ji9jEqmS6EYmlTFur3US82O2TG71ZgYo5JteMIsQLOjowWNsfZSvdU+zB5DpHTEGFVZBMfJWqKmDnSLTvL1yL2c0DIhS07GK8aoyiI4TtbygMnbdJKv742ARlNMosLyihjesT6oJ8I5D9hwwVl5PYkAHskKSJeKyiti+IQCGVOwhE+ggw6evOjyaipZZpe1JykASho2wDLWPoEOOnjyon5QiQevSRsipYSwqNJIO4+1ExHcg1IvOq76Mrxz7nvKp52e62QfuFb+N/XP0u8HPdNP/KC/8sf4j+W3/Kgvyqf8pC+69SV3vu8y5m+Oy3otrxhT3Bm8p1vb6/5Z5zErChhtqyvGShve1q3tO5fOY1YUMDqvTtq2NrytfdtfUAdJoWiBFCABHWXCYjJARDDoWRtVMFYgaxaAaFIncSAds2N2zN4MmfZm7VEQo1gFy6ugGEOM4sdZzuyYHQU0O8aDcFQL8LD2KLgOiwJeJWWZ1vAl73mTsYsGFdDqCABhyLQ3a4+C3iJU1nqVlGVaw0esyuyYHQU0u7Ro3pZBrD0KYhSrYHmVlGVaw0esyuyYHbNj9mZosiDWHgU1G1XgVVKWMYofh/y5+rOd7MkCKUACOppJBloGZOTgsNmSZwLFcIP6nvKw2FcxpoA39Obxr8obY1/FmALe0JvHb4hf6buPYe/Ozl29e/6v6/eDnun/6wfxp7te1f/UsU5+1Z3xh9WfgRY15Muao4Mea7/zvo7biwrwKrh00GPtd97XcXtRgZ5fpKWsDdXIKcgREUgWtNkaxY5ux+zYugXpatGCi7xWXWdsrN5kl7VXaLZGbWMq2YaSsdKBvoS46nh0ejM0iRoTY1QHlc0E2YMgPoEWLbgoj6xtezPFKzyCepQYozooNvO/WyFWFCDhR2604KI8srbtzUa1KK+I4QZctSbmGuhowUELLqrvsOuMXYaSMZ0ywpkYeBTMEcTQ7BW6QQsu8h2oNgfX4RkgUCCQ1L9BnrU3wyeQFGzoqPyqftrSefWHevCQB1ulv/BB87+j/5n+wgd97xnfuPp39H/5g/idooBv6dsPOMTXUMC39O0H3BB/ybz+3DlsSvKhZz/D2t/d9DmbHNQIMh6JI4hTA+AjeYwjeTlGMdg9YS/VonzCpICLalheOkhtO/kXg+vqSKhG5zjk34sc9X6ABhiGLtoOtw5SfctDoV4HqEDtSJMzKfEOPjp2XFoQiaU6p40OUnONQ9mSIm+k4eyIK+ylfrzm6UR/DxSFIw7jdaN0kKp7eJ3wcCYQN4fwQ67tJd7BRBuxTQElOU82OcdJanSQ8my8T0C4GXETN1HvGfYSb8B9gpkd9ikg2tDDbnF4dJBiL04EYygZa0cDnG4q1bx8xIoC1EHD5aWDVO/4DqjjrD6ywQlRrp0mzrYv2vXl8IaoL/WvoL+aqlatPrLAsDd56zR4pOvhna/i7IPt+QpONX1EH3zUZ/T7Qc/0v/eD5s/w3xZv4R8T/In+re/hNbyKAh/p+YmXtB6zwVDr9EFHXuGr2EI3d+cxwFBprQ/I+wEVotYuFCCoHb6KS/c22aox29SRIRwt2DFEHcokGpr8It66XnPVjEnqAjhasFFpW/mOHhynDoqy1Dzcb/miGZMUsCjWv088wYXoBEnWoUzQlhSwlGXbbErEoRqjY4cRZYTpmP8hpLOoueNNNSLQUHIpJ2I6FUgssT4PECdT7GA2R5jQWXHeFYqNevZAORHTKeCueFG/gIMUIPYMmnQyq/KX5TUe4C2QAp6LcxSwY4dJGWF8xIvQNrghbnjMQ0HhqThGAcjb4eskawx/R97up+MU8K7qlk8KoEx6AL2jXL2rx7uv6c0nPHilW/Y39e0HHPLX/PuP+/4TPiw+aP91HuZv0Dvi1+HO3txVw9fnzFwNn8+4mJ2D5q1mZhiVScMNHRtD3ifIudWBhi5nhvFTUp1C0QluaXa2pHYI4qekDhDfUn/hX1Mevn4X3wG+rK4LpIH8Zz+6D+vwLQBl430I+es8HT2ADe+BzaON94DrVJIVegU2vAf2vOMt4DqVZIVegQ3vAQ1QME69AxDJCrFy/RDEyrtAr+PvQLIAU6/AhveABigYp14DzS5Tr8CG92B7rB2zY3bMjtmx6TL1Cmx4D7bH2jE7ZsfsmB2bvq7QA9jwHvSDNDtmx+yYHbNj1ZjLpIEyVvdhHb4F4Lflx31MH3nUJ7/o+0/iayjgM/rAk/jd/8BTRjcftf+Mh+df8i9orqjhr9dn56J96E+1jpWrIfeIZGYYxeSGjo0hrxHk3Or4om3Kkbkn46ekOoUi4ZZmZ0vqAvgN3Rn/qe6+52X1X1Y+Jg9rQ+RLsJQJo37EI6A4qtst7/sXoyP7MDtmx+yYHbPLRi54psZUs2N2zNaodUNSEh2bVaw6ZsfsmB2zNep4GB1jpNkxO2bHpjEFQBnFZsVOwZq8BraWJwhZpWN2zB7zqIkvo7eH0MciZsfsmB2zx7xjvnF9vELjLvfnz1CsOrZNYnbMjtll7QR3ZTtmx+yYXdZesYyOrdUfAMaH0EkndsyO2TG7rJ1QogM6tlZ/CDaGS3bMjtkxu6x9Akl0LH2YHbNjdsyO2Rq1HhZjiVE1pm6ChE8gidaQU1bLEPkSjDKoCZh8DMkOBFH8+JR82gf0kYd8Ur8f9Ey/H/RMP/GD9n9eh7/+N/lo/UXyRXNXDV+eM+OoF5cjrnoHDc1bzcwwKpOGGzo2hrxPkH2rPGqcI0DOZCGoLalOoUiAKm+pj+xJ7SDhqIGggKHwTxIfxM85+RaQ+QmFYPoFMIlHYKN6NnodcMoIp2N2zI7ZZe3oHtioFuh1wCmDBQpodsyO2WXt6B7Q/mK9DZUBpmqbRLfAUzq6B54mNTtmx+yYHbO1Ku67wNErsOE9ON6RjtkxO2bHbG2Vi0zSMTtmx+yydnQP6PoFfRPIfE9PXoTkYwC/rfqwD+kjj/rkF33/SXwNBXxGH3gSv/sfeMrog4/6jH4/6Jn4IP5IUvDSvhy+/IfdI80VNXzj+myd5Wz+VJp1qlwNuUckM8MoJjd0bAx5jSDXrckRa4ofYgeTUBg/JdUpFAm3NDtbUjtI+ASCKH4M/bFOl2/81jzU6bKqZcZY/+zByhdhUvmvOdaaHbNjdsyO5RIM4FhcZU/LhE8jX4KRK/swO2bH7Jgdm162Le2YHbNjdsyO2Ro/iwhwCOC1lY7ZMXsZlTyM4oces2N2zI7ZMTtml7XgKinLdMyO2cuo5GEUP/SYHbNjdsyO2TG7bPn86tgxO2bH7GVU8jCKH3rMXkbxBpx6DBKesA+zY3bMLsM7eiqaFNA5hidegxJLlMnCByB1FAQB6jP6w7+AOlSh/R+Q/7ejvLoD6In9n1C9fVdPYj9J/wOLGNFgVbYQjAAAAABJRU5ErkJggg==" alt="Font9x18Bold font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font9x18Bold;
impl MonoFont for Font9x18Bold {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/9x18Bold.raw");
const FONT_IMAGE_WIDTH: u32 = 144;
const CHARACTER_SIZE: Size = Size::new(9, 18);
const BASELINE: Option<i32> = Some(13);
const UNDERLINE_OFFSET: i32 = 13 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 6x13 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGAAAACcCAAAAAClo/GGAAAIaUlEQVR4nO2UibLbOBIEZ/7/o2czqxokdFDHs72xMbHF7uwDDYBPkv33X39YP7rgH3bpn+gc645//m5UyQC2lDQYvtM21TQXBH/9Q0niQ4Ejg44aymtt60nXaTCYfEhYQETbBCL1M/3z97lyDvnqfXciJeEoTIEi6WTSp6J/rJAf6nHshhQiaZRamWQGwGrPqZ5e4Ix9OjXThOjISGaU7KnsY5XFaI5LGGAg2qJNApH6UWnr98ouXw6S+lAek1QqIRhtI+nrnjFqucsOjrnXUDUrd20d3q4bjd/LnfqtbjpncSR/Sv+OC/qBhf12SAhUR04IJrBY3okN0URnyPIzpBDjQ4AhqUvjIo1dZyOZwDHcx1gfcGeSISLB8LJ+y8p8HDscJQwI4LgGkWB4yTuwuoibpmiVgB0+ZIqQU3p8G7ZwYgNMp9R3pea/a+PyRcShnkxG7lHmuy+euq1TAc8xxgvF+dCDkxPvffGU9dqVd0rH1zQS6qUuMLzUEvGyDjkGtVBn5opVby4taNGEzYc4bL5xFp5H7FfkEcWVXq39Fv0rLti+KVJzQr7q4e23nzmCORmBio406mUW4RRH90wwgE62sxwRMFwadX+3MLmGlzWgCCu/53JEOAxxdI3aDk6ScgoMKMLK77kcETqly/05axILXmAuQwSWSIhl+uYUUxl8wSSIHduz1awxRoucPiKs/CBHQbM4InRKL1Mfk4aaHgOKsPKT7SxHhMOQXDNlgkk8BmANoJPtbF7DpVHfyTr9KZKSSDDsKqKgQ44nhyJOg9bkIassk/5Z/f+CKB/2aM9VvgpoeMxvv7UnZMRRikIlCVRXGX3g7lfkAIklViYdUF1tvVMZ6zsxMDTWFxOxOFrFPZWxvhMDQ6OmbsMSyyVt9iSnBQiwDoXP9Hfq6DYsdUu62Y4pw+axPtTn9RtQY3koW9rNdkwZNp8lH4IW36CSBJvcgtzq00zeeJrCJ53DF5WZvisbEJ++R5AQzJOBPXcYO53mcQIy03dt5f3Sp+KaY+ddTnWUf0r/lQt++tl8pJx93HAko7OeH786m0vT2WZO3bZm8tBRk2DVJFvQ1Yq7blv3E0d9JGjPq9VZcdfR4kfFW5LQIZQEaI5Dc/41QHLQwCJGDqgJx8ztz5Q25VOy6rbmK6OZQC+5NOfJTGsWlEd4CJ32JJ4+gSro8nJE8DhFivdpJllAxtWRRl2iBNHOckRoLynep5kEioL3gGSP1JB/zerEEaGWFO/TTIIox+cE8yEBmuMRM6R4+saYvjp42Q7pp2JL/BQV9lrv1nfljW7E3w1f6u3Ar+r3XHD+JWc2uit/pvkiGkKAqfK5ZuS9ZrBB+kMyovK5ZuS9ZrBB4lhEFjZw7+Sr7xzdKzIzQ6ihPNRVRsvUzbVEfCcWJBduilITVh3NKBG6UzXIcQzvY6bFNwxXWJodJyMTPcBjfVLrvpSeQjWUp/gDAX1DFyUeEyw3ARMYcNoE3YTy1BpC5wbaCRDY1QEBi0sGHW05oTzlnupIPhHD2BJvAdWZLf3wgicnXejDsZ/ruwv63nz4hlfi25mRCaibhkffernAcNgjygexjrP4sEyPDutYZFxeVKb64q08ZvqlssGtPISjxkAytP9lechskcFTzGEkWR05MoMBU67uLYBF7F/rutyU1bZKxcwMdmcr8lbKenL3M0CWSNfcAsRpqQmskznZmMYEMGR3owlGomYSTWQJI0ETaJp5NcShOTBMa495yFglaXtCoq4mrEYIumf5EGCRUa86vYmbV2NFhkDE+NmmAotnfzvitT6b+gX9+y7Ih70+6xRXelxsJ1wn5IKzIJ/fQxhUfKfblOou+oZodSTRAPLLw1KgcA0d8pysnEop9KjJ8Dghp2P0Dhzh0JsL+nfrixMgMbdxBAEmB0vMsJ8+ySY7LJDggMdO+xAgFpsg8qei380kx8Yl+wQnJi4kYF1XFk9kG18nXKjLve7MjXZeiiEMl/filJzQ1dKo+xhB5OzKdzFnnwAvNIsckRCQm5wXXMi9HZTv5IxHk5onTXEtxh71tHmlN7PPznrWu9ZXwz8RF6w3SsxXSkoRmINdq5GY+U7qKvHoE4igBDFwT3bkW7MAJYiBFCgEsV7wWgziZf0rOd9Nr3heAHF+5OCWLjBwT8PsfsEActAlV7yjm03eMV8BuRV4whXv2G0c8IqMGnDU6pGsiHvyaugdeTKNrrjiPWfrOzbq0ghuuOI9s6HbXpG/JB9FQHikCw7fk/i5vO5bfbPlJ+d/dcGPdF7AB80HOXWiv33YPjhFgUWJmVwz5RKTIe0sBCWIgWF1bCCWIAZSxJH3klFhH4tZN75U38HJ/l+EAeiKyQWjTM38W95f8Bn7NH9NIofzcqRmxC48JzbwsfNILKiBs9srLK/Iu+AUPjYeuT5CXjqJF4C1/JqYYG+Slnc8vfQ7IK7lj/jtBWzon6FTmFywan5FT4lLP9Kk36ibP9eX49/rf+ACP8hfEJvXp5rY4/h++MYF1HatMjHzx2SUePQJRFCCGBh29EZplyAGUqAQxHrBHxUX9Af8OwgeaCRLeEX/2hWfs/5APigS/vWD12Sym/B2bskK/sC5/h0b2XLJFe/pVj6qd+zjlivWHzkHvKMgZ/xbEj+Xt3yrb7b85PyvLviRuOBHL/axPJwvUvpFYlHiTR/caNWJmVwzpbJDkdUUCaAEMTAkgFHzEMRAijiyk2zq91qDK15pWyfLz5XLYPNLVsk9gj2X9LTmGt7G5Ffk9P67C+1ccTtNwz8hxyMrn2Z35GBSbeXtf8TlfZo9IQfDIze0eMtxXiwdkkfqrJpg5oaWevJLVs2z+QkRF+QK5QVfqodcidUagljS36i8Oueq5P8BEGNXB7lsU14AAAAASUVORK5CYII=" alt="Font6x13Italic font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font6x13Italic;
impl MonoFont for Font6x13Italic {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/6x13Italic.raw");
const FONT_IMAGE_WIDTH: u32 = 96;
const CHARACTER_SIZE: Size = Size::new(6, 13);
const BASELINE: Option<i32> = Some(10);
const UNDERLINE_OFFSET: i32 = 10 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 9x18 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJAAAADYCAAAAAAhJjVlAAANoUlEQVR4nO3ZgXbjOJJE0e3//+jeG5kACFKSJduqGu+OX0dGBpIgBUtlVZ2Zf/7nh/F7oGecD/TvWOr0KsfWkUb7Eudb55N0OjpGrxb797g1y+KD8CL//nO+Yd6v09G9PKR//7GmFDWPAm+9jL3n3QaFTtdeKpuTZobZ0TG7O4nsKXad9xls1NuCem0KGh19ZxuM6Akd5uBjsok2MtmxJqyfNJFpl1sxroSOuauTyJ5Qe1IHNdqwpnDzs9aBRh5sq4psyAKZDW4nsPM6OjPu6j3zZ+WUmpPFtpxx9iM8preUPcQeKnRK1UGoamdbb7G5GdwwdrRPxnBhTbNTah6o+om1XmGxJvVRnxiTsaN9MoYLa0J9elKt2tbyYC1XuKXuO9GTectoz5k3fMjYNNp9Prz4iQO9iyfn+fsHesbvgZ7xe6Bn/MgD+dZLw/ydHBMtJCIXt0mingYpMW0Ln6LvonohxbNEVqlmpLo4Mo4UsppVlnrA+dJaJaj1dwOffzntvnpfTBUrFFmlygntd7heGOtu+Tf1TEOLkY9GVcUKRa1is5b7iYsjjH6wT04HQnsx4nhQVlVyTbxhHUKuYE6ynPd1O0KuwdYwFt0gPTrQOaXaQkK9QELoHk9xL9XpysfT9AcHWgGyVwhzmEnlR44Vztwbz1n1e48+ejEXs1ciPHKscKHn+0/YE1RQ6SmcW/eykJA6LIWtpYoR6pMNK8wri8s6K8cdLfjpOcyStKbuPCZSh76AOdE7JF6DuFLYVhVTf4V6tRd4cdu3efU8f+1AL/N7oGf8HugZP/JAvsjS0L+c1nOSKKWld0hcoWbVYuUGPZopQ4sZ0IET4mNv0qh49Vuj6lXFCpVmnT1UWpYqOnACJ4x/D/le51mOxuLUdODUXAIn7B4qxVJtwSu2pSQhnSf1f7UcbViq6MCpeRR2D5UYoR15pfxXIcsI5wOtcmUevOnAj0uqQyzUpGfHsFM/HqNVuBUuBzLJA5PLUkUHTs0puHFe2j1U8rlU71XowElxwuVAW5VT04FTcwmcsHuoFEu1hQ6c9O3nqVQ/YQI4gVPTgVNzCZxw9dRhqaIDJ3BCDsQkdOdU/TCqXlWsUGnWrafa4tR04AROqAPlDTq/Q1oGSJTS0jsk2jYxy6IncYMx6lbDcoMgrMkMfa2+GH8Wvwd6xu+BnvF7oGf8pQPlS+b0UjeDia8mrgUxQeuwUvWkfINZvhg0ZGkVRmvmYvbGbVkvm1WWeuAETuAETmhHhTLMHkbuv9gX2/0ETuC06LycwAnxWWWpokP70UPneGqyPZHACZwWnQ/fa/j6WVdIZKttYcSy4cV41ViqnKozZUuo9eZbtdCOmzD7/bTjWVybG/iaNCa4+lYttGOG2Y9wN+3MZ6EDby063/j4XFgrsRpGHw1HOuIKJ8azQkI9k6oGHW+8tqYNrUK39mKLK89+pp/VJM0qS529WipePS3/CVZ3vNjiyrOfmQ8q/OE5P9rASIndMpqpe9qsiysk92AwFqNduB3e3/dNcrbTc6+DrEP9m/pH8XugZ/zIA/2RP8Zfpc7yk07UR/lBJ7p3kkfHu8x9deyD7Wr+14xBxePS5Z473Lt83H/mPM8qNdnzhf3Snu9x7/Kje87zrFKTPV/YL+35Hvtl72cN/F3ZYU0q5K+5CjWxiyGTXDrdlRY6ujpstszHpykmZJSQeMINtbNCepx04wrtZaPFcpnHhheJs0J3PjTvIvT/PnTCBQKnpgMn8PHzVG5Lnb1IVNR04EvghPOB6kW8yrwWNR14b7GnVqFaLHX2IjGfi1ZkXb4ETjgdaI4icGo6cJrMmDMm9nr3IjHHSQ/d+RI44fZAda+ShPQ46cYV2stWm2H3IjHVNhtfAiecDuQV/awu5B2WVPoKuVShJnYxzEmv43NS1EC1z0vJ5YT4uCR9jzxr+pVMU5/hk9tv6Rdsv5Jp6jN8cvsdxlt9F9ceXHnIZ/f/cX4P9Iz/3gPNP9/9WzdXtzwYv50+B64h3/8zhyN9zHbLl1j3XwJvTY70MdstX2LdfwnxepMmOd36QBPsiKQeVKheQZUTOIET4qlY35XbatnchjO5zZW6GutVmUtahWGzcQInxGdthur1wKbWxTbc2J5IiKdiNBmxW3yrVmK1hKi49jvhzLyVE+Kpot70WoxRt/hWLbRXpzDah+HMvJUT4qmDWpXNVj7eD9ZKrJYQoT2sdBvOuFX11dixQvfymCI9e8ReaPkvqQu9Y67CirfhTF7DB9MXE7IvVRgYqYoJWu8Ry6vNWr72IrEWFeaGXt2SA2kHl+XX+OAhH1wq/sCBPIG+ys3Nl+VX6A/si3zj1j/D/+sD+X39/uO+/4SDP3CgNzzwu5xPcHugyzdYb9hdP+2Ae3p0c+UOc3N6UvIH5EUVgRN2T6ONWsUUNQLdUsOywRZPjD2j9fmTM0jh3BbWeWc0XSis6BZ/7LJnu3akMC7YNC6MQfVUvHsSQ15+ozYMi4JGdxj72ORIoS/w3pOD9Y5aqW3U62p00LdqSteSCRn0aGAz7zuaI4W+wKlYXY29czSxpgOvkQlbyDSpOB6YfdY0OVLoC7WnGaFbfDwoZAmNDizyIVZDXcqiQjhSslXZ5EihL9QGki7XQ10IM1w31bzswJKaFUItygZbxLiSHy2pTsbFg7VcYUvFvH3HFrrDdfOeXe3laAn2z8Vgu8iq0dv4xqPmMa4f2fd457Pewu+BnvF/+EDzz/Af5uVX+UvneflAf+s8faD1TTJe9ljPcItvTNy9PJ4i3Gw4Lq1wbKlZSqDQjRM43SVz3+P8hvUSuZqKEY5LK8xLkyxMKIx2hNlvyIUHBzJnSE+VE9qxwjWlxnm9wUHsdVGbDnrJScVvWVNPHNGIqjcrbLFC6qBGwyHQTi/jXmz+S287f9HTM2ZUVabqtvRSRZQtelavpZK6L8aGcq3DZZfbey3MaG8p62wX4i72KkGhfTKGj+kN7doIz7GRiu2uEWpCxWiv0re1ayM8xT4qdAr1vsCyhdP/CZy3EdvklnpIGV4+kG3UuLlzO3pCOB3oFZzBkfvmLDp8Dgfgmicp1CRrXfoMH5/g46uv8clHfPSSrtF3+f4TDuqd/y5veMR7yYG2N3rG2e/w+NJ2ZcbZF9tgxtmbWqzRDOv3x6fQ6ZjMcId1ZYa1eQWJhRnWpQ6EJIxWPcWcSK90CfcYl0arnop3xwijVU/F0+mT5K4/SD19vcbngkoePQ1+9znuhU7641Cx3it8KZQzaj4IWPlBSM1HfymUhVcCVn4QUuPRXwtl4ZWAlR8ElZ4qS5WlylJlqbJUzKfOsghWWX8YcOy/H9Ta/ulQPRZeCVj5QVDpqbJUWaosVZYqS8WoevNKwMoPwnhsnMAJnMAJnMCpqi28ErDyg9CPrgWBEziBEziBU9VwRs0HASs/CPNBnMAJnMAJnMApf4x4WnoaXOK4Fzrpj0Pit8np3sUbHuU49C7e8aT5L4K38MZHvYf7B9o+ghlnf4Vt74yzb9wZ4TT03o9dox1h/M0lzD1rcod1ZYa1eQWJ3bJPbV7rsX206ik290gd7jEujVY9Fe8usDs8GH+ZR6/zMu7PN5I2nhXPpEfqWVjMSZ5QTuAETuAEThP3W1Bms8oJnMAJnMAJ7U3lfJ4V4gRO4ARO4DRIUu1leViH9DiBEziBEziBk+IETuAEToivsq63pVfnA+VpPAIncAIncEL8XJzACZzACeW1NY2VK3KgnJGfBU7gBE7ghPLj0UvgBE7ghLvu8+E5SlZlpDiBEziBEzjh4kvgBE7ghPuu6HqgHLRDnMAJnMAJ5fVDzKYInMAJnHByVaeotWovI3ACJ3ACJ3BC/Kj2CJzACZxw6yFJta8qJ3ACJ3ACJ3ACz3tbYVVZqixVlkK39pCk4lZaVohn0iP1LCyOiVT9o1AvND14h3OJ3sTx6G/xnqe8kd8DPeP3QM/4/IG+9du03zzz7E0W22TG2W/xdfHo2nZhxtkXx2Cm9cAOlMjCDH0Nvqs6HZMZ7rCuzLA2ryAx7D0VTyckYbTqKeZEeqUOEnvAuDZa9VS8+3Ne3Pb3cCB/vVT/bBjv3jbB85D2QUiF9Y6+HngLnMAJnMAJu+NBSIVahNcDbzUdOIETOGF3PAjKe4WvhnpK6EEmHPcCjv33g91pcQIncAIncAJvoR2vBKz8IIzn5s/nZ8MSOIETOIETdseDoPKG1tfEJ8MXf8uehMTvkvO9je8/y3HobbzhUf3JvYt3Pust/B7oGT/xQH5H6MS2nnH2F9i3zjz7zr1ZDnT8mghj02hHWP/CWnvW5Jbjwkxr8wqJbMOaDNceIetijRjSU2zukTpI7MSc7D0V757Ehhci9Vr4Or7zj9s/+6Sb/Vl7YrrDd6jBy0EcTWXSS07gBE6Ip2L1oslSGU+qqgsSgRM4gRM4ob1J9vjtEoETOCGeitWL6suMEuIETuAETuAETuB+wCCYvH6gGFUNZ1QGTuAETuAETuCE9kZeAidwAieUM6oazqgMnMAJnMAJnMAJZ09XBE7gBE4oZ1Q1nJHEV5UTOIETOIETyn1o1U4Hyp+NDwK6lddYSyahW6zcYIzUsyCOpmqS0Kd7HBLT/BgmAmpCb6Jf5nV6f3tTOfUfwGsT2iHQf+xA/fGcGJOyn8T/AvA0SW8u0Wn6AAAAAElFTkSuQmCC" alt="Font9x18 font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font9x18;
impl MonoFont for Font9x18 {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/9x18.raw");
const FONT_IMAGE_WIDTH: u32 = 144;
const CHARACTER_SIZE: Size = Size::new(9, 18);
const BASELINE: Option<i32> = Some(13);
const UNDERLINE_OFFSET: i32 = 13 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 7x14 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHAAAACoCAAAAAAd5jXVAAAKlklEQVR4nO2X3ZrbNgxEm/d/6PScAfgjS7a1TrK5aMfAzAAESWnttl9//PPNOF3488fPU+8RcwJDfAE/f2zjtbUvbEElnkujCr2Ihu7i3YU/cbkrZQUofpA7YHQbpgI5uiXhh5tpqkiMeCXXYHFbpprw1f7JTSE8tU5tTK9xTsVAT+AaMWA5UduLQUnec6J6IKYfqooncI0YsJzoq/qYeIrDK05bJsx87QiOFTWeGLAcmKN1MWXuqqoxbUxGQIprZMm8Qo5OSIzFwKw1pte4rGKga2TJbKQeyPZEfWJy8wINcSXHwWqG9n41GjmffU7gcoCEn2jfcsShyTGU1Qq9QE09QRZDJxy7ViYo/hQcQlzgotut4m/E37jQ34LIO/P9UsoqoDsrzKrU6ik3UJNsTFjD5aQWuzFgKKgf8RLoEatZLsx/nloJCK4KhNMlRIvAEmrLoIW9jG+62hETglH/FtuLAi1ZEesQjMgoQgtgUgnUA2OaQJRhBFuh5ZOmHmIE0cbEwfFdLFQtc0y7IrBpReB12JSDCG7oM3acGtWRanjRFI3WFFUhlRtN2ZAOzwJ0VYfXMUXFTb1jVCWVg1oGcUmkq4Gtyt+FAOFQcUgm1BYDhKtpchNngRRgqBheJeqh2EAVplbt0ke6GkKCUYXBaJdsZjkN8VvhmS/xbv234z9xYX/t+e3gldbiCKYlJcyCZEWJR6gxJZpQiwMMotTkDJNQFZIQu9QaMgighEkIX4FMNTeoXK1YPhcN1NLNRQCtSAIuMwn82IDiQpYKSYhINcGFLAJoz4YU7grFwwiKC1nysrWMtqxmCwmsUo+GmrPjhDs52I/e5nYhFfvxFIaIsKZooAjD1VwE0J4NtaQlIVYoLmS5iyYU0UAt3VwE0IokiNRkXjWVuj3wc9FALd2c1EmYhIjU5HYhiSnuxe4dqzihrP1NnYQa0YSOAvOCc388WVocwWQEUOJTQVTUCg1qiQKMkRZHmLH4Vvx/4VeRL4ovLEhB8huDC/UFU6v1xSLRrgANvvUcAxGkjBhkTcaioYKWJBpj46U0mVCLQcqIkQSKOVjgiGhhbLyUJhNqMcyKmc2hZrFcYxxzIYHOhFoMs0IbQaHmIZsZqOneNIVvTwPwSaliZc30JhRqHrKZgfq66ySoKzCPq5QqVtZE/OAH2cxAPx672c6FEqWIkUypgvSDGCAcAs90wq0C8RgENaJFZqjeySmqChAOgWc64VahcJ6Cb2kyQwzE1Se2yQweTctCbQT8TTkuFVQVwENkqUyPLqFrciIzFkGbloW9Pi1+gHVGueINh/q0+nX4qnUKh60CWFDO+rvAhTzKN8LLths3O7C3+lcjVnu6/JgK2+AD0p9zm5vYWliiMM1y24V46AoP/Yu5rbXZg59YzeUeMPv+iHwBlB7c4lMjVqxCVjT5oCllgHHAbK7OmIQRTMCCxEgEbqGjWMEsW5H0o02CEfpVRWzwqcnuyBOzG9GEWipDJBFtEk6OImrpp21EDur5uxvRhLJUj4lKJBFtEpnsIk0a+bSNyKLLXTShiChTp8YvEpkkRMSGn7YRWXR5/HsrdBSqygMvEr2BbLbhp21EDnhunpw+XFUJLcWK8RAlvfJZ65IVGHneXL/S+2Br0w4b5lvcmXlAPekj6J6bF7g19Dvx+y/kVyaHYssP7P4Ch9lb6B2b5GdDFIY+wZy7jd6xCZk7C0OfYM7dRu84ylQu5HKYFoxfFSIrNCTCJEh5E9hRPfQoGxilxw75UCGVkwiTIOUl0IEfZMPYcRQjiA4iTKciHWk2NZ91wq22dxF5VUSeVOty1BFNyBStR9mQrfR3KcQeqdZldMZ2M2g9yob9W8MqICKRc61bUH1iWcguotDmKBsYdgtLMB4nqGxqEKpaCcEooYMUJoDVbEFTNrh3Yvc3kS2hRvujbPjVC337vNlE/iZ1VNkjzp0/jA8u7HfiHYhRqcoVWFpr09Df/BGumCFTmjGq5AXShrz38L9r9DYc/v78DFOFQwOs1O/A45ALZMWRYOhEryCcpZfaohAnRAJ9kpZ5gSyThaEg83kcMsWsuiyBiEZOojS0GOwB9s3CUOBgxolkmGAcoIGNIpGTrDCR8AHuXt2hwN6YzxBSF8ZM4IkG90BkYewv1CmOmIWhwNG5oaR2RNTZjQT6SsDmMQHiQC2PaihwduzAnGnKVB1nHcrhl+ESszAUOJENBjXlBC2hmEUiJlTIAcHQMiExFKQ3bqIgJtpHxkjhWFGPG1vAYWSagTX3Z/CHjz/jb1y4fkGiqxZM63O8n9iR3wYxgCVMAhQ3Dt/+QD9aS6F3lWSlLMjBxIauIqEBf4CHRiEtiGj0FS1JIpmWtJD2YzPgCD6YI9KCiAEsYRImEc2FR6QrppnwunOXNlRPONZhwiRQ1xrL9ZoUIQa0pMch8Rv2ag5swnUKRjYbqcfvgim44UpybN3BDnqwwlgmIIKUESOZocAdqzrCSROKvEKNwB4GURmpSHUg9SVcMaHIC9SEjBKmHkEZMBupL8HzEZpsfIEaIPmDTUtQqSAk6G3VER6T8ZbG7gu93oRwqr4rRX6LzIre1tj9e2T6yxeOBxazexNsZjebxsbS/rLgqp9jrJe6Y6F6oExxoKtycodLKlLFCVlaTJgEKG7pbyKieQvmCJTEhj8FO/vx30ueEC4Ls/1SmAfXgiZMwiRMwiTMGUkoooEeBbQ7S23s/W+FSIJXAtqdxWN0dwQeG6yhJwLanWUeA72T4xuS84t5EDCmTuIxuntCJMErAe3O4jG6e0IkoYgGehTQ7iy1sfe/FSIJRTTQo4B2Z6mNvf+t8B2FmtFLqcVr8awvg6s/xidbf+W+daH/9vIkU5TaLa76ATZNUerojupuGDULPYsDkztcU5ENKUOLCZMAxTtYCX8dt/fNS/PUXAjzE8QhNO29lAKzNPzpotdikLJAiRBFjurF1wIJjC1EfykGydl9uhGiyLOymOK9uCE+1RMxYULXkftxGFhy8ZUYJCwhXZ3FhAkdd3ARgaWO8WPxUkx4UTfO0mRKFSHqGD8WL6Updaj9WZpMiSBLv3Zh7KJunKWJP6V/yfJ9Yc1wq9LVMzFIhiPwM2kyhZoL8bCVmuqVOInnRqtuXgsGNoE9mlV8gD7my/hw21+48FPcu9Bv9zeBg+YfJ6bOllMGOOIRo1XqjoXqgZh9TZdmC9ThkopUcUKWFhMmAYpLIEJFNN8KLg/fFVTp6pmg8pX4ltibYrgLg30qg66EADrwTrzMFNbQpQy6Ek8A9yT/OsOIWP5A0ElIzwZnGQ91T4gkeCWg3Vk8RndPjH4B7FMZdCWeAL4q7Hwn6JWw+nX4pJ/ik62/ct9HF/4SuLC/62+CV+U+f30aU5TaLa76AmOh1NENNs0FiryhYWEJLe5wTUU2pAwtJkwi2gRQogrMJ7i37ThF4T8fvCQyitfSqCqnsfmlMDony9grgd8IJDC2EP1LORztNlW+J2SdQPhJ81IMUkYMcqh8TyCROtT+LCu2KkTKNwXmO8G4F0rzSkyY0Jlw1bE3BRL9d7XGXooElzOthtqQKKheClqcCvGCS2EV4qkoKXBtPoGb34ARIgkQ4o9eyIs6NCar+hde0dUw0HDERwAAAABJRU5ErkJggg==" alt="Font7x14 font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font7x14;
impl MonoFont for Font7x14 {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/7x14.raw");
const FONT_IMAGE_WIDTH: u32 = 112;
const CHARACTER_SIZE: Size = Size::new(7, 14);
const BASELINE: Option<i32> = Some(11);
const UNDERLINE_OFFSET: i32 = 11 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 7x13 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHAAAACcCAAAAACCDXBuAAAJcElEQVR4nO2XC5IbOQ5E1/c/tPe9BMCipJJUrfb0bIQ3BWQiQZAsfdwx8+s/P4x/88LfqX//Kt1hxxxMHQ1dxe9f23Tt7AtbUBMmFrq+lUt4e+FvMpcRGEhowC4QAYqfgMVtGbfgRaz1tXiEJA4sk2KeJuYZWNyWcQf6Ki5WsEniwDIUzIHW6d85PDUx0A7WaF1swuksVBOkmMWYc2TJbMQP+iIqhUfvgBa6jkAEKD5FlswzzNEyqUVQeoM2LTfgSfduRkJ7uxoncCHJuLoQE7rHfMAFN8Jkin8AdfaBzR/VD+GvuHC+4HzQfL9YWQV0l6M4nFo95RFsbHRVk2xM6OGqpBa7KcAoqH8kh0B32Hupm9aO2msWFadLiBZBSagtQ/fS0JnQ444uhhJ8GgqdwMqEFD8FSF4SIsuWB6cXRhmuK8g6kMk0AT2IRBFWqEMtoRtUB96GQxwQAVxAaQqcK1pzoyUb7jvl5RyaaqOCV4jq4Cwkc6MlO2wd23VA4QNLANkEkZBMqC0GCIdu2SNF2cJeU+ajgsF6T6pd+ki7ERKMCy+tw0SPgVVZEH8Inlb0Em8H/jT+hgv5tmE+flkzWksj9z+ofFuQDkuN4ClKLEItDjB42ESnjBikDHbp5kEAJUxC+BbIuLVhtMUgZaEudyIHAbQiCbjMJKjrlLqQ96poDVIW6nInchBAezakcBfEy9pmLqQKab2cB6IIVNaoLFpIoIufhsoFFFbCnRyMkxBbKC/6OPfbpgELdblDGK7mQQDt2VBLtbwBwan2EK1BykJdbpduHgTQiiSIrMmWUqsigpQRg5TBJscDN3USJiEivX0JqyhcVoObJjzSVwAsdRyEwys08BIGzEiLI8xofhT/v/CLqO9pcOsK9QXTVeuLRaItjFDxrZ+KQWaQHTrQAqoqDmbjqTSZ0KkYSYAQogWk6umgyu7cSmBlQrfSYRmxqBSjKSaDmu5NtxJYmdKKzVlG0CEwehSD+vSPvTAuXx5CAZvSRP+dQeijkXJVgylGF7IVRDiLF0WQSjKlCc6OGuCRRVctB9wqOMYDQliRQjJD3FSUTIBwaAnoquWAVwjE6xDUiBaZIe8Jkfo0Q6YYBVUWb6iNoL61OKgcoIbIUrg6tW+RE85YN6os3nDjH1a/DC8+DrlxGuzyP4V/48Lvf5BfgZdtN27lYGvxNSyztQ+sZn5ep7jrrx0HjpaVGaziqPqnGxzVHe76J3NH66jAjRkczaO6w+rzcWF4SKWd4lMjcSTA0eSFxsqAwgmzuTozCSMUB1yigcAtdBQdTKUj6UebBCP0y0Vs8KrJ7siBxx1nKxahLLErNkwS0SaRyTZp0siry4gs2u5iEYqA1jo15iCRSUJEbPjqMiILLe+dQGdRoaPgDFBccpDoDWSzDV9dRuTA57arxrXQUuJIUC4G0rVlM4w8b979aC6BrU07bJhvcWXmDvWk96D72DzBpaE/iT9+4f5O93rw0Pgm5nu8UYgojJ5jjV3G7NjVH6oajJ5jjV3G7NiVJBpcXZ+0TH04pLimIcIkSHkTOLPU0L1sYJQeO2TKaBxigDBEmIS5Ig7IHhFoxGoM5uwWQ/RcuTBEmA4jNWOx0RPZcPYOA027sNRjcHQmVgFaHnSBrXz+dGVWCRCGCDKcmSphtIKFbR/2iS44vEBNCI7xBhWRsxCCUcIKUpgAuO486IJ7F/b6IrIl1ODq5fZ68N0LPfTx1Bf4yuwfwScX5gf0Kbat/YGW8ElFhR2zGSJMwsFMosoZWHJNOfm/J9r0chQZWJjNoUhFShHhF6tsiA+JUZCe8xQECENpiZL9I2Wpfng8qG08fANaNRKMgvRYLYEQkqqbFkVEI0sVZuwd7HEllRgFGWUN4SIY9Qhd+8DOcpknMkRtWYYdCkzz9YUUhECIaFB67KbepkFsYSvYsNwoqB5PhVatwckI6PZSqzptZvCwGKXg5VowCnoiIjEGI5UiChGNVKFG79oQHxKjYPUseeLD4iFQejwuYHC3Z7gZWcVP4S+8cL6mXin7CmcT3StZZwWUt7+B+ZGVFDfYmQ03mN2jousWitJIfuIEKG4pvgH3zdPsqBacgEDxnRQcMsDOhx64cCEpI9CS0QhZ75zjdGniamMhJR3WU96iW3UKTt85EUqHI8hGvKciZRqpIc6EKWukMBsGHm+PIQRiH8EQAkKFapwgCxJHwafI4eSMjFKEfFHU0DtkVFq7nmJNTDHsK4Uc+IhPrs/nQTKeXc9xDHCcVVtdyu3CF+CYmqeALyKj3oRQ+Myasq/B8Cme9V/ikz2Nj+77zoWfgQvnSUv5uprxFWUfkBVQWiPURDRojUj1o7HauMMD1F343ikZw+mhgwmTADsjFpXQBfTtbkGGPgIbc9oF2S9ESLafSi2eC5owCZMwCZMwE1CMeCWgq0eZQy4Jz+i9OD30REBXj8Ixqa4I0ykghKQ6FTBTD+Ix4JLwQkOvBXT1KHPIJekiFLGA7gV09Si1sfe/E98qX+Ji9FRq8Vw868vIE3yIT7Z+576PLvwW1oX+MfHRTVFqtxhfsUNrilJH4TXZcqC+56zXrBVY3OGaijTcGFe9xYRJANkMckitmF+EW3LCW/hkDrKDCJEyklXKF8IQpRskpN2jGKSMdITIPCwCv5E9fKV5KgYpIwSU77BqXMjFV0Kyg4Iy1PWjmDBhRUBsla3RULnnQrKLgjLU9aM08Za8x5oIkbCEtHsqK4ZsnkqTKVT/E6M0zAeMtHsnPKsCP5MmU6hc2G9YJ2kpX0mj3SuhgE3Bg9Bs83XMMV/Fh9s+x//ohX74fwgcNN9GaZ0t4ytM4h7TKnUH3JMhULqvdQUWd7ikImUekKWDCZMAd0yYBMXPglvD1yT/pJd7Lqh8JgTwHPBW+LfLq4vnMnQm9Q6/Iu4CMTjoQciZepDav57tjRBJ8EpAV4/iMTztRfFWBPtShs6EQyCqSyIj2PeCngmrX4fXfopPtn7nvo8u/Bb4aBM/Bq7it0Ahe7EpSu0W4yvuMa1SR+GZLC5QExIC96wVWNzhmoqk2FBuMWES0SaAEjHol7DmV/EKt0PcWuyfJVbgd9Iol9PY/FIYXZPYaH9m415Ibpo/ARGaL+Xm6NHrAgkKW4j1dRm9Lk3xoa7PhbenHC7a9po0sRdKk/JUSGsrJttF6dCYLu6lNNohHHMuggu50knhhR+iz3uFjHCdYhGS/zHwznhruQmU+y98nncV1eCFJAAAAABJRU5ErkJggg==" alt="Font7x13 font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font7x13;
impl MonoFont for Font7x13 {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/7x13.raw");
const FONT_IMAGE_WIDTH: u32 = 112;
const CHARACTER_SIZE: Size = Size::new(7, 13);
const BASELINE: Option<i32> = Some(10);
const UNDERLINE_OFFSET: i32 = 10 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 6x9 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGAAAABsCAAAAACwMeifAAAGMElEQVR4nN2Y23bkuA5DO///0ekNgJTlu52uvDSK2gQpSq5LZs5Z8/XnVN/aM/5F47xv+v5SKmpTluIbi3CSk3Gr768x5gO+LRScCMPboKhEjHWswwfw1nnzthIpYYtCpxoyjsTG2MK3coznQArBtuSOEyMMhSw0eyocsVUP+TkOQZ2ISlLylhKYR+Ld1rJcRT4mdGIK2ypLIsZClSQdSa21FVfS5oNARME8bNkaax20+o5DHc5f6Hj+sPkp8cF5OUEhni9Hya3yc9++eCnO658Dp5qXqQzUsisizByZKXaaDDOeI7wkZQ/MP3WI0hLqlHxYQM7A9BzhhUbCEBIpYatFcJizk9SbpVrL04RXU5kLtIvoJbwEAol6j8UCcgYW2QU7IQniqWB5z7gB4yeuNlAbtYkPSDed6HznhS7u/8wDrjS+cb0LbLxIEZJCkkBJuJGk0kENWBXa7D9H/mzabAMNjvBCSj4LVXIht7K6ScpLBY9kF0MWFemELMjCpHDiunJALyU9QjUJzwzCq6VAMIGaHnV4ISX6pku9SASATjJa4TbQ4IhehFbCVssBII63jlGBxeNEipCkbZS+S/rygksCV5d20OD1y/ofHuBvim9OnDxWhb7KCrChh8yUKL4Kie2T8BI6ACQMewQJFCZ7JvL2JryKoCMvtwQnchZaEuElUQlQJ+KpxIYCx75ebqGJBkoOhzjsBmfjR3gJBE8g8wwlP4XUKCIbY5EOkOhj5Yl4OxgPVef2XrR9gyDRG741VWz/gn7l0ogPw/2/+ICIB0xfzWSlqVxsuSlpQd21l5oZkBZnLWV+XGuYoepU2mg0+cbG30BSE9Rh+7mdVE8XqJPUJy3/uqY9B4IOTeWAywovpKQJZMv95CloWi6WQFCxHBZGeCEl36mkpRdpBM3Ib9IovyYLymMVsgirpVCHrJWgtXxFD6Vjk/x2r3W3v5Le0Vq39+9O/EzLcxZX2taTdrOnyuTMRZty1nb0XJlcM1kP8K9PS6n68QKdEQCug2HIBppShi1vL+F/ECE7qo2OvGhpG0M4F1Zc5Lk5rLqDMuGG6vYUCq8jLmJOYEMpu7bIVYq6VB0WQYcWgBRrzGKoW8M8EcPESts6muaGeSR/pgd6NrVXfznHmnYr0RsOP9rq9oKa9y9BUJJp7MWGwPj+f5O1x2LXL5tqBxFWO3q5chqi9t+7FKLUPFRHcZBMrQ4FmcoJZZNQx8UsNwgpRK6zg4HUvUpYwiJ7VipTx0R3CClErr0DsZCsec1gkd+v5ZqJ2nQpA5Cz4f0otU/Y6pSMVnPJGmSylEllIE27lYaY0e62Txvs2g/0/sRLfe4B/pDh+K2oytkjs/ddWHw51RvKZijlUEhOQpg03Vr9WobF/bUx1IdoG/JgRWftE1zBJSyqQoT1bi6ZlNpkgDSznw/JSlAodkI4RR6g1Vp7Kr0LrCax9izlyBU7JIbwkaapSHBST5rZ9CBSYY+Z/q/jiZhkjkHxWLXHw3QrJYvs5K9IUqf9Shrf6qgnuc9NToLU+YXG2UfS10RCzgYkG/JgpeokmR4lLOcCCQOR6VYxiudze1IgiQZMluLMqU2Ov2LNAwXuhAztyIZPX5JJBvHkU1ba0qfJl/RT8EI6B+ztDX00F1xRGVOB9qy0pY/mgivmTfpHxpIOmd01yS+k573UqxM/uH95AF8DXgslmd0XW/JaKMmsGWPR+O6yS25AsrFWDrDTgMmSnUFiUFcs4dYZva9UxVEAyDgjNvzHcbxceBNA5DTuJABMAKXE9TGF181b8RIcQOl5gGcP4GuB6rCWgiR/Qu+TPE864QAk270Sh97o3fQPxAPqLSWZ/mytyUbVSAqlds6GJePKgGSjfHElNwyo3KQRdgOSb8TQv4jT+bO4pFEegj2NPTl8H3lhOAV1cE8Suztq+zZ4MYjhUN7XAXt7Q6I/+AUBVDqnsSf5hfSWXurViR/c7wf4O+R7Nqk7menvVd2kMJo9opruV25AslF+LTcMqNx0Ayjp36ao6ls9nZsHcf6xDfkrRh7liqvQiA2fwOlJcErnZMSHofU0Iq7HPw6OcQKoIMmf09DH6HRCI175k+I9JBD0J/is/Fm5WpL/C0WTwbi1b7/TAAAAAElFTkSuQmCC" alt="Font6x9 font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font6x9;
impl MonoFont for Font6x9 {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/6x9.raw");
const FONT_IMAGE_WIDTH: u32 = 96;
const CHARACTER_SIZE: Size = Size::new(6, 9);
const BASELINE: Option<i32> = Some(6);
const UNDERLINE_OFFSET: i32 = 6 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 9x15 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJAAAAC0CAAAAABeEPpEAAANMUlEQVR4nO2ZgXbbOBIEb///o3NVPQMQoChZspTE7+1WenoaQ5CCacfZe/fP/34Y+4F+7cuX6fu7fYv91vEkOtrodZr267jVZXgQnuTXP/sN4346OjofD6Rf/7BGFiruBbz0NOzddzMIdHTuUWxMihFGh4rurkTEvoRd+z4GK3kvbmGOhIaOvrIMOvKACmPwGDehBScrrBHMr9SI0U63Ql+Rit5ViYh9QfZYBxktsEZy87XmQJ2bZZWItViAs+Z2Auw8j07UbbVlfK04ssZksixHHP0I96ktsbuwBwU6snIQlFpZ1kssbgY39I7yQQ8nrNHoyBoHSt+Y6xkmc5Jv9UZPekf5oIe35JvHxWwom8uDuZzhlty3UZNxS7evGTc8pDd1u+bhxRcO9DF+2oG+OM+fP9BX/MgD8VfIBv0+GWSUniReXCZGug1IRpu90yU+52Cu+i6WjizcJW1x6JCLneFI4mqt4RecL/S6mv89NJKl27H22euiFWYIrqw4gvJbbuc1Gb7cXxqM2J2GKN9sAn2SlTZquntlhBrvLLPtQFAuI/H5JpepsZonE9bCxJBVbRxthlwDtspYdTf4aNLmMAOQUXpDqg8gSDXd4hoflXTD46mdR+OVhx89sPAjYEydJN9zmGHnajxm6VePHq0Zq9GTENxzmOFEzdevsCaQQNktqFYOFcqrW3EE8RjYrdChvrMww7gyOa1dcdxuwofhwCyJHnLnnMxQc2GUSK9gPAfiTLKsEq0/Qj7tCZ7c9j4/7UDPnuePHehpfuKB/Ethh3qxx2Ck0SsYZ6hkp8rHBCo6JHNXAlTAEei1lT8uWOr8A1WtzYqj9FQ4BRzB6sFoxaxQAUeAI9j/cV0rjooKOCruhdWDcVSZ5NeSZpEIdjzJt4X3SKWnQgUcFffC6sFIISgHvxf+SXCpoA5kphDgno8DJhcV8OMSVUGTTGp2DCtyCQcXwQ/1D+EQzAP55miAo/RUqICjYgv5QANx8WDMR0i3hCkKR1AH4oEZMQBCX1NFBRwVp4AjWD0YKQTlQJhitXzoSFABR4CjogKOilPAEawejKPKpAKOAEfw4ECaFUfpqXAKOIIb16yYFSrgCHAEHogXBnYqPgYsKNPoFYxsazJiVRN9TBLSNd0BEOZkhLrmL8Yfxn8H+op/64H8id0+6mYw4Occt9cmQ7qhEiHd5F8Hlk8GGrhkJd2KsRi94LascQQ4AhzBpSPAEeAIcATlkBCD0aVz/uE4WO5HgCPAEVw6AhyBPipmhQrlR5fKujVYnogAR4CjSeXD12qfX+sMRmy2JXSMtQc+Fbf3FJ8Tk02Yw+FLlaAcbsLo12llPAsq4KXGf4mlJocvVYJyGGH0I1ymlfEsqICjVFPxxvv7gpWMadC9GxzpiDNs9LOknkeh9NRdr+201iyoVh6WOPPoO/WsQJgCHMEdnweqPwRWFx6WOPPoO+NBwo+wg57oYwIu4WJCG3VyCsw1aHrR7cRpeL3pfTzb9ujzwLX4H2gb4y/VX+Mvf/wtP/JAv+sH51vkLKcTnZaT05yfw3WwXF1+EBOPS6d7LqjLxx2yrw72uStrsOYT66U1X3F1+d49+9yVNVjzifXSmq9YL/M+M+AXb4U5SfDXcUIm7MLAiZe2u2xSkauwOfP+bhINjgzGDW7IzgS7juiME8pj3TQv41p7MKZymZ6GtzI22PG0FS4gwFFRAUeA99eTXGbtHoy+BlpwHZ8CHMF+oHwId45rqqiA1xb2ZCVpmrV7MB7HISfiU4Aj2A40RgpwVFTA0WBEz2is9erB+M03lJF3WiSCXUd0xglFx24jrB6MKe4Wc3wKcATbgbiHr5ULfkEkyj6DlxIyYRcGY1JrfZtoVvl2SUeg9yXSe/is4ScyjL3Ai9tvqQ8sP5Fh7AVe3H5Bv+orvHR95T6v7v/t/LED8bbyWfUtHKtb7ow/Tp0DziG/DjrLkR6z3PIt5v2ngJcGR3rMcsu3mPefgp6XNPB0fEPpXqCzQ5FY0RLSE6iYFUeAI8BRet/lbeBAzv0M91O5juiuynpSHuumWXEE+lplUiFPDLWGY7SxPBGBbmlo0DFtWlfJ2G0YdD8+vQeEOdroZ+kIdCvkpWfRo7RpXSVYPDYbjHTuZ5ZnIdCtg6xi3WJ4OlYydhvWLjPdhh2eReUyortKgupxjULpMD++/piW8jFJzYy3YcfP8Bvj1XT3WcFJLiUaaLWHGE8bVc6ebKELiRyH27DjgWgHp+V3uXkMp8uoLozVLb/hQHlC7Dv8hgP55fPcb/LtG38X+4Heej3ebL3H/oDb5/H611ltWJ3eO1xbDPqmceURY7PdZH6AH0AhwBGsbkMLWWkUKgjolgxjzRI3ek+3Or/ZgQV7m7D2zdDohMAK3ZJfgtu1I0lfYFNf6EG6pVc3YeDHL2RDmxIauqD3YYMjSV3Aa48Hqx1ZUcuo1mnooG6lUXSaGYGDGjVsdl1Xw5GkLuAozE713jEasEYHfIYTbEJGg8R+oPtYo8GRpC5kT9Ghmt4PEpdAQwcs/CamQS65SJAjmVnFBkeSupANiHS6LrkgI5w3ZR47YImKGSSLWLNE6Ct+aaacDCcezOUMSwrj9hW2oAvOm9fM1Vp2M7B/LJrlIpaGPsYbjxrHOH/L3uOTz/oI/x3oK54/0PiR+c3kU+aPZX/osR6BOFPBXz84DYu5lT0jNselGY4tmVkEJNVwBDgKow9c80sBv2F+hFctDcFxaYZxaeCCCZJuRxj9Bi/cORBzDOxWHEE5zHBOVp+XFyzEWodsOqgljij9ljnliR0ZofRihiUmWAcZtQMBrdRS58Pqfw4a0yY13WGGUjEqt9mjRIhNapbPokzVJ70hTqtw2sXttSaMyN7ItdsJOhdrZaCgfNDD+9SGclqHr2EjCstdHTJBoduz1G3ltA5fwj4U6EjyXoBlCbb/r8PXCMvkljwkBk8fiG2o4ObK5VATBNuBnoEzcOS62UWF1+AAOI0nUZCJazrpFR6fIA9+kxef8OhAXEPv8v4TDj7xgnzE9mX1ottLbPf0ottkW/eiW+NimXQcP675mo2j8LrnitoQOs7n4Nbw0PG0B08vOti6+vNJqZjFhfyFMwAdckE62KyYBd1msFkxCn8NPt97uBXB6p/AB/VnPBnqDWMIyoEdwhyHq1CJfj8QfaQVs2JWzIpZsS4E5TLTg7DEO2F5LgIcAY4AR4AjwEtQLjM9CEu8E5bnIsAR4AhwBDgCPC+Yu82NoyweBGL3e2E8+uUQQbnM9CAs8U5YnosAR4AjwBHgCPASlMtMD8IS74TluQhwBDgCHAGOAC/B4VZ4EJZ4JyzPRYAjwBHgCHAEuCKxtM1uehAq0e8H47vwSxv/EO8/ynf1/lMmH3jUR1/Q9bO2r7gX3Z5h29qLbhtXs+1A+VKzK1Z07H/Cjj2jTmQUKzrO23FLRt/hBwCnZQPZlFx0sHX1HlLqgAt1iVhtBpsVs6BaeeB+FnwAXvYeeURe4CvkrmCi+kA6Ap2zetSngvvjiMIR4AhwBDgCHAGex/gcFxYjQmJ9dQa7jgBHgCPAEeh74QhwBDgCHAGOAEegU2j8d6g5T8MV4AhwBDgCHEG87qMdAhwBjgBHoK8V53XgHiZNXwU4AhwBjgBHcPIpwBHgCHAE8ePLAJ1CnzjQfLSNQoAjwBHgCDanPAUJo5ZNiMIR4AhwBDgCHIF+VLkCHAGOAEdw62KiepNmceKe6AhwBDgCHAGOAPfOCnYdAY4AR3DrYqLYxIKWFejHiPoqTObkmUBMKxfesFfQhzge/RafecoH+e9AX+GB1u9+525X5Efviu2eXnSbrOvO3QZZHbNO/MR3YINxVBxdssw7jufo1nDpNLYQFJ8nJuluszSPlJCCGlzSO2awWTELuo1us+Lo/sP/Fhxo/rJ6MdQvMgK8EGwPgiW+rfBkWAU4AhwBjgBHsDrcCZZkIc8HvFRUwBHgCHAEq8OdQPGu4FvBToUMHD0IcOy/Duy26QhwBDgCHAGOAC9BOTwTYOY7oZ/rz+erYQpwVDwIMPOdQPlC89vpxcDfsnG7/UPB+C6cDf8Q7z+K14U+xgce9dEX9NFnfQQOxAtHK9uyF92eYdvai24rFyNwON55erbFio7jvxLmnlFXLPOO8/bjix99wBrV4w12Ps4BkIsOtq7eQ0pBt4U56WCzNCR7AxLKs7P4Pn3C4tUn3exnzQOr+5vOMCdPBWI3ykmWmhWzYlbMilkhAav3lJgfjwS7jgBHgCPAEZQXZr6mvqRZMStmxawYd5AS6FjfryPAEeAIcAQ4AhwB7vOAwITHV4hZMStmxazY/hawTjoCHAGOAEeAI8ARlBfkqaxSMStmxayYFUfpnXQEOAIcAY4AR4Aj2N1OofRUzIpZMStmxVF6Jx0BjgBHgCPAEeAI4nzT0l49UL5bMZcoizSwU+Vj8lQgdqMyMdTpHobq44d6TIyfgYe9iHdYB6y29Z+DTy6zQgJm+xvwDcpnc4SmJ/8HWhP0POOkzWQAAAAASUVORK5CYII=" alt="Font9x15 font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font9x15;
impl MonoFont for Font9x15 {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/9x15.raw");
const FONT_IMAGE_WIDTH: u32 = 144;
const CHARACTER_SIZE: Size = Size::new(9, 15);
const BASELINE: Option<i32> = Some(11);
const UNDERLINE_OFFSET: i32 = 11 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 9x15 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJAAAAC0CAAAAABeEPpEAAAM80lEQVR4nO2ZCXIc1w4Ev+9/aP3MAt7SPT0bOZIZYVUAhQTe0s3hSHbY//zvh+mzL/Qr95V/UYej4yYqAWz6lT6etV/zbNpo0Khvi4OHk/R4KnEE9I9FY4IRDIM4mrDRW/LY4aADRSU2wH5JX36hm8Gl3HXY52AXz1d5h/yy/JXBAmlFNVhVFeMEIOFPlE3EUia7GBDWuTM9GnVpnzRzLhWo+ki1x5yq0SYGRAAJUBd7PrClXonC7kFcUWLUup2gnElO1Wipz2UoA27hNVJqMuVsqJktqUDVB+otsaGeLTEgADBU2dPYptU3UQhAwh9p7Ci/J3YRVUOVdF3Zs+SwtKg1BzeHmGTQpb01hrfiHvfWBn5lEH21S6Mf9UKcO64xcDKPdHku3+Kp6tryO7pZPA0OzZ/Q6fk3erz6eT17n2fLf14/8oXmxzigv/mpAeViBjUSqRbVSNngUt6ztDpP0dBKlEBXH11ZhrI4GturzoyZ1zqvjD4Vq7Mm5jOty1FBLRajgvlXAS1xAvxWt/OepGCH8+WlwVVxogpVYDqUThtZ3XA0YNRd2wzkAba7R431eEo9KG0BB6kRDU6piS0M4pYFLil7NbqqAe61210dySyLIG7sMaqqm5IbinTU0OWkMU09nCxHExDME6IeZiLfczThqKvxmFWtn8pEXWaNRmM1Q0TBheerLlyp5vtPWJMpGlcpqNe6VI2pgGbGTLQVM2aiLsKihtbqi8wiivKyUlPWUO1yZF0AjdqTW5AKxEFq6wrN36d6hlr0RC9u+7Z+2gu9/D4v7/tj+okvxJ8AC6oP1r4nwf7TRu0B2NVZ0D1wPAMqEh3Y6KaqihMFmnsJO3OO0hyMSG1DozZhxOjaI9HcDPEkx54CILtKRkyCe2ZCAPzNW6CbatQz7B6JI6tTvEfEzRgt4AoeyljIiEhNRgW6qUY9w+6RSBI0Ep46gid3C+LSjBq5yW0ygAr0sTYqgKmaZAbrkehSDewU9TZqLs2oEZGajAp0U41awBPrFLQ8ElnGlZ2i3kbNQ5yIRZlkjQBQgW6qURtwomB6JJIEjYSPihOU/aFNqAAndnjlS40TBdM5SIIjq1NVccLaAPW/5GtEjYjUZURqGxq1CSNG124hUndDVXHCiktm9WNiwyZlQ4dcUk6K3GMXZeQeJ3oGVLsuwxmoPo4TBRoLiZ+lvy/0TP/RF/ILe3zU7aTF9zxyTaRa0KT5h8PNTGlfAjKitUNdWt11GTqcX5kJUXDrxFPAUSCGRo2q8a2tQ+s8cQml4unECWbGzFIohkaN0mjm1LqRuIRS8fI9a4JxHbkAZaUdTUBh//FS1uKp0Xb1nNAoQLQs37Ki59GEQV3Qoh039V2oAM9ryCUmF75lRc+jGxh1px039V0ImGHDKCq8cV87xf2EmAlKi0ZdsOOiXXWXEnKnAGFR0Y1naxXl2Up0LGjRxhMOqsdH0sh0RMH0FBNV4YWc3N1cWrTxhIP6ooh76XuiO2BEginUHkDW6UTBdEoE96A0mlGPupp9Xnm1w6POk/SIf2P8Yfr7Qs/EC11/uf4t+S6nNzq1U6e5X8NtsK+e2SxxaOAdZXkdUMdu6Ti3M4cesdna8FoXy/fOHOd25tDOZ21rG15rX+bzdMAZKAvUIoAhBFiEFCqqv07BzEZFIMxaElXp7ZmyxREYIIZYjLGSnWnKKmOVNUYpLpkAnX1KJJMETbXxEfTUjJLgQcyIHVABTgxQNHCZWTS8JLu7J7bxGbMtsLQ8htYaAaACvLfY0gFVNLNoeEn2WE9s4zNmW2Ap0ZQTO6ACnACigT5MrH73kkwSNNXGZ8y2wFKi0VyDumurjPWkbJYBu5fkkagKPqPamfYtnpgdiVqgFgEMIQAk06EM4Or1MQHazNhYgmcwiLtGIb6l3NV+VqaxN/Te7lvV88rPyjT2ht7bfSE/6XvXZO166a7e3P779adeiA/r8KhzP3U9/bjmN2lCU16ssLToofYjX9E8P6EIq2BQWvRQ+5GvaJ6fUKTlQ5ry7cbEmpYAM0A5gjLAdOIpkMoOTNlgo4M4z0IWte5iLDEV2kbBiRMcsiaqarlatOOmdSNRkFSjosYq+pYVjrnOgrWfClq046a6y0WiIBn5CLaQPaqib1nR8ytHt7DRQesuoiC5lC42StyXTfEgIY4JEI9FkyZsdBB3sZBFbXWoalxbiemjKMeVMcYaXWnihI0O4oWIWrPSUtNmgNKIAjV7JD1lZHVY7aEiBo5SNtjooJzcdGq/rPM9vNxhcO6nPv9CuSH2JX3+hbgCffmWLx/8XfrcC/nZmtIb947NVkleqgvvq9bLV/Eea7+Qg4wEykO5J7sGgI/kNrb0g/oLV95FN6eqKR/FSgAn1bC8tEhtK/n/D/aVkVCpqpZv8j8ksJXz1lplEwGctTYPLVJ9yk2h7gNEoFZQDShzEvEM3qWMRZdwIhCJpd6HDS1S4xibsiRFnkjCWUG1qJtTfT2WzTDLBAxF4YjNeJ0oLVJ1Dq8dbs4E4uQgfMnenOIZTLQhlgmgJOdmK/vYSQwtUu6N9w4It0acxK1LbjGneALuDdassE4A0YZudonNQ4sUa3FKMIZSY+2qid0FrRqXD9ERgFo0uLy0SPWKz4C6HNVb0BWpHDtMnG1vtOtm84aoD/Un6Kepqmv1FlSkm5s8dRg80nnzzmex9/FyVtlV8Bl98KrP6O8LPdPlC/G1qPkEcdBT3d86VzYY1LJjmApUGX9uJoBdpzzUx05ipceLSrNfsJ5AIxFWs6g0qaHLEleq07Q0NlOJVUM4mrBjiLCaRdEtnOULjcecNcZU4gQ4mrBRyS6XxxWTbdOGKq3m7n7KjeaYTUVMiFQ7jzNIIzpBktFiQ8QyGbFvcimt5m7upKOPL/V4FyPCEiZj/lNVp6m54001oqBBqVPZ0bYuSpmir4FQBLrZgWaywIRMxy47FBvq2QNlR0wngJtP6FZsJABxPPQ4GV15awzvKztiOgE8F/sIYMcuVsISxlu+qFqTC7GJ49nDYQJ4KrYRAOLwPI6jnqTH8FYW0JpcKJv8KorrMW+pTtVNJMqkB9A7ytH7erL8it684NETWSOAb+nNCx4+kQ/9weqL6m/TUDddUvOFWXJ0rX1l8KitvR08aotmmzTyowasIecmBFivVDvUIC+Q7M2yaNB5y+CowWL66NS4CQFWiHQ0Kqotx2rGzCK1VzNmgu/JQ8r3kNLHPiMuGle/BbwCkUrnp+YKegE4cR/cVBkzY2bMjJkxUyNSuUdNegAb3oO61oZ4D8yyaNID2PAe5Oo0xDtQApyoGtE9ADHlPnBj6V34cZ/Q8YXypQ7B92HDe5Cr0xDvgZmmzIwewIb3IFenId4Df3+0llmlpzA3XwH4bfl2H9O3r+JtCOBD+v5VfNbfv2Rpv8uflD8uqVPddEl1zwO5ZaqbLgddzQ4vxI+KHGxbG10TrCHn5lm+7DZv9Jhga6pRT3KDYpG7xqZRB1jMtUc3N82LIB01WMyYiWapI6iwN8S+J68w39Q8IpCxtL6eFR/NK8BnhvwPtgDDAugl0FBq2f5Cgt3ImBkzY2bMnEbUaxFchRP3YOaxM9mEZGVl0huJpxAjLG+8EObTkF0nwdAVIAVgSEA48RTiFpjfnm73BKZX3bPcOddqeg1egrgF/uILITA9ljsYOZfxMXgJYgQFw9M8g+mninlTEWHFJTNmxsyYGTOXjYyZMTNmxkzU5VgxNygrk3JHlJeA3xPy66yqIZ8BlDoH9LDfoQ+pXuz7+tA1n9PfF3qm8WUuDd7r6cvh6FL7wuBRh/Z+8KgtmjUZxNc9ZA25YEKA9UK1QQ3yvGRvFpUGnfcMVns1fXRq3IQA65Vqx7GaMRN1OVQzZoLvyUO/U9zuh2Z9C6j8RQZIMClYn0DqfeBzsnj3O0DVnZCxkTEzZsZM1OU+mKga9DrgFTAqwIl7MB3dA189+hJYvQXVgNEDILf918BuC068BUQxjl4BNPke1L38DG8D4VfKEjNjZsyMmWXt6B64G7d5EyhU70EvQupjAL8tXg7/kL59FW9DAB/S96/is/7+JUv+Fed388fIz5uAeC+q72YdGrxX9zyQW4YGj7rpYqS4XlMAktfeQa5J1pALJuqytAaDPCbZm6jLlL2pY4of3Im6qubao5uoy6Yx2asZM4vUejrV9PdFMPmMeOP3dH54fakdYuN1qa/DlBNm3EPBiafQNgpOyUV+XrEMXgdORdLKrBNPQUPbqUKmaxPxDpSAXFPQS8+hjCRqsoj4AuAleMZsn0IZSdRkEfEFwEv5dPC1RDyFMpKoSbqZGc2MmTEzZsZMtBUzxutpZsyMmTETw6WRvpCFhoKV9+Q1gKz2PQHrJ34RQBJwIn5E/vXxpnh82xDN1v1B8eC29ioa5d8Qvx9+QyRgCQT/D5RGzDxHGQJeAAAAAElFTkSuQmCC" alt="Font9x15Bold font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font9x15Bold;
impl MonoFont for Font9x15Bold {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/9x15Bold.raw");
const FONT_IMAGE_WIDTH: u32 = 144;
const CHARACTER_SIZE: Size = Size::new(9, 15);
const BASELINE: Option<i32> = Some(11);
const UNDERLINE_OFFSET: i32 = 11 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 6x13 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGAAAACcCAAAAAClo/GGAAAIjUlEQVR4nO2VC5Ikxw1D7fsfWn4PYGZV9b9HO3aEwtgkCIJk5mz3aPXvf/0yfvLAX2wZH2GPuUFwKA5QcxCi6szvcJpSEhxSHl4yRaBz5Xc4TSkJDsk7r5y5pBAgc0yJh6CxO2hYeGcRh4ZOtKCGm6ehfAgau4OGg66VSVsWGoKsv7BMUW31b/4MLIo1ZOYO7XKxtDkjlQ8Qm1OkKrp2YolEgDVI5pgSDxCbcwfvw7evFLlrUFk+g9lt2TVurHvgckhg9TUWn3FynM8iifgWbhpXXJyjWPnX8I94oJ9XeL4ckWpr8tKmGPANWIB3zswahaWGPRjiUKlgwBhycWPjKKIkgkMS5saidlUyQHBItGAKhQpRLtQTHFNCmA2IZUmmFggOKWYUHRRpSwsElMQxJYDJKdgW0hC0MSS1TCr0jTNSQ6y3acSBzaxPoW5xDn1541qmki6LJZFLgaU6fY5VNJ5sLKidnWmyrNYhKRFhQ+KQHEBxYiZCbZlgkiwOpbBiZIOLIGI1Dq5bDVkc/DhX/T14z1O8aP0Z/BMe4OuCSZWT8k0PD1JADpDQbgkd2XppVAR8FDeCo+5auU6Vnm4d2ap67VJzSEdRDU4aeeEVIjMUHJRVQA1RIzjgXDjashp54RXCGadat1qIu6xdQI6uEjTD+tGoVlMjW6fayNTZouCQidVTI8saaFRCxEK3lnVOk1WIFhxKomU1crhOlZ4uDgUHJY8D1KijYWHnqtst16laUUe+zpSN4Ebk8mHg8vIdIKERMFVEUS3TQGTzN/H/B0Q+a0KcdZBvAst0r8GUT1gwypUhEREK0nX0ls9R5569BOYkUwFFB4J0W59ZtGPoLGahQj6GqgSZY6JgSNHpM4t2DJ3FLFTIx1CVWHmQedm5oIKheok4EsmafOLQbVpgzVpeF4jWO2yFJjGEOzS8cnmDeWs5mwpgOsW0RBIzuEPDK5c3mLd2S3hd+VEUqiPApBGhA/MA9mgiWiu0dcOyyhDMMyEiQgdOxU3nc/jOWj1p5S5+D/+FB378yXwGLj9eOFRx1HyeI5c3mcRBoJpvcPFmdGPXCA4iEt6ZxEFEwre4eLcju97iIgfLWfmK5fEXzM/CqYYttiarAZ4OFMuSo459OBbn/+lb24GiI0cDyh5DN6sknGgkMRhXr7AGWrJGtWAaC+pRJQCJqSWJokpO62guPjvAQiYpVwASdjSSKKpkOwChg2eyAUtoQHsovAKQeiKJokqmEaBikdEI2GJrcrSSWLrn7KARMAXCxQ/BSuIAFQfxAm/aF+QnIg7gXI17vOv/bfyRB46/yKEWbsofYb6HpjD0F0lMegCG4E8wk00yf49986QHcPIzzGSTzE/PKfw94+j7LqSOAPCUT1iovC4cOoMrEqVJWeF0fjllfARZnQEMRGQ4aYO2o4hhgCDbCFEOC9U1OjUcOoMHEqXVdcwIEfZCJGuDkmDGAp5UPuAMR58NREeIHBzIKE2aAQVAw5PKBxhbxmkhgjQXJWGQjwouYoGm8oHVBYd6D2eNgqdGHmrhhw8wzC7xFh8N/R3sB17//Ol+/FOfsRdyBUnmJhPQvYYtVIREeQ8b07229Wa/DfM5ljNjZJnID7ZATQTHAzUZ5A9SbTg3FRQtMmQNyCwhNuziE4sBU4aTJEmcJnEygVKnRQEj7QgMwAN0q8tAg1ijJE80haCbEPgWhUtTZIE6iVgMXCXo0dVli6NnIR9byhlTs0MGk3PF6DLAknS2XBgL7JwrEIC8btuJPzNZBmM4omRZaiyrIcxGgXJvgzohymAZKrAKKmXYBik463ucupN+D/+wB/joqMvmpMdoc42K5cDLtvDrTmnRX65yjAVnTmUMSjhZoFHDvYGKB6QVtUpnsAYuZmb83eSy3FHnwkGavWAKMGnDPvfAGxnRJ/cFQ0ueJNtgSBKxqcxC1Z4PWA0qaSW5wYnAMkG0fWBgka5852LNQqG3NPDGhUrYg6AIUt1Bl2DHB26BTYPMgCkFCUa3Ml7A4YkHDwz2/ZkhhXgiewwAKjDFCQ5MuPwQcz8z3nDoowLLuMXyGXz6wAGmCOejI9Uv8OhWVuFP8Wb2wQPf3f/d8E/gh9ZHkvnYwn5+bRkXLCPZMQXBwdWyDDVKZcgToib4jDwMoKUMIcJQTUQiltGjwnuF7OdFZOM7MN+lV3w8YP0YjEBpX3jSa74+8JhXvuVZ5ZZXzJSxlh7xyrecNdbfcHOXAFvgwnagO277AxwPPOaVb5kHXO0FT3mmDApCfeWVb3lWXXvBTJ3pEdLI8JV96GP4w3yLbzZ+cv9PVr7DfoAfr/89iWQ+wjAugkxjY5XJa4BjAR3ABLQRNEJlyBOiNs5oHYb2sUgspOIgvwD/wL7C+qFlEw+gzuIxF9HMVrzlhEXZ8i1LS98wHyl/PTJ6OWRiWOsZd2BynHtOD1IHx0dUWD7jc9S55xWAW1UJjVDKZ5wJyYhzz0f4ZVURaUsp33FHq2/5iP2ALXkXisdcVD9jkhIBA76DL8GvyYfoZOg38b9/gE/y7cwrrG/Em8i9Dpn/HDl8kv0sNzRFcuZRPbhaOiE9THh8ySONJkEX6A5D+1iMT+yj96vwuT/G/LR3TFaZXjGRcegJNzp9Zn3g+huGuvQYXtT+lbPm0kueqbX0iFe+ZVe5IXeAJwxxGIfgrl555Vt2h90PjpPe/iX70MfgHfg7fLPxk/t/svId8vH+5jP5KryfV/pvkEhuAxlBprGxymQHFEZJKAiaCP/RTJLKkCdEvSLIr5Q6DPUWxI7Yxp79ABnsXS+RuYHXw1ic6qccvB8LM8ZBTwIoKPoZFxkc5xl3SI0wtXjN/SuYl/MJf/VAwx1I9YilSRIxjbd8DhzpjglOM2OTzjUa8ZiL6gw/ODQ7UNT5Cvxav4AXrgeqVX8SuX2ujf4P/yyZ1F4e1ZEAAAAASUVORK5CYII=" alt="Font6x13Bold font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font6x13Bold;
impl MonoFont for Font6x13Bold {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/6x13Bold.raw");
const FONT_IMAGE_WIDTH: u32 = 96;
const CHARACTER_SIZE: Size = Size::new(6, 13);
const BASELINE: Option<i32> = Some(10);
const UNDERLINE_OFFSET: i32 = 10 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 5x7 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFAAAABUCAAAAAAw6+9nAAAE+ElEQVR4nK2UC3bcOgxDm/0vOr0XoOSxHU9y2sAkCH5EadJ33sef4hPx+SEv3LSkv8cacDILSzQMMSDr5YzApFd8fqyqbQdZBRAwRGFUfPigC6itKrLICtZClkJBlWwXh3GzCiQRO8E2RdaW+SiIivALMRUoUtFFEpEVrDVXmRYKHTLoJeHCYMI/Yu87QCVVyVt4G2xaddRUpWdwjEF/gIMGx6FJo4MOldItEcITHaANqynIDqbEu4ZSR5LWUKVmmUGBy8IQPDMq+4plbSxQ2VDjzHgK3o7psZCmY42cKjXLJaiAoKQ679qDVSV81EE4mDARTvwHcPRrzJMkXviqoNdXs0EzZah/C0iNhejyQuMTZTqUFIvLfgQ1zsXkpPwrE02QdAz0oZWGUsOJ0TCHDGrclJqjsEmK5hAzr0pyxJqaAkzFEI3xzYi8i1dKI5QU02N+hKO7F/JkXOOZDCRVQZkBSJWGgnCNbBqhNHLgN+EtLHX3oSAaVK6E8XksIwhQRTNgAH2htGMXwqDVjgviNO4LNQDHDuIhdJLA9dIOgT+AYS1qulCsRIGIpxEvnbjwCBWjnz1MH2NNFqp3A5QbQgMHQc7VUORQlESyLtOzXyYVWyH+8N/hgMlfAVetTVuInSwxsSEPBr7/AgrtHTFYyTqKgA5MOuEFU+Dn+49Jlksjq3DUqUvgmnYYUx1dArCUurRT+hmcFEoLkFIHpIwwRyylTGQgOWSKUamEkAgsLvtF1A6yCsy4iszDEFw1LmlpOGxIaiiRrp/8DTxH2ODsI75uvTux4dByKOF/FmZm0d7HnwCnBusviomYX8ImjzHHMJkUOZiZg9JE6jE/Qo4Z2rrRQqcXiTmox/ioovCkEIXyQQtOk8upKxI4F0kVkS6mJ8WR2w84GKz4Ht9PrYEvJvPzijzYlNdJq37ABv7y/0PBCYgO0fON9UAx/TOs6GzEhRk3cD2iaiTEDhTwDj5T6tiCB5qGABmTUEovUiQVrqqEqSKI0NEIATIGyPTMwPiQKXAkBaUWBw5UhoCZazggkcxEaDhBv8GizgZ8o6U7uAMW3km4YzfgrJFyLpQ02IMDG3oQUeoU2h9GgJu3pQjIthbkNFcpsUS9IFLhHUgEKCdyNFWYQkC2nC6CAB2cCJA9FxJUYtKqoZacsn2iZrLb0WdQiUFfgCqWnTCkchgJuPjYPaBGxRkSscWBNYMvBT+A5gnXvLDq42DJtUnPRE9DQbAKwd/pTBav5C/Kz3olOtmFWAUjpTNNOJGDmX4i1qKY5fYrUb0RB2i9Iw7P7I0mnMjrecYzJWBQDrzShBM5mOkn4ifz+3QeeyGLV3Lvt2D3j+FVrCbA+IVYhQUOohk9E+bHBCP85JV/TZkJKNwWUgYm3obAkMkl8zeUoagTYX4rLPEwHcoz8Ukv5BPpOqLp+ZyPvhPj8Ukv1IUEiYXHbs9QvZOtziW9Ek3NSFD8KmZ5Wco1JZovoALkeplSWbKYUIf6IcVlIe3lcqHSoYS88HuCVSPek4uz/YkcQuKoC1nlUSdq5x0xGM/5MxHkE3k94Zm++cnwmaj+AGz+KVjrcF+jCvf61qADTeRjBKYmTNG0VCnUg6kJJuDAmv4CU73gDXEIfqDAGc7lnjvRJGCTvyfWodjd9C2V3xP7fCAJ+kaYHwFjoZOoGOqReGYUfKEeVagM/wm2xMt/AX8KPKAVBtEJAAAAAElFTkSuQmCC" alt="Font5x7 font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font5x7;
impl MonoFont for Font5x7 {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/5x7.raw");
const FONT_IMAGE_WIDTH: u32 = 80;
const CHARACTER_SIZE: Size = Size::new(5, 7);
const BASELINE: Option<i32> = Some(5);
const UNDERLINE_OFFSET: i32 = 5 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 7x13 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHAAAACcCAAAAACCDXBuAAAJiElEQVR4nO2WCXYbRxJEx/c/tOf/iKzuAthYCMn0zLNDmbFkbSRByf7jPz+Mv/HBP+NhCv3jT9gowcQoyBBEQ++CzeduQplC8yBPEPQMW0zKd/IW2HtuJpQpBMw7kMwMN3vgW3E9mvQALp7LpgO8kVfWUKWpCYtBTXnJNVw8l00neIvouwhajgw6AxqWAF+QmPkXuNEaJC7kJJkhD8NEuF9GQYSBJvtZhxBa3Cai1r5A9nI7m3hCxVjbATL8SKo7umQXzQu8QeQcU1aMCunAhAq7KTQJqRYZhvbxDAbcb8wQlwsk/IEuSl9wM+UccUah3465fGGPp/sh/CMenJ9wZX3Awozld1N0Wl7KjpEv4BwslutODqaMYRINKo7rsPsatQmzW+yzepleJ/hrgNGl6+TbBwdYapOMWLiTQZJEfzkxJiSnwOwKdNOUQrvlFAgWdXL0OFFSVBhxRFMVwJqriHGkCo2EbpAJW/kMtXZpWOAohaBQSnzJBiMb7ifN4YtrBli+JJBJ0v1OG4wEHDFkhAcx0IBTqZmGKjZAqE3SmduU0gx4ZDQSLB+lomwEnBfeIQtXJx0CKZN2QWVwmNPFUL8HuW34GV6t/3b8Ex6cj7siI9EjRe5/ofy0aIMRJOHmX0lNyNR2I/+IMkTwNHUtYBOaUpoDDLUEKDS1yeMHgUJTBB28i10SmJaOjAgmtsXEBxkckVoCFJoi6OBd7JLA5Ko4DCIc0haTPKhnCipZQCP0MQSKAxuwjYwBGIYGHByhGQIkKc+dkVoCFJoi6OAITSnNASZXxWEidMpO4kGW9sUlQKEpgg6O0JTSHGBaOnKETtlJeRClngrYhKaU5hBNLQEKTW0i8TycNuCwirzENRZAYhKdEFtmSz8koommFJoZf34Y/z7428FHCnhW7QeLRCe5RXsv7MrvRxOBndiyiyzTupUFW7OB+iqzVaKph8JNEbuuiFvvBusEdSdBjERTm/DdWVguoXVMaDBSs7pY56k7CWKkdEuX5rSLEQ0SOqSGMxhJcDdg2p6EjaCwlG7p6NKdlL/qCZ7ISKHXQ4C0UdtFeSmL6MlLDjOygScy4ri2RXaCHmQvEkxSZjgEli4zsmHdj3ATzTWHZFyyT6IppbkEli5X3pGDgudwSXzhTdjhLbEqjdAZEtyjEoHjMwED8cjAPX8dLh78EfwND/61P8gv4LH9xd0X+4SP4QjHfDcURjvmK5yvbWCzg22CpTDicLuhMOAwX3A3/7pxm2z2Liwcw8N8wTHnp0CQkegIqiRBCt8K3wwgo7BgmVLiQxQCEhXMAVapJ+IBnU0pzQGmVS+JTGhKYV5kdU0pks7OUi0q05TSHGDcmYSXBMPspBTmAdamlgCFpgiNwHsyT5cEpksAX6KpTZgHWJt6KOnuBIpdEphWfYmmNmFe+MU5BczUEVRJghSTgTYYQdI+xDVFaGb8+S569u6cA/s13tlzh36ld7gcXuGtTb8T//cP8qP1yn6eW/D/DYKl13Drt7AO7MqzoPnUa7j/W1gHduWbUwb8LQHEarnJfUBJU09E5CB0LzvYOicuJN1jNPVQ2BWxxb2eWCeoJWK0ItPUJjSn4XZ3Lb6THes8taSIC4Xb7A8vlc8GI1/0BE8AptXZoEy3dDQ7wmgHNkgSj/TEsRVgKYyWJRpF4FEov3UU3gZIdpgwMFKO3ICtx2yzb8Mz9tv41Qc5xCX0u/jO3t+C88E3vr9ume+JQCFJzzBblO2/+OR6fiEAq1Hhin3SdEtHbwdu0eW1uHTBgwzZQGGFzt7oQL68UBegphPJIbGUkR7i/3Hn/nC7G/VyU8Buj2TItOEWbnGtWMpcD+UAGhZeIw9csguWiC3OUmvlAA+6gBNLmeshT3BNlvAWITFgS7pgjUjhPYvNEOgEQ3ilpcz1EOueJxBxmhUj+3GWGWaP2DyWduftlqXdIQM20RPYDCFmSByG20rZo6yrcbDgQa5YaSnz8TlAaOkSGVYku9DZC/hcoIGFzi6Wdr7gl7TFtVZhKVrcpkvcbDnMT+Ef9yA/bicjoB/bE6wN5xEww8qxkugvkDzwt4UwgqkU7tyzYJYJOg4QtEtuLkuwS0vKSwrOg20QzBb/LnBzrsOF76TIDrs0fOoGH+RKeMdsdBE3L4YOWRphD7aMZsbSMoGedksexGMHK7Cq6ck0xQB2RCJC1CCxWxR4wRWaIcQcjy0ypA9wfXYyRmjKzhVO6aL5Cq60OflgE2usorkbTVIVU13pBdxqS48eHJzvdSda7gJtFnwNYKVbsC0t5ZqH6LXA6zB70tMZvID7OaF58eAG9ko5p/E1Q+ILPHjES76PT84Un7332aFfgT/aebSGn3SFjwvS0B3ewIWghj1uPDuo2dfIMMMRmOJjC69OYQE2bJRH4JYukeEI3NI5eIk+IUfaH4KDc/qlQJTCBBFMxDfEC5Be8Fx4iVKIT+UwF7LuoN4QXqY5NqcfyGEuJOdp6rWAOBQxXIvmkfSaN8XqsTn9QA5zIesO6g2hdDbFHTr4Tg5zITlIU28I36t2Mfpt4c1vg5fhD/HB0V9675fOfoTzQb5w/sGE8aCGH3yFNUhD3HFManJAaxvlHVwDmPZCNDwCU7O2egNf4RpV4JYucQj4kv/FJ1OEb6Ln34Ab2ekjdKilo118KgWbybRnviGh1rj35dWD/sQtXYaUZFO2NYsPhWYvzMAeeyE0O+B0iz9wSiBr8FDy05QyobDX4qYQ6IAOrbLX4KHQbJSdUNhr8a1FFTrUGve22FbThfSRUoVmEcROm57KYNIzQeFQhOLPh+DX7yN8eOxz/I8+yGfy3sbX4B4/SlGduwl8SrKRf73hO2QFVHsOTymMQEyXENfG7Uz5ArwSKt8jSydTS1SGFYhaivlZ+EXJv13gS6G07wnUnzVNPZTs5NCVuAF8Q3LM04Ik7sRV6FLY401vSYyOw0/lMBfiJXxdNGVTNmVTNkXDJY9SD2XuvhQ3gDeF9zxn+FQw3wYPwx/ig6O/9N4vnf0IxyfzU+ApXsQoPAzjQQ0fdGXWKOKOY1LTA3gqUS7wlNd0jwlbHoGptQZRUdYEX8VKFZhaog4BlIpH4bfR7fOFvka3L/jVAN4GLKgvpPAamnpDKJ29PJfRVtMzEQhlz/CZ2JZpKfWm8JrQ0dT3ZCn1pvQtKBMK+1Bm15agFd8U3xqiUzZlUzaVdRDLhKSoDODVpqcymKSglwK4s4VPqvkA/hV8Aa63KYLOjv+r4DeaV9CV/guAxyUVnsXxHgAAAABJRU5ErkJggg==" alt="Font7x13Bold font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font7x13Bold;
impl MonoFont for Font7x13Bold {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/7x13Bold.raw");
const FONT_IMAGE_WIDTH: u32 = 112;
const CHARACTER_SIZE: Size = Size::new(7, 13);
const BASELINE: Option<i32> = Some(10);
const UNDERLINE_OFFSET: i32 = 10 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 10x20 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAKAAAADwCAAAAACiKjAGAAAQBElEQVR4nO2ZiZYjN65E3/z/R8+LGwGQzK2UWsrdnqMwEbgAl6RUKrnd/s///eX6XvBd7S7432qQia2OHbVGb50Nx9/T7oQ+kkwgZQ0yEkA1LdNQSxowsfOr0v7dAerIkwmkrFFJY6FCLZIGnNJL0vbdfnXk0n91j8ZcQqGhOsSbWAhIA06ITKDOj8XK3VpaW3EVxEU0RO4Vi5xRvyA1mwaTCNT5sVi5W0trJ7U0Zo4aO69ae83LK+nWQ3mhxir3tlJLw4AgKS2l5I1qzgprmcXbGNFtnfWQ92qscm8rtTQEK86kIdhobTVziZCw4IGyjliU5iIORm5rVkMAyZ0UGiqG1rKYRKDOj5R1xKI0V6mjIZAEGg0iZQp8ldtWE5lAnR+oltl+ktZpDNAwa6dQg0K20exMuisdnS211TZV3UXqaAhECFSjh8PNVZqSz3whbT3Md6+3xoe6fa6eJecguRL1VunEr6XdhwXd673xP6O+y496vOIP63vBd/W94Lv6d1ywv3MW0ldA4STEH/roIKG5M2omr3Qlzlu0lLVXo1ESNfTXL05EJlsEEiUjRqx0pf3srCFCNYmA6oJdzgmcDHTVNF82NbGnKx3nupMsV5I7MA31oXIiJOl+QFXSJMulzU6ELnQ2Vb0k+e6YStEsQnINZ1VokuXSZidC5+oZXjfqijx8d0ylaBQBnMB0pmAhpEruRMTKlZIn1cRBsw/VWeWdrOY60GWZfEeoQd0QDRXFyYOYPFfPOHOCUnknqY6x3J0WgURIquRMSBxSeNTVRPeT+6x4pwUsKj3M6n56VPFtiqNJe53PjG5AARBonwvi0oAgEWrvHEeTDqqpzUuvnhRSAASqXEkKxSWDzU6E2jvb/QVputBx7tCh5BWQpZqvhEAiCuHEniKIWAkuWAgeaK11mChUijg1og2TrWWlYVLNoNFbCDyQ5Dd5ai1zIvHPanO/x3pm7R/R94Lv6nvBd/WvuKB+8Z2lfPcoSaYwSF4JnpRucjsdRF1F/dEBJ6IQToRwLfegJqRqKi+twp1Zk4xErLyTZbbZiSiE6/lCyEbEU9uIvdmJldCecCIkVbLMO7N8rWlySIAHNSFXAgsWs0I4EV1TvJNl3pmlp3NozJ5gJhekr4ZIGScWs0I4EV1TvJNlxohQ5MdLzMQTzPQFGVCaI1JFIZyIrineyTJjRCgSZWwpM8YMKE0F6ioK4essCqUfR3RVOFlmzypDTpIoY0uZMWaIIkjT06wQTkR7womQVMkyY0QoEmVAinED3JgeJMCJlVAIJ6I94URIqmSZMSIUiTIgBZeBPBMkXJYTK6EQTkR7womQtKYzs0I4IZOb8EYUwolHhPaEEyFpTWdmhXBCJjfhSvIYsZDNTuzNmmTcHi3vRNYIlBNRCCdCNkKoLAmo4+4oIxgirwSztkX3P/pvivRwOii1kzIJV8eCZq8JZ47xd+t7wXf1veC7+l7wXfHdg3JRGCKHwhAZsSP1MxSpDgoWrT2Wr2KzOjY7sZidQJWTcOIORSabNQlVxV0NrfUY4pqkTcKJPdmIlSyjzZqEUuHEVNc4cU6lKpJwYk82YiUUilsLVmErL/UJOHFOpSqS7BgRG865aBJsqoQW3BWL6nw7sadYqTjJjhFYhmc4F02CTZWsK141zy+zEzESq6zRIdkxAsvQDIUcHakzuuJVnI+YzRp89ixaqHJSe9apyjCnJ6UhHQEtxYIbcZmeCeEdreJNaucMMrfTMKcnpSEdAS3FghutFxFlQC7VtAo3qZ0znNkmk0MCaZ9XQrOatNU8K2v0DLt608q3KT6Tsk0OCaTKldCC0qwmbTXPkkAiFscItGYCVU7CiZC0TWhBNMoBO/WBkd4CWsqy4W6hNKRjLwknQtJIkYqgIPKCXW+jY9db/rhyZ+lvuMyP+l7wXX0v+K7+FRc8frEcO63DDN8Hm9ZhxUbL7GHnqbxi2RUdGkP7GWpialvttZndFBfKiv3KfT21n6EmpjbVoSCGNsWFzldc79zP7Otd51AQQ5viQucrrnfuZ/b1WWfVZnZTXGi7gs8tLe0UCiRBoUhtUDRyKFh/gSkePQNKoVlQJlX+rzYwRGRAEkAaU5qPaRJTFR/mCTkkCCJnz5aplDpbFB1WgXZYzNjo4gS8FV1iJRTCiVB75WnlnSIKhYYKVKSUAaV2mEhDegFomXZEIbyWiVIj52nlnSIK9o4WDUkpA0rtMJFaKsuJlVAIJ1rNztPKO0UUXFApoiEpZUCpHSZSS2U5sRIK4URr8HhyOvFOEUWHVaCUAaV2mEgtleV6GtblNE/I28pnbop3iiiImFRZKQNKHSOEU3q8LrD9qhBLRpHaoEisEDkBSEU6eFpuWulR4XNWVQakTjtzjI+oD8VP5Anbs3phy6ny8PiJPGF7Vi9sOVUeHj+RJ2zP6oUt5+Ij88Nxnr6c/UGv7PlH9b3gu/oTF+Tz6OfWb82ojf1tWFrwn9J8/oGUMsSlBR9qs/F1zWMORPKbOLXyI7H9A5rHnNFeV/0zXZ/ylOYxZ7QXb6l+6MoIUK2sJFaQTQNE7cQdklki2Ek6o704xsYC3GU8paqNd8aJPdmIWDmCdLC8RKO06a9iCxM4EYqhAVJzsh0jYsM5lzxdauh8TnvV+XYiFLN4GsukbibbMQLL0AyFvFJ85gmntFedbydCsamU8c7tnEHmdhpiCnmleCfpZ9prvRYRik2ljHdu5wwyt9Mwj97OrYlntBfXsrEAdymXocB0wpZ1YUmlpYIO0SSTy0qTz2gvTtALV0aAakBmqSWlgk2CrAOdnInY8FonQCKJwuQpUtVnqlMX7et39eZ5v3nBnBR/Wb95QR9FvKPjCfv6Hf346bqnN7f/vr4XfFF8ONDmL4+k5z+AYwdHGukQGzG7793Sbs88eKXDskW9jkysNKW/kJLtmrd0uWWcBhBTflqnmnKyrdq+tp7WRo1zbXdIm4L5asyFtBR9pLIqwN5p5FX0CMgCxfnX35mYIKZWlnqSTEgz6SkKVRiBZtasYlGmcCJk1xCcKBPx0oKo5pL0ZklhsuUCNgyxiljF///QOh2TGbtMw1UEt+aOoQVR9naS2JBV7tnkCJpihlikx6mHaUaCtERDhShSEQS8BBtaEI3NTlazs03qvIgWMVVPspW0QEPQ2lWa1Z3WHQuiWp+UdeHKNjmC4GSoeyU9Tne2qUKQ7rCs2hRezPKltyDq9WRCqqRsdZG80pYlPUzOExdpjYbgTNrB7LpjQaRZueSjFeQCbSUoqOSoydk2lCo+pVpDcKJMxEsLojlXpFfEedxNlZRXp5YcNZGJVdTEVupoCE7EBDG1sjQni3wz20Y1Kw3Sosahs566OvHYjQ47NsXfqO8F39X3gu/qe8F35QuO77IpfR9VbxI8MNIk2nWH5notbGzN78JBm1XZTKjtvEiTGoKFzPJF7ES7dkvTNaOdGoKpWU+SxmWrrQCInWarqfOUbqAjjv1WzzjH9CwllIwTca8xtYQ0ia1mZ9JBL1xwLvc1duakgbKO4DFkKU1pgLTyVuycTzyqZ5xtuIZByXc5IynLiELU1NmkITiVThzzYudFNWPQEGxw3OWUsopo6QmWekZlSRgY8k6bpKwhECVPqdMtYZFw7Bhvl/9MpwwxpGrYpqr7s7zIJilrCETJD6X1Gsq6CxmCQVl68cOR3f5RXmSTlDUEP0l3R1mm5RrKugsXM0m4Ij2vwEnPStvKJGUNwV1ptYay7qIyJOEKemp6CU5qeU5ae2fStjJJWUNwU1qsIYh0pWhAMGvkpGelF6KH+AQKcfCOtFZDUOIySOeNtnusIuBnxWF6TrbqEA3BPWmXxh3xFK3UeFoPL/RwwRN65aTHz3+84rZeOejB4zWtIfiIPnbQIn16PnfsayfpCi/ufFp6zPbn0VVng3/xDIqVzrSZGUUDmZAqWYMbyAQVudU83iMgyBSx0qmWqYEcY6RDoM4LHdaNImoiE6KGVeOYoBkgS2PDBoidutXZQEyt/FDbrda4FnPEp8TBHO0T58E/EnsGBpQ1AuWsQo+oUHBKMJmgS35AbCCktacRkKO7JM3imhQA8QJhRCi6S9Isrml9APE8EamiuyTN4prWBxDPUgTRQ3dJmsU19dE48TwRqaJcmupnktZNV8QDotco0aehuyTN4prWBxDPE5EqukvSLK5pfQDxPBGp4kT0M0mzuKY+ECeeJ0IiETHrZ5JmcU11qp14nvhEpp4ZZQadU6HgR4I/JG77eX3yzF+54ceO1O00BB/W547Uh+Zzh039xpkf1faC/JD0R2gJag3egNcZFBdapwY3kAlrwF6btn5KUlpzxyCmjUCQOeIo33+ZG8RmIx0CdT6KNcjzOnSubOpsIEQN5/KJNd3rOhuIjUZjgKRjVNRq2yvSKfutnEU8UJbEh5ZSqKF/cJeOEN4P/pFYTCxyqQU9gRN3KGaZZOu0T5WXEQ/IVt5y5aNMduIOyZBAJJet08ziie7doykq/T0zrqhacU02IjSNiHf9+gX1eyPhqeyKqhXXZCNCNt3FZ+ASDUcGpC5O3KZhT15Qzh0kl20atJ3sigxo9m4RppFKb6Tc4dpxTTg3kVwOI+KzzlJo9u4Q7qT8/AU7JW9sXZjgOdDs3SHcSfmtCyLYHdm6kJDJTXbiDmEarkgwcYekSiMXyWrhtDhG3KZpduI+hVHnEIEj+tTT07tL+qlKdlruBe+QMNDZpDEWfFC+5Of02dN+Qd8LvqvvBd/V/+gFl2+w3xaP8RdiafAGdt9tbinOtZ3pqvPUpjOKASVXszlovEdAkDlipVOtU80cY6JBSJXQwMM6FybU2UCIGpwIaRxzrlolNZGJlcLWBohJ8F8tXZC3w/Aa5d/pRhUKk+FnGvAj5X2kfok06oZbIx5ROLomAqVCT5KSBkkFCuHENUmVpGtS8GZKL1FYkNMQDcRlonOS1k1XxBYyTjxHMo6QXFl3SZrFNa2PI54mDRWQHN0laRbX5Ico48TTpMHvCvikjSRdk4Ifkx70IumGHAJKT5CzpNcnR6eklZ+SLiv/tD555q/c8GNH6nYagg/rc0fqU/O5w6Z+48yP6nvBd6VvHN2R+Ct0/D3TBZdfP8CXBaa66mzwugdi3VAXnTeqZqVFquevn0hysa5sZtoEBJkiUOdVS6+RzSYaBNrnEKFaO+RIbwo9a8AgMiFqcCJQ541Gs4FMrNRpARNh11Drs9KLeEHHm+iCaqUvH++m4EmaokdT5wlw4g7FUECeJAmU19XEbdJeyzh6Mjtxh2SSsqBcSa6wqbITz1IEKfSjkCt69h7ZiBhetZ14laao3rqgjTAt6HiVplTpYy13uHbcoWmEaUHHqyTN9NkL4o5YHCPuUxiRiSaZ/BkS2OmFBQp609O7T0O0qif0p/E2CYQKI70Un5R+vi9JlymbUrmp/5B0jXYbCshJf1y6hyMUQUTXf1b6vOUmXKlUvf8HpQQlpJiT8voAAAAASUVORK5CYII=" alt="Font10x20 font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font10x20;
impl MonoFont for Font10x20 {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/10x20.raw");
const FONT_IMAGE_WIDTH: u32 = 160;
const CHARACTER_SIZE: Size = Size::new(10, 20);
const BASELINE: Option<i32> = Some(15);
const UNDERLINE_OFFSET: i32 = 15 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 4x6 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABICAAAAABjUaxvAAADs0lEQVR4nKXUC3ZbRwwD0Hj/i3YuwBlpJD0piQP+QJDDJ7c9/foVfH/xMSRerSGlNPNHfH+NYpCFbxRoOFNCH+MBx4Egj77n/USQKhw3y1C7avQhCwYER6iTIeWMA2k5hEEeOaIhoEhpoi1vBCrP8hOiPqBPbzh5MGdW5OtZT9XcamZ3TJOcUdYtZqeSHAuzJW9D0zbmMC55fG/q+h4U6SfSDNIOMO62zBtNE2PDkg0HaD6SGpREmIjoSWr0FDwxjZg0WUr5a9h/wZw3Sdl1fk2JP4CFG44NS46jV9bxCpkPi2EOhsU4i+AjHbbHE+Ulc2DRaA3GCfKjRUuMIXzbXY+ZzCUlk7QiWj6ayvSdLS6zzP0zUP4LLrrh0tT04sronc+aJIBwGkUKe7Y1iC8qgcvTpCIN3Zi5ptpLLFhLPxY2zjJLMEK84ya+kfHSmA8i/kYkNRpDliY0LQuL2fgBctTVebsKbDa1eaWU8+uBjkzlqNQITyx9oSfqLDoup247uOPyWNdWtXTOKNG2Lb4WbabzApdiCr9bJmYW+0i6hV6q+eLJ5Ropg/fInY2Tn3hVXX0Pw/HQFj9JkdqnRrsyukd2JygIMaSWuTDhd9NwKXnNZ3FQ0YjpsPFl5hryrkcsWJMtpCppLCMp1Fi0+7EVCx0/KBfIvcHsTR4MjZKYJKs8L4fbS5zQ51fdxDR5VJdkmgpDk5MG9jtN6DJL1QvDdLSqlYhJitrQmCVs5B2uIqsmcFDSIak8IMqTqonMV2m/UrL7a/cJl+LG+shBXuG2mchOok3gm0MgUgJSGpkifi3LrmYHhSZ5EOCj1fRi0vGQN4KseSTdhB3kKalIS7EmWArSFCg8qNTU3Cdct2uzmtaCnCopa2dIhrb4iE9Yb97B2MtWx5rxkdJTOJ6mVIOkjWnbx/M/VWSHndZEhrXceROWkHfmO1Z69Iw8OiOL13Gz7t2i6QgLby1DX1U+hDPv4f0f0Ss2fU6HxPNZEwRMhsl1aWXFM7wrfIf3UnwMqX7CI5J/jYh96cJsWLye1RyxoX5aujBvHBZ4XitR/MTngC5m54jMeHXl58iRfBvkfsoHovCNoR0kMy027k+Q2wcRlEF+X9qL2MdshV2HBVP5ybzskYS1q5XT7Fk6Iw/PGPU6XG99iJHv4cwH+MKf4GfaktyHch+66bSqINelI0fhTVEsPiGnoyc2cF5gvqZUeYr9Sy7NG9l/yh/MB7r0YvuwmjO8KdsvkS81nWHQoib/M/LY7dbf6QY2hZV+LA8AAAAASUVORK5CYII=" alt="Font4x6 font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font4x6;
impl MonoFont for Font4x6 {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/4x6.raw");
const FONT_IMAGE_WIDTH: u32 = 64;
const CHARACTER_SIZE: Size = Size::new(4, 6);
const BASELINE: Option<i32> = Some(4);
const UNDERLINE_OFFSET: i32 = 4 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 8x13 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAACcCAAAAACSQfz3AAAKPElEQVR4nO2WjXobtw5E2/d/6NxzBgC5K60syXbr3H4ZAzMDEORSP3by918/jN/rAr9SwcQRVcrmwi7KFb+H856qYGLrX79IPRUBflGCKsCY0Tdw3lIVTBw1ghoAJqLBaBuYAMUf4tffp5nTDl+3r5zu9Ft3o7Crcv0GdfURmDjOUB5BSQCuUhpGqxxMG2iZBuyFkYXb2g4lseD2hczXcr/iELAcL7Yvl3NBVY9R66FGdQZUhKjnkUQ0ubGqGIgAxQ/Ry8VXYIBASRS/L2Jjg04wegtOOK1kLgRagundIn3JhNlUblBV8QXccEDdZ6Zb/knMoxZOjaP/Efy5gBdYX5L6cPiS2FBihCtpoBoEBm2QpRdw+8Iq2JC/hDQIUBeJD4HWrLSPmgIlTnqBm26Xiv8WqKH6NUeGDqIxQUugNyUiCoqXtmyk0XS5cVyxkshbhtIqxA4R9U7BkVEXABVoX6LWxqJmlDYYqZlUGA/CAQpgnQYm7xQu3Bq6QfeU2hJq5qBSYGcy4G3QbioOgdEjLnrdihwOPHFBzwsDGEBNRJuKQ2D0hGoejqlGa04kRCQEoqESUyKiSRAJ3Ymni64H55Ki33LmLSNoDCKyZeqlElh1yTZ9LpjRYBdx5ncjBzc/xWtT/yD+XIAL8KVRNMqhjGmJ6kZjFDJkbSOGuVjIBCWHPyQ1dh4kTikRK8VJISIKioFmsmhkX4BQlzHuFUQhIrjUUDPQkAQo5gWjEIH2Yl+ANwOqhrFSRCEiuNRQM9D0a9VAyv6hMGxqsBU2vA3XiQ2i0OqTUQlYp4EtKZNXKiwAktiUJhbHcM5hD9VKEYWI4KBsgaxDzUCTc8VJqsvjcCl5MJ5aHyK2gihEBCeFiCgoBhqSAMUtdQHEiFYDRCFipUSsFEf15mpRsXTMopGLC3CK76Q+1CWexLREdYwUaHgagYeo6UThtELUwFGwL5A+Qvws/lzgN7iAXwZkfSsUNaaXY9KrLxFqgNVgAlDUBiqNQgZdY8INNlhCxKU2Fa/n9TrcykmgilITtATxs6XQB0HEvQblwr1ZMsOESSRFNARGgXayURsl4l6DcmGIWFm8bhUBpcVgmYNb4EBYcU3qOkbBKItIYpRgEAJ60doClltm43xraL8OEBsqlkjCrCBlcCli7OgRs5lF3IpGXKhYyqCyI1m0ZKlo33JCnQbQHE2sPFCxlKmS+qGgqhwGy4gqis/oAwAfuZ9laqlrTPi+RkgCyCZMZlCDFFg71RvnFnP/FrwP4L9kP4zf4gL/4ht/jzz7dINTMTg1+fhO5bE4YPXnD9Ul7pfWviOOTb052P7szMJ297hfupw+No8e3JQbe2G7exyWeGct+dsSnVrNXwu0aglQ21/zNCIgjrVks1P21zyC6g5g1KEoIhErJff1SOUUw0LnkxBgJSXSjEtTLhwPhiIgKs16imLzQM1ClycJi3ACmkKVg66NrSAKEYUxXEiXMtQsdCQhIlACmiKqBHjOZIG0IJbaU0MmaCkNNQvdZDOUgKaIKgVfEQ1GMFWP2lerlkDXKSXraUxK3ccBTlcl+wjxOXhG0Q3smS/i9clb9Cu4BwuX/Wu8MfrP4D99gfVZ1FdiSipisN1DHKbfwt63nMZfBLWx3UMcpt/C3rechiQWsFyoFGZpSmtVUVlIEivlKESUIntDMGb0CFr+nWPN1bZJIpoUKkmsDFcki0pyVsFSHFoLdLJca2Ul0NpShqwwK7DuLSdVjYTBmNETOINkzdW2tIOUq1YrZaTjMLYdaAHLLXOAZ/CpsSI7YYKIdJ9yxB+MQeO8f+nBLXNA727oTcGBrALMUhdNoJBENECZAtY4BRNZ5Rk5cnD07yJ7Q+/h2y7gK7x8iU/wiS3fi69coL70vm/EZ3HaOecsPaxX74aJrWt89BFqHQb7P6UHsJS2x5pBmXvesDZFFCLuYM8sLCOq7W93XKgYIpJg5LSbZgeFC1r1BhzO8loZDarLRGmvojHT10EyseBy/eiTxD2c8AcrRoN092IrQuQex3F6xEJtS1JljQCWNmJs1g9FsAyorqtxoQghWkZ9Jzfc5A82wBGDbZlh366XAd3lpjG7tFZU0AsHA/AdFA5qkWAZbf1gg2XAdEu9KDbFJlEGJjYszAUKIhgVerOx3alNwV/pB8ttmIA3bsYf4jx3sD+DPxe4ukB/uPuz6g/9KR7MdbvlcK7Q9gMHDBJBafECB9S+W6xzlgm6agn6RIVACNEy5rhlg+fX7+Y9uo2sEGfBHM/FskCIEp+AgKpv4PLlwowjxElFixirEhwpI6A3AZTYSAEx9vEFmChDTUwaLqh9gEwsWMNkm8ggqxJ9JQZeYOOpdtigzSTCMjUNCjRFzCCNh8iqNPd/gHoSYIoQa8cSjdmjryDjTZFnYIgQs6FeN5rahOSCN++JK7jbxGX/MzBDBJ6MnbJrSxJ+DTyWz85xnPIOsqGeG0yNEK8hex7go7Vn+MLWBd4a+JP4wtbvgRdY72CbfkWKHZIANq5Qq6BNz1ERoFi0qwGL/i3Qgi2EuQaJg/Jl9OYQLNI+CrE1LmhFiE7oZfBkubYf6EvwgDr4VeWZRDTwjfCcR1rmoWoYRiTiqZJENHimYOyV7tOIV5W7Q9bgmYKxV9oHQcRr6qa4QtcPFfeR5kDlVWUXQSmBZwrGXmr9XYeIV5WIDRHBIwVjL/VTF8ANRzSPtMxjxX0a3OXr+MIZ3/L8r1zge3C4gH/o8qpCQTtXim1MHmFtFtq5AVARouUEFsNZ7R3aoBXpyDomOZgDVrMVIbaeWLDRL6EN87Nwb93sNfBYH4yB/NdQlQgAz8QTZRIfzf8Siakf6mSoshakehU5CzzXDjNNoovHOhkiifq/hS6nNL+ipDtx+AoL41JJGWmqugbICQg8V5LNsJ0KC+NSocwjAGePdHsMQfpDHYd8oB0kXGFhXGpTM4jhO6BOcrvUmNcUYkspIhGX2tQMYrwAUhaSbdh5pgNry2fq2aAY8BpcmPLTWAd+El/c/nX8X10g37fvhkfuj7FcP0ixQRJb77C6bdwIqAhQDNqc1scGpTBh1qRG7fICtXwQYmuc2EKM6n4UXIBfR+UN5TcYs+uPFRO5VkLwXgSvKcyfES1EfKig+FoZxCHvKW8CNkhN8UjBzF5pHQQRryoRGzxTMPZScwE/jDc0AdHQfayg+IEyCWlf1/e+g6rmgeI+Da/zZXzhjG95/lcu8D3gArwS4qfgk/OdKvEmZqGdC0CxQxIX2N127gD9K6OBFqgIWr0OrQ0QaEUIs9Y1atwJU7cihBpBoWaAIarCfAZ723ZPcTNK5S+kwq+3OvVTHUztyR8k0coGPFayEcM5GBWRiI8V8SQsXvo4idKeJdRljLd04CvKh/xxykgl1FJbNcRbCsI5iniWRF68KhGp3Y6rhkS8pCDsIXFPk88L3fNV28FRk6Gun+pg1RhPfKgA4R2jxgvqsZ8GZ7yEzEFEEIWUfwe8cJ/GMwtd/w8JSH4TtGfaKAAAAABJRU5ErkJggg==" alt="Font8x13Italic font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font8x13Italic;
impl MonoFont for Font8x13Italic {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/8x13Italic.raw");
const FONT_IMAGE_WIDTH: u32 = 128;
const CHARACTER_SIZE: Size = Size::new(8, 13);
const BASELINE: Option<i32> = Some(10);
const UNDERLINE_OFFSET: i32 = 10 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 6x12 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGAAAACQCAAAAADSYTH9AAAHSElEQVR4nO2VC5IbNwxE4/sfevO6GxiS85V2ZVcl5R7ioQmC5EjaxL/++c36wxd8/foahcmWqqKkeElbn3fMF3xp8UvhISUdea2vX1uDW3OBKfD4Aj2s4sBE1ZMpnImFbQlf4tV5+WxFpM1LbZVphbanYmFbwrdy3rzZKLV3FmhMK5o9Mxwj0qSUC5IYcTZSG2XOg7YnclmxE7uo60g9TM3RGCtqPVmF6ax4lxWWZ1Ht0iJGzQSuFDfTogdG9lkzzpWGE2lBschnnui8+kH9veBRXJCfx79a/nIwJGbDq6c8yQXxXu5hQwZO2RGqpIwqycg2KcwaBTtB/7MjMxySs+BTeIlxjWoaRMgyuag5FikTiESThwM5aQeGIXEctFeFSUhSo7wpbPJMoMnD0VTWCawiPHkfzSHNe5d86NMZjgGFwCDCNYx6d5OchIbTkte5muMoJFGhBuNDF8p708ITqZ/xU12eb92tfUR/L3hU/82YACq1F/nXDGQC1ECSx5GYURGVFaEX52XCA8AMgAZT6UCkbSD+ojTw3UnSRZlkAInUfs8OREqXQvTDm0MquUAPi6oaLCBS6uTQdXkmNVPScTZIZFc98iS2QE0ygERqv5GzoJwDkdKlCD3fOp1kHB4AZgA0mMoU80Bi94Ss8AEwmmDxVbEX8y6ICRU8YQ8FBwVK5U1WWcb+Xv294FGfuMA/Zqk8P3HJvz4zkn92qBTPqqbXpEWtTAJJhiVo2WW+co4rsl9koNCZ4ZCqdU9JOTGTAYrKiebIJX9M7dJKvL4dyLxjZmKhsU8tb1O1HyZIeQovCJWqIyh2DjelW9V57MOHCnpc2QJVqhxu8gbE/1W2zdNXtHo1MEZQZFnWsjGG1tn3xD3bMbO31tlHxV2c/xsviP7ABbsf5dPi9HHDcNGY80cGpa6NXG60LFKxOiZT2uYYhtV5qCudV3WRn5xDMBRIIQnKE1AeQBJAZC1u4SmfSsu0Lv8mU2Z2SnWSFWFCcoUhp/BDO/QcWJzhyymJygqXZcgCFTMhUXGLnCJPnAikTM6JnIRUOiRXGHKKPHEisHgNPoFgH5KgPAGJ2iaPQX1QKprpGRXsO2LbLKaMWz0sL/IbrdLPea+n9R/rP3KBf4iBWfv5pEPvpdI5c2g3nbVvvVY6Zw5Nf/t4ZpsHUNNr0uINxELAsLo1pBviJeXETIZhHyjQSGohWTaZhogXk9Es0dQjt8aRBCOqLYORXMX2sYRKWgjp7TRziK8FUFfKoi2B4bhUFYDEGEGRHvuVQ9pTGu4Frc2eDcz65gWv977c+F398AJ+Ininp/VF/Kj6lcf3M/uD6KZ/W587x3up2hGUtJvp6FxUq1Klls8AjKje0WCfE/LcOJMXjOkCz7mZFMvJPLippGGRWdSyChilTcy3P88Qac6CnlQxSSDCMiwvMqxMMOwjAReyGiLNvaBUYI5Tj2ZQ1mLOYmabwQHkCkMKkeZaIPcAqoHiLhuoOmUAcjZ2F/CKFOgPeSgRRa+W8NPEnZOm1UqtXeMH9PED99pd4E+gb+hR7ixaseZ8AG6a9pcZ3imbuh9VwSQnIf2aGi1bY0to+tFaWQyt2JmWT2c4mkmGxfnjVUv7eRXCSiIjH0sTrPxUiFSeppJKzShfl8kBMKpM5+ZnqY89rNmdtWjd5axm1p6NZEuTo1Ql6IdK1A6qM01EKi8wIk2OUjUhXsmrfEyfhi0vR7oVPX5tH3GlXjQNJwYBJF2pg/ai7t19xpm2NSftUAK25Dup76DT4pUeek/O8lf2uvJVSc5BvpZouFIXnI2orbNhYTIx/XWClPHFRSmYWQ1JIEzFJN+Lnh+JFxaeSYIG6UgtpHOlPgVv+cRkhagMFnbe0xuy7Y7J7Llk5z3rAz6T74B2NhH4A1kRDqx3e2Iyey7Zec/a+sRkhagMFnbe0xuy7Y58Df4uDNKRWlDznuQ3pPve1Ds7/B/4u+otbOb1GJZz4PpRXXQ2Wstku0DfsmxWTUq60XUSXJSCmdWQBCbtpq9pf8ideEmDLYT9JSN7eokraiB3etILL5JvDcafko4cTDiz4AL2hoyCHlWOZADIMHgXLtMkj9au2JEnbs+EoArJL84QdJHWrsgo6FHlyITgCsPw5FV6S/kDE7ysuzD6DcYk/pJR/BV1yga9jNxb8t439Gb7+/o/XNBfqnOQP9houFIXnI2orbNhYTIx+eF1IzeULy5KwcxqSAJhKib5XvT8SLyw8BGCIxm4R+rTdj5n4kh/glcIaGcTgT+QFeHAuv+JyQpRGSzsvGe/wItkx5skvyG90pt6Z4f/Kt7VN7a8J77Z+t1+kzjb5/PxyQzLOXD9qC46G61logv8CQC2V01KW50EF6VgZjUkJZArtuSX9J0+XlLkMqz8DSO8+jHXVAtZfiupUP6KbPPRNENVXmClV8jxSDM9cc+s9Aorbj8rwxiexOfGpyR/SQsrTzqlTpHdPOOj0plBJcUH5TMX/guH0OzrNjThJQAAAABJRU5ErkJggg==" alt="Font6x12 font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font6x12;
impl MonoFont for Font6x12 {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/6x12.raw");
const FONT_IMAGE_WIDTH: u32 = 96;
const CHARACTER_SIZE: Size = Size::new(6, 12);
const BASELINE: Option<i32> = Some(9);
const UNDERLINE_OFFSET: i32 = 9 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 7x13 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHAAAACcCAAAAACCDXBuAAAJjElEQVR4nO2Wi5YbOQ5DJ///0dl7QVKlssuPdmd79pwsTAIgRUnlR8/k1z8/jH/zwt/xv3+VnmDLbIyNht7F71/bdO3sC1vQEEL887seUAvO8hZeXvibRPGESURB8ShMRB+DxW2ZasGLWOtrQRQiBsuWOb/7S7hGNKwO9FVcrFBCqwqqJXQsuahQNay2MoNEw2qwJudeCS4tLF/mzTdYU0HKQZ3tvQjVCmgwNgoR0UeopdA95uQwZI2g1I32LWfwqHvbGRMUi25cwJUk4+ogPnSH/oAbbpzB4j+NPnxhqw/3Q/g7LpxvOJ80XzClrAK7KUt/kaAKpPgC7muUrUksJYF20dRydJcAHIG6WvYGp56FCc1GFYKrAuGt2yx05n5hCJQWL1AS6jJDImYowceh0BExRbSH5NEs6kHZ4T7mIIRmMx1ivPsxOIgmYB1KIwRU84x0pG04xAERwA1YM6gFSgKEQ8077jpphHJo2YMKXAgzQqbSQAQIh5pPSOvYnrI4+wkgmyASkgl1F82Jmr0kpsvCXmD7syJ5vxCSyi59pKsSG6IrCRwawbSCsVHzjyCnNT/Dy4E/jb/iQr5vBYUtRvOTWHL7i8rXxY8WpgK4+gnTVDShFgf+2f7TVtEpIwYpg126KZkBhjAJ4VsgUx0bCLXFIGWhrupCJDPAVCQBl5kEvk6BDN4qMiUpC3VVFyKZAaZnQwp3Qbz0NqG4UJUIbAVU1nCaFhJYWU4dwwUYnXAnB1NJSFq+GOv9dmnAQl3VIcxWUzIDTM+GWqrFDQqVag+xNEhZqKvapZuSGWAqkiCyJltKdUUEKSMGKYNNjgeuDJGESYhIb1/CKgpXaUE1TXikrwCU+FQIzEiaMAlRgDVS4ggzFj+K/1/4VdQXNThXQX2XSPRBxTduJzzV/BYUMpNMYEFxEBsq9EhtvJPKJmbh0waSSAqUAMWBtoYLPdKts4iYImbjTKmClIVqghaAI5JBldCFiJgiZuNMqadaNENLwOEK7IMVF9gMU9mFUbnJV5n6ozbBSYoPPUwjO0GEo3hhAp25qBbh2oWBVejEYMzoQm9iAdNEKTTmIi7SwMkEtdQ8cpjRBa8QiNchqBFNLvIaDJykmCwCo8u1HMgVgi+tT5GqigVdeQ1c1FLJkmCfRdCu5cCpvlv9Mrz4OORUWVCu+qfwb1z4/Q/yK/Cy/cbdN44WX8PyVJsfrF79wC5w2147DqyWxmwsOwYlGoc747Z9MbdaywTnqnE0D3fG0ebzouIPTOlK8S8OocIEVDR5oSnlUkfMYjqRnoRv/yxcooPALSSBki6mInuyGwJli21AQfqaSZwCBX1eGpkAVdBEaOGAPLkRQGenXvKFtI9AImY1I5pQRAMBrselOgigFUJxJ0qUj8jCkscj0FlU6ChdgV0OAiiRLHYLSpSPyIHPbVdN1UJLocIEWwVZZSgFIj1sfvIvb7Y2bbA2X+OtoTPqSW9A8653ifem/iD++IX7W9394K7xTcwXeVKIKIw+wJp7F7NhV376vCjE6AOsuXcxG3YliYa+PmrZv3KEPoIWM4Iyh5CESagj8DF7KxvYSLLgEjZCE5EIEO6R6nVWxAKYEzAAL3iOMgvM2OnBOlSUFpdkjSB92WFTmRAVLN3KDj4LmnUCVilQQGQzNOHLzkwsM3yvB9hbb1vG40AEIsgwM+3gUoMF98WCljs94PBCnSQ4hhuiiOyKKatmkcIEWPfe6YFsHuz+TWRLqMDNq9j9wncv9NSLYx/jK7N/BB9d6C/oU2w7+wMd2dZsmUXyCioSReBrsMaifPvPREDbnmeZgcYsOphIFhXzi1V2pBECLSIt5zVmM1S9WHD6RFnKL48HtU0Jn0EvI0GLSItFBYUQEnfqQsQgSyGYgG5hkytxoEU4ygqM9gqVgata2FgFlsLAY3HAgg0KTHPdt5QhLUu8YiCZADtzTqnIOFFIVVgGx4ZVjtqHOCytKqwoZYRSAqOgT5sRHCxGdbxcDEbtQyMyYzBSCSIQMYgNFXrTjjRCYnRrAbbtS9TQyPG0gsFTfYHTyDI/hb/xwvMXVdVzXMx0q2TOCrCnmpmawgwN/PJPs4Xe3xK0b8GUKvmNE2KXNXuA++ZhzkgTSkCguOV0ljOGiMxqihMeXVg9mCCL6Sw5KdFXcB7GHqh9g/isx92ge3OMM4Q5EcoaR6ANS7ZVK8UgBZStuBppzI6GM7YYQiC3McQMSoNspL5EVjiHF+YSOZycmVFMCCFA6AUyCWXbc8zI0lyQitDJgY+Y1Xu4K+m+p1gTnKbZq/jtwifgmP4uoHeR0dwkrCioMC/h9BUe9Z/ikz2Nj+77zoWfwQvnUUv5vpqpK6q8Q1ZAaY3giaRojUj9o9Fu3OEB6i588VjGqKyhgwmTiIJiRFMJvYO+3T3I0CdwX057Q/YLEZL9j4X3Ct8KmzmAMAmTMAmTMBNQCvFMlrkXj4l9R+DZYA09kGXupfdz2kvhk4pKgOt1lzJDF8KF8FsyF0vPZZkLmUPekjahiAa6lWUupDb2/lfiO633W4w+lh65Fc/6MvIEH+KTrd+576MLv4XjQn72eXZTlNotpq7YYWmKUkfhmSzewAKsEAjAgcUdrqlIw42pqreYMIlok/AQvLX5RbjFE17DJ3OQHfm/BUrKSFaxT4QhrBskpKtrIYnR/UKOUuAXsoevNB8KSbRCXFRP7StC/VxIdmCwofb3QkDwVilHlZPKPhTSqdSh9veyiGQLngiRYd4v0tUjWTFk81KaTKH6HZaG37pwhGdV4EdSWQRULjzeryWXWj2VRlfPxAObAA9Cs4uvY475Kj7c9jn+Vy/00/8z8Jz5OkrrbJm6wiRuMa1Sd8A9GQKl25oGgQ7ucEhFqrhDlg4mTALcMGH2hT8JLuw/mrckf9OreiyofCEx2NKXwh8vrzaPZehC+FgxXxE2BVVdC6vSldT+9WgvhEiCZwLa3YvH8FxvircilE9l6Eo4BMK9JTJC+VrQK2H16/DaT/HJ1u/c99GF3wIX8sDET8Gb+DE0e7EpSu0WU1fcYlqljsIzWVzA57+l9Wsma1YHFne4piJFB6pcTJhEtAmguRBgv4Jj/HBPcBrC+/fh3eiqnkqjqpzG5qfC6Jo01P7MpnomMI4Kz4vyhdR4V4b6vkAidaj9nXgPhYF21WtfkEoICbW/Fu5Rjira5XtS6V4oTeylkHodk11F6dCYLtVTaXSFcMy1CC7kSieFF36IPu8ZMsJ1iiYk/9fAO+Ot5SZQ1X8APjRnDfNlAFkAAAAASUVORK5CYII=" alt="Font7x13Italic font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font7x13Italic;
impl MonoFont for Font7x13Italic {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/7x13Italic.raw");
const FONT_IMAGE_WIDTH: u32 = 112;
const CHARACTER_SIZE: Size = Size::new(7, 13);
const BASELINE: Option<i32> = Some(10);
const UNDERLINE_OFFSET: i32 = 10 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 5x8 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFAAAABgCAAAAACvAKrcAAAFdUlEQVR4nK2TC2LbSAxD2/sfOvsewBlJtmxnm0AkCH6GGrnp3z8P+Pr7RU0Ptvgu1rwHs6tkg4jLhCH9Hb7+rgEnPc0qgJiUh1zSw7X4E54XDrHTvF7Wy182ox5BbVWRAatc5tr1UBAVsCP9klbwCCQRuyIrGLKhJisq5NMNKwUp1nMiiegKPWgaKMaxeEl4hST6vyMriiUn/B5YmN2SPwhXh02rjpqqdAaDQSPMoN/uoIFyvClCAh0qpUtJJhuoHKANqynKzqXEvYZSR5LWUDsnHjOQRWU8DeQxQxnTx9pYoLKhxpnxFKztiMdCmo41cgohJ8tLUAFBSXXulbmpbcJHbWpxeEU48V/A2VtQ7gt/C3yj5lI+mc1bQeefIWMoJaSWODBKoxnJALnqSpkOJcXi0i54mi0YDxWYpwtpEOhDKw2lhhOrqR0J7jBEeS0ERHKImbOSOI7C4hI16UjWSPkFZTqUFNNHh47uXoivIrdEQlFQZkBqNWpGPScygpLo8o9CT/o1/OauwGuz1FseCqJB5Ybs0TUkTYFYjURk8Expx54JjmNxQWwPGJMMaQCOHcRF6CSB66Udgn4yw4yqpgvFShSIeBrx0oULj1hhHEWULCxjTRaqdwOUG0IDB0HO1VDkUJREcnkT/TDp5KrBlnR+Cjb/4X/KL4Mbr51biJ0sMbEhvwDwB3kAhfaOGKxkHUVAByadcMIU+Hz+oZP51khFT6hSgiGCr8F0SqS7SwDWKUOabko/g5NCaQFSGwqMhwSWrO46pOmkDEVCnEZgcZmI6WpnYYmiyBE8krpSwQQlA0JLg0SNkaodTbo++QMcPIOzL/Gm9QHrLSsOLsnGuysMOhKW+GwYlB8xzbfIjFSXBb+nS+Up8glmzNR8Uo2WGIIoUpZHFzMTamKXqMf2QyOFtE6kH2COQdagiILIcR4Ctp81R61TIBxaYBIGnIjKOTKcLhz3SZDoWwxRMB5wJljxPR5O3+BTPy/iTvCBd2tXx3PFUka8BvhWpCeiHtAGdOk45/Q1Zo4usJTC5BuO0AAhYJYqvhWM28OEm5acBQUNDFECdDuiQwY9xG4YeIgeKUxJFYpTIOCCbGbk9KnSJgNlgg+iMgEiOB8ZAjZYwRQK07FG6ihxkgXtYDfKg9X9CS4rXDg3fwVHSiDxoACxk3yuoR7wKc4s5HUzp1h0ML9Q2np4jpAI9qW9kSQEEnMeGyBMPAjjiw3MUkccWAmt9KFMwKkmCCRF0nYAFa3jUyOlXUodAdRkRDuvQBfD5SekzGYXbGWRXFAYtTElhwhgxTOsdVAlGe5B84LH/AW8JyGcX8QbkWimB6gA2QaxJgz1fHJl3AohIGp5J57CcqyIkPY5mvhLylyMzfyDX4mRJ+qBN5TA6TuacKG+6gNlFvFIVJ/I6bnGPSVgUA6cacKFHMz0K8ovGedOD2Txkdz7Eez+NmaUm/CCJLJXQvgQKC40kR0hYmhpQAnKyTbGrRBOcA4+HCsQGPAl0YoJtyTY74uNT8SUb0PxPwVd5yG/I6Y5w1TTB8J8VlDwglfTIYzHgdEX8l01n72QoLilHnm/kG9M5M9GHYmye0cMOIa6IdqakaD4VczyspTXBH39Rsvy0TADBl0QlDrUZykC1Q2z8RgE7wYsoV/huMj/AWvfEY5Cmo54T1wyN31BzkSiK89klTdeiA7hDXkO6eYnIsgXYtqPekfh+jPBV6L6DfDm78KPYjqMAbmvt/aAVuSMFKYbXTNnpXErCdoJZuPnAV3i7zDlT/g4cgwo2BmRr7ijwBnOMXxHNAkYjo58STPCbvUHKr8nrucFSdBPRIcW7ox/NkymjKFuqU1V+Er2JJoqxQ/hO9kFVP8BKa5wrNLdmzEAAAAASUVORK5CYII=" alt="Font5x8 font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font5x8;
impl MonoFont for Font5x8 {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/5x8.raw");
const FONT_IMAGE_WIDTH: u32 = 80;
const CHARACTER_SIZE: Size = Size::new(5, 8);
const BASELINE: Option<i32> = Some(6);
const UNDERLINE_OFFSET: i32 = 6 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 6x10 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGAAAAB4CAAAAAAodqgSAAAG5klEQVR4nN2WS4LbyA4E39z/0PMiMgGS+rXUdtuLSRUCCRSqSKm98D//+8M6HvCv7t9/TCWp6FKXZHygf/855nKiVw99urYfdqnAhdd4rqcP4Gbujt2OQGGAzLMRPBMbxxZ+1WM8B1IsRuvJjDBUEujqqXCsyqLaoTznAKlaY5528Ki0jXv1GM/ZqylZq1p5Dcj8yo7hHaOWUW40JVieNY20xp0urXk5HPmn1PtGZ3GYP6W/8YB+nbC/HYZEdXr+CARKSkPeiQNVTWY4kL+fN2AbpS0zmqTRLhvLTUrbmFE/9FGySAucj7HnImA2fMchXYJ8FKGj5gTsLqR2xfEhIzq09DRSEMPglKVxjBmF8kqUUu9jdGcsT1kfpywCT5pJjdIQLKJ0JScu4NqmMDotxqJPLi1o0YT1QwLWC9gk0OY1JtbvyCuKV/pq70f0Fx5w+R2xelLaw9u/TeZIehyJio40G2U2d5sFIAvALoBOtrOBSMdCcmdKE/8iIEUXUKT199xApE4ZkKuyqO3kAf1YsJ8tJlsymk6Zvp5iKpPXxSBOXD7WJAfmGD1qNhBp/UFGoS6BSJ0yytTHZJImkQVgF0An27nEdSG5MyU78+oEFj+deNl3QRR08EQ8FAkatMaH7LKN/bP6DzyAH8qnkPKjQVM9u5avyUhHAR0UTxLRjj7wGq/IeclKrjQdiOJaX6nMjStZYGhuLJNZCZWvua3669ecYKsgkdm8MrhPqxxp1+NmZb5ENsSkmSiGm8tDnabbD8Z1H91imNXOEWjSmOBUDqD8LHoiPg6cvlFOsMM2iQLRO/zqUrH9o+JZ3P/Dlz6KB/z0m9/Ky48nHGZ01vx1odremcedIze6ac7ooaPGsKLNp7az+Vbb5C/iJXmNeOi7jScgQY8NPUBkN49IecxA8qptSC+khHp2Kcevo4notBFY8clMaxDljrak2bCdaZKgEzYUHSeQHap86iRQLa5PhpRQNXFTOxuqkxbCyk+dBNEc5s74kBrGExETWOLo33bIRNkO9lNxJHERJetLvdm+Ud7oVnxN+JXe7f+2Xj/g3Xe/Kl8kB+K0rGjSE+3EB+poafLPTIoweSjArYdAWtJ/QWYY0i5CglUxZEWNYdWrJNHOkhXEFx5DWjTVymm2TiJeDJNCENkSJHIiF5FVUjA8db41Bo5wWeIYAJMYyDRLNZWbVowxSzsOo6VN0ArsGoDESkgnHXIKEHVXeWZ0uvdy1nivX3wAb3r/qi/02dRV/DKfHeIduP+YvZ46f0i7G7SP/vpJ97q0N496Uf+UynyNlZ7oepA9Q02aBk+mUwuI+UEEm7CyTQ/Yi6O7cnIbk2jimJftbhIRjlV5r0VOzKI93F01yVFDkgAkO+8MNmBFbp7epSZx6mjhRjbmEHB1E4eGbSFrggYdDfUpK0NNmkYO8Bja1hvLMzu5lqLjpy67m0e3c+rSuRz7XN8/8U3dPcAfB0hy+EIPm22EuWWES6/CspA0RjzSyYvmjkmoriQnSW80k8CkHVtxf4cuSg1YVc2VEa7VlbeZN2BOd1VqwK6IR+UkyVJcoTeF084IwVeCutXMsAEYyEtQQj22G7iRxaPsNuQrsccie11HtWbSKNWDdq6nHpQ3ZtNFNUNQSyID5SSjD2IsbbLpqdhhkYlkalL49n/XDD/qaTNyJ9eSBJr0He3Rz8QXmfnkRRZIcattJA9MhkoekDAQhWkB2xWFnu/Nvz8iLvaOzBDDOaVyguINPcAipBnccCfvmQM99iW70CtuvqeHMW/J2/A6qOaR7IgHzi/4nlzEeA894+Z7+naYt+wipBnccPM9c6DHviRfwi8SQ3qkGw7fk/y5eNC39Z0jv3L/+YD+EFMmB5Ac3Gjr5EESGZzKL0hrtoOynfIqDthpO7yZ0Rook74dC8C2XjLK1MyXNJF5O4wzX59sK0zrLRmG9V+TxdXE8ZmNp2QN/Nh5JEvMN8Ams4QPcugVJ3Ik7gnPCFnBbn9NlsgRzFOeUfI32K9jwWn8K7JvxL+kEwnpy8T+Sf2FB/T7+I0WsFmdbrSN5KAa21QqTIuwNz/yRm2E3S1JC2gOyV+Lmd8SP5F4TxKsf0HwSBbuPfm6zpJesPHIvNdHNKNYnoZuyA7xyHn+W3YR0gxuuPmenMN9Qt6F4NT3SP5cvtF39Z0jv3L/+YD8uqwoOYDNd9pmcvBC/LDZnXuCsp0rb9RG2N3SDKJ0qOYRH4j5t3LGWPGzBP0m8S9Z4fnelJx5RUfI+qNlY/wrcixXMwztfMBJn5DrkZWfuie8Ph5vavGWE/tdMY80sstMvcn+tvAvGWH1OfyEiAfAWHzzj8lH8ABoNsCPipfny/TW+P8DSI2w05KAiUUAAAAASUVORK5CYII=" alt="Font6x10 font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font6x10;
impl MonoFont for Font6x10 {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/6x10.raw");
const FONT_IMAGE_WIDTH: u32 = 96;
const CHARACTER_SIZE: Size = Size::new(6, 10);
const BASELINE: Option<i32> = Some(7);
const UNDERLINE_OFFSET: i32 = 7 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
/// 8x13 pixel monospace font.
///
/// <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAACcCAAAAACSQfz3AAALPElEQVR4nO2WW2LbxhJEb/a/aN9zqnseIAEKFBXbHyn11KN7ZgBRiuJ//veHsb/ArySYwkz8Mv1y73EwU0zoXfz6Zz9UV8DUroBn/8N7pDExUmvLO+DIfoZYTE1l8WyIt0DYMNGxZRneNBj5GuzYtxCLKSTQHy90oOqiLcsc97+AG/Yt5gmfA3gBiAUyl1wTW9C6HwwzZydwuzWRxgSJUgGKGeya2EJZDrQAXeEx0zC4BqrTqP3paNEed5rotijLBkUTJZc5oqehM3gMOM8vIQ3EpYUm7ARlYAqjg9U2jfRDoLgwegUSFdFQvkkpa2GmMv4KuhYeXoBIrs26hdErkCgV1AFsCDCYcARanvE4SB7v2PISY+8l6kbpDJeD4NXst+BveAF+qDCoz4poI6IBDlbWbTmm2+pd9H4vqF82jBIfAqUZlAXTADw1NRvP4HCgfQTKQQ0rx7VFQ2pQ3lQaEKiDVhdeukxQKQydHRxuMoUI1WZB75KoqOS+pZjIxMq47WAzKFMX6GuF2C+cm8IgIxZkR546RbSNRLmQEGpeGkcpBIDhntEIT2IACA9w9ojqhbkQDjUPEdi6t1vJ2EXFITA08KTx0Ay6E9kuPHChfUsMFW0qDoHSYrVMZRIoJzf86OrSHoRANFTikrhnhZ4VDWFHVBPFlBZmiqF6HwJ8gPAhwIEjdOShGBkBOBUBPZi6u93HUf8Wcv+XuLPnX8Xf9AL8EEkwGKZ/FaY+/47kg24yCxz78BBFJzZERVmAP0TDbGqPjaVjUcxrLKIQFR2tAEctBVGIOigmQKsxFvWcwUEhKjpaAY5SsgTPoVeLgNKHYmyqGEMWxTGgYVGEiuKgoWaBq4qloQY0adCkbOYF6EnVYAGFLDSsbmNBFALJ6WBlgbNvxMLR/JM6/Sh9SMM/Op0F2FoUEWhYFKGiOGioWeC8z4iFW6EUIzRZ0y9QjbEo9gENiyJUFCo3VT/ULHBVsTRaIQopxdQLMIeqMRb1nMFBISq6tUKUkiWiEIWUYnwBXXxRr/TRsSh21RYRhajo1gpRS7dPmDqol4vZ6KgHc9zqnJlIwz65yRxBu8X+AB25lAX8Q/SH8d8L8DMRvEdM/2jQMkPdF3+pY9X2eIAwDjqD2RIcIELUqbInPIg61bmE6gItQXneRGnMC6hTZU94EHVQFzU9+w++JYiHKFKhAkQ9ayF2EHVQF5VtXMfSoSEwFGhdRYU6DFGtIh6gOmSSP0GWdpAcHUYpBtNsbmE+MEMoGRuQB09iUUplSAZJjwKmm2ZDLhNRiFLogDLhSSwqMg0CSoqXgmGH7hiHawhRSlZR8U7F5y8QEtOA9i0HjMMOuVkiQ1SUXninZnG+T0wD2rccUBeI3JhvSK8B+Eoy0PgrCOhSSnx1pKAHA/MIfMSxc7bju8ibbNc95EQaq/OH8De8wPFzP6bGoemHt+JhtGH12T/sCRytveAQBvam3tU4tTgKE2z2GRntO3Y/sTd3D7a4LI7CBJt9xhrxSRnZjaMND6XjACRDIDkjXLV20M8CJTBVgmLA478Jx/SgSinGgFnWGUQvLtDRoghGlJ6gSaJD2dz+M2QC5oAixoaoVpE75ipqFjj3dSSF8qebqtAKF/AhaimIQl4I+olpzVXULHDu70gKffoCqBimn5AYaha4KnzSIuqgcAHPchCzqVKKMWC0NFmJoWaBoxRCySDqoHCDG+2maghK6XgbSIZA58SmPVNR6bRvGyF9D95RdERaoXu4vfEJ/R084ap/gfs7/yV8/QJvfJwP8LPwbN0w0gNOWg+o49/APHg0vAi/gZjCNJfIqe9gHjwYni+6g6FJjywnUJ1boRaI+kKDChhwMHzvbOkG8IAtVFFjUDZBZJBW59fK5gd50gPmQWqpGKY1AlEHdVFK5cURUBms1kIOMzlq0Nu7EYGoofmfi6H4QGKYRz1g3HHURnyoRWJRkWlKWCE2ihdmA4fpMqGQUhGVelFIaHgRT0jDQNEW0z2bDR7wqFOdRRuQ6bA0KFJDDS08pcTTiTIVZAIGPJsNHNiah/AecjQ04busfEwTP/YCnAXvH3//xE8hL7z/g8TWnojMWQ2nrnKg5yRKBa3IJZxvG5ZzVIm/lsDkGtC7yhX3opTqgZZTZBYqTDMx3rA3RaSxKg2e8I8Cr+Ie4IgNFOaAua8xDch2iD1l6CEBXodoWjoHJI5x0LIPUUyOcI9f2GAakP1QplwIcDZUTPKATVeDKZ6jGC0jig7JPMDDiexrTAP6QE8xMIpk4ff9zl0Nxhx0YwGfIX2Ai+FI+mvfNMCJxJ4y9KIURgdP9caYgENEDiIYhMYY0x5gX76wwTSgDvQFKKQE3ijs9QQsVzbUwFMYsJ5nu6kwDRjtbPd5pXCoeajsGvDAFm3MN8iNjYd9yznaEh8TvHfmvNV5zEe4vIHrL2c/id/ykFf4G16AD1sZICYNBf1DbzBYow1jF/MyhdWObtN06jfZNeBvn2korrXADfx6750C/TTRdgExYeq8lhamHu7qBmhtmVowccneajgBjlgkyhx+1gaBd1M0u2xGJEAU9/OEJzgBXqcjUlEaSx8M1chBctqAp7QLEiDKEcEET4zIRTFkSsGrAKkmPTowNZEGXF1UGchQos/NBJw8kD5rgbmPo61b6h3sDGUNVOcCGTZxDXoBxmxgsZnC4Eox0eT2oTvI/hDy6gUG2Eth4oaBFTLsSifgtcDKD2DMhTW/8wLcTWG0de6YTaxq3YCnPCceXoBr4FeoHV6h6tCKmHvImQZHt8A3stLbuH10f4Ejrid38M7Zz550Aa6c97YZHzDRz1ZJg0CRBR5GbMigDQOVRJWBRbvDhnbwJhRaMnOcBEp9CVzzJtTSOHgqQg3FvAfOUFHS5+CWfC/gnvJgKkpPDWgGT1o7kAv1YwQ0bioPpowQ+ErBsKc6b6NuqsiLoGnB1wqGPVUuhCHqrlKx3KEC7JWCtfVEuS14Qy1PwziKtvlcwbCnWhdA1F2lchzkHVDjuYJhT7Vug6i76qMJJilIEzxp7UAuVPc98FeR+hgfXJEP4GOc3DEvbsNH1Xr8f8PPYLuJa31ADLwJhZbM/ITZbdMvTqIwsfAAc/LqjAd8C3nzPr+EWhrXJDQshqKbIg3yVzrwmL9Atrufh7tCLipN6m3l0ijmUseiZqb4klleUlobqLuaHx418qlC3E/F2nfxAhiJGjo2vFYWUG6/wNivjaH4kllWiNj5pUrF987x9DCnQEyeG3KlmKAZUi+VBZR3XwASGlbIRSGlmDcUEx35XPuRRULDYoOIH00QD67VraHOX6oOCQkMxdeH4Bf7I3x4/HO88wJ+Yj8Orpz3tuGH1srvd0kaBOoZNQVt+jyJKgODNsd5W3gTCi2ZOQ5+RveXUEvj4E2oqbhvgm+kvpXPwMcKI29o3pwouUD64EQ1yoVSwIvEDe2SaKQVok51bAGn2t/BG5qDP/gCYYi6pSygkEEijStlra0nykYYou4qpYVxFHeZzxUMe6q5wMveUCuBiHALRJ0qm4ov1Btq932NgBL6TK415lp134MfwvdPT3xwRX0An+In7vgIfI7jZwrnM0Vh0IafVWvmSjUeMLtt+hyJwujgARLFF07mB2oWrUsotGTmZ3R7CbVUMxlgKL54PvwdeL7hR3MP26EgySaLVyHCt1SndE6AqK+UE2u/TswulQH1UjkTaD2rYm4oJlp5GuotZQlzfjuo9Km3dBrqLeXhAfaPvUAYuv0CfGhRTNQcg3tb1wvAOAq51hBBwbsM45dQyW0aEA9e6MDIqBdeqkBTHc3tvo+7fwF4VogixYbg3wS+b79znomCzv8HXycyJOmCxa8AAAAASUVORK5CYII=" alt="Font8x13Bold font">
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Default)]
pub struct Font8x13Bold;
impl MonoFont for Font8x13Bold {
const FONT_IMAGE: &'static [u8] = include_bytes!("../../../fonts/latin1/raw/8x13Bold.raw");
const FONT_IMAGE_WIDTH: u32 = 128;
const CHARACTER_SIZE: Size = Size::new(8, 13);
const BASELINE: Option<i32> = Some(10);
const UNDERLINE_OFFSET: i32 = 10 + 2;
fn char_offset(c: char) -> u32 {
super::char_offset(c)
}
}
|
const UNDERLINE_OFFSET: i32 = 10 + 2;
|
series.rs
|
use crate::datatypes::DataType;
use crate::error::PyPolarsEr;
use crate::{dispatch::ApplyLambda, npy::aligned_array};
use numpy::PyArray1;
use polars::chunked_array::builder::get_bitmap;
use polars::prelude::*;
use pyo3::types::{PyList, PyTuple};
use pyo3::{exceptions::RuntimeError, prelude::*, Python};
#[pyclass]
#[repr(transparent)]
#[derive(Clone)]
pub struct PySeries {
pub series: Series,
}
impl PySeries {
pub(crate) fn new(series: Series) -> Self {
PySeries { series }
}
}
// Init with numpy arrays
macro_rules! init_method {
($name:ident, $type:ty) => {
#[pymethods]
impl PySeries {
#[staticmethod]
pub fn $name(name: &str, val: &PyArray1<$type>) -> PySeries {
unsafe {
PySeries {
series: Series::new(name, val.as_slice().unwrap()),
}
}
}
}
};
}
init_method!(new_i8, i8);
init_method!(new_i16, i16);
init_method!(new_i32, i32);
init_method!(new_i64, i64);
init_method!(new_f32, f32);
init_method!(new_f64, f64);
init_method!(new_bool, bool);
init_method!(new_u8, u8);
init_method!(new_u16, u16);
init_method!(new_u32, u32);
init_method!(new_u64, u64);
init_method!(new_date32, i32);
init_method!(new_date64, i64);
init_method!(new_duration_ns, i64);
init_method!(new_time_ns, i64);
// Init with lists that can contain Nones
macro_rules! init_method_opt {
($name:ident, $type:ty) => {
#[pymethods]
impl PySeries {
#[staticmethod]
pub fn $name(name: &str, val: Vec<Option<$type>>) -> PySeries {
PySeries {
series: Series::new(name, &val),
}
}
}
};
}
init_method_opt!(new_opt_u8, u8);
init_method_opt!(new_opt_u16, u16);
init_method_opt!(new_opt_u32, u32);
init_method_opt!(new_opt_u64, u64);
init_method_opt!(new_opt_i8, i8);
init_method_opt!(new_opt_i16, i16);
init_method_opt!(new_opt_i32, i32);
init_method_opt!(new_opt_i64, i64);
init_method_opt!(new_opt_f32, f32);
init_method_opt!(new_opt_f64, f64);
init_method_opt!(new_opt_bool, bool);
init_method_opt!(new_opt_date32, i32);
init_method_opt!(new_opt_date64, i64);
init_method_opt!(new_opt_duration_ns, i64);
init_method_opt!(new_opt_time_ns, i64);
#[pymethods]
impl PySeries {
#[staticmethod]
pub fn new_str(name: &str, val: Vec<&str>) -> Self {
PySeries::new(Series::new(name, &val))
}
#[staticmethod]
pub fn new_opt_str(name: &str, val: Vec<Option<&str>>) -> Self {
PySeries::new(Series::new(name, &val))
}
pub fn rechunk(&mut self, in_place: bool) -> Option<Self> {
let series = self.series.rechunk(None).expect("should not fail");
if in_place {
self.series = series;
None
} else {
Some(PySeries::new(series))
}
}
pub fn name(&self) -> &str {
self.series.name()
}
pub fn rename(&mut self, name: &str) {
self.series.rename(name);
}
pub fn dtype(&self) -> u8 {
let dt: DataType = self.series.dtype().into();
dt as u8
}
pub fn n_chunks(&self) -> usize {
self.series.n_chunks()
}
pub fn limit(&self, num_elements: usize) -> PyResult<Self> {
let series = self.series.limit(num_elements).map_err(PyPolarsEr::from)?;
Ok(PySeries { series })
}
pub fn slice(&self, offset: usize, length: usize) -> PyResult<Self> {
let series = self
.series
.slice(offset, length)
.map_err(PyPolarsEr::from)?;
Ok(PySeries { series })
}
pub fn append(&mut self, other: &PySeries) -> PyResult<()> {
self.series
.append(&other.series)
.map_err(PyPolarsEr::from)?;
Ok(())
}
pub fn filter(&self, filter: &PySeries) -> PyResult<Self> {
let filter_series = &filter.series;
if let Series::Bool(ca) = filter_series {
let series = self.series.filter(ca).map_err(PyPolarsEr::from)?;
Ok(PySeries { series })
} else {
Err(RuntimeError::py_err("Expected a boolean mask"))
}
}
pub fn add(&self, other: &PySeries) -> PyResult<Self> {
Ok(PySeries::new(&self.series + &other.series))
}
pub fn sub(&self, other: &PySeries) -> PyResult<Self> {
Ok(PySeries::new(&self.series - &other.series))
}
pub fn mul(&self, other: &PySeries) -> PyResult<Self> {
Ok(PySeries::new(&self.series * &other.series))
}
pub fn div(&self, other: &PySeries) -> PyResult<Self> {
Ok(PySeries::new(&self.series / &other.series))
}
pub fn head(&self, length: Option<usize>) -> PyResult<Self> {
Ok(PySeries::new(self.series.head(length)))
}
pub fn tail(&self, length: Option<usize>) -> PyResult<Self> {
Ok(PySeries::new(self.series.tail(length)))
}
pub fn sort_in_place(&mut self, reverse: bool) {
self.series.sort_in_place(reverse);
}
pub fn sort(&mut self, reverse: bool) -> Self {
PySeries::new(self.series.sort(reverse))
}
pub fn argsort(&self, reverse: bool) -> Py<PyArray1<usize>> {
let gil = pyo3::Python::acquire_gil();
let pyarray = PyArray1::from_vec(gil.python(), self.series.argsort(reverse));
pyarray.to_owned()
}
pub fn arg_unique(&self) -> Py<PyArray1<usize>> {
let gil = pyo3::Python::acquire_gil();
let pyarray = PyArray1::from_vec(gil.python(), self.series.arg_unique());
pyarray.to_owned()
}
pub fn take(&self, indices: Vec<usize>) -> PyResult<Self> {
let take = self.series.take(&indices).map_err(PyPolarsEr::from)?;
Ok(PySeries::new(take))
}
pub fn take_with_series(&self, indices: &PySeries) -> PyResult<Self> {
let idx = indices.series.u32().map_err(PyPolarsEr::from)?;
let take = self.series.take(&idx).map_err(PyPolarsEr::from)?;
Ok(PySeries::new(take))
}
pub fn null_count(&self) -> PyResult<usize> {
Ok(self.series.null_count())
}
pub fn is_null(&self) -> PySeries {
Self::new(Series::Bool(self.series.is_null()))
}
pub fn series_equal(&self, other: &PySeries, null_equal: bool) -> bool
|
pub fn eq(&self, rhs: &PySeries) -> PyResult<Self> {
Ok(Self::new(Series::Bool(self.series.eq(&rhs.series))))
}
pub fn neq(&self, rhs: &PySeries) -> PyResult<Self> {
Ok(Self::new(Series::Bool(self.series.neq(&rhs.series))))
}
pub fn gt(&self, rhs: &PySeries) -> PyResult<Self> {
Ok(Self::new(Series::Bool(self.series.gt(&rhs.series))))
}
pub fn gt_eq(&self, rhs: &PySeries) -> PyResult<Self> {
Ok(Self::new(Series::Bool(self.series.gt_eq(&rhs.series))))
}
pub fn lt(&self, rhs: &PySeries) -> PyResult<Self> {
Ok(Self::new(Series::Bool(self.series.lt(&rhs.series))))
}
pub fn lt_eq(&self, rhs: &PySeries) -> PyResult<Self> {
Ok(Self::new(Series::Bool(self.series.lt_eq(&rhs.series))))
}
pub fn as_str(&self) -> PyResult<String> {
Ok(format!("{:?}", self.series))
}
pub fn len(&self) -> usize {
self.series.len()
}
pub fn to_list(&self) -> PyObject {
let gil = Python::acquire_gil();
let python = gil.python();
let pylist = match &self.series {
Series::UInt8(ca) => PyList::new(python, ca),
Series::UInt16(ca) => PyList::new(python, ca),
Series::UInt32(ca) => PyList::new(python, ca),
Series::UInt64(ca) => PyList::new(python, ca),
Series::Int8(ca) => PyList::new(python, ca),
Series::Int16(ca) => PyList::new(python, ca),
Series::Int32(ca) => PyList::new(python, ca),
Series::Int64(ca) => PyList::new(python, ca),
Series::Float32(ca) => PyList::new(python, ca),
Series::Float64(ca) => PyList::new(python, ca),
Series::Date32(ca) => PyList::new(python, ca),
Series::Date64(ca) => PyList::new(python, ca),
Series::Time64Nanosecond(ca) => PyList::new(python, ca),
Series::DurationNanosecond(ca) => PyList::new(python, ca),
Series::Bool(ca) => PyList::new(python, ca),
Series::Utf8(ca) => PyList::new(python, ca),
_ => todo!(),
};
pylist.to_object(python)
}
/// Rechunk and return a pointer to the start of the Series.
/// Only implemented for numeric types
pub fn as_single_ptr(&mut self) -> usize {
self.series.as_single_ptr()
}
pub fn fill_none(&self, strategy: &str) -> PyResult<Self> {
let strat = match strategy {
"backward" => FillNoneStrategy::Backward,
"forward" => FillNoneStrategy::Forward,
"min" => FillNoneStrategy::Min,
"max" => FillNoneStrategy::Max,
"mean" => FillNoneStrategy::Mean,
s => return Err(PyPolarsEr::Other(format!("Strategy {} not supported", s)).into()),
};
let series = self.series.fill_none(strat).map_err(PyPolarsEr::from)?;
Ok(PySeries::new(series))
}
/// Attempts to copy data to numpy arrays. If integer types have missing values
/// they will be casted to floating point values, where NaNs are used to represent missing.
/// Strings will be converted to python lists and booleans will be a numpy array if there are no
/// missing values, otherwise a python list is made.
pub fn to_numpy(&self) -> PyObject {
let gil = Python::acquire_gil();
let py = gil.python();
let series = &self.series;
// if has null values we use floats and np.nan to represent missing values
macro_rules! impl_to_np_array {
($ca:ident, $float_type:ty) => {{
match $ca.cont_slice() {
Ok(slice) => PyArray1::from_slice(py, slice).to_object(py),
Err(_) => {
if $ca.null_count() == 0 {
let v = $ca.into_no_null_iter().collect::<Vec<_>>();
PyArray1::from_vec(py, v).to_object(py)
} else {
let v = $ca
.into_iter()
.map(|opt_v| match opt_v {
Some(v) => v as $float_type,
None => <$float_type>::NAN,
})
.collect::<Vec<_>>();
PyArray1::from_vec(py, v).to_object(py)
}
}
}
}};
}
match series {
Series::UInt8(ca) => impl_to_np_array!(ca, f32),
Series::UInt16(ca) => impl_to_np_array!(ca, f32),
Series::UInt32(ca) => impl_to_np_array!(ca, f32),
Series::UInt64(ca) => impl_to_np_array!(ca, f64),
Series::Int8(ca) => impl_to_np_array!(ca, f32),
Series::Int16(ca) => impl_to_np_array!(ca, f32),
Series::Int32(ca) => impl_to_np_array!(ca, f32),
Series::Int64(ca) => impl_to_np_array!(ca, f64),
Series::Float32(ca) => impl_to_np_array!(ca, f32),
Series::Float64(ca) => impl_to_np_array!(ca, f64),
Series::Date32(ca) => impl_to_np_array!(ca, f32),
Series::Date64(ca) => impl_to_np_array!(ca, f64),
Series::Bool(ca) => {
if ca.null_count() == 0 {
let v = ca.into_no_null_iter().collect::<Vec<_>>();
PyArray1::from_vec(py, v).to_object(py)
} else {
self.to_list()
}
}
Series::Utf8(_) => self.to_list(),
_ => todo!(),
}
}
pub fn clone(&self) -> Self {
PySeries::new(self.series.clone())
}
pub fn apply_lambda(&self, lambda: &PyAny, dtype: Option<u8>) -> PyResult<PySeries> {
let gil = Python::acquire_gil();
let py = gil.python();
let series = &self.series;
let out = match dtype {
Some(0) => {
let ca: Int8Chunked = apply_method_all_series!(
series,
apply_lambda_with_primitive_dtype,
py,
lambda
)?;
ca.into_series()
}
Some(1) => {
let ca: Int16Chunked = apply_method_all_series!(
series,
apply_lambda_with_primitive_dtype,
py,
lambda
)?;
ca.into_series()
}
Some(2) => {
let ca: Int32Chunked = apply_method_all_series!(
series,
apply_lambda_with_primitive_dtype,
py,
lambda
)?;
ca.into_series()
}
Some(3) => {
let ca: Int64Chunked = apply_method_all_series!(
series,
apply_lambda_with_primitive_dtype,
py,
lambda
)?;
ca.into_series()
}
Some(4) => {
let ca: UInt8Chunked = apply_method_all_series!(
series,
apply_lambda_with_primitive_dtype,
py,
lambda
)?;
ca.into_series()
}
Some(5) => {
let ca: UInt16Chunked = apply_method_all_series!(
series,
apply_lambda_with_primitive_dtype,
py,
lambda
)?;
ca.into_series()
}
Some(6) => {
let ca: UInt32Chunked = apply_method_all_series!(
series,
apply_lambda_with_primitive_dtype,
py,
lambda
)?;
ca.into_series()
}
Some(7) => {
let ca: UInt64Chunked = apply_method_all_series!(
series,
apply_lambda_with_primitive_dtype,
py,
lambda
)?;
ca.into_series()
}
Some(8) => {
let ca: Float32Chunked = apply_method_all_series!(
series,
apply_lambda_with_primitive_dtype,
py,
lambda
)?;
ca.into_series()
}
Some(9) => {
let ca: Float64Chunked = apply_method_all_series!(
series,
apply_lambda_with_primitive_dtype,
py,
lambda
)?;
ca.into_series()
}
Some(10) => {
let ca: BooleanChunked = apply_method_all_series!(
series,
apply_lambda_with_primitive_dtype,
py,
lambda
)?;
ca.into_series()
}
_ => return apply_method_all_series!(series, apply_lambda, py, lambda),
};
Ok(PySeries::new(out))
}
pub fn shift(&self, periods: i32) -> PyResult<Self> {
let s = self.series.shift(periods).map_err(PyPolarsEr::from)?;
Ok(PySeries::new(s))
}
pub fn zip_with(&self, mask: &PySeries, other: &PySeries) -> PyResult<Self> {
let mask = mask.series.bool().map_err(PyPolarsEr::from)?;
let s = self
.series
.zip_with(mask, &other.series)
.map_err(PyPolarsEr::from)?;
Ok(PySeries::new(s))
}
}
macro_rules! impl_ufuncs {
($name:ident, $type:ty, $unsafe_from_ptr_method:ident) => {
#[pymethods]
impl PySeries {
// applies a ufunc by accepting a lambda out: ufunc(*args, out=out)
// the out array is allocated in this method, send to Python and once the ufunc is applied
// ownership is taken by Rust again to prevent memory leak.
// if the ufunc fails, we first must take ownership back.
pub fn $name(&self, lambda: &PyAny) -> PyResult<PySeries> {
// numpy array object, and a *mut ptr
let gil = Python::acquire_gil();
let py = gil.python();
let size = self.len();
let (out_array, ptr) = aligned_array::<$type>(py, size);
debug_assert_eq!(out_array.get_refcnt(), 1);
// inserting it in a tuple increase the reference count by 1.
let args = PyTuple::new(py, &[out_array]);
debug_assert_eq!(out_array.get_refcnt(), 2);
// whatever the result, we must take the leaked memory ownership back
let s = match lambda.call1(args) {
Ok(_) => {
// if this assert fails, the lambda has taken a reference to the object, so we must panic
// args and the lambda return have a reference, making a total of 3
assert_eq!(out_array.get_refcnt(), 3);
self.$unsafe_from_ptr_method(ptr as usize, size)
}
Err(e) => {
// first take ownership from the leaked memory
// so the destructor gets called when we go out of scope
self.$unsafe_from_ptr_method(ptr as usize, size);
// return error information
return Err(e);
}
};
Ok(s)
}
}
};
}
impl_ufuncs!(apply_ufunc_f32, f32, unsafe_from_ptr_f32);
impl_ufuncs!(apply_ufunc_f64, f64, unsafe_from_ptr_f64);
impl_ufuncs!(apply_ufunc_u8, u8, unsafe_from_ptr_u8);
impl_ufuncs!(apply_ufunc_u16, u16, unsafe_from_ptr_u16);
impl_ufuncs!(apply_ufunc_u32, u32, unsafe_from_ptr_u32);
impl_ufuncs!(apply_ufunc_u64, u64, unsafe_from_ptr_u64);
impl_ufuncs!(apply_ufunc_i8, i8, unsafe_from_ptr_i8);
impl_ufuncs!(apply_ufunc_i16, i16, unsafe_from_ptr_i16);
impl_ufuncs!(apply_ufunc_i32, i32, unsafe_from_ptr_i32);
impl_ufuncs!(apply_ufunc_i64, i64, unsafe_from_ptr_i64);
macro_rules! impl_set_with_mask {
($name:ident, $native:ty, $cast:ident, $variant:ident) => {
fn $name(series: &Series, filter: &PySeries, value: Option<$native>) -> Result<Series> {
let mask = filter.series.bool()?;
let ca = series.$cast()?;
let new = ca.set(mask, value)?;
Ok(Series::$variant(new))
}
#[pymethods]
impl PySeries {
pub fn $name(&self, filter: &PySeries, value: Option<$native>) -> PyResult<Self> {
let series = $name(&self.series, filter, value).map_err(PyPolarsEr::from)?;
Ok(Self::new(series))
}
}
};
}
impl_set_with_mask!(set_with_mask_str, &str, utf8, Utf8);
impl_set_with_mask!(set_with_mask_f64, f64, f64, Float64);
impl_set_with_mask!(set_with_mask_f32, f32, f32, Float32);
impl_set_with_mask!(set_with_mask_u8, u8, u8, UInt8);
impl_set_with_mask!(set_with_mask_u16, u16, u16, UInt16);
impl_set_with_mask!(set_with_mask_u32, u32, u32, UInt32);
impl_set_with_mask!(set_with_mask_u64, u64, u64, UInt64);
impl_set_with_mask!(set_with_mask_i8, i8, i8, Int8);
impl_set_with_mask!(set_with_mask_i16, i16, i16, Int16);
impl_set_with_mask!(set_with_mask_i32, i32, i32, Int32);
impl_set_with_mask!(set_with_mask_i64, i64, i64, Int64);
macro_rules! impl_set_at_idx {
($name:ident, $native:ty, $cast:ident, $variant:ident) => {
fn $name(series: &Series, idx: &[usize], value: Option<$native>) -> Result<Series> {
let ca = series.$cast()?;
let new = ca.set_at_idx(&idx, value)?;
Ok(Series::$variant(new))
}
#[pymethods]
impl PySeries {
pub fn $name(&self, idx: &PyArray1<usize>, value: Option<$native>) -> PyResult<Self> {
let idx = unsafe { idx.as_slice().unwrap() };
let series = $name(&self.series, &idx, value).map_err(PyPolarsEr::from)?;
Ok(Self::new(series))
}
}
};
}
impl_set_at_idx!(set_at_idx_str, &str, utf8, Utf8);
impl_set_at_idx!(set_at_idx_f64, f64, f64, Float64);
impl_set_at_idx!(set_at_idx_f32, f32, f32, Float32);
impl_set_at_idx!(set_at_idx_u8, u8, u8, UInt8);
impl_set_at_idx!(set_at_idx_u16, u16, u16, UInt16);
impl_set_at_idx!(set_at_idx_u32, u32, u32, UInt32);
impl_set_at_idx!(set_at_idx_u64, u64, u64, UInt64);
impl_set_at_idx!(set_at_idx_i8, i8, i8, Int8);
impl_set_at_idx!(set_at_idx_i16, i16, i16, Int16);
impl_set_at_idx!(set_at_idx_i32, i32, i32, Int32);
impl_set_at_idx!(set_at_idx_i64, i64, i64, Int64);
macro_rules! impl_get {
($name:ident, $series_variant:ident, $type:ty) => {
#[pymethods]
impl PySeries {
pub fn $name(&self, index: usize) -> Option<$type> {
if let Series::$series_variant(ca) = &self.series {
ca.get(index)
} else {
None
}
}
}
};
}
impl_get!(get_f32, Float32, f32);
impl_get!(get_f64, Float64, f64);
impl_get!(get_u8, UInt8, u8);
impl_get!(get_u16, UInt16, u16);
impl_get!(get_u32, UInt32, u32);
impl_get!(get_u64, UInt64, u64);
impl_get!(get_i8, Int8, i8);
impl_get!(get_i16, Int16, i16);
impl_get!(get_i32, Int32, i32);
impl_get!(get_i64, Int64, i64);
// Not public methods.
macro_rules! impl_unsafe_from_ptr {
($name:ident, $series_variant:ident) => {
impl PySeries {
fn $name(&self, ptr: usize, len: usize) -> Self {
let av = unsafe { AlignedVec::from_ptr(ptr, len, len) };
let (null_count, null_bitmap) = get_bitmap(self.series.chunks()[0].as_ref());
let ca = ChunkedArray::new_from_owned_with_null_bitmap(
self.name(),
av,
null_bitmap,
null_count,
);
Self::new(Series::$series_variant(ca))
}
}
};
}
impl_unsafe_from_ptr!(unsafe_from_ptr_f32, Float32);
impl_unsafe_from_ptr!(unsafe_from_ptr_f64, Float64);
impl_unsafe_from_ptr!(unsafe_from_ptr_u8, UInt8);
impl_unsafe_from_ptr!(unsafe_from_ptr_u16, UInt16);
impl_unsafe_from_ptr!(unsafe_from_ptr_u32, UInt32);
impl_unsafe_from_ptr!(unsafe_from_ptr_u64, UInt64);
impl_unsafe_from_ptr!(unsafe_from_ptr_i8, Int8);
impl_unsafe_from_ptr!(unsafe_from_ptr_i16, Int16);
impl_unsafe_from_ptr!(unsafe_from_ptr_i32, Int32);
impl_unsafe_from_ptr!(unsafe_from_ptr_i64, Int64);
macro_rules! impl_cast {
($name:ident, $type:ty) => {
#[pymethods]
impl PySeries {
pub fn $name(&self) -> PyResult<PySeries> {
let s = self.series.cast::<$type>().map_err(PyPolarsEr::from)?;
Ok(PySeries::new(s))
}
}
};
}
impl_cast!(cast_u8, UInt8Type);
impl_cast!(cast_u16, UInt16Type);
impl_cast!(cast_u32, UInt32Type);
impl_cast!(cast_u64, UInt64Type);
impl_cast!(cast_i8, Int8Type);
impl_cast!(cast_i16, Int16Type);
impl_cast!(cast_i32, Int32Type);
impl_cast!(cast_i64, Int64Type);
impl_cast!(cast_f32, Float32Type);
impl_cast!(cast_f64, Float64Type);
impl_cast!(cast_date32, Date32Type);
impl_cast!(cast_date64, Date64Type);
impl_cast!(cast_time64ns, Time64NanosecondType);
impl_cast!(cast_duration_ns, DurationNanosecondType);
impl_cast!(cast_utf8, Utf8Type);
macro_rules! impl_arithmetic {
($name:ident, $type:ty, $operand:tt) => {
#[pymethods]
impl PySeries {
pub fn $name(&self, other: $type) -> PyResult<PySeries> {
Ok(PySeries::new(&self.series $operand other))
}
}
};
}
impl_arithmetic!(add_u8, u8, +);
impl_arithmetic!(add_u16, u16, +);
impl_arithmetic!(add_u32, u32, +);
impl_arithmetic!(add_u64, u64, +);
impl_arithmetic!(add_i8, i8, +);
impl_arithmetic!(add_i16, i16, +);
impl_arithmetic!(add_i32, i32, +);
impl_arithmetic!(add_i64, i64, +);
impl_arithmetic!(add_f32, f32, +);
impl_arithmetic!(add_f64, f64, +);
impl_arithmetic!(sub_u8, u8, -);
impl_arithmetic!(sub_u16, u16, -);
impl_arithmetic!(sub_u32, u32, -);
impl_arithmetic!(sub_u64, u64, -);
impl_arithmetic!(sub_i8, i8, -);
impl_arithmetic!(sub_i16, i16, -);
impl_arithmetic!(sub_i32, i32, -);
impl_arithmetic!(sub_i64, i64, -);
impl_arithmetic!(sub_f32, f32, -);
impl_arithmetic!(sub_f64, f64, -);
impl_arithmetic!(div_u8, u8, /);
impl_arithmetic!(div_u16, u16, /);
impl_arithmetic!(div_u32, u32, /);
impl_arithmetic!(div_u64, u64, /);
impl_arithmetic!(div_i8, i8, /);
impl_arithmetic!(div_i16, i16, /);
impl_arithmetic!(div_i32, i32, /);
impl_arithmetic!(div_i64, i64, /);
impl_arithmetic!(div_f32, f32, /);
impl_arithmetic!(div_f64, f64, /);
impl_arithmetic!(mul_u8, u8, *);
impl_arithmetic!(mul_u16, u16, *);
impl_arithmetic!(mul_u32, u32, *);
impl_arithmetic!(mul_u64, u64, *);
impl_arithmetic!(mul_i8, i8, *);
impl_arithmetic!(mul_i16, i16, *);
impl_arithmetic!(mul_i32, i32, *);
impl_arithmetic!(mul_i64, i64, *);
impl_arithmetic!(mul_f32, f32, *);
impl_arithmetic!(mul_f64, f64, *);
macro_rules! impl_rhs_arithmetic {
($name:ident, $type:ty, $operand:ident) => {
#[pymethods]
impl PySeries {
pub fn $name(&self, other: $type) -> PyResult<PySeries> {
Ok(PySeries::new(other.$operand(&self.series)))
}
}
};
}
impl_rhs_arithmetic!(add_u8_rhs, u8, add);
impl_rhs_arithmetic!(add_u16_rhs, u16, add);
impl_rhs_arithmetic!(add_u32_rhs, u32, add);
impl_rhs_arithmetic!(add_u64_rhs, u64, add);
impl_rhs_arithmetic!(add_i8_rhs, i8, add);
impl_rhs_arithmetic!(add_i16_rhs, i16, add);
impl_rhs_arithmetic!(add_i32_rhs, i32, add);
impl_rhs_arithmetic!(add_i64_rhs, i64, add);
impl_rhs_arithmetic!(add_f32_rhs, f32, add);
impl_rhs_arithmetic!(add_f64_rhs, f64, add);
impl_rhs_arithmetic!(sub_u8_rhs, u8, sub);
impl_rhs_arithmetic!(sub_u16_rhs, u16, sub);
impl_rhs_arithmetic!(sub_u32_rhs, u32, sub);
impl_rhs_arithmetic!(sub_u64_rhs, u64, sub);
impl_rhs_arithmetic!(sub_i8_rhs, i8, sub);
impl_rhs_arithmetic!(sub_i16_rhs, i16, sub);
impl_rhs_arithmetic!(sub_i32_rhs, i32, sub);
impl_rhs_arithmetic!(sub_i64_rhs, i64, sub);
impl_rhs_arithmetic!(sub_f32_rhs, f32, sub);
impl_rhs_arithmetic!(sub_f64_rhs, f64, sub);
impl_rhs_arithmetic!(div_u8_rhs, u8, div);
impl_rhs_arithmetic!(div_u16_rhs, u16, div);
impl_rhs_arithmetic!(div_u32_rhs, u32, div);
impl_rhs_arithmetic!(div_u64_rhs, u64, div);
impl_rhs_arithmetic!(div_i8_rhs, i8, div);
impl_rhs_arithmetic!(div_i16_rhs, i16, div);
impl_rhs_arithmetic!(div_i32_rhs, i32, div);
impl_rhs_arithmetic!(div_i64_rhs, i64, div);
impl_rhs_arithmetic!(div_f32_rhs, f32, div);
impl_rhs_arithmetic!(div_f64_rhs, f64, div);
impl_rhs_arithmetic!(mul_u8_rhs, u8, mul);
impl_rhs_arithmetic!(mul_u16_rhs, u16, mul);
impl_rhs_arithmetic!(mul_u32_rhs, u32, mul);
impl_rhs_arithmetic!(mul_u64_rhs, u64, mul);
impl_rhs_arithmetic!(mul_i8_rhs, i8, mul);
impl_rhs_arithmetic!(mul_i16_rhs, i16, mul);
impl_rhs_arithmetic!(mul_i32_rhs, i32, mul);
impl_rhs_arithmetic!(mul_i64_rhs, i64, mul);
impl_rhs_arithmetic!(mul_f32_rhs, f32, mul);
impl_rhs_arithmetic!(mul_f64_rhs, f64, mul);
macro_rules! impl_sum {
($name:ident, $type:ty) => {
#[pymethods]
impl PySeries {
pub fn $name(&self) -> PyResult<Option<$type>> {
Ok(self.series.sum())
}
}
};
}
impl_sum!(sum_u8, u8);
impl_sum!(sum_u16, u16);
impl_sum!(sum_u32, u32);
impl_sum!(sum_u64, u64);
impl_sum!(sum_i8, i8);
impl_sum!(sum_i16, i16);
impl_sum!(sum_i32, i32);
impl_sum!(sum_i64, i64);
impl_sum!(sum_f32, f32);
impl_sum!(sum_f64, f64);
macro_rules! impl_min {
($name:ident, $type:ty) => {
#[pymethods]
impl PySeries {
pub fn $name(&self) -> PyResult<Option<$type>> {
Ok(self.series.min())
}
}
};
}
impl_min!(min_u8, u8);
impl_min!(min_u16, u16);
impl_min!(min_u32, u32);
impl_min!(min_u64, u64);
impl_min!(min_i8, i8);
impl_min!(min_i16, i16);
impl_min!(min_i32, i32);
impl_min!(min_i64, i64);
impl_min!(min_f32, f32);
impl_min!(min_f64, f64);
macro_rules! impl_max {
($name:ident, $type:ty) => {
#[pymethods]
impl PySeries {
pub fn $name(&self) -> PyResult<Option<$type>> {
Ok(self.series.max())
}
}
};
}
impl_max!(max_u8, u8);
impl_max!(max_u16, u16);
impl_max!(max_u32, u32);
impl_max!(max_u64, u64);
impl_max!(max_i8, i8);
impl_max!(max_i16, i16);
impl_max!(max_i32, i32);
impl_max!(max_i64, i64);
impl_max!(max_f32, f32);
impl_max!(max_f64, f64);
macro_rules! impl_mean {
($name:ident, $type:ty) => {
#[pymethods]
impl PySeries {
pub fn $name(&self) -> PyResult<Option<$type>> {
Ok(self.series.mean())
}
}
};
}
impl_mean!(mean_u8, u8);
impl_mean!(mean_u16, u16);
impl_mean!(mean_u32, u32);
impl_mean!(mean_u64, u64);
impl_mean!(mean_i8, i8);
impl_mean!(mean_i16, i16);
impl_mean!(mean_i32, i32);
impl_mean!(mean_i64, i64);
impl_mean!(mean_f32, f32);
impl_mean!(mean_f64, f64);
macro_rules! impl_eq_num {
($name:ident, $type:ty) => {
#[pymethods]
impl PySeries {
pub fn $name(&self, rhs: $type) -> PyResult<PySeries> {
Ok(PySeries::new(Series::Bool(self.series.eq(rhs))))
}
}
};
}
impl_eq_num!(eq_u8, u8);
impl_eq_num!(eq_u16, u16);
impl_eq_num!(eq_u32, u32);
impl_eq_num!(eq_u64, u64);
impl_eq_num!(eq_i8, i8);
impl_eq_num!(eq_i16, i16);
impl_eq_num!(eq_i32, i32);
impl_eq_num!(eq_i64, i64);
impl_eq_num!(eq_f32, f32);
impl_eq_num!(eq_f64, f64);
impl_eq_num!(eq_str, &str);
macro_rules! impl_neq_num {
($name:ident, $type:ty) => {
#[pymethods]
impl PySeries {
pub fn $name(&self, rhs: $type) -> PyResult<PySeries> {
Ok(PySeries::new(Series::Bool(self.series.neq(rhs))))
}
}
};
}
impl_neq_num!(neq_u8, u8);
impl_neq_num!(neq_u16, u16);
impl_neq_num!(neq_u32, u32);
impl_neq_num!(neq_u64, u64);
impl_neq_num!(neq_i8, i8);
impl_neq_num!(neq_i16, i16);
impl_neq_num!(neq_i32, i32);
impl_neq_num!(neq_i64, i64);
impl_neq_num!(neq_f32, f32);
impl_neq_num!(neq_f64, f64);
impl_neq_num!(neq_str, &str);
macro_rules! impl_gt_num {
($name:ident, $type:ty) => {
#[pymethods]
impl PySeries {
pub fn $name(&self, rhs: $type) -> PyResult<PySeries> {
Ok(PySeries::new(Series::Bool(self.series.gt(rhs))))
}
}
};
}
impl_gt_num!(gt_u8, u8);
impl_gt_num!(gt_u16, u16);
impl_gt_num!(gt_u32, u32);
impl_gt_num!(gt_u64, u64);
impl_gt_num!(gt_i8, i8);
impl_gt_num!(gt_i16, i16);
impl_gt_num!(gt_i32, i32);
impl_gt_num!(gt_i64, i64);
impl_gt_num!(gt_f32, f32);
impl_gt_num!(gt_f64, f64);
impl_gt_num!(gt_str, &str);
macro_rules! impl_gt_eq_num {
($name:ident, $type:ty) => {
#[pymethods]
impl PySeries {
pub fn $name(&self, rhs: $type) -> PyResult<PySeries> {
Ok(PySeries::new(Series::Bool(self.series.gt_eq(rhs))))
}
}
};
}
impl_gt_eq_num!(gt_eq_u8, u8);
impl_gt_eq_num!(gt_eq_u16, u16);
impl_gt_eq_num!(gt_eq_u32, u32);
impl_gt_eq_num!(gt_eq_u64, u64);
impl_gt_eq_num!(gt_eq_i8, i8);
impl_gt_eq_num!(gt_eq_i16, i16);
impl_gt_eq_num!(gt_eq_i32, i32);
impl_gt_eq_num!(gt_eq_i64, i64);
impl_gt_eq_num!(gt_eq_f32, f32);
impl_gt_eq_num!(gt_eq_f64, f64);
impl_gt_eq_num!(gt_eq_str, &str);
macro_rules! impl_lt_num {
($name:ident, $type:ty) => {
#[pymethods]
impl PySeries {
pub fn $name(&self, rhs: $type) -> PyResult<PySeries> {
Ok(PySeries::new(Series::Bool(self.series.lt(rhs))))
}
}
};
}
impl_lt_num!(lt_u8, u8);
impl_lt_num!(lt_u16, u16);
impl_lt_num!(lt_u32, u32);
impl_lt_num!(lt_u64, u64);
impl_lt_num!(lt_i8, i8);
impl_lt_num!(lt_i16, i16);
impl_lt_num!(lt_i32, i32);
impl_lt_num!(lt_i64, i64);
impl_lt_num!(lt_f32, f32);
impl_lt_num!(lt_f64, f64);
impl_lt_num!(lt_str, &str);
macro_rules! impl_lt_eq_num {
($name:ident, $type:ty) => {
#[pymethods]
impl PySeries {
pub fn $name(&self, rhs: $type) -> PyResult<PySeries> {
Ok(PySeries::new(Series::Bool(self.series.lt_eq(rhs))))
}
}
};
}
impl_lt_eq_num!(lt_eq_u8, u8);
impl_lt_eq_num!(lt_eq_u16, u16);
impl_lt_eq_num!(lt_eq_u32, u32);
impl_lt_eq_num!(lt_eq_u64, u64);
impl_lt_eq_num!(lt_eq_i8, i8);
impl_lt_eq_num!(lt_eq_i16, i16);
impl_lt_eq_num!(lt_eq_i32, i32);
impl_lt_eq_num!(lt_eq_i64, i64);
impl_lt_eq_num!(lt_eq_f32, f32);
impl_lt_eq_num!(lt_eq_f64, f64);
impl_lt_eq_num!(lt_eq_str, &str);
pub(crate) fn to_series_collection(ps: Vec<PySeries>) -> Vec<Series> {
// prevent destruction of ps
let mut ps = std::mem::ManuallyDrop::new(ps);
// get mutable pointer and reinterpret as Series
let p = ps.as_mut_ptr() as *mut Series;
let len = ps.len();
let cap = ps.capacity();
// The pointer ownership will be transferred to Vec and this will be responsible for dealoc
unsafe { Vec::from_raw_parts(p, len, cap) }
}
pub(crate) fn to_pyseries_collection(s: Vec<Series>) -> Vec<PySeries> {
let mut s = std::mem::ManuallyDrop::new(s);
let p = s.as_mut_ptr() as *mut PySeries;
let len = s.len();
let cap = s.capacity();
unsafe { Vec::from_raw_parts(p, len, cap) }
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn transmute_to_series() {
// NOTE: This is only possible because PySeries is #[repr(transparent)]
// https://doc.rust-lang.org/reference/type-layout.html
let ps = PySeries {
series: [1i32, 2, 3].iter().collect(),
};
let s = unsafe { std::mem::transmute::<PySeries, Series>(ps.clone()) };
assert_eq!(s.sum::<i32>(), Some(6));
let collection = vec![ps];
let s = to_series_collection(collection);
assert_eq!(
s.iter().map(|s| s.sum::<i32>()).collect::<Vec<_>>(),
vec![Some(6)]
);
}
}
|
{
if null_equal {
self.series.series_equal_missing(&other.series)
} else {
self.series.series_equal(&other.series)
}
}
|
chapter4-ownership.rs
|
/**
* @author Richard Alvarez
*/
// CHAPTER 4. OWNERSHIP
fn main()
|
{
// 1. Each value in Rust has a variable that’s called its owner.
// 2. There can only be one owner at a time.
// 3. When the owner goes out of scope, the value will be dropped.
let q = {
// s is not valid, non-exsistent
let s = "hello";
s // s can be used.
} // s is dropped
// stored on heap!
// str literals are on the stack.
let mut string = String::from("hello");
string.push_str(", world!");
}
|
|
spktype21.py
|
# -*- coding: utf-8 -*-
"""A supporting module for jplephem to handle data type 21 (Version 0.1.0)
This module computes position and velocity of a celestial small body, from a
NASA SPICE SPK ephemeris kernel file of data type 21 (Extended Modified
Difference Arrays).
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/FORTRAN/req/spk.html
You can get SPK files for many solar system small bodies from HORIZONS
system of NASA/JPL. See https://ssd.jpl.nasa.gov/?horizons
This module reads SPK files of data type 21, one of the types of binary SPK
file.
At the point of Oct. 2018, HORIZONS system provides files of type 21 for
binary SPK files by default. You can get type 21 binary SPK file for celestial
small bodies through TELNET interface by answering back 'Binary' for
'SPK file format'. Also you can get type 21 binary SPK file from:
https://ssd.jpl.nasa.gov/x/spk.html
Modules required:
jplephem (version 2.6 or later)
numpy
Usage:
from spktype21 import SPKType21
kernel = SPKType21.open('path')
position, velocity = kernel.compute_type21(center, target, jd)
where:
path - path to the SPK file
center - SPKID of central body (0 for SSB, 10 for Sun, etc.)
target - SPKID of target body
jd - time for computation (Julian date)
Exceptions:
RuntimeError will be raised when:
invalid data_type of SPK file, or
SPK file contains too large table in EMDA record(s)
ValueError will be raised when:
invalid parameter(s) of compute_type21 function
Author: Shushi Uetsuki (whiskie14142)
This module has been developed based on jplephem and FORTRAN source
of the SPICE Toolkit of NASA/JPL/NAIF.
jplephem : https://pypi.org/project/jplephem/
SPICE Toolkit : http://naif.jpl.nasa.gov/naif/toolkit.html
"""
from numpy import array, zeros, reshape
from jplephem.daf import DAF
from jplephem.names import target_names
T0 = 2451545.0
S_PER_DAY = 86400.0
# Included from 'spk21.inc' on the FORTRAN source 'spke21.f'
MAXTRM = 25
def jd(seconds):
"""Convert a number of seconds since J2000 to a Julian Date.
"""
return T0 + seconds / S_PER_DAY
class SPKType21(object):
"""Class for SPK kernel to handle data type 21 (Extended Modified Difference Arrays)
"""
def __init__(self, daf):
self.daf = daf
self.segments = [Segment(self.daf, *t) for t in self.daf.summaries()]
ssec = lambda s : s.start_second
self.segments.sort(key=ssec)
# initialize arrays for spke21
self.G = zeros(MAXTRM)
self.REFPOS = zeros(3)
self.REFVEL = zeros(3)
self.KQ = array([0, 0, 0])
self.FC = zeros(MAXTRM)
self.FC[0] = 1.0
self.WC = zeros(MAXTRM - 1)
self.W = zeros(MAXTRM + 2)
# initialize for compute_type21
self.mda_record_exist = False
self.current_segment_exist = False
@classmethod
def open(cls, path):
"""Open the file at `path` and return an SPK instance.
"""
return cls(DAF(open(path, 'rb')))
def close(self):
"""Close this SPK file."""
self.daf.file.close()
def __str__(self):
daf = self.daf
d = lambda b: b.decode('latin-1')
lines = (str(segment) for segment in self.segments)
return 'File type {0} and format {1} with {2} segments:\n{3}'.format(
d(daf.locidw), d(daf.locfmt), len(self.segments), '\n'.join(lines))
def comments(self):
return self.daf.comments()
def compute_type21(self, center, target, jd1, jd2=0.0):
"""Compute position and velocity of target from SPK data (data type 21).
Inputs:
center - SPKID of the coordinate center (0 for Solar System Barycenter,
10 for Sun, etc)
target - SPKID of the target
jd1, jd2 - Julian date of epoch for computation. (jd1 + jd2) will
be used for computation. If you want precise definition of
epoch, jd1 should be an integer or a half integer, and jd2 should
be a relatively small floating point number.
Returns:
Position (X, Y, Z) and velocity (XD, YD, ZD) of the target at
epoch. Position and velocity are provided as Numpy arrays
respectively.
"""
eval_sec = (jd1 - T0)
eval_sec = (eval_sec + jd2) * S_PER_DAY
if self.mda_record_exist:
if eval_sec >= self.mda_lb and eval_sec < self.mda_ub:
result = self.spke21(eval_sec, self.mda_record)
return result[0:3], result[3:]
self.mda_record, self.mda_lb, self.mda_ub = self.get_MDA_record(eval_sec, target, center)
self.mda_record_exists = True
result = self.spke21(eval_sec, self.mda_record)
return result[0:3], result[3:]
def get_MDA_record(self, eval_sec, target, center):
"""Return a EMDA record for defined epoch.
Inputs:
eval_sec - epoch for computation, seconds from J2000
target - body ID of the target
center - body ID of coordinate center
Returns:
EMDA record - a Numpy array of DLSIZE floating point numbers
Exception:
ValueError will be raised when:
eval_sed is outside of SPK data
target and center are not in SPK data
RuntimeError will be raised when:
invalid data type of SPK data
"""
# chech last segment can be used
if self.current_segment_exist:
if eval_sec >= self.current_segment.start_second \
and eval_sec < self.current_segment.end_second \
and target == self.current_segment.target \
and center == self.current_segment.center:
return self.current_segment.get_MDA_record(eval_sec)
# select segments with matched 'target' and 'center'
matched = []
for segment in self.segments:
if segment.target == target and segment.center == center:
matched.append(segment)
if len(matched) == 0:
raise ValueError('Invalid Target and/or Center')
if eval_sec < matched[0].start_second or eval_sec >= matched[-1].end_second:
raise ValueError('Invalid Time to evaluate')
# selet a segment based on eval_sec
found = False
for segment in matched:
if eval_sec < segment.end_second:
found = True
self.current_segment = segment
break
if not found:
self.current_segment = matched[-1]
self.current_segment_exist = True
# get the MDA record from selected segment
if self.current_segment.data_type != 21:
raise RuntimeError('Invalid data. Data Type must be 21')
return self.current_segment.get_MDA_record(eval_sec)
# left this module only 2018/10/12
def spke21(self, ET, RECORD):
"""Compute position and velocity from a Modified Difference Array record
Inputs:
ET: Epoch time to evaluate position and velocity (seconds since J2000)
RECORD: A record of Extended Modified Difference Array
Returns: STATE
STATE: A numpy array which contains position and velocity
"""
# This method was translated from FORTRAN source code ‘spke21.f’ of SPICE
# Toolkit and modified by Shushi Uetsuki.
#
# SPICE Toolkit for FORTRAN : http://naif.jpl.nasa.gov/naif/toolkit_FORTRAN.html
# SPK Required Reading : http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/req/spk.html
#
# Unfortunately, I found some discrepancies between FORTRAN source code
# and actual data contained in SPK files. So, I tried to compose a
# method that compute positions and velocities correctly by referencing
# code of spktype01.
# Following comments start with #C were copied from original FORTRAN code.
#C$ Abstract
#C
#C Evaluate a single SPK data record from a segment of type 21
#C (Extended Difference Lines).
#C
#C$ Disclaimer
#C
#C THIS SOFTWARE AND ANY RELATED MATERIALS WERE CREATED BY THE
#C CALIFORNIA INSTITUTE OF TECHNOLOGY (CALTECH) UNDER A U.S.
#C GOVERNMENT CONTRACT WITH THE NATIONAL AERONAUTICS AND SPACE
#C PUBLICLY AVAILABLE UNDER U.S. EXPORT LAWS AND IS PROVIDED "AS-IS"
#C TO THE RECIPIENT WITHOUT WARRANTY OF ANY KIND, INCLUDING ANY
#C WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A
#C PARTICULAR USE OR PURPOSE (AS SET FORTH IN UNITED STATES UCC
#C SECTIONS 2312-2313) OR FOR ANY PURPOSE WHATSOEVER, FOR THE
#C SOFTWARE AND RELATED MATERIALS, HOWEVER USED.
#C
#C IN NO EVENT SHALL CALTECH, ITS JET PROPULSION LABORATORY, OR NASA
#C BE LIABLE FOR ANY DAMAGES AND/OR COSTS, INCLUDING, BUT NOT
#C LIMITED TO, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND,
#C INCLUDING ECONOMIC DAMAGE OR INJURY TO PROPERTY AND LOST PROFITS,
#C REGARDLESS OF WHETHER CALTECH, JPL, OR NASA BE ADVISED, HAVE
#C REASON TO KNOW, OR, IN FACT, SHALL KNOW OF THE POSSIBILITY.
#C
#C RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF
#C THE SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY
#C CALTECH AND NASA FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE
#C ACTIONS OF RECIPIENT IN THE USE OF THE SOFTWARE.
#C
#C$ Required_Reading
#C
#C SPK
#C TIME
#C
#C$ Keywords
#C
#C EPHEMERIS
#C
#C$ Declarations
STATE = zeros(6)
#C$ Brief_I/O
#C
#C Variable I/O Description
#C -------- --- --------------------------------------------------
#C ET I Evaluation epoch.
#C RECORD I Data record.
#C STATE O State (position and velocity).
#C MAXTRM P Maximum number of terms per difference table
#C component.
#C
#C$ Detailed_Input
#C
#C ET is an epoch at which a state vector is to be
#C computed. The epoch is represented as seconds past
#C J2000 TDB.
#C
#C RECORD is a data record which, when evaluated at epoch ET,
#C will give the state (position and velocity) of an
#C ephemeris object, relative to its center of motion,
#C in an inertial reference frame.
#C
#C The contents of RECORD are as follows:
#C
#C RECORD(1): The difference table size per
#C Cartesian component. Call this
#C size MAXDIM; then the difference
#C line (MDA) size DLSIZE is
#C
#C ( 4 * MAXDIM ) + 11
#C
#C RECORD(2)
#C ...
#C RECORD(1+DLSIZE): An extended difference line.
#C The contents are:
#C
#C Dimension Description
#C --------- ----------------------------------
#C 1 Reference epoch of difference line
#C MAXDIM Stepsize function vector
#C 1 Reference position vector, x
#C 1 Reference velocity vector, x
#C 1 Reference position vector, y
#C 1 Reference velocity vector, y
#C 1 Reference position vector, z
#C 1 Reference velocity vector, z
#C MAXDIM,3 Modified divided difference
#C arrays (MDAs)
#C 1 Maximum integration order plus 1
#C 3 Integration order array
#C
#C$ Detailed_Output
#C
#C STATE is the state resulting from evaluation of the input
#C record at ET. Units are km and km/sec.
#C
#C$ Parameters
#C
#C MAXTRM is the maximum number of terms allowed in
#C each component of the difference table
#C contained in the input argument RECORD.
#C See the INCLUDE file spk21.inc for the value
#C of MAXTRM.
#C
#C$ Exceptions
#C
#C 1) If the maximum table size of the input record exceeds
#C MAXTRM, the error SPICE(DIFFLINETOOLARGE) is signaled.
#C
#C$ Files
#C
#C None.
#C
#C$ Particulars
#C
#C The exact format and structure of type 21 (difference lines)
#C segments are described in the SPK Required Reading file.
#C
#C SPKE21 is a modified version of SPKE01. The routine has been
#C generalized to support variable size difference lines.
#C
#C$ Examples
#C
#C None.
#C
#C$ Restrictions
#C
#C Unknown.
#C
#C$ Literature_References
#C
#C NAIF Document 168.0, "S- and P- Kernel (SPK) Specification and
#C User's Guide"
#C
#C$ Author_and_Institution
#C
#C N.J. Bachman (JPL)
#C F.T. Krogh (JPL)
#C W.L. Taber (JPL)
#C I.M. Underwood (JPL)
#C
#C$ Version
#C
#C- SPICELIB Version 1.0.0, 03-FEB-2014 (NJB) (FTK) (WLT) (IMU)
#C
#C-&
#
#C$ Index_Entries
#C
#C evaluate type_21 spk segment
#C
#C-&
#C
#C The first element of the input record is the dimension
#C of the difference table MAXDIM.
#C
# The FORTRAN source code indicates that RECORD[0] contains MAXDIM, but actual
# data record does not contain it. MAXDIM is contained in each segment.
MAXDIM = self.current_segment.MAXDIM
if MAXDIM > MAXTRM:
mes = ('SPKE21 \nThe input record has a maximum table dimension ' +
'of {0}, while the maximum supported by this routine is {1}. ' +
'It is possible that this problem is due to your software ' +
'beeing out of date.').format(MAXDIM, MAXTRM)
raise RuntimeError(mes)
return STATE
#C
#C Unpack the contents of the MDA array.
#C
#C Name Dimension Description
#C ------ --------- -------------------------------
#C TL 1 Reference epoch of record
#C G MAXDIM Stepsize function vector
#C REFPOS 3 Reference position vector
#C REFVEL 3 Reference velocity vector
#C DT MAXDIM,NTE Modified divided difference arrays
#C KQMAX1 1 Maximum integration order plus 1
#C KQ NTE Integration order array
#C
#C For our purposes, NTE is always 3.
#C
# The FORTRAN source code indicates that RECORD[1] contains TL, but on the
# actual data RECORD[0] contains it, and all addresses for following data are
# shifted forward by one.
self.TL = RECORD[0]
self.G = RECORD[1:MAXDIM + 1]
#C
#C Collect the reference position and velocity.
#C
self.REFPOS[0] = RECORD[MAXDIM + 1]
self.REFVEL[0] = RECORD[MAXDIM + 2]
self.REFPOS[1] = RECORD[MAXDIM + 3]
self.REFVEL[1] = RECORD[MAXDIM + 4]
self.REFPOS[2] = RECORD[MAXDIM + 5]
self.REFVEL[2] = RECORD[MAXDIM + 6]
#C
#C Initializing the difference table is one aspect of this routine
#C that's a bit different from SPKE01. Here the first dimension of
#C the table in the input record can be smaller than MAXTRM. So, we
#C must transfer separately the portions of the table corresponding
#C to each component.
#C
self.DT = reshape(RECORD[MAXDIM + 7:MAXDIM * 4 + 7], (MAXDIM, 3),
order='F')
self.KQMAX1 = int(RECORD[4 * MAXDIM + 7])
self.KQ[0] = int(RECORD[4 * MAXDIM + 8])
self.KQ[1] = int(RECORD[4 * MAXDIM + 9])
self.KQ[2] = int(RECORD[4 * MAXDIM + 10])
#C
#C Next we set up for the computation of the various differences
#C
self.DELTA = ET - self.TL
self.TP = self.DELTA
self.MQ2 = self.KQMAX1 - 2
self.KS = self.KQMAX1 - 1
#C
#C This is clearly collecting some kind of coefficients.
#C The problem is that we have no idea what they are...
#C
#C The G coefficients are supposed to be some kind of step size
#C vector.
#C
#C TP starts out as the delta t between the request time and the
#C difference line's reference epoch. We then change it from DELTA
#C by the components of the stepsize vector G.
|
#C
#C Make sure we're not about to attempt division by zero.
#C
if self.G[J-1] == 0.0:
mes = ('SPKE21\nA value of zero was found at index {0} ' +
'of the step size vector.').format(J)
raise RuntimeError(mes)
return STATE
self.FC[J] = self.TP / self.G[J-1]
self.WC[J-1] = self.DELTA / self.G[J-1]
self.TP = self.DELTA + self.G[J-1]
#C
#C Collect KQMAX1 reciprocals.
#C
for J in range(1, self.KQMAX1 + 1):
self.W[J-1] = 1.0 / float(J)
#C
#C Compute the W(K) terms needed for the position interpolation
#C (Note, it is assumed throughout this routine that KS, which
#C starts out as KQMAX1-1 (the ``maximum integration'')
#C is at least 2.
#C
self.JX = 0
self.KS1 = self.KS - 1
while self.KS >= 2:
self.JX = self.JX + 1
for J in range(1, self.JX + 1):
self.W[J+self.KS-1] = self.FC[J] * self.W[J+self.KS1-1] - self.WC[J-1] * self.W[J+self.KS-1]
self.KS = self.KS1
self.KS1 = self.KS1 - 1
#C
#C Perform position interpolation: (Note that KS = 1 right now.
#C We don't know much more than that.)
#C
for I in range(1, 3 + 1):
self.KQQ = self.KQ[I-1]
self.SUM = 0.0
for J in range(self.KQQ, 0, -1):
self.SUM = self.SUM + self.DT[J-1, I-1] * self.W[J+self.KS-1]
STATE[I-1] = self.REFPOS[I-1] + self.DELTA * (self.REFVEL[I-1] + self.DELTA * self.SUM)
#C
#C Again we need to compute the W(K) coefficients that are
#C going to be used in the velocity interpolation.
#C (Note, at this point, KS = 1, KS1 = 0.)
#C
for J in range(1, self.JX + 1):
self.W[J+self.KS-1] = self.FC[J] * self.W[J+self.KS1-1] - self.WC[J-1] * self.W[J+self.KS-1]
self.KS = self.KS - 1
#C
#C Perform velocity interpolation:
#C
for I in range(1, 3 + 1):
self.KQQ = self.KQ[I-1]
self.SUM = 0.0
for J in range(self.KQQ, 0, -1):
self.SUM = self.SUM + self.DT[J-1, I-1] * self.W[J+self.KS-1]
STATE[I+3-1] = self.REFVEL[I-1] + self.DELTA * self.SUM
return STATE
class Segment(object):
"""A single segment of a SPK file.
There are several items of information about each segment that are
loaded from the underlying SPK file, and made available as object
attributes:
segment.source - official ephemeris name, like 'DE-0430LE-0430'
segment.start_second - initial epoch, as seconds from J2000
segment.end_second - final epoch, as seconds from J2000
segment.start_jd - start_second, converted to a Julian Date
segment.end_jd - end_second, converted to a Julian Date
segment.center - integer center identifier
segment.target - integer target identifier
segment.frame - integer frame identifier
segment.data_type - integer data type identifier
segment.start_i - index where segment starts
segment.end_i - index where segment ends
"""
def __init__(self, daf, source, descriptor):
self.daf = daf
self.source = source
(self.start_second, self.end_second, self.target, self.center,
self.frame, self.data_type, self.start_i, self.end_i) = descriptor
self.start_jd = jd(self.start_second)
self.end_jd = jd(self.end_second)
# 'SPK Required Reading' indicates that the penultimate element of the segment
# is the difference line size (DLSIZE), but actual data contains there a MAXDIM.
self.MAXDIM = int(self.daf.map_array(self.end_i - 1, self.end_i - 1))
self.DLSIZE = 4 * self.MAXDIM + 11
def __str__(self):
return self.describe(verbose=False)
def describe(self, verbose=True):
"""Return a textual description of the segment.
"""
center = titlecase(target_names.get(self.center, 'Unknown center'))
target = titlecase(target_names.get(self.target, 'Unknown target'))
text = ('{0.start_jd:.2f}..{0.end_jd:.2f} {1} ({0.center})'
' -> {2} ({0.target})'
' data_type={0.data_type}'.format(self, center, target))
if verbose:
text += ('\n frame={0.frame} data_type={0.data_type} source={1}'
.format(self, self.source.decode('ascii')))
return text
def get_MDA_record(self, time_sec):
"""Return a Modified Difference Array(MDA) record for the time to
evaluate with its effective time boundaries (lower and upper).
Inputs:
time_sec - epoch for computation, seconds from J2000
Returns: mda_record, lower_boundary, upper_boundary
mda_record: A Modified Difference Array record
lower_boundary: lower boundary of the record, seconds since J2000
upper_boundary: upper boundary of the record, seconds since J2000
"""
# Number of records in this segment
entry_count = int(self.daf.map_array(self.end_i, self.end_i))
# Number of entries in epoch directory
epoch_dir_count = entry_count // 100
# serch target epoch in epoch directory to narrow serching aria
if epoch_dir_count >= 1:
epoch_dir = self.daf.map_array(self.end_i - epoch_dir_count - 1,
self.end_i - 2)
found = False
for i in range(1, epoch_dir_count + 1):
if epoch_dir[i-1] > time_sec:
found = True
break
if found:
serch_last_index = i * 100
serch_start_index = (i - 1) * 100 + 1
else:
serch_last_index = entry_count
serch_start_index = epoch_dir_count * 100 + 1
else:
serch_last_index = entry_count
serch_start_index = 1
# epoch_table contains epochs for all records in this segment
epoch_table = self.daf.map_array(self.start_i + (entry_count * self.DLSIZE),
self.start_i + (entry_count * self.DLSIZE) + entry_count - 1)
# serch target epoch in epoch_table
found = False
for i in range(serch_start_index, serch_last_index + 1):
if epoch_table[i-1] > time_sec:
found = True
break
if not found:
i = serch_last_index
record_index = i
upper_boundary = epoch_table[i-1]
if i != 1:
lower_boundary = epoch_table[i-2]
else:
lower_boundary = self.start_second
mda_record = self.daf.map_array(self.start_i + ((record_index - 1) * self.DLSIZE),
self.start_i + (record_index * self.DLSIZE) - 1)
# mda_record : one record of MDA
# lower_boundary : lower boundary of epoch in this MDA record
# upper_boundary : upper boundary of epoch in this MDA record
return mda_record, lower_boundary, upper_boundary
def titlecase(name):
"""Title-case target `name` if it looks safe to do so.
"""
return name if name.startswith(('1', 'C/', 'DSS-')) else name.title()
|
#C
for J in range(1, self.MQ2 + 1):
|
routerPeer.go
|
// *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package compute
import (
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/go/pulumi"
)
type RouterPeer struct {
s *pulumi.ResourceState
}
// NewRouterPeer registers a new resource with the given unique name, arguments, and options.
func NewRouterPeer(ctx *pulumi.Context,
name string, args *RouterPeerArgs, opts ...pulumi.ResourceOpt) (*RouterPeer, error) {
if args == nil || args.Interface == nil {
return nil, errors.New("missing required argument 'Interface'")
}
if args == nil || args.PeerAsn == nil {
return nil, errors.New("missing required argument 'PeerAsn'")
}
if args == nil || args.PeerIpAddress == nil
|
if args == nil || args.Router == nil {
return nil, errors.New("missing required argument 'Router'")
}
inputs := make(map[string]interface{})
if args == nil {
inputs["advertiseMode"] = nil
inputs["advertisedGroups"] = nil
inputs["advertisedIpRanges"] = nil
inputs["advertisedRoutePriority"] = nil
inputs["interface"] = nil
inputs["name"] = nil
inputs["peerAsn"] = nil
inputs["peerIpAddress"] = nil
inputs["project"] = nil
inputs["region"] = nil
inputs["router"] = nil
} else {
inputs["advertiseMode"] = args.AdvertiseMode
inputs["advertisedGroups"] = args.AdvertisedGroups
inputs["advertisedIpRanges"] = args.AdvertisedIpRanges
inputs["advertisedRoutePriority"] = args.AdvertisedRoutePriority
inputs["interface"] = args.Interface
inputs["name"] = args.Name
inputs["peerAsn"] = args.PeerAsn
inputs["peerIpAddress"] = args.PeerIpAddress
inputs["project"] = args.Project
inputs["region"] = args.Region
inputs["router"] = args.Router
}
inputs["ipAddress"] = nil
inputs["managementType"] = nil
s, err := ctx.RegisterResource("gcp:compute/routerPeer:RouterPeer", name, true, inputs, opts...)
if err != nil {
return nil, err
}
return &RouterPeer{s: s}, nil
}
// GetRouterPeer gets an existing RouterPeer resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetRouterPeer(ctx *pulumi.Context,
name string, id pulumi.ID, state *RouterPeerState, opts ...pulumi.ResourceOpt) (*RouterPeer, error) {
inputs := make(map[string]interface{})
if state != nil {
inputs["advertiseMode"] = state.AdvertiseMode
inputs["advertisedGroups"] = state.AdvertisedGroups
inputs["advertisedIpRanges"] = state.AdvertisedIpRanges
inputs["advertisedRoutePriority"] = state.AdvertisedRoutePriority
inputs["interface"] = state.Interface
inputs["ipAddress"] = state.IpAddress
inputs["managementType"] = state.ManagementType
inputs["name"] = state.Name
inputs["peerAsn"] = state.PeerAsn
inputs["peerIpAddress"] = state.PeerIpAddress
inputs["project"] = state.Project
inputs["region"] = state.Region
inputs["router"] = state.Router
}
s, err := ctx.ReadResource("gcp:compute/routerPeer:RouterPeer", name, id, inputs, opts...)
if err != nil {
return nil, err
}
return &RouterPeer{s: s}, nil
}
// URN is this resource's unique name assigned by Pulumi.
func (r *RouterPeer) URN() pulumi.URNOutput {
return r.s.URN()
}
// ID is this resource's unique identifier assigned by its provider.
func (r *RouterPeer) ID() pulumi.IDOutput {
return r.s.ID()
}
// User-specified flag to indicate which mode to use for advertisement. Valid values of this enum field are: 'DEFAULT',
// 'CUSTOM'
func (r *RouterPeer) AdvertiseMode() pulumi.StringOutput {
return (pulumi.StringOutput)(r.s.State["advertiseMode"])
}
// User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: *
// 'ALL_SUBNETS': Advertises all available subnets, including peer VPC subnets. * 'ALL_VPC_SUBNETS': Advertises the
// router's own VPC subnets. * 'ALL_PEER_VPC_SUBNETS': Advertises peer subnets of the router's VPC network. Note that this
// field can only be populated if advertiseMode is 'CUSTOM' and overrides the list defined for the router (in the "bgp"
// message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no
// custom groups.
func (r *RouterPeer) AdvertisedGroups() pulumi.ArrayOutput {
return (pulumi.ArrayOutput)(r.s.State["advertisedGroups"])
}
// User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if
// advertiseMode is 'CUSTOM' and is advertised to all peers of the router. These IP ranges will be advertised in addition
// to any specified groups. Leave this field blank to advertise no custom IP ranges.
func (r *RouterPeer) AdvertisedIpRanges() pulumi.ArrayOutput {
return (pulumi.ArrayOutput)(r.s.State["advertisedIpRanges"])
}
// The priority of routes advertised to this BGP peer. Where there is more than one matching route of maximum length, the
// routes with the lowest priority value win.
func (r *RouterPeer) AdvertisedRoutePriority() pulumi.IntOutput {
return (pulumi.IntOutput)(r.s.State["advertisedRoutePriority"])
}
// Name of the interface the BGP peer is associated with.
func (r *RouterPeer) Interface() pulumi.StringOutput {
return (pulumi.StringOutput)(r.s.State["interface"])
}
// IP address of the interface inside Google Cloud Platform. Only IPv4 is supported.
func (r *RouterPeer) IpAddress() pulumi.StringOutput {
return (pulumi.StringOutput)(r.s.State["ipAddress"])
}
// The resource that configures and manages this BGP peer. * 'MANAGED_BY_USER' is the default value and can be managed by
// you or other users * 'MANAGED_BY_ATTACHMENT' is a BGP peer that is configured and managed by Cloud Interconnect,
// specifically by an InterconnectAttachment of type PARTNER. Google automatically creates, updates, and deletes this type
// of BGP peer when the PARTNER InterconnectAttachment is created, updated, or deleted.
func (r *RouterPeer) ManagementType() pulumi.StringOutput {
return (pulumi.StringOutput)(r.s.State["managementType"])
}
// Name of this BGP peer. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be
// 1-63 characters long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must
// be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last
// character, which cannot be a dash.
func (r *RouterPeer) Name() pulumi.StringOutput {
return (pulumi.StringOutput)(r.s.State["name"])
}
// Peer BGP Autonomous System Number (ASN). Each BGP interface may use a different value.
func (r *RouterPeer) PeerAsn() pulumi.IntOutput {
return (pulumi.IntOutput)(r.s.State["peerAsn"])
}
// IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported.
func (r *RouterPeer) PeerIpAddress() pulumi.StringOutput {
return (pulumi.StringOutput)(r.s.State["peerIpAddress"])
}
func (r *RouterPeer) Project() pulumi.StringOutput {
return (pulumi.StringOutput)(r.s.State["project"])
}
// Region where the router and BgpPeer reside. If it is not provided, the provider region is used.
func (r *RouterPeer) Region() pulumi.StringOutput {
return (pulumi.StringOutput)(r.s.State["region"])
}
// The name of the Cloud Router in which this BgpPeer will be configured.
func (r *RouterPeer) Router() pulumi.StringOutput {
return (pulumi.StringOutput)(r.s.State["router"])
}
// Input properties used for looking up and filtering RouterPeer resources.
type RouterPeerState struct {
// User-specified flag to indicate which mode to use for advertisement. Valid values of this enum field are: 'DEFAULT',
// 'CUSTOM'
AdvertiseMode interface{}
// User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: *
// 'ALL_SUBNETS': Advertises all available subnets, including peer VPC subnets. * 'ALL_VPC_SUBNETS': Advertises the
// router's own VPC subnets. * 'ALL_PEER_VPC_SUBNETS': Advertises peer subnets of the router's VPC network. Note that this
// field can only be populated if advertiseMode is 'CUSTOM' and overrides the list defined for the router (in the "bgp"
// message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no
// custom groups.
AdvertisedGroups interface{}
// User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if
// advertiseMode is 'CUSTOM' and is advertised to all peers of the router. These IP ranges will be advertised in addition
// to any specified groups. Leave this field blank to advertise no custom IP ranges.
AdvertisedIpRanges interface{}
// The priority of routes advertised to this BGP peer. Where there is more than one matching route of maximum length, the
// routes with the lowest priority value win.
AdvertisedRoutePriority interface{}
// Name of the interface the BGP peer is associated with.
Interface interface{}
// IP address of the interface inside Google Cloud Platform. Only IPv4 is supported.
IpAddress interface{}
// The resource that configures and manages this BGP peer. * 'MANAGED_BY_USER' is the default value and can be managed by
// you or other users * 'MANAGED_BY_ATTACHMENT' is a BGP peer that is configured and managed by Cloud Interconnect,
// specifically by an InterconnectAttachment of type PARTNER. Google automatically creates, updates, and deletes this type
// of BGP peer when the PARTNER InterconnectAttachment is created, updated, or deleted.
ManagementType interface{}
// Name of this BGP peer. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be
// 1-63 characters long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must
// be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last
// character, which cannot be a dash.
Name interface{}
// Peer BGP Autonomous System Number (ASN). Each BGP interface may use a different value.
PeerAsn interface{}
// IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported.
PeerIpAddress interface{}
Project interface{}
// Region where the router and BgpPeer reside. If it is not provided, the provider region is used.
Region interface{}
// The name of the Cloud Router in which this BgpPeer will be configured.
Router interface{}
}
// The set of arguments for constructing a RouterPeer resource.
type RouterPeerArgs struct {
// User-specified flag to indicate which mode to use for advertisement. Valid values of this enum field are: 'DEFAULT',
// 'CUSTOM'
AdvertiseMode interface{}
// User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: *
// 'ALL_SUBNETS': Advertises all available subnets, including peer VPC subnets. * 'ALL_VPC_SUBNETS': Advertises the
// router's own VPC subnets. * 'ALL_PEER_VPC_SUBNETS': Advertises peer subnets of the router's VPC network. Note that this
// field can only be populated if advertiseMode is 'CUSTOM' and overrides the list defined for the router (in the "bgp"
// message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no
// custom groups.
AdvertisedGroups interface{}
// User-specified list of individual IP ranges to advertise in custom mode. This field can only be populated if
// advertiseMode is 'CUSTOM' and is advertised to all peers of the router. These IP ranges will be advertised in addition
// to any specified groups. Leave this field blank to advertise no custom IP ranges.
AdvertisedIpRanges interface{}
// The priority of routes advertised to this BGP peer. Where there is more than one matching route of maximum length, the
// routes with the lowest priority value win.
AdvertisedRoutePriority interface{}
// Name of the interface the BGP peer is associated with.
Interface interface{}
// Name of this BGP peer. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be
// 1-63 characters long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must
// be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last
// character, which cannot be a dash.
Name interface{}
// Peer BGP Autonomous System Number (ASN). Each BGP interface may use a different value.
PeerAsn interface{}
// IP address of the BGP interface outside Google Cloud Platform. Only IPv4 is supported.
PeerIpAddress interface{}
Project interface{}
// Region where the router and BgpPeer reside. If it is not provided, the provider region is used.
Region interface{}
// The name of the Cloud Router in which this BgpPeer will be configured.
Router interface{}
}
|
{
return nil, errors.New("missing required argument 'PeerIpAddress'")
}
|
distance_tree.py
|
import re
import nltk
import numpy
word_tags = ['CC', 'CD', 'DT', 'EX', 'FW', 'IN', 'JJ', 'JJR',
'JJS', 'LS', 'MD', 'NN', 'NNS', 'NNP', 'NNPS', 'PDT',
'POS', 'PRP', 'PRP$', 'RB', 'RBR', 'RBS', 'RP',
'SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBP',
'VBZ', 'WDT', 'WP', 'WP$', 'WRB']
currency_tags_words = ['#', '$', 'C$', 'A$']
eclipse = ['*', '*?*', '0', '*T*', '*ICH*', '*U*', '*RNR*', '*EXP*', '*PPA*', '*NOT*']
punctuation_tags = ['.', ',', ':', '-LRB-', '-RRB-', '\'\'', '``']
punctuation_words = ['.', ',', ':', '-LRB-', '-RRB-', '\'\'', '``',
'--', ';', '-', '?', '!', '...', '-LCB-',
'-RCB-']
deleted_tags = ['TOP', '-NONE-', ',', ':', '``', '\'\'']
def
|
(label):
labels = label.split('+')
new_arc = []
for sub_labels in labels:
if sub_labels == 'ADVP':
sub_labels = 'PRT'
new_arc.append(sub_labels)
label = '+'.join(new_arc)
return label
def process_none(tree):
if isinstance(tree, nltk.Tree):
label = tree.label()
if label == '-NONE-':
return None
else:
tr = []
for node in tree:
new_node = process_none(node)
if new_node is not None:
tr.append(new_node)
if len(tr) == 0:
return None
else:
return nltk.Tree(label, tr)
else:
return tree
def build_nltk_tree(depth, arc, tag, sen, arc_dict, tag_dict, stag_dict, stags=None):
"""
stags are the stanford predicted tags present in the train/valid/test files.
"""
assert len(sen) > 0
assert len(depth) == len(sen) - 1, ("%s_%s" % (len(depth), len(sen)))
if stags:
assert len(stags) == len(tag)
if len(sen) == 1:
tag_list = str(tag_dict[tag[0]]).split('+')
tag_list.reverse()
# if stags, put the real stanford pos TAG for the word and leave the
# unary chain on top.
if stags is not None:
assert len(stags) > 0
tag_list.insert(0, str(stag_dict[stags[0]]))
word = str(sen[0])
for t in tag_list:
word = nltk.Tree(t, [word])
assert isinstance(word, nltk.Tree)
return word
else:
idx = numpy.argmax(depth)
node0 = build_nltk_tree(
depth[:idx], arc[:idx], tag[:idx + 1], sen[:idx + 1],
arc_dict, tag_dict, stag_dict, stags[:idx + 1] if stags else None)
node1 = build_nltk_tree(
depth[idx + 1:], arc[idx + 1:], tag[idx + 1:], sen[idx + 1:],
arc_dict, tag_dict, stag_dict, stags[idx + 1:] if stags else None)
if node0.label() != '<empty>' and node1.label() != '<empty>':
tr = [node0, node1]
elif node0.label() == '<empty>' and node1.label() != '<empty>':
tr = [c for c in node0] + [node1]
elif node0.label() != '<empty>' and node1.label() == '<empty>':
tr = [node0] + [c for c in node1]
elif node0.label() == '<empty>' and node1.label() == '<empty>':
tr = [c for c in node0] + [c for c in node1]
arc_list = str(arc_dict[arc[idx]]).split('+')
arc_list.reverse()
for a in arc_list:
if isinstance(tr, nltk.Tree):
tr = [tr]
tr = nltk.Tree(a, tr)
return tr
def mrg(tr):
if isinstance(tr, str):
return '( %s )' % tr
# return tr + ' '
else:
s = '('
for subtr in tr:
s += mrg(subtr) + ' '
s += ')'
return s
def get_brackets(tree, start_idx=0, root=False):
assert isinstance(tree, nltk.Tree)
label = tree.label()
label = label.replace('ADVP', 'PRT')
brackets = set()
if isinstance(tree[0], nltk.Tree):
end_idx = start_idx
for node in tree:
node_brac, next_idx = get_brackets(node, end_idx)
brackets.update(node_brac)
end_idx = next_idx
if not root:
brackets.add((start_idx, end_idx, label))
else:
end_idx = start_idx + 1
return brackets, end_idx
def normalize(x):
return x / (sum(x) + 1e-8)
def tree2list(tree, parent_arc=[]):
if isinstance(tree, nltk.Tree):
label = tree.label()
if isinstance(tree[0], nltk.Tree):
label = re.split('-|=', tree.label())[0]
root_arc_list = parent_arc + [label]
root_arc = '+'.join(root_arc_list)
if len(tree) == 1:
root, arc, tag = tree2list(tree[0], parent_arc=root_arc_list)
elif len(tree) == 2:
c0, arc0, tag0 = tree2list(tree[0])
c1, arc1, tag1 = tree2list(tree[1])
root = [c0, c1]
arc = arc0 + [root_arc] + arc1
tag = tag0 + tag1
else:
c0, arc0, tag0 = tree2list(tree[0])
c1, arc1, tag1 = tree2list(nltk.Tree('<empty>', tree[1:]))
if bin == 0:
root = [c0] + c1
else:
root = [c0, c1]
arc = arc0 + [root_arc] + arc1
tag = tag0 + tag1
return root, arc, tag
else:
if len(parent_arc) == 1:
parent_arc.insert(0, '<empty>')
# parent_arc[-1] = '<POS>'
del parent_arc[-1]
return str(tree), [], ['+'.join(parent_arc)]
def get_distance(root):
if isinstance(root, list):
dist_list = []
depth_list = []
for child in root:
dist, depth = get_distance(child)
dist_list.append(dist)
depth_list.append(depth)
max_depth = max(depth_list)
out = dist_list[0]
for dist in dist_list[1:]:
out.append(max_depth)
out.extend(dist)
return out, max_depth + 1
else:
return [], 1
|
process_arc
|
submission-clone-all.js
|
import { selected } from "../selectors"
import { submissionCloneFunc } from "./submission-clone"
import { cloneDestination } from "../../settings/selectors"
import { name } from "../../assignment/selectors"
import { clone } from "../../../lib/cloneutils"
import { getAssignmentFolder } from "../../../lib/pathutils"
import Promise from "bluebird"
const submissionClone = submissionCloneFunc(clone)
// PUBLIC: Async thunk action for cloning all selected submissions.
export const submissionCloneAll = () => {
return (dispatch, getState) => {
const basePath = cloneDestination(getState())
const assignmentName = name(getState())
const cloneDirectory = getAssignmentFolder(basePath, assignmentName)
const selectedSubmissions = selected(getState())
return Promise.map(selectedSubmissions, submission => {
return dispatch(submissionClone(submission, cloneDirectory))
},
|
{concurrency: 3})
}
}
|
// TODO: dynamically set this based on system
|
lib.rs
|
#![cfg_attr(not(feature = "std"), no_std)]
/// For more guidance on Substrate modules, see the example module
/// https://github.com/paritytech/substrate/blob/master/frame/example/src/lib.rs
// TODO documentation!
use sp_runtime::traits::{BlakeTwo256, Hash, One, SaturatedConversion, Zero};
use codec::{Decode, Encode};
use frame_support::{
decl_error, decl_event, decl_module, decl_storage, dispatch::DispatchResult, ensure,
traits::Randomness, StorageMap,
};
use generic_asset::{AssetOptions, Owner, PermissionLatest};
use system::ensure_signed;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
pub trait Trait: generic_asset::Trait {
// TODO: Add other types and constants required configure this module.
// type Hashing = BlakeTwo256;
type Randomness: Randomness<Self::Hash>;
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
}
decl_error! {
/// Error for the generic-asset module.
pub enum Error for Module<T: Trait> {
VaultAlreadySet,
PoolAlreadyExists,
NotEnoughAssets,
NoSuchPool,
NotEnoughReserve,
ZeroAmount,
}
}
decl_event!(
pub enum Event<T>
where
AccountId = <T as system::Trait>::AccountId,
{
// Just a dummy event.
// Event `Something` is declared with a parameter of the type `u32` and `AccountId`
// To emit this event, we call the deposit function, from our runtime functions
SomethingStored(u32, AccountId),
}
);
// This module's storage items.
decl_storage! {
trait Store for Module<T: Trait> as XykStorage {
// alicethepool wonderland
VaultId get(vault_id): T::AccountId;
Pools get(asset_pool): map hasher(blake2_256) (T::AssetId, T::AssetId) => T::Balance;
LiquidityAssets get(liquidity_asset): map hasher(blake2_256) (T::AssetId, T::AssetId) => T::AssetId;
LiquidityPools get(liquidity_pool): map hasher(blake2_256) T::AssetId => (T::AssetId, T::AssetId);
Nonce get (fn nonce): u32;
}
}
// The module's dispatchable functions.
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
fn deposit_event() = default;
fn set_vault_id(origin) -> DispatchResult{
let sender = ensure_signed(origin)?;
ensure!(
!<VaultId<T>>::exists(),
Error::<T>::VaultAlreadySet,
);
<VaultId<T>>::put(sender);
Ok(())
}
fn create_pool(
origin,
first_asset_id: T::AssetId,
first_asset_amount: T::Balance,
second_asset_id: T::AssetId,
second_asset_amount: T::Balance
) -> DispatchResult {
let sender = ensure_signed(origin.clone())?;
let vault_address: T::AccountId = <VaultId<T>>::get();
// TODO ensure assets exists ?
// TODO asset1 != asset2
ensure!(
!<Pools<T>>::contains_key((first_asset_id, second_asset_id)),
Error::<T>::PoolAlreadyExists,
);
ensure!(
!<Pools<T>>::contains_key((second_asset_id,first_asset_id)),
Error::<T>::PoolAlreadyExists,
);
ensure!(
<generic_asset::Module<T>>::free_balance(&first_asset_id, &sender) >= first_asset_amount,
Error::<T>::NotEnoughAssets,
);
ensure!(
<generic_asset::Module<T>>::free_balance(&second_asset_id, &sender) >= second_asset_amount,
Error::<T>::NotEnoughAssets,
);
<Pools<T>>::insert(
(first_asset_id, second_asset_id), first_asset_amount
);
<Pools<T>>::insert(
(second_asset_id, first_asset_id), second_asset_amount
);
let liquidity_asset_id = <generic_asset::Module<T>>::next_asset_id();
<LiquidityAssets<T>>::insert(
(first_asset_id, second_asset_id), liquidity_asset_id
);
<LiquidityPools<T>>::insert(
liquidity_asset_id, (first_asset_id, second_asset_id)
);
let initial_liquidity = first_asset_amount + second_asset_amount; //for example, doesn't really matter
Self::create_asset(origin, initial_liquidity);
<generic_asset::Module<T>>::make_transfer_with_event(
&first_asset_id,
&sender,
&vault_address,
first_asset_amount
)?;
<generic_asset::Module<T>>::make_transfer_with_event(
&second_asset_id,
&sender,
&vault_address,
second_asset_amount
)?;
Ok(())
}
// you will sell your sold_asset_amount of sold_asset_id to get some amount of bought_asset_id
fn sell_asset (
origin,
sold_asset_id: T::AssetId,
bought_asset_id: T::AssetId,
sold_asset_amount: T::Balance,
) -> DispatchResult {
let sender = ensure_signed(origin)?;
// TODO ensure sender has enough assets
ensure!(
<Pools<T>>::contains_key((sold_asset_id,bought_asset_id)),
Error::<T>::NoSuchPool,
);
let input_reserve = <Pools<T>>::get((sold_asset_id, bought_asset_id));
let output_reserve = <Pools<T>>::get((bought_asset_id, sold_asset_id));
let bought_asset_amount = Self::calculate_sell_price(
input_reserve,
output_reserve,
sold_asset_amount,
);
ensure!(
<generic_asset::Module<T>>::free_balance(&sold_asset_id, &sender) >= sold_asset_amount,
Error::<T>::NotEnoughAssets,
);
let vault = <VaultId<T>>::get();
<generic_asset::Module<T>>::make_transfer_with_event(
&sold_asset_id,
&sender,
&vault,
sold_asset_amount,
)?;
|
<generic_asset::Module<T>>::make_transfer_with_event(
&bought_asset_id,
&vault,
&sender,
bought_asset_amount,
)?;
<Pools<T>>::insert(
(sold_asset_id, bought_asset_id),
input_reserve + sold_asset_amount,
);
<Pools<T>>::insert(
(bought_asset_id, sold_asset_id),
output_reserve - bought_asset_amount,
);
Ok(())
}
fn buy_asset (
origin,
sold_asset_id: T::AssetId,
bought_asset_id: T::AssetId,
bought_asset_amount: T::Balance,
) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(
<Pools<T>>::contains_key((sold_asset_id,bought_asset_id)),
Error::<T>::NoSuchPool,
);
let input_reserve = <Pools<T>>::get((sold_asset_id, bought_asset_id));
let output_reserve = <Pools<T>>::get((bought_asset_id, sold_asset_id));
ensure!(
output_reserve > bought_asset_amount,
Error::<T>::NotEnoughReserve,
);
let sold_asset_amount = Self::calculate_buy_price(
input_reserve,
output_reserve,
bought_asset_amount,
);
ensure!(
<generic_asset::Module<T>>::free_balance(&sold_asset_id, &sender) >= sold_asset_amount,
Error::<T>::NotEnoughAssets,
);
let vault = <VaultId<T>>::get();
<generic_asset::Module<T>>::make_transfer_with_event(
&sold_asset_id,
&sender,
&vault,
sold_asset_amount,
)?;
<generic_asset::Module<T>>::make_transfer_with_event(
&bought_asset_id,
&vault,
&sender,
bought_asset_amount,
)?;
<Pools<T>>::insert(
(sold_asset_id, bought_asset_id),
input_reserve + sold_asset_amount,
);
<Pools<T>>::insert(
(bought_asset_id, sold_asset_id),
output_reserve - bought_asset_amount,
);
Ok(())
}
fn mint_liquidity (
origin,
first_asset_id: T::AssetId,
second_asset_id: T::AssetId,
first_asset_amount: T::Balance,
) -> DispatchResult {
let sender = ensure_signed(origin)?;
let vault = <VaultId<T>>::get();
//get liquidity_asset_id of selected pool
let liquidity_asset_id = Self::get_liquidity_asset(
first_asset_id,
second_asset_id
);
ensure!(
(<Pools<T>>::contains_key((first_asset_id, second_asset_id)) || <Pools<T>>::contains_key((second_asset_id, first_asset_id))),
Error::<T>::NoSuchPool,
);
let first_asset_reserve = <Pools<T>>::get((first_asset_id, second_asset_id));
let second_asset_reserve = <Pools<T>>::get((second_asset_id, first_asset_id));
let second_asset_amount = first_asset_amount * second_asset_reserve / first_asset_reserve + 1.saturated_into::<T::Balance>();
let total_liquidity_assets = <generic_asset::Module<T>>::total_issuance(liquidity_asset_id);
let liquidity_assets_minted = first_asset_amount * total_liquidity_assets / first_asset_reserve;
ensure!(
!first_asset_amount.is_zero() && !second_asset_amount.is_zero(),
Error::<T>::ZeroAmount,
);
ensure!(
<generic_asset::Module<T>>::free_balance(&first_asset_id, &sender) >= first_asset_amount,
Error::<T>::NotEnoughAssets,
);
ensure!(
<generic_asset::Module<T>>::free_balance(&second_asset_id, &sender) >= second_asset_amount,
Error::<T>::NotEnoughAssets,
);
<generic_asset::Module<T>>::make_transfer_with_event(
&first_asset_id,
&sender,
&vault,
first_asset_amount,
)?;
<generic_asset::Module<T>>::make_transfer_with_event(
&second_asset_id,
&sender,
&vault,
second_asset_amount,
)?;
<Pools<T>>::insert(
(&first_asset_id, &second_asset_id),
first_asset_reserve + first_asset_amount,
);
<Pools<T>>::insert(
(&second_asset_id, &first_asset_id),
second_asset_reserve + second_asset_amount,
);
<generic_asset::Module<T>>::mint_free(
&liquidity_asset_id,
&vault,
&sender,
&liquidity_assets_minted,
)?;
Ok(())
}
fn burn_liquidity (
origin,
first_asset_id: T::AssetId,
second_asset_id: T::AssetId,
liquidity_asset_amount: T::Balance,
) -> DispatchResult {
let sender = ensure_signed(origin)?;
let vault = <VaultId<T>>::get();
//get liquidity_asset_id of selected pool
let liquidity_asset_id = Self::get_liquidity_asset(first_asset_id, second_asset_id);
ensure!(
<Pools<T>>::contains_key((first_asset_id, second_asset_id)),
Error::<T>::NoSuchPool,
);
ensure!(
<generic_asset::Module<T>>::free_balance(&liquidity_asset_id, &sender) >= liquidity_asset_amount,
Error::<T>::NotEnoughAssets,
);
let first_asset_reserve = <Pools<T>>::get((first_asset_id, second_asset_id));
let second_asset_reserve = <Pools<T>>::get((second_asset_id, first_asset_id));
let first_asset_amount = first_asset_reserve * liquidity_asset_amount / <generic_asset::Module<T>>::total_issuance(liquidity_asset_id);
let second_asset_amount = second_asset_reserve * liquidity_asset_amount / <generic_asset::Module<T>>::total_issuance(liquidity_asset_id);
<generic_asset::Module<T>>::make_transfer_with_event(
&first_asset_id,
&vault,
&sender,
first_asset_amount,
)?;
<generic_asset::Module<T>>::make_transfer_with_event(
&second_asset_id,
&vault,
&sender,
second_asset_amount,
)?;
<Pools<T>>::insert(
(&first_asset_id, &second_asset_id),
first_asset_reserve - first_asset_amount,
);
<Pools<T>>::insert(
(&second_asset_id, &first_asset_id),
second_asset_reserve - second_asset_amount,
);
//TODO burn_free of liqudity_pool_id asset to sender in an amount of += liquidity_assets_minted
<generic_asset::Module<T>>::burn_free(
&liquidity_asset_id,
&vault,
&sender,
&liquidity_asset_amount,
)?;
Ok(())
}
}
}
impl<T: Trait> Module<T> {
fn generate_random_hash() -> T::Hash {
let nonce = <Nonce>::get();
let random_seed = T::Randomness::random_seed();
let new_random = (random_seed, nonce)
.using_encoded(|b| BlakeTwo256::hash(b))
.using_encoded(|mut b| u64::decode(&mut b))
.expect("Hash must be bigger than 8 bytes; Qed");
let new_nonce = <Nonce>::get() + 1;
<Nonce>::put(new_nonce);
return (new_random).using_encoded(<T as system::Trait>::Hashing::hash);
}
pub fn calculate_sell_price(
input_reserve: T::Balance,
output_reserve: T::Balance,
sell_amount: T::Balance,
) -> T::Balance {
// input_amount_with_fee: uint256 = input_amount * 997
let input_amount_with_fee = sell_amount * 997.saturated_into::<T::Balance>();
// numerator: uint256 = input_amount_with_fee * output_reserve
let numerator = input_amount_with_fee * output_reserve;
// denominator: uint256 = (input_reserve * 1000) + input_amount_with_fee
let denominator =
(input_reserve * 1000.saturated_into::<T::Balance>()) + input_amount_with_fee;
numerator / denominator
}
pub fn calculate_buy_price(
input_reserve: T::Balance,
output_reserve: T::Balance,
buy_amount: T::Balance,
) -> T::Balance {
// numerator: uint256 = input_reserve * output_amount * 1000
let numerator = input_reserve * buy_amount * 1000.saturated_into::<T::Balance>();
// denominator: uint256 = (output_reserve - output_amount) * 997
let denominator = (output_reserve - buy_amount) * 997.saturated_into::<T::Balance>();
numerator / denominator + 1.saturated_into::<T::Balance>()
}
pub fn get_liquidity_asset(
first_asset_id: T::AssetId,
second_asset_id: T::AssetId,
) -> T::AssetId {
if <LiquidityAssets<T>>::contains_key((first_asset_id, second_asset_id)) {
return <LiquidityAssets<T>>::get((first_asset_id, second_asset_id));
} else {
return <LiquidityAssets<T>>::get((second_asset_id, first_asset_id));
}
}
fn create_asset(
origin: T::Origin,
amount: T::Balance
) -> DispatchResult {
let vault: T::AccountId = <VaultId<T>>::get();
let sender = ensure_signed(origin)?;
let default_permission = generic_asset::PermissionLatest {
update: Owner::Address(vault.clone()),
mint: Owner::Address(vault.clone()),
burn: Owner::Address(vault.clone()),
};
<generic_asset::Module<T>>::create_asset(
None,
Some(sender),
generic_asset::AssetOptions {
initial_issuance: amount,
permissions: default_permission,
},
)?;
Ok(())
}
fn get_free_balance(
assetId: T::AssetId,
from: T::AccountId
) -> T::Balance {
return <generic_asset::Module<T>>::free_balance(&assetId, &from);
}
fn get_total_issuance(
assetId: T::AssetId
) -> T::Balance {
return <generic_asset::Module<T>>::total_issuance(&assetId);
}
// //Read-only function to be used by RPC
// pub fn get_exchange_input_price(
// input_asset_id: T::AssetId,
// output_asset_id: T::AssetId,
// input_amount: T::Balance,
// ) -> T::Balance {
// let pool = <Pools<T>>::get((input_asset_id, output_asset_id));
// let output_amount = Self::calculate_input_price(
// pool.first_asset_amount,
// pool.second_asset_amount,
// input_amount,
// );
// output_amount
// }
// //Read-only function to be used by RPC
// pub fn get_exchange_output_price(
// input_asset_id: T::AssetId,
// output_asset_id: T::AssetId,
// output_amount: T::Balance,
// ) -> T::Balance {
// let pool = <Pools<T>>::get((input_asset_id, output_asset_id));
// let input_amount = Self::calculate_output_price(
// pool.first_asset_amount,
// pool.second_asset_amount,
// output_amount,
// );
// input_amount
// }
}
| |
4-bayes_opt.py
|
#!/usr/bin/env python3
"""
4-bayes_opt.py
"""
import numpy as np
from scipy.stats import norm
GP = __import__('2-gp').GaussianProcess
class BayesianOptimization:
"""
Class that instantiates a Bayesian optimization
on a noiseless 1D Gaussian process
"""
def __init__(self, f, X_init, Y_init, bounds,
ac_samples, l=1, sigma_f=1, xsi=0.01, minimize=True):
|
def acquisition(self):
"""function that calculates the next best sample location"""
# Compute mu and sigma in a call to predict() on gp
mu, sigma = self.gp.predict(self.X_s)
# print("mu:", mu, mu.shape)
# print("sigma:", sigma, sigma.shape)
# Note: sigma of shape (s,)
Z = np.zeros(sigma.shape)
if self.minimize is True:
f_plus = np.min(self.gp.Y)
Z_NUM = f_plus - mu - self.xsi
else:
f_plus = np.max(self.gp.Y)
Z_NUM = mu - f_plus - self.xsi
for i in range(sigma.shape[0]):
if sigma[i] > 0:
Z[i] = Z_NUM[i] / sigma[i]
else:
Z[i] = 0
# Compute the Expected Improvement (EI)
EI = np.zeros(sigma.shape)
for i in range(sigma.shape[0]):
if sigma[i] > 0:
EI[i] = Z_NUM[i] * norm.cdf(Z[i]) + sigma[i] * norm.pdf(Z[i])
else:
EI[i] = 0
X_next = self.X_s[np.argmax(EI)]
# print("EI:", EI)
# print("self.X_s:", self.X_s)
return X_next, EI
|
"""define and initialize variables and methods"""
self.f = f
self.gp = GP(X_init, Y_init, l, sigma_f)
self.X_s = np.linspace(bounds[0], bounds[1],
num=ac_samples)[..., np.newaxis]
self.xsi = xsi
self.minimize = minimize
|
horizontal_pod_autoscaler.rs
|
// Generated from definition io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler
/// configuration of a horizontal pod autoscaler.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct HorizontalPodAutoscaler {
/// Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
pub metadata: Option<crate::apimachinery::pkg::apis::meta::v1::ObjectMeta>,
/// behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
pub spec: Option<crate::api::autoscaling::v1::HorizontalPodAutoscalerSpec>,
/// current information about the autoscaler.
pub status: Option<crate::api::autoscaling::v1::HorizontalPodAutoscalerStatus>,
}
// Begin autoscaling/v1/HorizontalPodAutoscaler
// Generated from operation createAutoscalingV1NamespacedHorizontalPodAutoscaler
impl HorizontalPodAutoscaler {
/// create a HorizontalPodAutoscaler
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::CreateResponse`]`<Self>>` constructor, or [`crate::CreateResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn create_namespaced_horizontal_pod_autoscaler(
namespace: &str,
body: &crate::api::autoscaling::v1::HorizontalPodAutoscaler,
optional: crate::CreateOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::CreateResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = http::Request::post(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation deleteAutoscalingV1CollectionNamespacedHorizontalPodAutoscaler
impl HorizontalPodAutoscaler {
/// delete collection of HorizontalPodAutoscaler
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>>` constructor, or [`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `delete_optional`
///
/// Delete options. Use `Default::default()` to not pass any.
///
/// * `list_optional`
///
/// List options. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete_collection_namespaced_horizontal_pod_autoscaler(
namespace: &str,
delete_optional: crate::DeleteOptional<'_>,
list_optional: crate::ListOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<crate::List<Self>>>), crate::RequestError> {
let __url = format!("/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
list_optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = http::Request::delete(__url);
let __body = serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?;
let __request = __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation deleteAutoscalingV1NamespacedHorizontalPodAutoscaler
impl HorizontalPodAutoscaler {
/// delete a HorizontalPodAutoscaler
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<Self>>` constructor, or [`crate::DeleteResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the HorizontalPodAutoscaler
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn delete_namespaced_horizontal_pod_autoscaler(
name: &str,
namespace: &str,
optional: crate::DeleteOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let __request = http::Request::delete(__url);
let __body = serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?;
let __request = __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation listAutoscalingV1HorizontalPodAutoscalerForAllNamespaces
impl HorizontalPodAutoscaler {
/// list or watch objects of kind HorizontalPodAutoscaler
///
/// This operation only supports listing all items of this type.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ListResponse`]`<Self>>` constructor, or [`crate::ListResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn list_horizontal_pod_autoscaler_for_all_namespaces(
optional: crate::ListOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::ListResponse<Self>>), crate::RequestError> {
let __url = "/apis/autoscaling/v1/horizontalpodautoscalers?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation listAutoscalingV1NamespacedHorizontalPodAutoscaler
impl HorizontalPodAutoscaler {
/// list or watch objects of kind HorizontalPodAutoscaler
///
/// This operation only supports listing all items of this type.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ListResponse`]`<Self>>` constructor, or [`crate::ListResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn list_namespaced_horizontal_pod_autoscaler(
namespace: &str,
optional: crate::ListOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::ListResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation patchAutoscalingV1NamespacedHorizontalPodAutoscaler
impl HorizontalPodAutoscaler {
/// partially update the specified HorizontalPodAutoscaler
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::PatchResponse`]`<Self>>` constructor, or [`crate::PatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the HorizontalPodAutoscaler
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn patch_namespaced_horizontal_pod_autoscaler(
name: &str,
namespace: &str,
body: &crate::apimachinery::pkg::apis::meta::v1::Patch,
optional: crate::PatchOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::PatchResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = http::Request::patch(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static(match body {
crate::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
}));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation patchAutoscalingV1NamespacedHorizontalPodAutoscalerStatus
impl HorizontalPodAutoscaler {
/// partially update status of the specified HorizontalPodAutoscaler
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::PatchResponse`]`<Self>>` constructor, or [`crate::PatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the HorizontalPodAutoscaler
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn patch_namespaced_horizontal_pod_autoscaler_status(
name: &str,
namespace: &str,
body: &crate::apimachinery::pkg::apis::meta::v1::Patch,
optional: crate::PatchOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::PatchResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}/status?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = http::Request::patch(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static(match body {
crate::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
crate::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
}));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation readAutoscalingV1NamespacedHorizontalPodAutoscaler
impl HorizontalPodAutoscaler {
/// read the specified HorizontalPodAutoscaler
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReadNamespacedHorizontalPodAutoscalerResponse`]`>` constructor, or [`ReadNamespacedHorizontalPodAutoscalerResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the HorizontalPodAutoscaler
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn read_namespaced_horizontal_pod_autoscaler(
name: &str,
namespace: &str,
optional: ReadNamespacedHorizontalPodAutoscalerOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReadNamespacedHorizontalPodAutoscalerResponse>), crate::RequestError> {
let ReadNamespacedHorizontalPodAutoscalerOptional {
exact,
export,
pretty,
} = optional;
let __url = format!("/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(exact) = exact {
__query_pairs.append_pair("exact", &exact.to_string());
}
if let Some(export) = export {
__query_pairs.append_pair("export", &export.to_string());
}
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`HorizontalPodAutoscaler::read_namespaced_horizontal_pod_autoscaler`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadNamespacedHorizontalPodAutoscalerOptional<'a> {
/// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
pub exact: Option<bool>,
/// Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
pub export: Option<bool>,
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<ReadNamespacedHorizontalPodAutoscalerResponse as Response>::try_from_parts` to parse the HTTP response body of [`HorizontalPodAutoscaler::read_namespaced_horizontal_pod_autoscaler`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadNamespacedHorizontalPodAutoscalerResponse {
Ok(crate::api::autoscaling::v1::HorizontalPodAutoscaler),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadNamespacedHorizontalPodAutoscalerResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReadNamespacedHorizontalPodAutoscalerResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ReadNamespacedHorizontalPodAutoscalerResponse::Other(result), read))
},
}
}
}
// Generated from operation readAutoscalingV1NamespacedHorizontalPodAutoscalerStatus
impl HorizontalPodAutoscaler {
/// read status of the specified HorizontalPodAutoscaler
///
/// Use the returned [`crate::ResponseBody`]`<`[`ReadNamespacedHorizontalPodAutoscalerStatusResponse`]`>` constructor, or [`ReadNamespacedHorizontalPodAutoscalerStatusResponse`] directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the HorizontalPodAutoscaler
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn read_namespaced_horizontal_pod_autoscaler_status(
name: &str,
namespace: &str,
optional: ReadNamespacedHorizontalPodAutoscalerStatusOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<ReadNamespacedHorizontalPodAutoscalerStatusResponse>), crate::RequestError> {
let ReadNamespacedHorizontalPodAutoscalerStatusOptional {
pretty,
} = optional;
let __url = format!("/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}/status?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
if let Some(pretty) = pretty {
__query_pairs.append_pair("pretty", pretty);
}
let __url = __query_pairs.finish();
let __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
/// Optional parameters of [`HorizontalPodAutoscaler::read_namespaced_horizontal_pod_autoscaler_status`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadNamespacedHorizontalPodAutoscalerStatusOptional<'a> {
/// If 'true', then the output is pretty printed.
pub pretty: Option<&'a str>,
}
/// Use `<ReadNamespacedHorizontalPodAutoscalerStatusResponse as Response>::try_from_parts` to parse the HTTP response body of [`HorizontalPodAutoscaler::read_namespaced_horizontal_pod_autoscaler_status`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadNamespacedHorizontalPodAutoscalerStatusResponse {
Ok(crate::api::autoscaling::v1::HorizontalPodAutoscaler),
Other(Result<Option<serde_json::Value>, serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadNamespacedHorizontalPodAutoscalerStatusResponse {
fn try_from_parts(status_code: http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
match status_code {
http::StatusCode::OK => {
let result = match serde_json::from_slice(buf) {
Ok(value) => value,
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => return Err(crate::ResponseError::Json(err)),
};
Ok((ReadNamespacedHorizontalPodAutoscalerStatusResponse::Ok(result), buf.len()))
},
_ => {
let (result, read) =
if buf.is_empty() {
(Ok(None), 0)
}
else {
match serde_json::from_slice(buf) {
Ok(value) => (Ok(Some(value)), buf.len()),
Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
Err(err) => (Err(err), 0),
}
};
Ok((ReadNamespacedHorizontalPodAutoscalerStatusResponse::Other(result), read))
},
}
}
}
// Generated from operation replaceAutoscalingV1NamespacedHorizontalPodAutoscaler
impl HorizontalPodAutoscaler {
/// replace the specified HorizontalPodAutoscaler
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ReplaceResponse`]`<Self>>` constructor, or [`crate::ReplaceResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the HorizontalPodAutoscaler
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn replace_namespaced_horizontal_pod_autoscaler(
name: &str,
namespace: &str,
body: &crate::api::autoscaling::v1::HorizontalPodAutoscaler,
optional: crate::ReplaceOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::ReplaceResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = http::Request::put(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation replaceAutoscalingV1NamespacedHorizontalPodAutoscalerStatus
impl HorizontalPodAutoscaler {
/// replace status of the specified HorizontalPodAutoscaler
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::ReplaceResponse`]`<Self>>` constructor, or [`crate::ReplaceResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `name`
///
/// name of the HorizontalPodAutoscaler
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `body`
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn replace_namespaced_horizontal_pod_autoscaler_status(
name: &str,
namespace: &str,
body: &crate::api::autoscaling::v1::HorizontalPodAutoscaler,
optional: crate::ReplaceOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::ReplaceResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers/{name}/status?",
name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = http::Request::put(__url);
let __body = serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
let __request = __request.header(http::header::CONTENT_TYPE, http::header::HeaderValue::from_static("application/json"));
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation watchAutoscalingV1HorizontalPodAutoscalerForAllNamespaces
impl HorizontalPodAutoscaler {
/// list or watch objects of kind HorizontalPodAutoscaler
///
/// This operation only supports watching one item, or a list of items, of this type for changes.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::WatchResponse`]`<Self>>` constructor, or [`crate::WatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn watch_horizontal_pod_autoscaler_for_all_namespaces(
optional: crate::WatchOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::WatchResponse<Self>>), crate::RequestError> {
let __url = "/apis/autoscaling/v1/horizontalpodautoscalers?".to_owned();
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// Generated from operation watchAutoscalingV1NamespacedHorizontalPodAutoscaler
impl HorizontalPodAutoscaler {
/// list or watch objects of kind HorizontalPodAutoscaler
///
/// This operation only supports watching one item, or a list of items, of this type for changes.
///
/// Use the returned [`crate::ResponseBody`]`<`[`crate::WatchResponse`]`<Self>>` constructor, or [`crate::WatchResponse`]`<Self>` directly, to parse the HTTP response.
///
/// # Arguments
///
/// * `namespace`
///
/// object name and auth scope, such as for teams and projects
///
/// * `optional`
///
/// Optional parameters. Use `Default::default()` to not pass any.
#[cfg(feature = "api")]
pub fn watch_namespaced_horizontal_pod_autoscaler(
namespace: &str,
optional: crate::WatchOptional<'_>,
) -> Result<(http::Request<Vec<u8>>, fn(http::StatusCode) -> crate::ResponseBody<crate::WatchResponse<Self>>), crate::RequestError> {
let __url = format!("/apis/autoscaling/v1/namespaces/{namespace}/horizontalpodautoscalers?",
namespace = crate::percent_encoding::percent_encode(namespace.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
);
let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
optional.__serialize(&mut __query_pairs);
let __url = __query_pairs.finish();
let __request = http::Request::get(__url);
let __body = vec![];
match __request.body(__body) {
Ok(request) => Ok((request, crate::ResponseBody::new)),
Err(err) => Err(crate::RequestError::Http(err)),
}
}
}
// End autoscaling/v1/HorizontalPodAutoscaler
impl crate::Resource for HorizontalPodAutoscaler {
const API_VERSION: &'static str = "autoscaling/v1";
const GROUP: &'static str = "autoscaling";
const KIND: &'static str = "HorizontalPodAutoscaler";
const VERSION: &'static str = "v1";
}
impl crate::ListableResource for HorizontalPodAutoscaler {
const LIST_KIND: &'static str = concat!("HorizontalPodAutoscaler", "List");
}
impl crate::Metadata for HorizontalPodAutoscaler {
type Ty = crate::apimachinery::pkg::apis::meta::v1::ObjectMeta;
fn metadata(&self) -> Option<&<Self as crate::Metadata>::Ty> {
self.metadata.as_ref()
}
}
impl<'de> serde::Deserialize<'de> for HorizontalPodAutoscaler {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_api_version,
Key_kind,
Key_metadata,
Key_spec,
Key_status,
Other,
}
impl<'de> serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
struct
|
;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
Ok(match v {
"apiVersion" => Field::Key_api_version,
"kind" => Field::Key_kind,
"metadata" => Field::Key_metadata,
"spec" => Field::Key_spec,
"status" => Field::Key_status,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = HorizontalPodAutoscaler;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(<Self::Value as crate::Resource>::KIND)
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
let mut value_metadata: Option<crate::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None;
let mut value_spec: Option<crate::api::autoscaling::v1::HorizontalPodAutoscalerSpec> = None;
let mut value_status: Option<crate::api::autoscaling::v1::HorizontalPodAutoscalerStatus> = None;
while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_api_version => {
let value_api_version: String = serde::de::MapAccess::next_value(&mut map)?;
if value_api_version != <Self::Value as crate::Resource>::API_VERSION {
return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::API_VERSION));
}
},
Field::Key_kind => {
let value_kind: String = serde::de::MapAccess::next_value(&mut map)?;
if value_kind != <Self::Value as crate::Resource>::KIND {
return Err(serde::de::Error::invalid_value(serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::KIND));
}
},
Field::Key_metadata => value_metadata = serde::de::MapAccess::next_value(&mut map)?,
Field::Key_spec => value_spec = serde::de::MapAccess::next_value(&mut map)?,
Field::Key_status => value_status = serde::de::MapAccess::next_value(&mut map)?,
Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(HorizontalPodAutoscaler {
metadata: value_metadata,
spec: value_spec,
status: value_status,
})
}
}
deserializer.deserialize_struct(
<Self as crate::Resource>::KIND,
&[
"apiVersion",
"kind",
"metadata",
"spec",
"status",
],
Visitor,
)
}
}
impl serde::Serialize for HorizontalPodAutoscaler {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
let mut state = serializer.serialize_struct(
<Self as crate::Resource>::KIND,
2 +
self.metadata.as_ref().map_or(0, |_| 1) +
self.spec.as_ref().map_or(0, |_| 1) +
self.status.as_ref().map_or(0, |_| 1),
)?;
serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::API_VERSION)?;
serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::KIND)?;
if let Some(value) = &self.metadata {
serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", value)?;
}
if let Some(value) = &self.spec {
serde::ser::SerializeStruct::serialize_field(&mut state, "spec", value)?;
}
if let Some(value) = &self.status {
serde::ser::SerializeStruct::serialize_field(&mut state, "status", value)?;
}
serde::ser::SerializeStruct::end(state)
}
}
|
Visitor
|
Panel.d.ts
|
import AccessibleContext = require('nashorn/javax/accessibility/AccessibleContext');
import LayoutManager = require('nashorn/java/awt/LayoutManager');
import Container = require('nashorn/java/awt/Container');
import Accessible = require('nashorn/javax/accessibility/Accessible');
import AccessibleRole = require('nashorn/javax/accessibility/AccessibleRole');
declare class Panel {
accessibleContext : AccessibleContext;
constructor();
constructor(arg1 : LayoutManager);
}
declare interface Panel extends Container, Accessible {}
declare module Panel {
class
|
{
accessibleRole : AccessibleRole;
}
interface AccessibleAWTPanel extends Container.AccessibleAWTContainer {}}
export = Panel
|
AccessibleAWTPanel
|
select_view.rs
|
use Cursive;
use Printer;
use With;
use align::{Align, HAlign, VAlign};
use direction::Direction;
use event::{Callback, Event, EventResult, Key, MouseButton, MouseEvent};
use menu::MenuTree;
use std::borrow::Borrow;
use std::cell::Cell;
use std::cmp::min;
use std::rc::Rc;
use theme::ColorStyle;
use unicode_width::UnicodeWidthStr;
use vec::Vec2;
use view::{Position, ScrollBase, View};
use views::MenuPopup;
/// View to select an item among a list.
///
/// It contains a list of values of type T, with associated labels.
///
/// # Examples
///
/// ```no_run
/// # extern crate cursive;
/// # use cursive::Cursive;
/// # use cursive::views::{SelectView, Dialog, TextView};
/// # use cursive::align::HAlign;
/// # fn main() {
/// let mut time_select = SelectView::new().h_align(HAlign::Center);
/// time_select.add_item("Short", 1);
/// time_select.add_item("Medium", 5);
/// time_select.add_item("Long", 10);
///
/// time_select.set_on_submit(|s, time| {
/// s.pop_layer();
/// let text = format!("You will wait for {} minutes...", time);
/// s.add_layer(Dialog::around(TextView::new(text))
/// .button("Quit", |s| s.quit()));
/// });
///
/// let mut siv = Cursive::new();
/// siv.add_layer(Dialog::around(time_select)
/// .title("How long is your wait?"));
/// # }
///
/// ```
pub struct SelectView<T = String> {
items: Vec<Item<T>>,
enabled: bool,
// the focus needs to be manipulable from callbacks
focus: Rc<Cell<usize>>,
scrollbase: ScrollBase,
// This is a custom callback to include a &T.
// It will be called whenever "Enter" is pressed.
on_submit: Option<Rc<Fn(&mut Cursive, &T)>>,
// This callback is called when the selection is changed.
on_select: Option<Rc<Fn(&mut Cursive, &T)>>,
align: Align,
// `true` if we show a one-line view, with popup on selection.
popup: bool,
// We need the last offset to place the popup window
// We "cache" it during the draw, so we need interior mutability.
last_offset: Cell<Vec2>,
last_size: Vec2,
}
impl<T: 'static> Default for SelectView<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: 'static> SelectView<T> {
/// Creates a new empty SelectView.
pub fn new() -> Self {
SelectView {
items: Vec::new(),
enabled: true,
focus: Rc::new(Cell::new(0)),
scrollbase: ScrollBase::new(),
on_select: None,
on_submit: None,
align: Align::top_left(),
popup: false,
last_offset: Cell::new(Vec2::zero()),
last_size: Vec2::zero(),
}
}
/// Turns `self` into a popup select view.
///
/// Chainable variant.
pub fn popup(self) -> Self {
self.with(|s| s.set_popup(true))
}
/// Turns `self` into a popup select view.
pub fn set_popup(&mut self, popup: bool) {
self.popup = popup;
}
/// Disables this view.
///
/// A disabled view cannot be selected.
pub fn disable(&mut self) {
self.enabled = false;
}
/// Disables this view.
///
/// Chainable variant.
pub fn disabled(self) -> Self {
self.with(Self::disable)
}
/// Re-enables this view.
pub fn enable(&mut self) {
self.enabled = true;
}
/// Enable or disable this view.
pub fn set_enabled(&mut self, enabled: bool) {
self.enabled = enabled;
}
/// Returns `true` if this view is enabled.
pub fn is_enabled(&self) -> bool {
self.enabled
}
/// Sets a callback to be used when an item is selected.
pub fn set_on_select<F>(&mut self, cb: F)
where
F: Fn(&mut Cursive, &T) + 'static,
{
self.on_select = Some(Rc::new(cb));
}
/// Sets a callback to be used when an item is selected.
///
/// Chainable variant.
pub fn on_select<F>(self, cb: F) -> Self
where
F: Fn(&mut Cursive, &T) + 'static,
{
self.with(|s| s.set_on_select(cb))
}
/// Sets a callback to be used when `<Enter>` is pressed.
///
/// The item currently selected will be given to the callback.
///
/// Here, `V` can be `T` itself, or a type that can be borrowed from `T`.
pub fn set_on_submit<F, R, V: ?Sized>(&mut self, cb: F)
where
F: 'static + Fn(&mut Cursive, &V) -> R,
T: Borrow<V>,
{
self.on_submit = Some(Rc::new(move |s, t| {
cb(s, t.borrow());
}));
}
/// Sets a callback to be used when `<Enter>` is pressed.
///
/// The item currently selected will be given to the callback.
///
/// Chainable variant.
pub fn on_submit<F, V: ?Sized>(self, cb: F) -> Self
where
F: Fn(&mut Cursive, &V) + 'static,
T: Borrow<V>,
{
self.with(|s| s.set_on_submit(cb))
}
/// Sets the alignment for this view.
pub fn align(mut self, align: Align) -> Self {
self.align = align;
self
}
/// Sets the vertical alignment for this view.
/// (If the view is given too much space vertically.)
pub fn v_align(mut self, v: VAlign) -> Self {
self.align.v = v;
self
}
/// Sets the horizontal alignment for this view.
pub fn h_align(mut self, h: HAlign) -> Self {
self.align.h = h;
self
}
/// Returns the value of the currently selected item.
///
/// Panics if the list is empty.
pub fn selection(&self) -> Rc<T> {
Rc::clone(&self.items[self.focus()].value)
}
/// Removes all items from this view.
pub fn clear(&mut self) {
self.items.clear();
self.focus.set(0);
}
/// Adds a item to the list, with given label and value.
pub fn add_item<S: Into<String>>(&mut self, label: S, value: T) {
self.items.push(Item::new(label.into(), value));
}
/// Gets an item at given idx or None.
///
/// ```
/// use cursive::Cursive;
/// use cursive::views::{SelectView, TextView};
/// let select = SelectView::new()
/// .item("Short", 1);
/// assert_eq!(select.get_item(0), Some(("Short", &1)));
/// ```
pub fn get_item(&self, i: usize) -> Option<(&str, &T)> {
self.items
.get(i)
.map(|item| (item.label.as_ref(), &*item.value))
}
/// Gets a mut item at given idx or None.
pub fn get_item_mut(&mut self, i: usize) -> Option<(&mut String, &mut T)> {
if i >= self.items.len() {
None
} else {
let item = &mut self.items[i];
if let Some(t) = Rc::get_mut(&mut item.value) {
let label = &mut item.label;
Some((label, t))
} else {
None
}
}
}
/// Removes an item from the list.
///
/// Returns a callback in response to the selection change.
///
/// You should run this callback with a `&mut Cursive`.
pub fn remove_item(&mut self, id: usize) -> Callback {
self.items.remove(id);
let focus = self.focus();
if focus >= id && focus > 0 {
self.focus.set(focus - 1);
}
self.make_select_cb().unwrap_or_else(Callback::dummy)
}
/// Chainable variant of add_item
pub fn item<S: Into<String>>(self, label: S, value: T) -> Self {
self.with(|s| s.add_item(label, value))
}
/// Adds all items from from an iterator.
pub fn add_all<S, I>(&mut self, iter: I)
where
S: Into<String>,
I: IntoIterator<Item = (S, T)>,
{
for (s, t) in iter {
self.add_item(s, t);
}
}
/// Adds all items from from an iterator.
///
/// Chainable variant.
pub fn with_all<S, I>(self, iter: I) -> Self
where
S: Into<String>,
I: IntoIterator<Item = (S, T)>,
{
self.with(|s| s.add_all(iter))
}
fn draw_item(&self, printer: &Printer, i: usize) {
let l = self.items[i].label.width();
let x = self.align.h.get_offset(l, printer.size.x);
printer.print_hline((0, 0), x, " ");
printer.print((x, 0), &self.items[i].label);
if l < printer.size.x {
assert!((l + x) <= printer.size.x);
printer.print_hline((x + l, 0), printer.size.x - (l + x), " ");
}
}
/// Returns the id of the item currently selected.
///
/// Returns `None` if the list is empty.
pub fn selected_id(&self) -> Option<usize>
|
/// Returns the number of items in this list.
pub fn len(&self) -> usize {
self.items.len()
}
/// Returns `true` if this list has no item.
pub fn is_empty(&self) -> bool {
self.items.is_empty()
}
fn focus(&self) -> usize {
self.focus.get()
}
/// Moves the selection to the given position.
///
/// Returns a callback in response to the selection change.
///
/// You should run this callback with a `&mut Cursive`.
pub fn set_selection(&mut self, i: usize) -> Callback {
// TODO: Check if `i >= self.len()` ?
// assert!(i < self.len(), "SelectView: trying to select out-of-bound");
// Or just cap the ID?
let i = if self.len() == 0 {
0
} else {
min(i, self.len() - 1)
};
self.focus.set(i);
self.scrollbase.scroll_to(i);
self.make_select_cb().unwrap_or_else(Callback::dummy)
}
/// Sets the selection to the given position.
///
/// Chainable variant.
///
/// Does not apply `on_select` callbacks.
pub fn selected(self, i: usize) -> Self {
self.with(|s| {
s.set_selection(i);
})
}
/// Moves the selection up by the given number of rows.
///
/// Returns a callback in response to the selection change.
///
/// You should run this callback with a `&mut Cursive`:
///
/// ```rust
/// # use cursive::Cursive;
/// # use cursive::views::SelectView;
/// # fn main() {}
/// fn select_up(siv: &mut Cursive, view: &mut SelectView<()>) {
/// let cb = view.select_up(1);
/// cb(siv);
/// }
/// ```
pub fn select_up(&mut self, n: usize) -> Callback {
self.focus_up(n);
let focus = self.focus();
self.scrollbase.scroll_to(focus);
self.make_select_cb().unwrap_or_else(Callback::dummy)
}
/// Moves the selection down by the given number of rows.
///
/// Returns a callback in response to the selection change.
///
/// You should run this callback with a `&mut Cursive`.
pub fn select_down(&mut self, n: usize) -> Callback {
self.focus_down(n);
let focus = self.focus();
self.scrollbase.scroll_to(focus);
self.make_select_cb().unwrap_or_else(Callback::dummy)
}
// Low-level focus change. Does not fix scrollbase.
fn focus_up(&mut self, n: usize) {
let focus = self.focus().saturating_sub(n);
self.focus.set(focus);
}
// Low-level focus change. Does not fix scrollbase.
fn focus_down(&mut self, n: usize) {
let focus = min(self.focus() + n, self.items.len().saturating_sub(1));
self.focus.set(focus);
}
fn submit(&mut self) -> EventResult {
let cb = self.on_submit.clone().unwrap();
let v = self.selection();
// We return a Callback Rc<|s| cb(s, &*v)>
EventResult::Consumed(Some(Callback::from_fn(move |s| cb(s, &v))))
}
fn on_event_regular(&mut self, event: Event) -> EventResult {
let mut fix_scroll = true;
match event {
Event::Key(Key::Up) if self.focus() > 0 => self.focus_up(1),
Event::Key(Key::Down) if self.focus() + 1 < self.items.len() => {
self.focus_down(1)
}
Event::Key(Key::PageUp) => self.focus_up(10),
Event::Key(Key::PageDown) => self.focus_down(10),
Event::Key(Key::Home) => self.focus.set(0),
Event::Key(Key::End) => {
self.focus.set(self.items.len().saturating_sub(1))
}
Event::Mouse {
event: MouseEvent::WheelDown,
..
} if self.scrollbase.can_scroll_down() =>
{
fix_scroll = false;
self.scrollbase.scroll_down(5);
}
Event::Mouse {
event: MouseEvent::WheelUp,
..
} if self.scrollbase.can_scroll_up() =>
{
fix_scroll = false;
self.scrollbase.scroll_up(5);
}
Event::Mouse {
event: MouseEvent::Press(MouseButton::Left),
position,
offset,
} if position
.checked_sub(offset)
.map(|position| {
self.scrollbase.start_drag(position, self.last_size.x)
})
.unwrap_or(false) =>
{
fix_scroll = false;
}
Event::Mouse {
event: MouseEvent::Hold(MouseButton::Left),
position,
offset,
} => {
// If the mouse is dragged, we always consume the event.
fix_scroll = false;
let position = position.saturating_sub(offset);
self.scrollbase.drag(position);
}
Event::Mouse {
event: MouseEvent::Press(_),
position,
offset,
} => if let Some(position) = position.checked_sub(offset) {
let scrollbar_size = if self.scrollbase.scrollable() {
(2, 0)
} else {
(0, 0)
};
let clickable_size =
self.last_size.saturating_sub(scrollbar_size);
if position < clickable_size {
fix_scroll = false;
let focus = position.y + self.scrollbase.start_line;
if focus < self.len() {
// Only select actual items
self.focus.set(focus);
}
}
},
Event::Mouse {
event: MouseEvent::Release(MouseButton::Left),
position,
offset,
} => {
fix_scroll = false;
self.scrollbase.release_grab();
if self.on_submit.is_some() {
if let Some(position) = position.checked_sub(offset) {
let scrollbar_size = if self.scrollbase.scrollable() {
(2, 0)
} else {
(0, 0)
};
let clickable_size =
self.last_size.saturating_sub(scrollbar_size);
if position < clickable_size
&& (position.y + self.scrollbase.start_line)
== self.focus()
{
return self.submit();
}
}
}
}
Event::Key(Key::Enter) if self.on_submit.is_some() => {
return self.submit();
}
Event::Char(c) => {
// Starting from the current focus,
// find the first item that match the char.
// Cycle back to the beginning of
// the list when we reach the end.
// This is achieved by chaining twice the iterator
let iter = self.items.iter().chain(self.items.iter());
if let Some((i, _)) = iter.enumerate()
.skip(self.focus() + 1)
.find(|&(_, item)| item.label.starts_with(c))
{
// Apply modulo in case we have a hit
// from the chained iterator
self.focus.set(i % self.items.len());
} else {
return EventResult::Ignored;
}
}
_ => return EventResult::Ignored,
}
if fix_scroll {
let focus = self.focus();
self.scrollbase.scroll_to(focus);
}
EventResult::Consumed(self.make_select_cb())
}
/// Returns a callback from selection change.
fn make_select_cb(&self) -> Option<Callback> {
self.on_select.clone().map(|cb| {
let v = self.selection();
Callback::from_fn(move |s| cb(s, &v))
})
}
fn open_popup(&mut self) -> EventResult {
// Build a shallow menu tree to mimick the items array.
// TODO: cache it?
let mut tree = MenuTree::new();
for (i, item) in self.items.iter().enumerate() {
let focus = Rc::clone(&self.focus);
let on_submit = self.on_submit.as_ref().cloned();
let value = Rc::clone(&item.value);
tree.add_leaf(item.label.clone(), move |s| {
// TODO: What if an item was removed in the meantime?
focus.set(i);
if let Some(ref on_submit) = on_submit {
on_submit(s, &value);
}
});
}
// Let's keep the tree around,
// the callback will want to use it.
let tree = Rc::new(tree);
let focus = self.focus();
// This is the offset for the label text.
// We'll want to show the popup so that the text matches.
// It'll be soo cool.
let item_length = self.items[focus].label.len();
let text_offset = (self.last_size.x.saturating_sub(item_length)) / 2;
// The total offset for the window is:
// * the last absolute offset at which we drew this view
// * shifted to the right of the text offset
// * shifted to the top of the focus (so the line matches)
// * shifted top-left of the border+padding of the popup
let offset = self.last_offset.get();
let offset = offset + (text_offset, 0);
let offset = offset.saturating_sub((0, focus));
let offset = offset.saturating_sub((2, 1));
// And now, we can return the callback that will create the popup.
EventResult::with_cb(move |s| {
// The callback will want to work with a fresh Rc
let tree = Rc::clone(&tree);
// We'll relativise the absolute position,
// So that we are locked to the parent view.
// A nice effect is that window resizes will keep both
// layers together.
let current_offset = s.screen().offset();
let offset = offset.signed() - current_offset;
// And finally, put the view in view!
s.screen_mut().add_layer_at(
Position::parent(offset),
MenuPopup::new(tree).focus(focus),
);
})
}
// A popup view only does one thing: open the popup on Enter.
fn on_event_popup(&mut self, event: Event) -> EventResult {
match event {
// TODO: add Left/Right support for quick-switch?
Event::Key(Key::Enter) => self.open_popup(),
Event::Mouse {
event: MouseEvent::Release(MouseButton::Left),
position,
offset,
} if position.fits_in_rect(offset, self.last_size) =>
{
self.open_popup()
}
_ => EventResult::Ignored,
}
}
}
impl SelectView<String> {
/// Convenient method to use the label as value.
pub fn add_item_str<S: Into<String>>(&mut self, label: S) {
let label = label.into();
self.add_item(label.clone(), label);
}
/// Chainable variant of add_item_str
pub fn item_str<S: Into<String>>(self, label: S) -> Self {
self.with(|s| s.add_item_str(label))
}
/// Adds all strings from an iterator.
///
/// # Examples
///
/// ```
/// # use cursive::views::SelectView;
/// let mut select_view = SelectView::new();
/// select_view.add_all_str(vec!["a", "b", "c"]);
/// ```
pub fn add_all_str<S, I>(&mut self, iter: I)
where
S: Into<String>,
I: IntoIterator<Item = S>,
{
for s in iter {
self.add_item_str(s);
}
}
/// Adds all strings from an iterator.
///
/// Chainable variant.
pub fn with_all_str<S, I>(self, iter: I) -> Self
where
S: Into<String>,
I: IntoIterator<Item = S>,
{
self.with(|s| s.add_all_str(iter))
}
}
impl<T: 'static> View for SelectView<T> {
fn draw(&self, printer: &Printer) {
self.last_offset.set(printer.offset);
if self.popup {
let style = if !self.enabled {
ColorStyle::secondary()
} else if !printer.focused {
ColorStyle::primary()
} else {
ColorStyle::highlight()
};
let x = match printer.size.x.checked_sub(1) {
Some(x) => x,
None => return,
};
printer.with_color(style, |printer| {
// Prepare the entire background
printer.print_hline((1, 0), x, " ");
// Draw the borders
printer.print((0, 0), "<");
printer.print((x, 0), ">");
let label = &self.items[self.focus()].label;
// And center the text?
let offset = HAlign::Center.get_offset(label.len(), x + 1);
printer.print((offset, 0), label);
});
} else {
let h = self.items.len();
let offset = self.align.v.get_offset(h, printer.size.y);
let printer =
&printer.sub_printer(Vec2::new(0, offset), printer.size, true);
self.scrollbase.draw(printer, |printer, i| {
printer.with_selection(i == self.focus(), |printer| {
if i != self.focus() && !self.enabled {
printer
.with_color(ColorStyle::secondary(), |printer| {
self.draw_item(printer, i)
});
} else {
self.draw_item(printer, i);
}
});
});
}
}
fn required_size(&mut self, req: Vec2) -> Vec2 {
// Items here are not compressible.
// So no matter what the horizontal requirements are,
// we'll still return our longest item.
let w = self.items
.iter()
.map(|item| item.label.width())
.max()
.unwrap_or(1);
if self.popup {
Vec2::new(w + 2, 1)
} else {
let h = self.items.len();
let scrolling = req.y < h;
// Add 2 spaces for the scrollbar if we need
let w = if scrolling { w + 2 } else { w };
// Don't request more than we're offered - we can scroll,
// after all
Vec2::new(w, min(h, req.y))
}
}
fn on_event(&mut self, event: Event) -> EventResult {
if self.popup {
self.on_event_popup(event)
} else {
self.on_event_regular(event)
}
}
fn take_focus(&mut self, _: Direction) -> bool {
self.enabled && !self.items.is_empty()
}
fn layout(&mut self, size: Vec2) {
self.last_size = size;
if !self.popup {
self.scrollbase.set_heights(size.y, self.items.len());
}
}
}
struct Item<T> {
label: String,
value: Rc<T>,
}
impl<T> Item<T> {
fn new(label: String, value: T) -> Self {
Item {
label: label,
value: Rc::new(value),
}
}
}
|
{
if self.items.is_empty() {
None
} else {
Some(self.focus())
}
}
|
user_style_sheet.rs
|
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files.git)
// DO NOT EDIT
#[cfg(any(feature = "v2_6", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v2_6")))]
use crate::UserContentInjectedFrames;
#[cfg(any(feature = "v2_6", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v2_6")))]
use crate::UserStyleLevel;
#[cfg(any(feature = "v2_6", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v2_6")))]
use glib::translate::*;
glib::wrapper! {
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct UserStyleSheet(Shared<ffi::WebKitUserStyleSheet>);
match fn {
ref => |ptr| ffi::webkit_user_style_sheet_ref(ptr),
unref => |ptr| ffi::webkit_user_style_sheet_unref(ptr),
type_ => || ffi::webkit_user_style_sheet_get_type(),
}
}
impl UserStyleSheet {
#[doc(alias = "webkit_user_style_sheet_new")]
pub fn new(
source: &str,
injected_frames: UserContentInjectedFrames,
level: UserStyleLevel,
allow_list: &[&str],
block_list: &[&str],
) -> UserStyleSheet {
assert_initialized_main_thread!();
unsafe {
from_glib_full(ffi::webkit_user_style_sheet_new(
source.to_glib_none().0,
injected_frames.into_glib(),
level.into_glib(),
allow_list.to_glib_none().0,
block_list.to_glib_none().0,
))
}
}
#[cfg(any(feature = "v2_22", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v2_22")))]
#[doc(alias = "webkit_user_style_sheet_new_for_world")]
#[doc(alias = "new_for_world")]
pub fn for_world(
source: &str,
injected_frames: UserContentInjectedFrames,
level: UserStyleLevel,
world_name: &str,
allow_list: &[&str],
block_list: &[&str],
) -> UserStyleSheet
|
}
|
{
assert_initialized_main_thread!();
unsafe {
from_glib_full(ffi::webkit_user_style_sheet_new_for_world(
source.to_glib_none().0,
injected_frames.into_glib(),
level.into_glib(),
world_name.to_glib_none().0,
allow_list.to_glib_none().0,
block_list.to_glib_none().0,
))
}
}
|
locked_balance.rs
|
use super::*;
use frame_support::sp_runtime::traits::Zero;
use frame_support::traits::LockIdentifier;
use hydradx_traits::LockedBalance;
pub struct
|
<T>(sp_std::marker::PhantomData<T>);
impl<T: orml_tokens::Config + pallet_balances::Config + frame_system::Config>
LockedBalance<AssetId, T::AccountId, Balance> for MultiCurrencyLockedBalance<T>
where
AssetId: Into<<T as orml_tokens::Config>::CurrencyId>,
Balance: From<<T as orml_tokens::Config>::Balance>,
Balance: From<<T as pallet_balances::Config>::Balance>,
{
fn get_by_lock(lock_id: LockIdentifier, currency_id: AssetId, who: T::AccountId) -> Balance {
if currency_id == NativeAssetId::get() {
match pallet_balances::Pallet::<T>::locks(who)
.into_iter()
.find(|lock| lock.id == lock_id)
{
Some(lock) => lock.amount.into(),
None => Zero::zero(),
}
} else {
match orml_tokens::Pallet::<T>::locks(who, currency_id.into())
.into_iter()
.find(|lock| lock.id == lock_id)
{
Some(lock) => lock.amount.into(),
None => Zero::zero(),
}
}
}
}
|
MultiCurrencyLockedBalance
|
main.go
|
package main
import (
_ "flying-star/docs"
"flying-star/internal/api"
"fmt"
)
// @title GoIn
// @version 2.0
// @description GoIn
// @securityDefinitions.apikey ApiKeyAuth
// @in header
// @name Authorization
// @contact.name GoIn
// @contact.url localhost:8088
// @host localhost:8088
func main() {
// 启动api服务
apiMsg := api.Run()
for {
select {
case _, ok := <-apiMsg:
if !ok {
fm
|
t.Println("API服务异常退出,请检查服务配置是否错误")
return
}
}
}
}
|
|
fibonacci.rs
|
//! Example of sync actor. It can be used for cpu bound tasks. Only one sync
//! actor runs within arbiter's thread. Sync actor processes one message at a
//! time. Sync arbiter can start multiple threads with separate instance of
//! actor in each.
use actix::prelude::*;
struct Fibonacci(pub u32);
impl Message for Fibonacci {
type Result = Result<u64, ()>;
}
struct SyncActor;
impl Actor for SyncActor {
type Context = SyncContext<Self>;
}
impl Handler<Fibonacci> for SyncActor {
type Result = Result<u64, ()>;
fn handle(&mut self, msg: Fibonacci, _: &mut Self::Context) -> Self::Result {
if msg.0 == 0 {
Err(())
} else if msg.0 == 1 {
Ok(1)
} else {
let mut i = 0;
let mut sum = 0;
let mut last = 0;
let mut curr = 1;
while i < msg.0 - 1 {
sum = last + curr;
last = curr;
curr = sum;
i += 1;
}
Ok(sum)
}
}
}
#[actix::main]
async fn main()
|
{
// start sync arbiter with 3 threads
let addr = SyncArbiter::start(3, || SyncActor);
// send 5 messages
for n in 5..10 {
println!("{:?}", addr.send(Fibonacci(n)).await.unwrap());
}
System::current().stop();
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.